Botan 3.6.0
Crypto and TLS for C++
frodo_matrix.cpp
/*
 * FrodoKEM matrix logic
 * Based on the MIT licensed reference implementation by the designers
 * (https://github.com/microsoft/PQCrypto-LWEKE/tree/master/src)
 *
 * The Fellowship of the FrodoKEM:
 * (C) 2023 Jack Lloyd
 *     2023 René Meusel, Amos Treiber - Rohde & Schwarz Cybersecurity
 *
 * Botan is released under the Simplified BSD License (see license.txt)
 */

#include <botan/internal/frodo_matrix.h>

#include <botan/assert.h>
#include <botan/frodokem.h>
#include <botan/hex.h>
#include <botan/mem_ops.h>
#include <botan/xof.h>
#include <botan/internal/bit_ops.h>
#include <botan/internal/frodo_constants.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/stl_util.h>

#if defined(BOTAN_HAS_FRODOKEM_AES)
   #include <botan/internal/frodo_aes_generator.h>
#endif

#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
   #include <botan/internal/frodo_shake_generator.h>
#endif

#include <array>
#include <cmath>
#include <cstdint>
#include <memory>
#include <span>
#include <utility>
#include <vector>

namespace Botan {

namespace {

secure_vector<uint16_t> make_elements_vector(const FrodoMatrix::Dimensions& dimensions) {
   return secure_vector<uint16_t>(static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions));
}

std::function<void(std::span<uint8_t> out, uint16_t i)> make_row_generator(const FrodoKEMConstants& constants,
                                                                           StrongSpan<const FrodoSeedA> seed_a) {
#if defined(BOTAN_HAS_FRODOKEM_AES)
   if(constants.mode().is_aes()) {
      return create_aes_row_generator(constants, seed_a);
   }
#endif

#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
   if(constants.mode().is_shake()) {
      return create_shake_row_generator(constants, seed_a);
   }
#endif

   // If we don't have AES or SHAKE in this build, the instantiation of the FrodoKEM
   // instance is blocked upstream already. Hence, this assertion is safe here.
   BOTAN_ASSERT_UNREACHABLE();
}

}  // namespace


FrodoMatrix FrodoMatrix::sample(const FrodoKEMConstants& constants,
                                const Dimensions& dimensions,
                                StrongSpan<const FrodoSampleR> r) {
   BOTAN_ASSERT_NOMSG(r.size() % 2 == 0);
   const auto n = r.size() / 2;

   auto elements = make_elements_vector(dimensions);
   BOTAN_ASSERT_NOMSG(n == elements.size());

   load_le<uint16_t>(elements.data(), r.data(), n);

   for(auto& elem : elements) {
      const auto prnd = CT::value_barrier(static_cast<uint16_t>(elem >> 1));  // Drop the least significant bit
      const auto sign = CT::Mask<uint16_t>::expand_bit(elem, 0);              // Pick the least significant bit

      uint32_t sample = 0;  // Avoid integral promotion

      // No need to compare with the last value.
      for(size_t j = 0; j < constants.cdf_table_len() - 1; ++j) {
         // Constant time comparison: 1 if CDF_TABLE[j] < prnd, 0 otherwise.
         sample += CT::Mask<uint16_t>::is_lt(constants.cdf_table_at(j), prnd).if_set_return(1);
      }
      // Assuming that sign is either 0 or 1, flips sample iff sign = 1
      const uint16_t sample_u16 = static_cast<uint16_t>(sample);

      elem = sign.select(~sample_u16 + 1, sample_u16);
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
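
/*
 * A quick sketch of the inversion sampling above: each 16-bit word of r is
 * split into a sign bit (the LSB) and a 15-bit uniform value prnd. The
 * sampled magnitude is the number of CDF table entries (excluding the last
 * one) that are strictly smaller than prnd, and the sign bit selects between
 * that value and its negation modulo 2^16 (i.e. ~sample + 1). Both the table
 * walk and the final selection go through CT::Mask, so neither branches nor
 * memory accesses depend on the secret sample value.
 */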

std::function<FrodoMatrix(const FrodoMatrix::Dimensions& dimensions)> FrodoMatrix::make_sample_generator(
   const FrodoKEMConstants& constants, Botan::XOF& shake) {
   return [&constants, &shake](const FrodoMatrix::Dimensions& dimensions) mutable {
      return sample(constants,
                    dimensions,
                    shake.output<FrodoSampleR>(sizeof(uint16_t) * std::get<0>(dimensions) * std::get<1>(dimensions)));
   };
}

FrodoMatrix::FrodoMatrix(Dimensions dims) :
      m_dim1(std::get<0>(dims)), m_dim2(std::get<1>(dims)), m_elements(make_elements_vector(dims)) {}

FrodoMatrix FrodoMatrix::mul_add_as_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e,
                                           StrongSpan<const FrodoSeedA> seed_a) {
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<1>(s.dimensions()) &&
                   std::get<1>(e.dimensions()) == std::get<0>(s.dimensions()),
                "FrodoMatrix dimension mismatch of E and S");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n() && std::get<1>(e.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of new matrix dimensions and E");

   auto elements = make_elements_vector(e.dimensions());
   auto row_generator = make_row_generator(constants, seed_a);

   /*
   We perform 4 invocations of SHAKE128 per iteration to obtain n 16-bit values per invocation.
   a_row_data contains the 16-bit values of the current 4 rows. a_row_data_bytes represents the corresponding bytes.
   */
   std::vector<uint16_t> a_row_data(4 * constants.n(), 0);
   // TODO: maybe use std::as_bytes() instead
   //       (take extra care, as it produces a std::span<std::byte>)
   std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
                                       sizeof(uint16_t) * a_row_data.size());

   for(size_t i = 0; i < constants.n(); i += 4) {
      auto a_row = BufferStuffer(a_row_data_bytes);

      // Do 4 invocations to fill 4 rows
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 0));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 1));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 2));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 3));

      // Use generated bytes to fill 16-bit data
      load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 4 * constants.n());

      for(size_t k = 0; k < constants.n_bar(); ++k) {
         std::array<uint16_t, 4> sum = {0};
         for(size_t j = 0; j < constants.n(); ++j) {  // Matrix-vector multiplication
            // Note: we use uint32_t for `sp` to avoid an integral promotion to `int`
            //       when multiplying `sp` with other row values. Otherwise we might
            //       suffer from undefined behaviour due to a signed integer overflow.
            //       See: https://learn.microsoft.com/en-us/cpp/cpp/standard-conversions#integral-promotions
            const uint32_t sp = s.elements_at(k * constants.n() + j);

            // Go through four lines with same sp
            sum.at(0) += static_cast<uint16_t>(a_row_data.at(0 * constants.n() + j) * sp);
            sum.at(1) += static_cast<uint16_t>(a_row_data.at(1 * constants.n() + j) * sp);
            sum.at(2) += static_cast<uint16_t>(a_row_data.at(2 * constants.n() + j) * sp);
            sum.at(3) += static_cast<uint16_t>(a_row_data.at(3 * constants.n() + j) * sp);
         }
         elements.at((i + 0) * constants.n_bar() + k) = e.elements_at((i + 0) * constants.n_bar() + k) + sum.at(0);
         elements.at((i + 3) * constants.n_bar() + k) = e.elements_at((i + 3) * constants.n_bar() + k) + sum.at(3);
         elements.at((i + 2) * constants.n_bar() + k) = e.elements_at((i + 2) * constants.n_bar() + k) + sum.at(2);
         elements.at((i + 1) * constants.n_bar() + k) = e.elements_at((i + 1) * constants.n_bar() + k) + sum.at(1);
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
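
/*
 * The routine above computes B = A*S + E without ever materialising the full
 * n x n matrix A: four rows of A are expanded from seed_a per outer iteration
 * and immediately multiplied against S. Note that S is passed in transposed,
 * row-major n_bar x n layout (hence the s.elements_at(k * n + j) indexing),
 * while the n x n_bar result is written on top of E. All sums wrap modulo
 * 2^16 thanks to the uint16_t element type; masking down to q = 2^D, where
 * needed, is handled separately by reduce().
 */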

FrodoMatrix FrodoMatrix::mul_add_sa_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e,
                                           StrongSpan<const FrodoSeedA> seed_a) {
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<0>(s.dimensions()) &&
                   std::get<1>(e.dimensions()) == std::get<1>(s.dimensions()),
                "FrodoMatrix dimension mismatch of E and S");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n(),
                "FrodoMatrix dimension mismatch of new matrix dimensions and E");

   auto elements = e.m_elements;
   auto row_generator = make_row_generator(constants, seed_a);

   /*
   We perform 8 invocations of SHAKE128 per iteration to obtain n 16-bit values per invocation.
   a_row_data contains the 16-bit values of the current 8 rows. a_row_data_bytes represents the corresponding bytes.
   */
   std::vector<uint16_t> a_row_data(8 * constants.n(), 0);
   // TODO: maybe use std::as_bytes()
   std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
                                       sizeof(uint16_t) * a_row_data.size());

   // Start matrix multiplication
   for(size_t i = 0; i < constants.n(); i += 8) {
      auto a_row = BufferStuffer(a_row_data_bytes);

      // Do 8 invocations to fill 8 rows
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 0));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 1));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 2));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 3));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 4));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 5));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 6));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 7));

      // Use generated bytes to fill 16-bit data
      load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 8 * constants.n());

      for(size_t j = 0; j < constants.n_bar(); ++j) {
         uint16_t sum = 0;
         std::array<uint32_t /* to avoid integral promotion */, 8> sp;
         for(size_t p = 0; p < 8; ++p) {
            sp[p] = s.elements_at(j * constants.n() + i + p);
         }
         for(size_t q = 0; q < constants.n(); ++q) {
            sum = elements.at(j * constants.n() + q);
            for(size_t p = 0; p < 8; ++p) {
               sum += static_cast<uint16_t>(sp[p] * a_row_data.at(p * constants.n() + q));
            }
            elements.at(j * constants.n() + q) = sum;
         }
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
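
/*
 * This is the mirror image of mul_add_as_plus_e(): it computes B' = S'*A + E'
 * with an n_bar x n result, accumulating directly into a copy of E'. Eight
 * rows of A are expanded per outer iteration, so each pass consumes the eight
 * columns i..i+7 of S' (held in sp[0..7]) against the freshly generated rows
 * i..i+7 of A.
 */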

FrodoMatrix FrodoMatrix::mul_add_sb_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& b,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e) {
   BOTAN_ASSERT(std::get<0>(b.dimensions()) == std::get<1>(s.dimensions()) &&
                   std::get<1>(b.dimensions()) == std::get<0>(s.dimensions()),
                "FrodoMatrix dimension mismatch of B and S");
   BOTAN_ASSERT(std::get<0>(b.dimensions()) == constants.n() && std::get<1>(b.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of B");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of E");

   auto elements = make_elements_vector(e.dimensions());

   for(size_t k = 0; k < constants.n_bar(); ++k) {
      for(size_t i = 0; i < constants.n_bar(); ++i) {
         elements.at(k * constants.n_bar() + i) = e.elements_at(k * constants.n_bar() + i);
         for(size_t j = 0; j < constants.n(); ++j) {
            elements.at(k * constants.n_bar() + i) += static_cast<uint16_t>(
               static_cast<uint32_t /* to avoid integral promotion */>(s.elements_at(k * constants.n() + j)) *
               b.elements_at(j * constants.n_bar() + i));
         }
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
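
/*
 * A plain schoolbook product: V = S'*B + E'' with an n_bar x n_bar result.
 * Since n_bar is only 8 for every FrodoKEM parameter set, no row batching or
 * on-the-fly generation is needed here. The uint32_t cast again avoids the
 * signed overflow that integral promotion of uint16_t operands to int could
 * otherwise trigger.
 */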

FrodoMatrix FrodoMatrix::encode(const FrodoKEMConstants& constants, StrongSpan<const FrodoPlaintext> in) {
   const uint64_t mask = (uint64_t(1) << constants.b()) - 1;

   const auto dimensions = std::make_tuple<size_t, size_t>(constants.n_bar(), constants.n_bar());
   auto elements = make_elements_vector(dimensions);

   BOTAN_ASSERT_NOMSG(in.size() * 8 == constants.n_bar() * constants.n_bar() * constants.b());

   size_t pos = 0;
   for(size_t i = 0; i < (constants.n_bar() * constants.n_bar()) / 8; ++i) {
      uint64_t temp = 0;
      for(size_t j = 0; j < constants.b(); ++j) {
         temp |= static_cast<uint64_t /* avoiding integral promotion */>(in[i * constants.b() + j]) << (8 * j);
      }
      for(size_t j = 0; j < 8; ++j) {
         elements.at(pos++) = static_cast<uint16_t>((temp & mask) << (constants.d() - constants.b()));  // k*2^(D-B)
         temp >>= constants.b();
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
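
/*
 * encode() implements the FrodoKEM encoding ec(k) = k * 2^(D - B): each B-bit
 * chunk of the plaintext is placed into the top B bits of a D-bit
 * coefficient. As an illustration (with D = 15 and B = 2, the FrodoKEM-640
 * parameters), the two-bit value k = 3 is mapped to 3 * 2^13 = 24576. The
 * matching rounding operation is decode() further below.
 */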

FrodoMatrix FrodoMatrix::add(const FrodoKEMConstants& constants, const FrodoMatrix& a, const FrodoMatrix& b) {
   // Addition is defined for n_bar x n_bar matrices only
   BOTAN_ASSERT_NOMSG(a.dimensions() == b.dimensions());
   BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
                      std::get<1>(a.dimensions()) == constants.n_bar());

   auto elements = make_elements_vector(a.dimensions());

   for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
      elements.at(i) = a.elements_at(i) + b.elements_at(i);
   }

   return FrodoMatrix(a.dimensions(), std::move(elements));
}

FrodoMatrix FrodoMatrix::sub(const FrodoKEMConstants& constants, const FrodoMatrix& a, const FrodoMatrix& b) {
   // Subtraction is defined for n_bar x n_bar matrices only
   BOTAN_ASSERT_NOMSG(a.dimensions() == b.dimensions());
   BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
                      std::get<1>(a.dimensions()) == constants.n_bar());

   auto elements = make_elements_vector(a.dimensions());

   for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
      elements.at(i) = a.elements_at(i) - b.elements_at(i);
   }

   return FrodoMatrix(a.dimensions(), std::move(elements));
}

CT::Mask<uint8_t> FrodoMatrix::constant_time_compare(const FrodoMatrix& other) const {
   BOTAN_ASSERT_NOMSG(dimensions() == other.dimensions());
   // TODO: Possibly use range-based comparison after #3715 is merged
   return CT::is_equal(reinterpret_cast<const uint8_t*>(m_elements.data()),
                       reinterpret_cast<const uint8_t*>(other.m_elements.data()),
                       sizeof(decltype(m_elements)::value_type) * m_elements.size());
}

FrodoMatrix FrodoMatrix::mul_bs(const FrodoKEMConstants& constants, const FrodoMatrix& b, const FrodoMatrix& s) {
   Dimensions dimensions = {constants.n_bar(), constants.n_bar()};
   auto elements = make_elements_vector(dimensions);

   for(size_t i = 0; i < constants.n_bar(); ++i) {
      for(size_t j = 0; j < constants.n_bar(); ++j) {
         auto& current = elements.at(i * constants.n_bar() + j);
         current = 0;
         for(size_t k = 0; k < constants.n(); ++k) {
            // Explicitly store the values in 32-bit variables to avoid integral promotion
            const uint32_t b_ink = b.elements_at(i * constants.n() + k);

            // Since the input is s^T, we multiply the i-th row of b with the j-th row of s^T
            const uint32_t s_ink = s.elements_at(j * constants.n() + k);

            current += static_cast<uint16_t>(b_ink * s_ink);
         }
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
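
/*
 * mul_bs() computes the n_bar x n_bar product B'*S. The second operand is
 * handed in as S^T, so the j-th row of s is really the j-th column of S and
 * both matrices can be traversed row-major. In FrodoKEM decapsulation this
 * product is subtracted from the matrix C (via sub()) before decode()
 * extracts the message bits.
 */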

void FrodoMatrix::pack(const FrodoKEMConstants& constants, StrongSpan<FrodoPackedMatrix> out) const {
   const size_t outlen = packed_size(constants);
   BOTAN_ASSERT_NOMSG(out.size() == outlen);

   size_t i = 0;      // whole bytes already filled in
   size_t j = 0;      // whole uint16_t already copied
   uint16_t w = 0;    // the leftover, not yet copied
   uint8_t bits = 0;  // the number of lsb in w

   while(i < outlen && (j < element_count() || ((j == element_count()) && (bits > 0)))) {
      /*
      in: |        |        |********|********|
                            ^
                            j
      w : |   ****|
              ^
              bits
      out:|**|**|**|**|**|**|**|**|* |
                                   ^^
                                   ib
      */
      uint8_t b = 0;  // bits in out[i] already filled in
      while(b < 8) {
         const uint8_t nbits = std::min(static_cast<uint8_t>(8 - b), bits);
         const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
         const auto t = static_cast<uint8_t>((w >> (bits - nbits)) & mask);  // the bits to copy from w to out
         out[i] = out[i] + static_cast<uint8_t>(t << (8 - b - nbits));
         b += nbits;
         bits -= nbits;

         if(bits == 0) {
            if(j < element_count()) {
               w = m_elements.at(j);
               bits = static_cast<uint8_t>(constants.d());
               j++;
            } else {
               break;  // the input vector is exhausted
            }
         }
      }
      if(b == 8) {  // out[i] is filled in
         i++;
      }
   }
}
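
/*
 * pack() emits only the D least significant bits of every coefficient, most
 * significant bit first, into one contiguous byte-aligned bit stream; this is
 * the compact form FrodoKEM uses for public-key and ciphertext matrices. As a
 * size sanity check: with D = 15, eight 15-bit coefficients fill exactly 15
 * output bytes, matching packed_size(). serialize() below is the simpler
 * 2-bytes-per-element little-endian form.
 */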

FrodoSerializedMatrix FrodoMatrix::serialize() const {
   FrodoSerializedMatrix out(2 * m_elements.size());

   for(unsigned int i = 0; i < m_elements.size(); ++i) {
      store_le(m_elements.at(i), out.data() + 2 * i);
   }

   return out;
}

FrodoPlaintext FrodoMatrix::decode(const FrodoKEMConstants& constants) const {
   const size_t nwords = (constants.n_bar() * constants.n_bar()) / 8;
   const uint16_t maskex = static_cast<uint16_t>(1 << constants.b()) - 1;
   const uint16_t maskq = static_cast<uint16_t>(1 << constants.d()) - 1;

   FrodoPlaintext out(nwords * constants.b());

   size_t index = 0;
   for(size_t i = 0; i < nwords; i++) {
      uint64_t templong = 0;
      for(size_t j = 0; j < 8; j++) {
         const auto temp =
            static_cast<uint16_t>(((m_elements.at(index) & maskq) + (1 << (constants.d() - constants.b() - 1))) >>
                                  (constants.d() - constants.b()));
         templong |= static_cast<uint64_t>(temp & maskex) << (constants.b() * j);
         index++;
      }
      for(size_t j = 0; j < constants.b(); j++) {
         out[i * constants.b() + j] = (templong >> (8 * j)) & 0xFF;
      }
   }

   return out;
}
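
/*
 * decode() undoes encode() in the presence of small noise: it computes
 * dc(c) = round(c * 2^B / 2^D) mod 2^B by adding half a step, 2^(D-B-1),
 * before shifting right by D - B. Continuing the encode() illustration
 * (D = 15, B = 2): a noisy coefficient c = 24576 + 24 yields
 * (24600 + 4096) >> 13 = 3, i.e. the original two-bit value is recovered as
 * long as the accumulated noise stays below 2^(D-B-1) in absolute value.
 */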

FrodoMatrix FrodoMatrix::unpack(const FrodoKEMConstants& constants,
                                const Dimensions& dimensions,
                                StrongSpan<const FrodoPackedMatrix> packed_bytes) {
   const uint8_t lsb = static_cast<uint8_t>(constants.d());
   const size_t inlen = packed_bytes.size();
   const size_t outlen = static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions);

   BOTAN_ASSERT_NOMSG(inlen == ceil_tobytes(outlen * lsb));

   auto elements = make_elements_vector(dimensions);

   size_t i = 0;      // whole uint16_t already filled in
   size_t j = 0;      // whole bytes already copied
   uint8_t w = 0;     // the leftover, not yet copied
   uint8_t bits = 0;  // the number of lsb bits of w

   while(i < outlen && (j < inlen || ((j == inlen) && (bits > 0)))) {
      /*
      in: |  |  |  |  |  |  |**|**|...
                            ^
                            j
      w : | *|
            ^
            bits
      out:|   *****|   *****|   ***  |        |...
                            ^   ^
                            i   b
      */
      uint8_t b = 0;  // bits in out[i] already filled in
      while(b < lsb) {
         const uint8_t nbits = std::min(static_cast<uint8_t>(lsb - b), bits);
         const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
         uint8_t t = (w >> (bits - nbits)) & mask;  // the bits to copy from w to out

         elements.at(i) = elements.at(i) + static_cast<uint16_t>(t << (lsb - b - nbits));
         b += nbits;
         bits -= nbits;
         w &= static_cast<uint8_t>(~(mask << bits));  // not strictly necessary; mostly for debugging

         if(bits == 0) {
            if(j < inlen) {
               w = packed_bytes[j];
               bits = 8;
               j++;
            } else {
               break;  // the input vector is exhausted
            }
         }
      }
      if(b == lsb) {  // out[i] is filled in
         i++;
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
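
/*
 * unpack() is the exact inverse of pack(): it pulls D bits per coefficient
 * out of the byte stream, again most significant bit first, refilling the
 * staging byte w from packed_bytes whenever it runs dry. The length check
 * uses ceil_tobytes(outlen * lsb), i.e. ceil(#elements * D / 8) input bytes.
 */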

FrodoMatrix FrodoMatrix::deserialize(const Dimensions& dimensions, StrongSpan<const FrodoSerializedMatrix> bytes) {
   auto elements = make_elements_vector(dimensions);
   BOTAN_ASSERT_NOMSG(elements.size() * 2 == bytes.size());
   load_le<uint16_t>(elements.data(), bytes.data(), elements.size());
   return FrodoMatrix(dimensions, std::move(elements));
}

void FrodoMatrix::reduce(const FrodoKEMConstants& constants) {
   // Reduction is inherent if D is 16, because we use uint16_t in m_elements
   if(constants.d() < sizeof(decltype(m_elements)::value_type) * 8) {
      const uint16_t mask = static_cast<uint16_t>(1 << constants.d()) - 1;
      for(auto& elem : m_elements) {
         elem = elem & mask;  // mod q
      }
   }
}
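
/*
 * reduce() takes every coefficient modulo q = 2^D by masking to the low D
 * bits. For the parameter sets with D = 16 the uint16_t arithmetic already
 * wraps modulo 2^16, so the guard skips the loop entirely in that case.
 */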

}  // namespace Botan