Botan 3.4.0
Crypto and TLS for C++
frodo_matrix.cpp
/*
 * FrodoKEM matrix logic
 * Based on the MIT licensed reference implementation by the designers
 * (https://github.com/microsoft/PQCrypto-LWEKE/tree/master/src)
 *
 * The Fellowship of the FrodoKEM:
 * (C) 2023 Jack Lloyd
 *     2023 René Meusel, Amos Treiber - Rohde & Schwarz Cybersecurity
 *
 * Botan is released under the Simplified BSD License (see license.txt)
 */

#include <botan/internal/frodo_matrix.h>

#include <botan/assert.h>
#include <botan/frodokem.h>
#include <botan/hex.h>
#include <botan/mem_ops.h>
#include <botan/xof.h>
#include <botan/internal/bit_ops.h>
#include <botan/internal/frodo_constants.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/stl_util.h>

#if defined(BOTAN_HAS_FRODOKEM_AES)
   #include <botan/internal/frodo_aes_generator.h>
#endif

#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
   #include <botan/internal/frodo_shake_generator.h>
#endif

#include <array>
#include <cmath>
#include <cstdint>
#include <memory>
#include <span>
#include <utility>
#include <vector>

namespace Botan {

namespace {

secure_vector<uint16_t> make_elements_vector(const FrodoMatrix::Dimensions& dimensions) {
   return secure_vector<uint16_t>(static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions));
}
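/*
 * Creates a function object that generates one row of the public matrix A at
 * a time. Depending on the instantiated FrodoKEM mode, the rows are derived
 * from seed_a either with AES-128 or with SHAKE-128.
 */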
std::function<void(std::span<uint8_t> out, uint16_t i)> make_row_generator(const FrodoKEMConstants& constants,
                                                                           StrongSpan<const FrodoSeedA> seed_a) {
#if defined(BOTAN_HAS_FRODOKEM_AES)
   if(constants.mode().is_aes()) {
      return create_aes_row_generator(constants, seed_a);
   }
#endif

#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
   if(constants.mode().is_shake()) {
      return create_shake_row_generator(constants, seed_a);
   }
#endif

   // If we don't have AES in this build, the instantiation of the FrodoKEM instance
   // is blocked upstream already. Hence, this assertion is safe here.
   BOTAN_ASSERT_UNREACHABLE();
}

}  // namespace
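/*
 * Samples a matrix of values from the FrodoKEM error distribution. Each
 * 16-bit word of r is split into a sign bit and a 15-bit random value; the
 * value is converted into an error term by constant-time inversion sampling
 * against the parameter set's cumulative distribution table (CDF_TABLE).
 */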
FrodoMatrix FrodoMatrix::sample(const FrodoKEMConstants& constants,
                                const Dimensions& dimensions,
                                StrongSpan<const FrodoSampleR> r) {
   BOTAN_ASSERT_NOMSG(r.size() % 2 == 0);
   const auto n = r.size() / 2;

   auto elements = make_elements_vector(dimensions);
   BOTAN_ASSERT_NOMSG(n == elements.size());

   load_le<uint16_t>(elements.data(), r.data(), n);

   for(size_t i = 0; i < n; ++i) {
      uint32_t sample = 0;  // Avoid integral promotion
      const uint16_t prnd = elements.at(i) >> 1;  // Drop the least significant bit
      const uint16_t sign = elements.at(i) & 0x1;  // Pick the least significant bit

      // No need to compare with the last value.
      for(size_t j = 0; j < constants.cdf_table_len() - 1; ++j) {
         // Constant time comparison: 1 if CDF_TABLE[j] < prnd, 0 otherwise.
         sample += CT::Mask<uint16_t>::is_lt(constants.cdf_table_at(j), prnd).if_set_return(1);
      }
      // Assuming that sign is either 0 or 1, flips sample iff sign = 1
      elements.at(i) = static_cast<uint16_t>((-sign) ^ sample) + sign;
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
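/*
 * Returns a closure that squeezes the required number of bytes out of the
 * given XOF and feeds them into FrodoMatrix::sample() for the requested
 * matrix dimensions.
 */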
std::function<FrodoMatrix(const FrodoMatrix::Dimensions& dimensions)> FrodoMatrix::make_sample_generator(
   const FrodoKEMConstants& constants, Botan::XOF& shake) {
   return [&constants, &shake](const FrodoMatrix::Dimensions& dimensions) mutable {
      return sample(constants,
                    dimensions,
                    shake.output<FrodoSampleR>(sizeof(uint16_t) * std::get<0>(dimensions) * std::get<1>(dimensions)));
   };
}

FrodoMatrix::FrodoMatrix(Dimensions dims) :
      m_dim1(std::get<0>(dims)), m_dim2(std::get<1>(dims)), m_elements(make_elements_vector(dims)) {}
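/*
 * Computes B = A*S + E as used in FrodoKEM key generation. The n x n public
 * matrix A is never materialized in full; it is expanded from seed_a four
 * rows at a time and multiplied with S on the fly.
 */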
FrodoMatrix FrodoMatrix::mul_add_as_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e,
                                           StrongSpan<const FrodoSeedA> seed_a) {
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<1>(s.dimensions()) &&
                   std::get<1>(e.dimensions()) == std::get<0>(s.dimensions()),
                "FrodoMatrix dimension mismatch of E and S");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n() && std::get<1>(e.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of new matrix dimensions and E");

   auto elements = make_elements_vector(e.dimensions());
   auto row_generator = make_row_generator(constants, seed_a);

   /*
   We perform 4 invocations of the row generator (SHAKE-128 or AES-128) per iteration,
   each producing one row of n 16-bit values. a_row_data contains the 16-bit values of
   the current 4 rows. a_row_data_bytes represents the corresponding bytes.
   */
   std::vector<uint16_t> a_row_data(4 * constants.n(), 0);
   // TODO: maybe use std::as_bytes() instead
   //       (take extra care, as it produces a std::span<std::byte>)
   std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
                                       sizeof(uint16_t) * a_row_data.size());

   for(size_t i = 0; i < constants.n(); i += 4) {
      auto a_row = BufferStuffer(a_row_data_bytes);

      // Do 4 invocations to fill 4 rows
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 0));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 1));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 2));
      row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 3));

      // Use generated bytes to fill 16-bit data
      load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 4 * constants.n());

      for(size_t k = 0; k < constants.n_bar(); ++k) {
         std::array<uint16_t, 4> sum = {0};
         for(size_t j = 0; j < constants.n(); ++j) {  // Matrix-vector multiplication
            // Note: we use uint32_t for `sp` to avoid an integral promotion to `int`
            //       when multiplying `sp` with other row values. Otherwise we might
            //       suffer from undefined behaviour due to a signed integer overflow.
            //       See: https://learn.microsoft.com/en-us/cpp/cpp/standard-conversions#integral-promotions
            const uint32_t sp = s.elements_at(k * constants.n() + j);

            // Go through four lines with same sp
            sum.at(0) += static_cast<uint16_t>(a_row_data.at(0 * constants.n() + j) * sp);
            sum.at(1) += static_cast<uint16_t>(a_row_data.at(1 * constants.n() + j) * sp);
            sum.at(2) += static_cast<uint16_t>(a_row_data.at(2 * constants.n() + j) * sp);
            sum.at(3) += static_cast<uint16_t>(a_row_data.at(3 * constants.n() + j) * sp);
         }
         elements.at((i + 0) * constants.n_bar() + k) = e.elements_at((i + 0) * constants.n_bar() + k) + sum.at(0);
         elements.at((i + 3) * constants.n_bar() + k) = e.elements_at((i + 3) * constants.n_bar() + k) + sum.at(3);
         elements.at((i + 2) * constants.n_bar() + k) = e.elements_at((i + 2) * constants.n_bar() + k) + sum.at(2);
         elements.at((i + 1) * constants.n_bar() + k) = e.elements_at((i + 1) * constants.n_bar() + k) + sum.at(1);
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
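/*
 * Computes B' = S'*A + E' as used during FrodoKEM encapsulation. As above,
 * A is expanded from seed_a on the fly, here eight rows per iteration.
 */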
FrodoMatrix FrodoMatrix::mul_add_sa_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e,
                                           StrongSpan<const FrodoSeedA> seed_a) {
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<0>(s.dimensions()) &&
                   std::get<1>(e.dimensions()) == std::get<1>(s.dimensions()),
                "FrodoMatrix dimension mismatch of E and S");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n(),
                "FrodoMatrix dimension mismatch of new matrix dimensions and E");

   auto elements = e.m_elements;
   auto row_generator = make_row_generator(constants, seed_a);

   /*
   We perform 8 invocations of the row generator (SHAKE-128 or AES-128) per iteration,
   each producing one row of n 16-bit values. a_row_data contains the 16-bit values of
   the current 8 rows. a_row_data_bytes represents the corresponding bytes.
   */
   std::vector<uint16_t> a_row_data(8 * constants.n(), 0);
   // TODO: maybe use std::as_bytes()
   std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
                                       sizeof(uint16_t) * a_row_data.size());

   // Start matrix multiplication
   for(size_t i = 0; i < constants.n(); i += 8) {
      auto a_row = BufferStuffer(a_row_data_bytes);

      // Do 8 invocations to fill 8 rows
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 0));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 1));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 2));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 3));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 4));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 5));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 6));
      row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 7));

      // Use generated bytes to fill 16-bit data
      load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 8 * constants.n());

      for(size_t j = 0; j < constants.n_bar(); ++j) {
         uint16_t sum = 0;
         std::array<uint32_t /* to avoid integral promotion */, 8> sp;
         for(size_t p = 0; p < 8; ++p) {
            sp[p] = s.elements_at(j * constants.n() + i + p);
         }
         for(size_t q = 0; q < constants.n(); ++q) {
            sum = elements.at(j * constants.n() + q);
            for(size_t p = 0; p < 8; ++p) {
               sum += static_cast<uint16_t>(sp[p] * a_row_data.at(p * constants.n() + q));
            }
            elements.at(j * constants.n() + q) = sum;
         }
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
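/*
 * Computes V = S'*B + E'', the n_bar x n_bar matrix to which the encoded
 * message is added during encapsulation.
 */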
FrodoMatrix FrodoMatrix::mul_add_sb_plus_e(const FrodoKEMConstants& constants,
                                           const FrodoMatrix& b,
                                           const FrodoMatrix& s,
                                           const FrodoMatrix& e) {
   BOTAN_ASSERT(std::get<0>(b.dimensions()) == std::get<1>(s.dimensions()) &&
                   std::get<1>(b.dimensions()) == std::get<0>(s.dimensions()),
                "FrodoMatrix dimension mismatch of B and S");
   BOTAN_ASSERT(std::get<0>(b.dimensions()) == constants.n() && std::get<1>(b.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of B");
   BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n_bar(),
                "FrodoMatrix dimension mismatch of E");

   auto elements = make_elements_vector(e.dimensions());

   for(size_t k = 0; k < constants.n_bar(); ++k) {
      for(size_t i = 0; i < constants.n_bar(); ++i) {
         elements.at(k * constants.n_bar() + i) = e.elements_at(k * constants.n_bar() + i);
         for(size_t j = 0; j < constants.n(); ++j) {
            elements.at(k * constants.n_bar() + i) += static_cast<uint16_t>(
               static_cast<uint32_t /* to avoid integral promotion */>(s.elements_at(k * constants.n() + j)) *
               b.elements_at(j * constants.n_bar() + i));
         }
      }
   }

   return FrodoMatrix(e.dimensions(), std::move(elements));
}
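/*
 * Frodo.Encode: splits the plaintext into B-bit chunks and maps each chunk k
 * to the matrix coefficient k * 2^(D-B), i.e. k * q/2^B. For instance, with
 * the FrodoKEM-640 parameters (D = 15, B = 2) every 2-bit chunk k becomes the
 * coefficient k * 2^13.
 */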
FrodoMatrix FrodoMatrix::encode(const FrodoKEMConstants& constants, StrongSpan<const FrodoPlaintext> in) {
   const uint64_t mask = (uint64_t(1) << constants.b()) - 1;

   const auto dimensions = std::make_tuple<size_t, size_t>(constants.n_bar(), constants.n_bar());
   auto elements = make_elements_vector(dimensions);

   BOTAN_ASSERT_NOMSG(in.size() * 8 == constants.n_bar() * constants.n_bar() * constants.b());

   size_t pos = 0;
   for(size_t i = 0; i < (constants.n_bar() * constants.n_bar()) / 8; ++i) {
      uint64_t temp = 0;
      for(size_t j = 0; j < constants.b(); ++j) {
         temp |= static_cast<uint64_t /* avoiding integral promotion */>(in[i * constants.b() + j]) << (8 * j);
      }
      for(size_t j = 0; j < 8; ++j) {
         elements.at(pos++) = static_cast<uint16_t>((temp & mask) << (constants.d() - constants.b()));  // k*2^(D-B)
         temp >>= constants.b();
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
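/*
 * Element-wise addition of two n_bar x n_bar matrices. The coefficients wrap
 * modulo 2^16; see reduce() for the reduction modulo q = 2^D.
 */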
FrodoMatrix FrodoMatrix::add(const FrodoKEMConstants& constants, const FrodoMatrix& a, const FrodoMatrix& b) {
   // Addition is defined for n_bar x n_bar matrices only
   BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
                      std::get<1>(a.dimensions()) == constants.n_bar());

   auto elements = make_elements_vector(a.dimensions());

   for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
      elements.at(i) = a.elements_at(i) + b.elements_at(i);
   }

   return FrodoMatrix(a.dimensions(), std::move(elements));
}
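/*
 * Element-wise subtraction of two n_bar x n_bar matrices, used during
 * decapsulation to strip B'*S from the ciphertext matrix before decoding.
 */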
FrodoMatrix FrodoMatrix::sub(const FrodoKEMConstants& constants, const FrodoMatrix& a, const FrodoMatrix& b) {
   // Subtraction is defined for n_bar x n_bar matrices only
   BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
                      std::get<1>(a.dimensions()) == constants.n_bar());

   auto elements = make_elements_vector(a.dimensions());

   for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
      elements.at(i) = a.elements_at(i) - b.elements_at(i);
   }

   return FrodoMatrix(a.dimensions(), std::move(elements));
}
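/*
 * Compares the coefficient buffers of two matrices without data-dependent
 * branches and returns a CT::Mask that is set on equality.
 */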
CT::Mask<uint8_t> FrodoMatrix::constant_time_compare(const FrodoMatrix& other) const {
   // TODO: Possibly use range-based comparison after #3715 is merged
   return CT::is_equal(reinterpret_cast<const uint8_t*>(m_elements.data()),
                       reinterpret_cast<const uint8_t*>(other.m_elements.data()),
                       sizeof(decltype(m_elements)::value_type) * m_elements.size());
}
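/*
 * Computes M = B'*S for decapsulation. The secret matrix is passed as S^T,
 * so the i-th row of B' is multiplied with the j-th row of S^T (which is the
 * j-th column of S).
 */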
FrodoMatrix FrodoMatrix::mul_bs(const FrodoKEMConstants& constants, const FrodoMatrix& b, const FrodoMatrix& s) {
   Dimensions dimensions = {constants.n_bar(), constants.n_bar()};
   auto elements = make_elements_vector(dimensions);

   for(size_t i = 0; i < constants.n_bar(); ++i) {
      for(size_t j = 0; j < constants.n_bar(); ++j) {
         auto& current = elements.at(i * constants.n_bar() + j);
         current = 0;
         for(size_t k = 0; k < constants.n(); ++k) {
            // Explicitly store the values in 32-bit variables to avoid integral promotion
            const uint32_t b_ink = b.elements_at(i * constants.n() + k);

            // Since the input is s^T, we multiply the i-th row of b with the j-th row of s^T
            const uint32_t s_ink = s.elements_at(j * constants.n() + k);

            current += static_cast<uint16_t>(b_ink * s_ink);
         }
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
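/*
 * Frodo.Pack: packs the D significant bits of every 16-bit coefficient into a
 * contiguous byte string, most significant bit first, so that a matrix with
 * element_count() coefficients occupies ceil(element_count() * D / 8) bytes.
 */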
void FrodoMatrix::pack(const FrodoKEMConstants& constants, StrongSpan<FrodoPackedMatrix> out) const {
   const size_t outlen = packed_size(constants);
   BOTAN_ASSERT_NOMSG(out.size() == outlen);

   size_t i = 0;      // whole bytes already filled in
   size_t j = 0;      // whole uint16_t already copied
   uint16_t w = 0;    // the leftover, not yet copied
   uint8_t bits = 0;  // the number of lsb in w

   while(i < outlen && (j < element_count() || ((j == element_count()) && (bits > 0)))) {
      /*
      in: |        |        |********|********|
                            ^
                            j
      w : |   ****|
              ^
              bits
      out:|**|**|**|**|**|**|**|**|* |
                                  ^^
                                  ib
      */
      uint8_t b = 0;  // bits in out[i] already filled in
      while(b < 8) {
         const uint8_t nbits = std::min(static_cast<uint8_t>(8 - b), bits);
         const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
         const auto t = static_cast<uint8_t>((w >> (bits - nbits)) & mask);  // the bits to copy from w to out
         out[i] = out[i] + static_cast<uint8_t>(t << (8 - b - nbits));
         b += nbits;
         bits -= nbits;

         if(bits == 0) {
            if(j < element_count()) {
               w = m_elements.at(j);
               bits = static_cast<uint8_t>(constants.d());
               j++;
            } else {
               break;  // the input vector is exhausted
            }
         }
      }
      if(b == 8) {  // out[i] is filled in
         i++;
      }
   }
}
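/*
 * Serializes the matrix as the little-endian byte encoding of its 16-bit
 * coefficients, i.e. two bytes per coefficient without any bit-packing.
 */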
FrodoSerializedMatrix FrodoMatrix::serialize() const {
   FrodoSerializedMatrix out(2 * m_elements.size());

   for(unsigned int i = 0; i < m_elements.size(); ++i) {
      store_le(m_elements.at(i), out.data() + 2 * i);
   }

   return out;
}
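/*
 * Frodo.Decode: recovers B bits from each coefficient c by rounding, i.e.
 * computing round(c * 2^B / q) mod 2^B, implemented below as
 * ((c mod q) + 2^(D-B-1)) >> (D-B). The recovered chunks are concatenated
 * into the plaintext bytes.
 */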
FrodoPlaintext FrodoMatrix::decode(const FrodoKEMConstants& constants) const {
   const size_t nwords = (constants.n_bar() * constants.n_bar()) / 8;
   const uint16_t maskex = static_cast<uint16_t>(1 << constants.b()) - 1;
   const uint16_t maskq = static_cast<uint16_t>(1 << constants.d()) - 1;

   FrodoPlaintext out(nwords * constants.b());

   size_t index = 0;
   for(size_t i = 0; i < nwords; i++) {
      uint64_t templong = 0;
      for(size_t j = 0; j < 8; j++) {
         const auto temp =
            static_cast<uint16_t>(((m_elements.at(index) & maskq) + (1 << (constants.d() - constants.b() - 1))) >>
                                  (constants.d() - constants.b()));
         templong |= static_cast<uint64_t>(temp & maskex) << (constants.b() * j);
         index++;
      }
      for(size_t j = 0; j < constants.b(); j++) {
         out[i * constants.b() + j] = (templong >> (8 * j)) & 0xFF;
      }
   }

   return out;
}
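/*
 * Frodo.Unpack: the inverse of pack(). Reads D bits per coefficient, most
 * significant bit first, from the packed byte string and expands them into
 * 16-bit matrix coefficients.
 */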
FrodoMatrix FrodoMatrix::unpack(const FrodoKEMConstants& constants,
                                const Dimensions& dimensions,
                                StrongSpan<const FrodoPackedMatrix> packed_bytes) {
   const uint8_t lsb = static_cast<uint8_t>(constants.d());
   const size_t inlen = packed_bytes.size();
   const size_t outlen = static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions);

   BOTAN_ASSERT_NOMSG(inlen == ceil_tobytes(outlen * lsb));

   auto elements = make_elements_vector(dimensions);

   size_t i = 0;      // whole uint16_t already filled in
   size_t j = 0;      // whole bytes already copied
   uint8_t w = 0;     // the leftover, not yet copied
   uint8_t bits = 0;  // the number of lsb bits of w

   while(i < outlen && (j < inlen || ((j == inlen) && (bits > 0)))) {
      /*
      in: |  |  |  |  |  |  |**|**|...
                            ^
                            j
      w : | *|
            ^
            bits
      out:|   *****|   *****|   ***  |        |...
                            ^   ^
                            i   b
      */
      uint8_t b = 0;  // bits in out[i] already filled in
      while(b < lsb) {
         const uint8_t nbits = std::min(static_cast<uint8_t>(lsb - b), bits);
         const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
         uint8_t t = (w >> (bits - nbits)) & mask;  // the bits to copy from w to out

         elements.at(i) = elements.at(i) + static_cast<uint16_t>(t << (lsb - b - nbits));
         b += nbits;
         bits -= nbits;
         w &= static_cast<uint8_t>(~(mask << bits));  // not strictly necessary; mostly for debugging

         if(bits == 0) {
            if(j < inlen) {
               w = packed_bytes[j];
               bits = 8;
               j++;
            } else {
               break;  // the input vector is exhausted
            }
         }
      }
      if(b == lsb) {  // out[i] is filled in
         i++;
      }
   }

   return FrodoMatrix(dimensions, std::move(elements));
}
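/*
 * Inverse of serialize(): reconstructs a matrix of the given dimensions from
 * its little-endian 16-bit coefficient encoding.
 */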
FrodoMatrix FrodoMatrix::deserialize(const Dimensions& dimensions, StrongSpan<const FrodoSerializedMatrix> bytes) {
   auto elements = make_elements_vector(dimensions);
   BOTAN_ASSERT_NOMSG(elements.size() * 2 == bytes.size());
   load_le<uint16_t>(elements.data(), bytes.data(), elements.size());
   return FrodoMatrix(dimensions, std::move(elements));
}
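/*
 * Reduces all coefficients modulo q = 2^D by masking off the upper bits. For
 * parameter sets with D = 16 this is a no-op, since the uint16_t coefficients
 * already wrap modulo 2^16.
 */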
void FrodoMatrix::reduce(const FrodoKEMConstants& constants) {
   // Reduction is inherent if D is 16, because we use uint16_t in m_elements
   if(constants.d() < sizeof(decltype(m_elements)::value_type) * 8) {
      const uint16_t mask = static_cast<uint16_t>(1 << constants.d()) - 1;
      for(auto& elem : m_elements) {
         elem = elem & mask;  // mod q
      }
   }
}

}  // namespace Botan