Botan 3.11.0
Crypto and TLS for C++
frodo_matrix.cpp
Go to the documentation of this file.
1/*
2 * FrodoKEM matrix logic
3 * Based on the MIT licensed reference implementation by the designers
4 * (https://github.com/microsoft/PQCrypto-LWEKE/tree/master)
5 *
6 * The Fellowship of the FrodoKEM:
7 * (C) 2023 Jack Lloyd
8 * 2023 René Meusel, Amos Treiber - Rohde & Schwarz Cybersecurity
9 *
10 * Botan is released under the Simplified BSD License (see license.txt)
11 */
12
13#include <botan/internal/frodo_matrix.h>
14
15#include <botan/assert.h>
16#include <botan/xof.h>
17#include <botan/internal/bit_ops.h>
18#include <botan/internal/buffer_stuffer.h>
19#include <botan/internal/frodo_constants.h>
20#include <botan/internal/loadstor.h>
21#include <array>
22#include <span>
23#include <utility>
24#include <vector>
25
26#if defined(BOTAN_HAS_FRODOKEM_AES)
27 #include <botan/internal/frodo_aes_generator.h>
28#endif
29
30#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
31 #include <botan/internal/frodo_shake_generator.h>
32#endif
33
34namespace Botan {
35
36namespace {
37
38secure_vector<uint16_t> make_elements_vector(const FrodoMatrix::Dimensions& dimensions) {
39 return secure_vector<uint16_t>(static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions));
40}
41
42std::function<void(std::span<uint8_t> out, uint16_t i)> make_row_generator(const FrodoKEMConstants& constants,
44#if defined(BOTAN_HAS_FRODOKEM_AES)
45 if(constants.mode().is_aes()) {
46 return create_aes_row_generator(constants, seed_a);
47 }
48#endif
49
50#if defined(BOTAN_HAS_FRODOKEM_SHAKE)
51 if(constants.mode().is_shake()) {
52 return create_shake_row_generator(constants, seed_a);
53 }
54#endif
55
56 // If we don't have AES in this build, the instantiation of the FrodoKEM instance
57 // is blocked upstream already. Hence, assert is save here.
59}
60
61} // namespace
62
66 BOTAN_ASSERT_NOMSG(r.size() % 2 == 0);
67 const auto n = r.size() / 2;
68
69 auto elements = make_elements_vector(dimensions);
70 BOTAN_ASSERT_NOMSG(n == elements.size());
71
72 load_le<uint16_t>(elements.data(), r.data(), n);
73
74 for(auto& elem : elements) {
75 const auto prnd = CT::value_barrier(static_cast<uint16_t>(elem >> 1)); // Drop the least significant bit
76 const auto sign = CT::Mask<uint16_t>::expand_bit(elem, 0); // Pick the least significant bit
77
78 uint32_t sample = 0; // Avoid integral promotion
79
80 // No need to compare with the last value.
81 for(size_t j = 0; j < constants.cdf_table_len() - 1; ++j) {
82 // Constant time comparison: 1 if CDF_TABLE[j] < s, 0 otherwise.
83 sample += CT::Mask<uint16_t>::is_lt(constants.cdf_table_at(j), prnd).if_set_return(1);
84 }
85 // Assuming that sign is either 0 or 1, flips sample iff sign = 1
86 const uint16_t sample_u16 = static_cast<uint16_t>(sample);
87
88 elem = sign.select(~sample_u16 + 1, sample_u16);
89 }
90
91 return FrodoMatrix(dimensions, std::move(elements));
92}
93
95 const FrodoKEMConstants& constants, Botan::XOF& shake) {
96 return [&constants, &shake](const FrodoMatrix::Dimensions& dimensions) mutable {
97 return sample(constants,
99 shake.output<FrodoSampleR>(sizeof(uint16_t) * std::get<0>(dimensions) * std::get<1>(dimensions)));
100 };
101}
102
104 m_dim1(std::get<0>(dims)), m_dim2(std::get<1>(dims)), m_elements(make_elements_vector(dims)) {}
105
107 const FrodoMatrix& s,
108 const FrodoMatrix& e,
110 BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<1>(s.dimensions()) &&
111 std::get<1>(e.dimensions()) == std::get<0>(s.dimensions()),
112 "FrodoMatrix dimension mismatch of E and S");
113 BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n() && std::get<1>(e.dimensions()) == constants.n_bar(),
114 "FrodoMatrix dimension mismatch of new matrix dimensions and E");
115
116 auto elements = make_elements_vector(e.dimensions());
117 auto row_generator = make_row_generator(constants, seed_a);
118
119 /*
120 We perform 4 invocations of SHAKE128 per iteration to obtain n 16-bit values per invocation.
121 a_row_data contains the 16-bit values of the current 4 rows. a_row_data_bytes represents the corresponding bytes.
122 */
123 std::vector<uint16_t> a_row_data(4 * constants.n(), 0);
124 // TODO: maybe use std::as_bytes() instead
125 // (take extra care, as it produces a std::span<std::byte>)
126 const std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
127 sizeof(uint16_t) * a_row_data.size());
128
129 for(size_t i = 0; i < constants.n(); i += 4) {
130 auto a_row = BufferStuffer(a_row_data_bytes);
131
132 // Do 4 invocations to fill 4 rows
133 row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 0));
134 row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 1));
135 row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 2));
136 row_generator(a_row.next(constants.n() * sizeof(uint16_t)), static_cast<uint16_t>(i + 3));
137
138 // Use generated bytes to fill 16-bit data
139 load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 4 * constants.n());
140
141 for(size_t k = 0; k < constants.n_bar(); ++k) {
142 std::array<uint16_t, 4> sum = {0};
143 for(size_t j = 0; j < constants.n(); ++j) { // Matrix-vector multiplication
144 // Note: we use uint32_t for `sp` to avoid an integral promotion to `int`
145 // when multiplying `sp` with other row values. Otherwise we might
146 // suffer from undefined behaviour due to a signed integer overflow.
147 // See: https://learn.microsoft.com/en-us/cpp/cpp/standard-conversions#integral-promotions
148 const uint32_t sp = s.elements_at(k * constants.n() + j);
149
150 // Go through four lines with same sp
151 sum.at(0) += static_cast<uint16_t>(a_row_data.at(0 * constants.n() + j) * sp);
152 sum.at(1) += static_cast<uint16_t>(a_row_data.at(1 * constants.n() + j) * sp);
153 sum.at(2) += static_cast<uint16_t>(a_row_data.at(2 * constants.n() + j) * sp);
154 sum.at(3) += static_cast<uint16_t>(a_row_data.at(3 * constants.n() + j) * sp);
155 }
156 elements.at((i + 0) * constants.n_bar() + k) = e.elements_at((i + 0) * constants.n_bar() + k) + sum.at(0);
157 elements.at((i + 3) * constants.n_bar() + k) = e.elements_at((i + 3) * constants.n_bar() + k) + sum.at(3);
158 elements.at((i + 2) * constants.n_bar() + k) = e.elements_at((i + 2) * constants.n_bar() + k) + sum.at(2);
159 elements.at((i + 1) * constants.n_bar() + k) = e.elements_at((i + 1) * constants.n_bar() + k) + sum.at(1);
160 }
161 }
162
163 return FrodoMatrix(e.dimensions(), std::move(elements));
164}
165
167 const FrodoMatrix& s,
168 const FrodoMatrix& e,
170 BOTAN_ASSERT(std::get<0>(e.dimensions()) == std::get<0>(s.dimensions()) &&
171 std::get<1>(e.dimensions()) == std::get<1>(s.dimensions()),
172 "FrodoMatrix dimension mismatch of E and S");
173 BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n(),
174 "FrodoMatrix dimension mismatch of new matrix dimensions and E");
175
176 auto elements = e.m_elements;
177 auto row_generator = make_row_generator(constants, seed_a);
178
179 /*
180 We perform 8 invocations of SHAKE128 per iteration to obtain n 16-bit values per invocation.
181 a_row_data contains the 16-bit values of the current 8 rows. a_row_data_bytes represents the corresponding bytes.
182 */
183 std::vector<uint16_t> a_row_data(8 * constants.n(), 0);
184 // TODO: maybe use std::as_bytes()
185 const std::span<uint8_t> a_row_data_bytes(reinterpret_cast<uint8_t*>(a_row_data.data()),
186 sizeof(uint16_t) * a_row_data.size());
187
188 // Start matrix multiplication
189 for(size_t i = 0; i < constants.n(); i += 8) {
190 auto a_row = BufferStuffer(a_row_data_bytes);
191
192 // Do 8 invocations to fill 8 rows
193 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 0));
194 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 1));
195 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 2));
196 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 3));
197 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 4));
198 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 5));
199 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 6));
200 row_generator(a_row.next(sizeof(uint16_t) * constants.n()), static_cast<uint16_t>(i + 7));
201
202 // Use generated bytes to fill 16-bit data
203 load_le<uint16_t>(a_row_data.data(), a_row_data_bytes.data(), 8 * constants.n());
204
205 for(size_t j = 0; j < constants.n_bar(); ++j) {
206 uint16_t sum = 0;
207 std::array<uint32_t /* to avoid integral promotion */, 8> sp{};
208 for(size_t p = 0; p < 8; ++p) {
209 sp[p] = s.elements_at(j * constants.n() + i + p);
210 }
211 for(size_t q = 0; q < constants.n(); ++q) {
212 sum = elements.at(j * constants.n() + q);
213 for(size_t p = 0; p < 8; ++p) {
214 sum += static_cast<uint16_t>(sp[p] * a_row_data.at(p * constants.n() + q));
215 }
216 elements.at(j * constants.n() + q) = sum;
217 }
218 }
219 }
220
221 return FrodoMatrix(e.dimensions(), std::move(elements));
222}
223
225 const FrodoMatrix& b,
226 const FrodoMatrix& s,
227 const FrodoMatrix& e) {
228 BOTAN_ASSERT(std::get<0>(b.dimensions()) == std::get<1>(s.dimensions()) &&
229 std::get<1>(b.dimensions()) == std::get<0>(s.dimensions()),
230 "FrodoMatrix dimension mismatch of B and S");
231 BOTAN_ASSERT(std::get<0>(b.dimensions()) == constants.n() && std::get<1>(b.dimensions()) == constants.n_bar(),
232 "FrodoMatrix dimension mismatch of B");
233 BOTAN_ASSERT(std::get<0>(e.dimensions()) == constants.n_bar() && std::get<1>(e.dimensions()) == constants.n_bar(),
234 "FrodoMatrix dimension mismatch of E");
235
236 auto elements = make_elements_vector(e.dimensions());
237
238 for(size_t k = 0; k < constants.n_bar(); ++k) {
239 for(size_t i = 0; i < constants.n_bar(); ++i) {
240 elements.at(k * constants.n_bar() + i) = e.elements_at(k * constants.n_bar() + i);
241 for(size_t j = 0; j < constants.n(); ++j) {
242 elements.at(k * constants.n_bar() + i) += static_cast<uint16_t>(
243 static_cast<uint32_t /* to avoid integral promotion */>(s.elements_at(k * constants.n() + j)) *
244 b.elements_at(j * constants.n_bar() + i));
245 }
246 }
247 }
248
249 return FrodoMatrix(e.dimensions(), std::move(elements));
250}
251
253 const uint64_t mask = (uint64_t(1) << constants.b()) - 1;
254
255 const auto dimensions = std::make_tuple<size_t, size_t>(constants.n_bar(), constants.n_bar());
256 auto elements = make_elements_vector(dimensions);
257
258 BOTAN_ASSERT_NOMSG(in.size() * 8 == constants.n_bar() * constants.n_bar() * constants.b());
259
260 size_t pos = 0;
261 for(size_t i = 0; i < (constants.n_bar() * constants.n_bar()) / 8; ++i) {
262 uint64_t temp = 0;
263 for(size_t j = 0; j < constants.b(); ++j) {
264 temp |= static_cast<uint64_t /* avoiding integral promotion */>(in[i * constants.b() + j]) << (8 * j);
265 }
266 for(size_t j = 0; j < 8; ++j) {
267 elements.at(pos++) = static_cast<uint16_t>((temp & mask) << (constants.d() - constants.b())); // k*2^(D-B)
268 temp >>= constants.b();
269 }
270 }
271
272 return FrodoMatrix(dimensions, std::move(elements));
273}
274
276 // Addition is defined for n_bar x n_bar matrices only
278 BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
279 std::get<1>(a.dimensions()) == constants.n_bar());
280
281 auto elements = make_elements_vector(a.dimensions());
282
283 for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
284 elements.at(i) = a.elements_at(i) + b.elements_at(i);
285 }
286
287 return FrodoMatrix(a.dimensions(), std::move(elements));
288}
289
291 // Subtraction is defined for n_bar x n_bar matrices only
293 BOTAN_ASSERT_NOMSG(std::get<0>(a.dimensions()) == constants.n_bar() &&
294 std::get<1>(a.dimensions()) == constants.n_bar());
295
296 auto elements = make_elements_vector(a.dimensions());
297
298 for(size_t i = 0; i < constants.n_bar() * constants.n_bar(); ++i) {
299 elements.at(i) = a.elements_at(i) - b.elements_at(i);
300 }
301
302 return FrodoMatrix(a.dimensions(), std::move(elements));
303}
304
307 // TODO: Possibly use range-based comparison after #3715 is merged
308 return CT::is_equal(reinterpret_cast<const uint8_t*>(m_elements.data()),
309 reinterpret_cast<const uint8_t*>(other.m_elements.data()),
310 sizeof(decltype(m_elements)::value_type) * m_elements.size());
311}
312
314 const Dimensions dimensions = {constants.n_bar(), constants.n_bar()};
315 auto elements = make_elements_vector(dimensions);
316
317 for(size_t i = 0; i < constants.n_bar(); ++i) {
318 for(size_t j = 0; j < constants.n_bar(); ++j) {
319 auto& current = elements.at(i * constants.n_bar() + j);
320 current = 0;
321 for(size_t k = 0; k < constants.n(); ++k) {
322 // Explicitly store the values in 32-bit variables to avoid integral promotion
323 const uint32_t b_ink = b.elements_at(i * constants.n() + k);
324
325 // Since the input is s^T, we multiply the i-th row of b with the j-th row of s^t
326 const uint32_t s_ink = s.elements_at(j * constants.n() + k);
327
328 current += static_cast<uint16_t>(b_ink * s_ink);
329 }
330 }
331 }
332
333 return FrodoMatrix(dimensions, std::move(elements));
334}
335
337 const size_t outlen = packed_size(constants);
338 BOTAN_ASSERT_NOMSG(out.size() == outlen);
339
340 size_t i = 0; // whole bytes already filled in
341 size_t j = 0; // whole uint16_t already copied
342 uint16_t w = 0; // the leftover, not yet copied
343 uint8_t bits = 0; // the number of lsb in w
344
345 while(i < outlen && (j < element_count() || ((j == element_count()) && (bits > 0)))) {
346 /*
347 in: | | |********|********|
348 ^
349 j
350 w : | ****|
351 ^
352 bits
353 out:|**|**|**|**|**|**|**|**|* |
354 ^^
355 ib
356 */
357 uint8_t b = 0; // bits in out[i] already filled in
358 while(b < 8) {
359 const uint8_t nbits = std::min(static_cast<uint8_t>(8 - b), bits);
360 const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
361 const auto t = static_cast<uint8_t>((w >> (bits - nbits)) & mask); // the bits to copy from w to out
362 out[i] = out[i] + static_cast<uint8_t>(t << (8 - b - nbits));
363 b += nbits;
364 bits -= nbits;
365
366 if(bits == 0) {
367 if(j < element_count()) {
368 w = m_elements.at(j);
369 bits = static_cast<uint8_t>(constants.d());
370 j++;
371 } else {
372 break; // the input vector is exhausted
373 }
374 }
375 }
376 if(b == 8) { // out[i] is filled in
377 i++;
378 }
379 }
380}
381
383 FrodoSerializedMatrix out(2 * m_elements.size());
384
385 for(unsigned int i = 0; i < m_elements.size(); ++i) {
386 store_le(m_elements.at(i), out.data() + 2 * i);
387 }
388
389 return out;
390}
391
393 const size_t nwords = (constants.n_bar() * constants.n_bar()) / 8;
394 const uint16_t maskex = static_cast<uint16_t>(1 << constants.b()) - 1;
395 const uint16_t maskq = static_cast<uint16_t>(1 << constants.d()) - 1;
396
397 FrodoPlaintext out(nwords * constants.b());
398
399 size_t index = 0;
400 for(size_t i = 0; i < nwords; i++) {
401 uint64_t templong = 0;
402 for(size_t j = 0; j < 8; j++) {
403 const auto temp =
404 static_cast<uint16_t>(((m_elements.at(index) & maskq) + (1 << (constants.d() - constants.b() - 1))) >>
405 (constants.d() - constants.b()));
406 templong |= static_cast<uint64_t>(temp & maskex) << (constants.b() * j);
407 index++;
408 }
409 for(size_t j = 0; j < constants.b(); j++) {
410 out[i * constants.b() + j] = (templong >> (8 * j)) & 0xFF;
411 }
412 }
413
414 return out;
415}
416
418 const Dimensions& dimensions,
420 const uint8_t lsb = static_cast<uint8_t>(constants.d());
421 const size_t inlen = packed_bytes.size();
422 const size_t outlen = static_cast<size_t>(std::get<0>(dimensions)) * std::get<1>(dimensions);
423
424 BOTAN_ASSERT_NOMSG(inlen == ceil_tobytes(outlen * lsb));
425
426 auto elements = make_elements_vector(dimensions);
427
428 size_t i = 0; // whole uint16_t already filled in
429 size_t j = 0; // whole bytes already copied
430 uint8_t w = 0; // the leftover, not yet copied
431 uint8_t bits = 0; // the number of lsb bits of w
432
433 while(i < outlen && (j < inlen || ((j == inlen) && (bits > 0)))) {
434 /*
435 in: | | | | | | |**|**|...
436 ^
437 j
438 w : | *|
439 ^
440 bits
441 out:| *****| *****| *** | |...
442 ^ ^
443 i b
444 */
445 uint8_t b = 0; // bits in out[i] already filled in
446 while(b < lsb) {
447 const uint8_t nbits = std::min(static_cast<uint8_t>(lsb - b), bits);
448 const uint16_t mask = static_cast<uint16_t>(1 << nbits) - 1;
449 const uint8_t t = (w >> (bits - nbits)) & mask; // the bits to copy from w to out
450
451 elements.at(i) = elements.at(i) + static_cast<uint16_t>(t << (lsb - b - nbits));
452 b += nbits;
453 bits -= nbits;
454 w &= static_cast<uint8_t>(~(mask << bits)); // not strictly necessary; mostly for debugging
455
456 if(bits == 0) {
457 if(j < inlen) {
458 w = packed_bytes[j];
459 bits = 8;
460 j++;
461 } else {
462 break; // the input vector is exhausted
463 }
464 }
465 }
466 if(b == lsb) { // out[i] is filled in
467 i++;
468 }
469 }
470
471 return FrodoMatrix(dimensions, std::move(elements));
472}
473
475 auto elements = make_elements_vector(dimensions);
476 BOTAN_ASSERT_NOMSG(elements.size() * 2 == bytes.size());
477 load_le<uint16_t>(elements.data(), bytes.data(), elements.size());
478 return FrodoMatrix(dimensions, std::move(elements));
479}
480
482 // Reduction is inherent if D is 16, because we use uint16_t in m_elements
483 if(constants.d() < sizeof(decltype(m_elements)::value_type) * 8) {
484 const uint16_t mask = static_cast<uint16_t>(1 << constants.d()) - 1;
485 for(auto& elem : m_elements) {
486 elem = elem & mask; // mod q
487 }
488 }
489}
490
491} // namespace Botan
#define BOTAN_ASSERT_NOMSG(expr)
Definition assert.h:75
#define BOTAN_ASSERT(expr, assertion_made)
Definition assert.h:62
#define BOTAN_ASSERT_UNREACHABLE()
Definition assert.h:163
Helper class to ease in-place marshalling of concatenated fixed-length values.
static constexpr Mask< T > expand_bit(T v, size_t bit)
Definition ct_utils.h:421
static constexpr Mask< T > is_lt(T x, T y)
Definition ct_utils.h:450
uint16_t cdf_table_at(size_t i) const
static FrodoMatrix mul_add_sa_plus_e(const FrodoKEMConstants &constants, const FrodoMatrix &s, const FrodoMatrix &e, StrongSpan< const FrodoSeedA > seed_a)
static std::function< FrodoMatrix(const Dimensions &dimensions)> make_sample_generator(const FrodoKEMConstants &constants, Botan::XOF &shake)
static FrodoMatrix mul_add_sb_plus_e(const FrodoKEMConstants &constants, const FrodoMatrix &b, const FrodoMatrix &s, const FrodoMatrix &e)
void reduce(const FrodoKEMConstants &constants)
static FrodoMatrix mul_add_as_plus_e(const FrodoKEMConstants &constants, const FrodoMatrix &s, const FrodoMatrix &e, StrongSpan< const FrodoSeedA > seed_a)
static FrodoMatrix sub(const FrodoKEMConstants &constants, const FrodoMatrix &a, const FrodoMatrix &b)
FrodoPlaintext decode(const FrodoKEMConstants &constants) const
static FrodoMatrix add(const FrodoKEMConstants &constants, const FrodoMatrix &a, const FrodoMatrix &b)
std::tuple< size_t, size_t > Dimensions
static FrodoMatrix sample(const FrodoKEMConstants &constants, const Dimensions &dimensions, StrongSpan< const FrodoSampleR > r)
CT::Mask< uint8_t > constant_time_compare(const FrodoMatrix &other) const
FrodoPackedMatrix pack(const FrodoKEMConstants &constants) const
static FrodoMatrix encode(const FrodoKEMConstants &constants, StrongSpan< const FrodoPlaintext > in)
Dimensions dimensions() const
FrodoMatrix(Dimensions dims)
static FrodoMatrix unpack(const FrodoKEMConstants &constants, const Dimensions &dimensions, StrongSpan< const FrodoPackedMatrix > packed_bytes)
uint16_t elements_at(size_t i) const
size_t element_count() const
static FrodoMatrix mul_bs(const FrodoKEMConstants &constants, const FrodoMatrix &b_p, const FrodoMatrix &s)
static FrodoMatrix deserialize(const Dimensions &dimensions, StrongSpan< const FrodoSerializedMatrix > bytes)
size_t packed_size(const FrodoKEMConstants &constants) const
FrodoSerializedMatrix serialize() const
decltype(auto) data() noexcept(noexcept(this->m_span.data()))
decltype(auto) size() const noexcept(noexcept(this->m_span.size()))
T output(size_t bytes)
Definition xof.h:153
decltype(auto) data() noexcept(noexcept(this->get().data()))
constexpr T value_barrier(T x)
constexpr CT::Mask< T > is_equal(const T x[], const T y[], size_t len)
Definition ct_utils.h:798
Strong< secure_vector< uint8_t >, struct FrodoSerializedMatrix_ > FrodoSerializedMatrix
Definition frodo_types.h:44
auto create_shake_row_generator(const FrodoKEMConstants &constants, StrongSpan< const FrodoSeedA > seed_a)
constexpr auto store_le(ParamTs &&... params)
Definition loadstor.h:736
auto create_aes_row_generator(const FrodoKEMConstants &constants, StrongSpan< const FrodoSeedA > seed_a)
BOTAN_FORCE_INLINE constexpr T ceil_tobytes(T bits)
Definition bit_ops.h:175
constexpr auto load_le(ParamTs &&... params)
Definition loadstor.h:495
std::vector< T, secure_allocator< T > > secure_vector
Definition secmem.h:68
Strong< secure_vector< uint8_t >, struct FrodoPlaintext_ > FrodoPlaintext
Definition frodo_types.h:50
Strong< secure_vector< uint8_t >, struct FrodoSampleR_ > FrodoSampleR
Definition frodo_types.h:35