Botan 3.7.1
Crypto and TLS for C++
pcurves_impl.h
1/*
2* (C) 2024,2025 Jack Lloyd
3*
4* Botan is released under the Simplified BSD License (see license.txt)
5*/
6
7#ifndef BOTAN_PCURVES_IMPL_H_
8#define BOTAN_PCURVES_IMPL_H_
9
10#include <botan/rng.h>
11#include <botan/internal/ct_utils.h>
12#include <botan/internal/loadstor.h>
13#include <botan/internal/pcurves_util.h>
14#include <botan/internal/stl_util.h>
15#include <vector>
16
17#if defined(BOTAN_HAS_XMD)
18 #include <botan/internal/xmd.h>
19#endif
20
21namespace Botan {
22
23/*
24This file implements a system for compile-time instantiation of elliptic curve arithmetic.
25
26All computations including point multiplication are implemented to be constant time,
27with the exception of any function which includes "vartime" or equivalent in its
28name. Randomization techniques (scalar blinding, point rerandomization) are also
29used, largely to guard against situations where a compiler inserts a conditional jump
30where not expected.
31
32A specific elliptic curve is created by creating a set of EllipticCurveParameters,
33which are templatized over the relevant constants (p, a, b, etc) and then
34passing that set of parameters to an EllipticCurve template.
35
36For a simple example of how these are used see pcurves_brainpool256r1.cpp
37
38The system also includes various hooks which allow for specialized representations of
39field elements (for curves where a modular reduction technique faster than Montgomery
40is available) and to provide pre-computed addition chains for field and scalar
41inversions. See pcurves_secp256r1.cpp or pcurves_secp256k1.cpp for examples with all
42the bells and whistles.
43*/
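For orientation, the pattern a concrete curve file follows is roughly the sketch below. The class names and hex digit strings here are placeholders only (a real instantiation such as pcurves_brainpool256r1.cpp supplies the actual p, a, b, n, Gx, Gy constants as hex literals):

    // Shape-only sketch; "...." stands in for real hex constants and will not compile as-is
    class HypotheticalParams final
       : public EllipticCurveParameters<"....p....", "....a....", "....b....",
                                        "....n....", "....Gx....", "....Gy...."> {};

    // Instantiating the EllipticCurve template then provides the nested types
    // (FieldElement, Scalar, AffinePoint, ProjectivePoint) and the generator G
    // that the generic code in this file operates on.
    class HypotheticalCurve final : public EllipticCurve<HypotheticalParams> {};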
44
45namespace {
46
47/**
48* Montgomery Representation of Integers
49*
50* Integers modulo a prime (IntMod, see below) use some representation that
51* allows for fast arithmetic.
52*
53* The default representation used is Montgomery arithmetic. Curves with
54* specialized fields (eg Mersenne primes, Solinas primes, or Crandall primes)
55* provide a different type as the FieldRep parameter to the EllipticCurve
56* template.
57*
58* Since the curve parameters are public and known at compile time, we can
59* similarly compute the Montgomery parameters at compile time.
60*/
61template <typename Params>
62class MontgomeryRep final {
63 public:
64 using Self = MontgomeryRep<Params>;
65
66 static constexpr auto P = Params::P;
67 static constexpr size_t N = Params::N;
68 typedef typename Params::W W;
69
70 static_assert(N > 0 && (Params::P[0] & 1) == 1, "Invalid Montgomery modulus");
71
72 static constexpr auto P_dash = monty_inverse(P[0]);
73
74 static constexpr auto R1 = montygomery_r(P);
75 static constexpr auto R2 = mul_mod(R1, R1, P);
76 static constexpr auto R3 = mul_mod(R1, R2, P);
77
78 /**
79 * Return the constant one, pre-converted into Montgomery form
80 */
81 constexpr static std::array<W, N> one() { return R1; }
82
83 /**
84 * Modular reduction
85 */
86 constexpr static std::array<W, N> redc(const std::array<W, 2 * N>& z) {
87 if constexpr(P_dash == 1) {
88 return monty_redc_pdash1(z, P);
89 } else {
90 return monty_redc(z, P, P_dash);
91 }
92 }
93
94 /**
95 * Convert an integer into Montgomery representation
96 */
97 constexpr static std::array<W, N> to_rep(const std::array<W, N>& x) {
98 std::array<W, 2 * N> z;
99 comba_mul<N>(z.data(), x.data(), R2.data());
100 return Self::redc(z);
101 }
102
103 /**
104 * Wide reduction modulo the prime
105 *
106 * Modular reduces an input of up to twice the length of the modulus, and
107 * converts it into Montgomery form.
108 */
109 constexpr static std::array<W, N> wide_to_rep(const std::array<W, 2 * N>& x) {
110 auto redc_x = Self::redc(x);
111 std::array<W, 2 * N> z;
112 comba_mul<N>(z.data(), redc_x.data(), R3.data());
113 return Self::redc(z);
114 }
115
116 /**
117 * Convert an integer out of Montgomery representation
118 */
119 constexpr static std::array<W, N> from_rep(const std::array<W, N>& z) {
120 std::array<W, 2 * N> ze = {};
121 copy_mem(std::span{ze}.template first<N>(), z);
122 return Self::redc(ze);
123 }
124};
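The class above relies on a handful of arithmetic identities: to_rep multiplies by R (via R2 and one reduction), redc divides by R modulo P, and from_rep undoes to_rep. These can be checked with ordinary modular arithmetic on toy numbers. The following standalone sketch (plain % arithmetic standing in for the word-level comba_mul/monty_redc routines, with a toy modulus p = 251 and R = 2^8) verifies those identities:

    #include <cassert>
    #include <cstdint>

    int main() {
       const uint64_t p = 251;  // toy odd modulus
       const uint64_t R = 256;  // R = 2^(bits per "word"), gcd(R, p) = 1

       // R^-1 mod p, found by brute force for the toy example
       uint64_t R_inv = 0;
       for(uint64_t i = 1; i != p; ++i) {
          if((i * R) % p == 1) {
             R_inv = i;
          }
       }

       const uint64_t R1 = R % p;          // "one" in Montgomery form
       const uint64_t R2 = (R1 * R1) % p;  // constant used by to_rep

       auto redc = [&](uint64_t z) { return (z * R_inv) % p; };  // z * R^-1 mod p
       auto to_rep = [&](uint64_t x) { return redc(x * R2); };   // x * R mod p
       auto from_rep = [&](uint64_t z) { return redc(z); };

       for(uint64_t x = 0; x != p; ++x) {
          assert(to_rep(x) == (x * R) % p);
          assert(from_rep(to_rep(x)) == x);
          // A product of two Montgomery forms, reduced with redc, is again in Montgomery form
          assert(redc(to_rep(3) * to_rep(x)) == to_rep((3 * x) % p));
       }
       return 0;
    }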
125
126/**
127* Integers Modulo (a Prime)
128*
129* This is used to store and manipulate integers modulo the field (for the affine
130* x/y or Jacobian x/y/z coordinates) and group order (for scalar arithmetic).
131*
132* This class is parameterized by Rep which handles the modular reduction step,
133* as well (if required) any conversions into or out of the inner
134* representation. This is primarily for Montgomery arithmetic; specialized
135* reduction methods instead keep the integer in the "standard" form.
136*
137* _Most_ of the code in this class does work for arbitrary moduli. However
138* at least div2 and invert make assumptions that the modulus is prime.
139*
140* Any function that does not contain "vartime" or equivalent in the name is
141* written such that it does not leak information about its arguments via control
142* flow or memory access patterns.
143*/
144template <typename Rep>
145class IntMod final {
146 private:
147 static constexpr auto P = Rep::P;
148 static constexpr size_t N = Rep::N;
149 typedef typename Rep::W W;
150
151 static constexpr auto P_MINUS_2 = p_minus<2>(P);
152
153 public:
154 static constexpr size_t BITS = count_bits(P);
155 static constexpr size_t BYTES = (BITS + 7) / 8;
156
157 static constexpr auto P_MOD_4 = P[0] % 4;
158
159 using Self = IntMod<Rep>;
160
161 // Default value is zero
162 constexpr IntMod() : m_val({}) {}
163
164 IntMod(const Self& other) = default;
165 IntMod(Self&& other) = default;
166 IntMod& operator=(const Self& other) = default;
167 IntMod& operator=(Self&& other) = default;
168
169 /**
170 * Return integer zero
171 *
172 * Note this assumes that the representation of zero is an all zero
173 * sequence of words. This is true for both Montgomery and standard
174 * representations.
175 */
176 static constexpr Self zero() { return Self(std::array<W, N>{0}); }
177
178 /**
179 * Return integer one
180 */
181 static constexpr Self one() { return Self(Rep::one()); }
182
183 /**
184 * Consume an array of words and convert it to an IntMod
185 *
186 * This handles the Montgomery conversion, if required.
187 *
188 * Note that this function assumes that `w` represents an integer that is
189 * less than the modulus.
190 */
191 template <size_t L>
192 static constexpr Self from_words(std::array<W, L> w) {
193 if constexpr(L == N) {
194 return Self(Rep::to_rep(w));
195 } else {
196 static_assert(L < N);
197 std::array<W, N> ew = {};
198 copy_mem(std::span{ew}.template first<L>(), w);
199 return Self(Rep::to_rep(ew));
200 }
201 }
202
203 /**
204 * Check in constant time if this is equal to zero
205 */
206 constexpr CT::Choice is_zero() const { return CT::all_zeros(m_val.data(), m_val.size()).as_choice(); }
207
208 /**
209 * Check in constant time if this is not equal to zero
210 */
211 constexpr CT::Choice is_nonzero() const { return !is_zero(); }
212
213 /**
214 * Check in constant time if this is equal to one
215 */
216 constexpr CT::Choice is_one() const { return (*this == Self::one()); }
217
218 /**
219 * Check in constant time if this is an even integer
220 */
221 constexpr CT::Choice is_even() const {
222 auto v = Rep::from_rep(m_val);
223 return !CT::Choice::from_int(v[0] & 0x01);
224 }
225
226 /**
227 * Modular addition; return c = a + b
228 */
229 friend constexpr Self operator+(const Self& a, const Self& b) {
230 std::array<W, N> t;
231 W carry = bigint_add<W, N>(t, a.value(), b.value());
232
233 std::array<W, N> r;
234 bigint_monty_maybe_sub<N>(r.data(), carry, t.data(), P.data());
235 return Self(r);
236 }
237
238 /**
239 * Modular subtraction; return c = a - b
240 */
241 friend constexpr Self operator-(const Self& a, const Self& b) {
242 std::array<W, N> r;
243 word carry = bigint_sub3(r.data(), a.data(), N, b.data(), N);
244 bigint_cnd_add(carry, r.data(), N, P.data(), N);
245 return Self(r);
246 }
247
248 /**
249 * Return the value of this divided by 2
250 */
251 Self div2() const {
252 // The inverse of 2 modulo P is (P/2)+1; this avoids a constexpr time
253 // general inversion, which some compilers can't handle
254 constexpr auto INV_2 = p_div_2_plus_1(Rep::P);
255
256 // We could multiply by INV_2 but there is a better way ...
257
258 std::array<W, N> t = value();
259 W borrow = shift_right<1>(t);
260
261 // If value was odd, add (P/2)+1
262 bigint_cnd_add(borrow, t.data(), N, INV_2.data(), N);
263
264 return Self(t);
265 }
266
267 /// Return (*this) multiplied by 2
268 constexpr Self mul2() const {
269 std::array<W, N> t = value();
270 W carry = shift_left<1>(t);
271
272 std::array<W, N> r;
273 bigint_monty_maybe_sub<N>(r.data(), carry, t.data(), P.data());
274 return Self(r);
275 }
276
277 /// Return (*this) multiplied by 3
278 constexpr Self mul3() const { return mul2() + (*this); }
279
280 /// Return (*this) multiplied by 4
281 constexpr Self mul4() const { return mul2().mul2(); }
282
283 /// Return (*this) multiplied by 8
284 constexpr Self mul8() const { return mul2().mul2().mul2(); }
285
286 /**
287 * Modular multiplication; return c = a * b
288 */
289 friend constexpr Self operator*(const Self& a, const Self& b) {
290 std::array<W, 2 * N> z;
291 comba_mul<N>(z.data(), a.data(), b.data());
292 return Self(Rep::redc(z));
293 }
294
295 /**
296 * Modular multiplication; set this to this * other
297 */
298 constexpr Self& operator*=(const Self& other) {
299 std::array<W, 2 * N> z;
300 comba_mul<N>(z.data(), data(), other.data());
301 m_val = Rep::redc(z);
302 return (*this);
303 }
304
305 /**
306 * Conditional assignment
307 *
308 * If `cond` is true, sets `x` to `nx`
309 */
310 static constexpr void conditional_assign(Self& x, CT::Choice cond, const Self& nx) {
311 const W mask = CT::Mask<W>::from_choice(cond).value();
312
313 for(size_t i = 0; i != N; ++i) {
314 x.m_val[i] = choose(mask, nx.m_val[i], x.m_val[i]);
315 }
316 }
317
318 /**
319 * Conditional assignment
320 *
321 * If `cond` is true, sets `x` to `nx` and `y` to `ny`
322 */
323 static constexpr void conditional_assign(Self& x, Self& y, CT::Choice cond, const Self& nx, const Self& ny) {
324 const W mask = CT::Mask<W>::from_choice(cond).value();
325
326 for(size_t i = 0; i != N; ++i) {
327 x.m_val[i] = choose(mask, nx.m_val[i], x.m_val[i]);
328 y.m_val[i] = choose(mask, ny.m_val[i], y.m_val[i]);
329 }
330 }
331
332 /**
333 * Conditional assignment
334 *
335 * If `cond` is true, sets `x` to `nx`, `y` to `ny`, and `z` to `nz`
336 */
337 static constexpr void conditional_assign(
338 Self& x, Self& y, Self& z, CT::Choice cond, const Self& nx, const Self& ny, const Self& nz) {
339 const W mask = CT::Mask<W>::from_choice(cond).value();
340
341 for(size_t i = 0; i != N; ++i) {
342 x.m_val[i] = choose(mask, nx.m_val[i], x.m_val[i]);
343 y.m_val[i] = choose(mask, ny.m_val[i], y.m_val[i]);
344 z.m_val[i] = choose(mask, nz.m_val[i], z.m_val[i]);
345 }
346 }
347
348 /**
349 * Modular squaring
350 *
351 * Returns the square of this after modular reduction
352 */
353 constexpr Self square() const {
354 std::array<W, 2 * N> z;
355 comba_sqr<N>(z.data(), this->data());
356 return Self(Rep::redc(z));
357 }
358
359 /**
360 * Repeated modular squaring
361 *
362 * Squares this value n times, in place
363 *
364 * (Equivalently: replaces this with this raised to the power 2^n)
365 */
366 constexpr void square_n(size_t n) {
367 std::array<W, 2 * N> z;
368 for(size_t i = 0; i != n; ++i) {
369 comba_sqr<N>(z.data(), this->data());
370 m_val = Rep::redc(z);
371 }
372 }
373
374 /**
375 * Modular negation
376 *
377 * Returns the additive inverse of (*this)
378 */
379 constexpr Self negate() const {
380 auto x_is_zero = CT::all_zeros(this->data(), N);
381
382 std::array<W, N> r;
383 bigint_sub3(r.data(), P.data(), N, this->data(), N);
384 x_is_zero.if_set_zero_out(r.data(), N);
385 return Self(r);
386 }
387
388 /**
389 * Modular Exponentiation (Variable Time)
390 *
391 * This function is variable time with respect to the exponent. It should
392 * only be used when exp is not secret. In the current code, `exp` is
393 * always a compile-time constant.
394 *
395 * This function should not leak any information about this, since the
396 * value being operated on may be a secret.
397 *
398 * TODO: this interface should be changed so that the exponent is always a
399 * compile-time constant; this should allow some interesting optimizations.
400 */
401 constexpr Self pow_vartime(const std::array<W, N>& exp) const {
402 constexpr size_t WindowBits = (Self::BITS <= 256) ? 4 : 5;
403 constexpr size_t WindowElements = (1 << WindowBits) - 1;
404
405 constexpr size_t Windows = (Self::BITS + WindowBits - 1) / WindowBits;
406
407 /*
408 A simple fixed width window modular multiplication.
409
410 TODO: investigate using sliding window here
411 */
412
413 std::array<Self, WindowElements> tbl;
414
415 tbl[0] = (*this);
416
417 for(size_t i = 1; i != WindowElements; ++i) {
418 if(i % 2 == 1) {
419 tbl[i] = tbl[i / 2].square();
420 } else {
421 tbl[i] = tbl[i - 1] * tbl[0];
422 }
423 }
424
425 auto r = Self::one();
426
427 const size_t w0 = read_window_bits<WindowBits>(std::span{exp}, (Windows - 1) * WindowBits);
428
429 if(w0 > 0) {
430 r = tbl[w0 - 1];
431 }
432
433 for(size_t i = 1; i != Windows; ++i) {
434 r.square_n(WindowBits);
435
436 const size_t w = read_window_bits<WindowBits>(std::span{exp}, (Windows - i - 1) * WindowBits);
437
438 if(w > 0) {
439 r *= tbl[w - 1];
440 }
441 }
442
443 return r;
444 }
445
446 /**
447 * Returns the modular inverse, or 0 if no modular inverse exists.
448 *
449 * If the modulus is prime the only value that has no modular inverse is 0.
450 *
451 * This uses Fermat's little theorem, and so assumes that p is prime
452 *
453 * Since P is public, P-2 is as well, thus using a variable time modular
454 * exponentiation routine is safe.
455 *
456 * This function is only used if the curve does not provide an addition
457 * chain for specific inversions (see for example pcurves_secp256r1.cpp)
458 */
459 constexpr Self invert() const { return pow_vartime(Self::P_MINUS_2); }
460
461 /**
462 * Return the modular square root if it exists
463 *
464 * The CT::Choice indicates if the square root exists or not.
465 */
466 constexpr std::pair<Self, CT::Choice> sqrt() const {
467 if constexpr(Self::P_MOD_4 == 3) {
468 // The easy case for square root is when p == 3 (mod 4)
469
470 constexpr auto P_PLUS_1_OVER_4 = p_plus_1_over_4(P);
471 auto z = pow_vartime(P_PLUS_1_OVER_4);
472 const CT::Choice correct = (z.square() == *this);
473 // Zero out the return value if it would otherwise be incorrect
474 Self::conditional_assign(z, !correct, Self::zero());
475 return {z, correct};
476 } else {
477 // Shanks-Tonelli, following I.4 in RFC 9380
478
479 /*
480 Constants:
481 1. c1, the largest integer such that 2^c1 divides q - 1.
482 2. c2 = (q - 1) / (2^c1) # Integer arithmetic
483 3. c3 = (c2 - 1) / 2 # Integer arithmetic
484 4. c4, a non-square value in F
485 5. c5 = c4^c2 in F
486 */
487 constexpr auto C1_C2 = shanks_tonelli_c1c2(Self::P);
488 constexpr std::array<W, N> C3 = shanks_tonelli_c3(C1_C2.second);
489 constexpr std::array<W, N> P_MINUS_1_OVER_2 = p_minus_1_over_2(Self::P);
490 constexpr Self C4 = shanks_tonelli_c4<Self>(P_MINUS_1_OVER_2);
491 constexpr Self C5 = C4.pow_vartime(C1_C2.second);
492
493 const Self& x = (*this);
494
495 auto z = x.pow_vartime(C3);
496 auto t = z.square();
497 t *= x;
498 z *= x;
499 auto b = t;
500 auto c = C5;
501
502 for(size_t i = C1_C2.first; i >= 2; i--) {
503 b.square_n(i - 2);
504 const CT::Choice e = b.is_one();
505 Self::conditional_assign(z, !e, z * c);
506 c.square_n(1);
507 Self::conditional_assign(t, !e, t * c);
508 b = t;
509 }
510
511 const CT::Choice correct = (z.square() == *this);
512 Self::conditional_assign(z, !correct, Self::zero());
513 return {z, correct};
514 }
515 }
516
517 /**
518 * Constant time integer equality test
519 *
520 * Since both this and other are in Montgomery representation (if applicable),
521 * we can always compare the words directly, without having to convert out.
522 */
523 constexpr CT::Choice operator==(const Self& other) const {
524 return CT::is_equal(this->data(), other.data(), N).as_choice();
525 }
526
527 /**
528 * Constant time integer inequality test
529 */
530 constexpr CT::Choice operator!=(const Self& other) const { return !(*this == other); }
531
532 /**
533 * Convert the integer to standard representation and return the sequence of words
534 */
535 constexpr std::array<W, Self::N> to_words() const { return Rep::from_rep(m_val); }
536
537 /**
538 * Serialize the integer to a bytestring
539 */
540 constexpr void serialize_to(std::span<uint8_t, Self::BYTES> bytes) const {
541 auto v = Rep::from_rep(m_val);
542 std::reverse(v.begin(), v.end());
543
544 if constexpr(Self::BYTES == N * WordInfo<W>::bytes) {
545 store_be(bytes, v);
546 } else {
547 // Remove leading zero bytes
548 const auto padded_bytes = store_be(v);
549 constexpr size_t extra = N * WordInfo<W>::bytes - Self::BYTES;
550 copy_mem(bytes, std::span{padded_bytes}.template subspan<extra, Self::BYTES>());
551 }
552 }
553
554 /**
555 * Store the raw words to an array
556 *
557 * See pcurves_wrap.h for why/where this is used
558 */
559 template <size_t L>
560 std::array<W, L> stash_value() const {
561 static_assert(L >= N);
562 std::array<W, L> stash = {};
563 for(size_t i = 0; i != N; ++i) {
564 stash[i] = m_val[i];
565 }
566 return stash;
567 }
568
569 /**
570 * Restore the value previously stashed
571 *
572 * See pcurves_wrap.h for why/where this is used
573 */
574 template <size_t L>
575 static Self from_stash(const std::array<W, L>& stash) {
576 static_assert(L >= N);
577 std::array<W, N> val = {};
578 for(size_t i = 0; i != N; ++i) {
579 val[i] = stash[i];
580 }
581 return Self(val);
582 }
583
584 /**
585 * Deserialize an integer from a bytestring
586 *
587 * Returns nullopt if the input is an encoding greater than or equal to P
588 *
589 * This function also requires that the bytestring be exactly of the expected
590 * length; short bytestrings, or a long bytestring with leading zero bytes, are
591 * also rejected.
592 */
593 static std::optional<Self> deserialize(std::span<const uint8_t> bytes) {
594 if(bytes.size() != Self::BYTES) {
595 return {};
596 }
597
598 const auto words = bytes_to_words<W, N, BYTES>(bytes.first<Self::BYTES>());
599
600 if(!bigint_ct_is_lt(words.data(), N, P.data(), N).as_bool()) {
601 return {};
602 }
603
604 // Safe because we checked above that words is an integer < P
605 return Self::from_words(words);
606 }
607
608 /**
609 * Modular reduce a larger input
610 *
611 * This takes a bytestring that is at most twice the length of the modulus, and
612 * modular reduces it.
613 */
614 template <size_t L>
615 static constexpr Self from_wide_bytes(std::span<const uint8_t, L> bytes) {
616 static_assert(8 * L <= 2 * Self::BITS);
617 std::array<uint8_t, 2 * BYTES> padded_bytes = {};
618 copy_mem(std::span{padded_bytes}.template last<L>(), bytes);
619 return Self(Rep::wide_to_rep(bytes_to_words<W, 2 * N, 2 * BYTES>(std::span{padded_bytes})));
620 }
621
622 /**
623 * Modular reduce a larger input
624 *
625 * This takes a bytestring that is at most twice the length of the modulus, and
626 * modular reduces it.
627 */
628 static constexpr std::optional<Self> from_wide_bytes_varlen(std::span<const uint8_t> bytes) {
629 if(bytes.size() > 2 * Self::BYTES) {
630 return {};
631 }
632
633 std::array<uint8_t, 2 * Self::BYTES> padded_bytes = {};
634 copy_mem(std::span{padded_bytes}.last(bytes.size()), bytes);
635 return Self(Rep::wide_to_rep(bytes_to_words<W, 2 * N, 2 * BYTES>(std::span{padded_bytes})));
636 }
637
638 /**
639 * Return a random integer value in [1,p)
640 *
641 * This uses rejection sampling. This could have alternatively been implemented
642 * by oversampling the random number generator and then performing a wide
643 * reduction. The main reason that approach is avoided here is because it makes
644 * testing ECDSA-style known answer tests more difficult.
645 *
646 * This function avoids returning zero since in almost all contexts where a
647 * random integer is desired we want a random integer in Z_p*
648 */
649 static Self random(RandomNumberGenerator& rng) {
650 constexpr size_t MAX_ATTEMPTS = 1000;
651
652 std::array<uint8_t, Self::BYTES> buf;
653
654 for(size_t i = 0; i != MAX_ATTEMPTS; ++i) {
655 rng.randomize(buf);
656
657 // Zero off high bits that if set would certainly cause us
658 // to be out of range
659 if constexpr(Self::BITS % 8 != 0) {
660 constexpr uint8_t mask = 0xFF >> (8 - (Self::BITS % 8));
661 buf[0] &= mask;
662 }
663
664 if(auto s = Self::deserialize(buf)) {
665 if(s.value().is_nonzero().as_bool()) {
666 return s.value();
667 }
668 }
669 }
670
671 throw Internal_Error("Failed to generate random Scalar within bounded number of attempts");
672 }
673
674 /**
675 * Create a small compile time constant
676 *
677 * Notice this function is consteval, and so can only be called at compile time
678 */
679 static consteval Self constant(int8_t x) {
680 std::array<W, 1> v;
681 v[0] = (x >= 0) ? x : -x;
682 auto s = Self::from_words(v);
683 return (x >= 0) ? s : s.negate();
684 }
685
686 constexpr void _const_time_poison() const { CT::poison(m_val); }
687
688 constexpr void _const_time_unpoison() const { CT::unpoison(m_val); }
689
690 private:
691 constexpr const std::array<W, N>& value() const { return m_val; }
692
693 constexpr const W* data() const { return m_val.data(); }
694
695 explicit constexpr IntMod(std::array<W, N> v) : m_val(v) {}
696
697 std::array<W, N> m_val;
698};
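Two of the operations above reduce to a single modular exponentiation: invert() raises to the power p-2 (Fermat), and the p == 3 (mod 4) branch of sqrt() raises to (p+1)/4. A standalone sketch with a toy prime, using a plain square-and-multiply modpow in place of pow_vartime, checks both facts:

    #include <cassert>
    #include <cstdint>

    namespace {
    // Toy prime with p % 4 == 3, matching the easy branch of IntMod::sqrt
    const uint64_t p = 103;

    uint64_t modpow(uint64_t b, uint64_t e) {
       uint64_t r = 1;
       b %= p;
       while(e > 0) {
          if(e & 1) {
             r = (r * b) % p;
          }
          b = (b * b) % p;
          e >>= 1;
       }
       return r;
    }
    }  // namespace

    int main() {
       for(uint64_t x = 1; x != p; ++x) {
          // Fermat: x^(p-2) is the multiplicative inverse of x
          assert((x * modpow(x, p - 2)) % p == 1);

          // For p == 3 (mod 4): z = x^((p+1)/4) squares back to x exactly when
          // x is a quadratic residue, which is what sqrt() checks for
          const uint64_t z = modpow(x, (p + 1) / 4);
          const bool is_qr = (modpow(x, (p - 1) / 2) == 1);  // Euler's criterion
          assert(((z * z) % p == x) == is_qr);
       }
       return 0;
    }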
699
700/**
701* Affine Curve Point
702*
703* This contains a pair of integers (x,y) which satisfy the curve equation
704*/
705template <typename FieldElement, typename Params>
706class AffineCurvePoint final {
707 public:
708 // We can't pass a FieldElement directly because FieldElement is
709 // not "structural" due to having private members, so instead
710 // recreate it here from the words.
711 static constexpr FieldElement A = FieldElement::from_words(Params::AW);
712 static constexpr FieldElement B = FieldElement::from_words(Params::BW);
713
714 static constexpr size_t BYTES = 1 + 2 * FieldElement::BYTES;
715 static constexpr size_t COMPRESSED_BYTES = 1 + FieldElement::BYTES;
716
717 using Self = AffineCurvePoint<FieldElement, Params>;
718
719 constexpr AffineCurvePoint(const FieldElement& x, const FieldElement& y) : m_x(x), m_y(y) {}
720
721 constexpr AffineCurvePoint() : m_x(FieldElement::zero()), m_y(FieldElement::zero()) {}
722
723 static constexpr Self identity() { return Self(FieldElement::zero(), FieldElement::zero()); }
724
725 constexpr CT::Choice is_identity() const { return x().is_zero() && y().is_zero(); }
726
727 AffineCurvePoint(const Self& other) = default;
728 AffineCurvePoint(Self&& other) = default;
729 AffineCurvePoint& operator=(const Self& other) = default;
730 AffineCurvePoint& operator=(Self&& other) = default;
731
732 constexpr Self negate() const { return Self(x(), y().negate()); }
733
734 /**
735 * Serialize the point in uncompressed format
736 */
737 constexpr void serialize_to(std::span<uint8_t, Self::BYTES> bytes) const {
738 BOTAN_STATE_CHECK(this->is_identity().as_bool() == false);
739 BufferStuffer pack(bytes);
740 pack.append(0x04);
741 x().serialize_to(pack.next<FieldElement::BYTES>());
742 y().serialize_to(pack.next<FieldElement::BYTES>());
743 BOTAN_DEBUG_ASSERT(pack.full());
744 }
745
746 /**
747 * Serialize the point in compressed format
748 */
749 constexpr void serialize_compressed_to(std::span<uint8_t, Self::COMPRESSED_BYTES> bytes) const {
750 BOTAN_STATE_CHECK(this->is_identity().as_bool() == false);
751 const uint8_t hdr = CT::Mask<uint8_t>::from_choice(y().is_even()).select(0x02, 0x03);
752
753 BufferStuffer pack(bytes);
754 pack.append(hdr);
755 x().serialize_to(pack.next<FieldElement::BYTES>());
756 BOTAN_DEBUG_ASSERT(pack.full());
757 }
758
759 /**
760 * Serialize the affine x coordinate only
761 */
762 constexpr void serialize_x_to(std::span<uint8_t, FieldElement::BYTES> bytes) const {
763 BOTAN_STATE_CHECK(this->is_identity().as_bool() == false);
764 x().serialize_to(bytes);
765 }
766
767 /**
768 * If idx is zero then return the identity element. Otherwise return pts[idx - 1]
769 *
770 * Returns the identity element also if idx is out of range
771 */
772 static constexpr auto ct_select(std::span<const Self> pts, size_t idx) {
773 auto result = Self::identity();
774
775 // Intentionally wrapping; set to maximum size_t if idx == 0
776 const size_t idx1 = static_cast<size_t>(idx - 1);
777 for(size_t i = 0; i != pts.size(); ++i) {
778 const auto found = CT::Mask<size_t>::is_equal(idx1, i).as_choice();
779 result.conditional_assign(found, pts[i]);
780 }
781
782 return result;
783 }
784
785 /**
786 * Return (x^3 + A*x + B) mod p
787 */
788 static constexpr FieldElement x3_ax_b(const FieldElement& x) { return (x.square() + Self::A) * x + Self::B; }
789
790 /**
791 * Point deserialization
792 *
793 * This accepts compressed or uncompressed formats.
794 *
795 * It also currently accepts the deprecated hybrid format.
796 * TODO(Botan4): remove support for decoding hybrid points
797 */
798 static std::optional<Self> deserialize(std::span<const uint8_t> bytes) {
799 if(bytes.size() == Self::BYTES) {
800 if(bytes[0] == 0x04) {
801 auto x = FieldElement::deserialize(bytes.subspan(1, FieldElement::BYTES));
802 auto y = FieldElement::deserialize(bytes.subspan(1 + FieldElement::BYTES, FieldElement::BYTES));
803
804 if(x && y) {
805 const auto lhs = (*y).square();
806 const auto rhs = Self::x3_ax_b(*x);
807 if((lhs == rhs).as_bool()) {
808 return Self(*x, *y);
809 }
810 }
811 } else if(bytes[0] == 0x06 || bytes[0] == 0x07) {
812 // Deprecated "hybrid" encoding
813 const CT::Choice y_is_even = CT::Mask<uint8_t>::is_equal(bytes[0], 0x06).as_choice();
814 auto x = FieldElement::deserialize(bytes.subspan(1, FieldElement::BYTES));
815 auto y = FieldElement::deserialize(bytes.subspan(1 + FieldElement::BYTES, FieldElement::BYTES));
816
817 if(x && y && (y_is_even == y->is_even()).as_bool()) {
818 const auto lhs = (*y).square();
819 const auto rhs = Self::x3_ax_b(*x);
820 if((lhs == rhs).as_bool()) {
821 return Self(*x, *y);
822 }
823 }
824 }
825 } else if(bytes.size() == Self::COMPRESSED_BYTES) {
826 if(bytes[0] == 0x02 || bytes[0] == 0x03) {
827 const CT::Choice y_is_even = CT::Mask<uint8_t>::is_equal(bytes[0], 0x02).as_choice();
828
829 if(auto x = FieldElement::deserialize(bytes.subspan(1, FieldElement::BYTES))) {
830 auto [y, is_square] = x3_ax_b(*x).sqrt();
831
832 if(is_square.as_bool()) {
833 const auto flip_y = y_is_even != y.is_even();
834 FieldElement::conditional_assign(y, flip_y, y.negate());
835 return Self(*x, y);
836 }
837 }
838 }
839 } else if(bytes.size() == 1 && bytes[0] == 0x00) {
840 // See SEC1 section 2.3.4
841 return Self::identity();
842 }
843
844 return {};
845 }
846
847 /**
848 * Return the affine x coordinate
849 */
850 constexpr const FieldElement& x() const { return m_x; }
851
852 /**
853 * Return the affine y coordinate
854 */
855 constexpr const FieldElement& y() const { return m_y; }
856
857 /**
858 * Conditional assignment of an affine point
859 */
860 constexpr void conditional_assign(CT::Choice cond, const Self& pt) {
861 FieldElement::conditional_assign(m_x, m_y, cond, pt.x(), pt.y());
862 }
863
864 constexpr void _const_time_poison() const { CT::poison_all(m_x, m_y); }
865
866 constexpr void _const_time_unpoison() const { CT::unpoison_all(m_x, m_y); }
867
868 private:
869 FieldElement m_x;
870 FieldElement m_y;
871};
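For reference, the byte formats produced and accepted above are the SEC1 point encodings. The helpers below are illustrative only (FE_BYTES and the encode_* functions are stand-ins, not library API); they show just the header-byte and length conventions:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    constexpr size_t FE_BYTES = 32;  // stand-in for FieldElement::BYTES

    // Uncompressed: 0x04 || x || y    (1 + 2*FE_BYTES bytes)
    std::vector<uint8_t> encode_uncompressed(const std::vector<uint8_t>& x, const std::vector<uint8_t>& y) {
       std::vector<uint8_t> out{0x04};
       out.insert(out.end(), x.begin(), x.end());
       out.insert(out.end(), y.begin(), y.end());
       return out;
    }

    // Compressed: (0x02 if y is even, else 0x03) || x    (1 + FE_BYTES bytes)
    std::vector<uint8_t> encode_compressed(const std::vector<uint8_t>& x, bool y_is_even) {
       std::vector<uint8_t> out{static_cast<uint8_t>(y_is_even ? 0x02 : 0x03)};
       out.insert(out.end(), x.begin(), x.end());
       return out;
    }

    // Deserialization additionally accepts the deprecated hybrid form
    // (0x06 or 0x07) || x || y, and the single byte 0x00 for the identity.

    int main() {
       const std::vector<uint8_t> x(FE_BYTES, 0x11), y(FE_BYTES, 0x22);  // dummy coordinates
       assert(encode_uncompressed(x, y).size() == 1 + 2 * FE_BYTES);
       assert(encode_compressed(x, true).size() == 1 + FE_BYTES);
       assert(encode_compressed(x, true)[0] == 0x02);
       assert(encode_compressed(x, false)[0] == 0x03);
       return 0;
    }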
872
873/**
874* Projective curve point
875*
876* This uses Jacobian coordinates
877*/
878template <typename FieldElement, typename Params>
879class ProjectiveCurvePoint {
880 public:
881 // We can't pass a FieldElement directly because FieldElement is
882 // not "structural" due to having private members, so instead
883 // recreate it here from the words.
884 static constexpr FieldElement A = FieldElement::from_words(Params::AW);
885
886 static constexpr bool A_is_zero = A.is_zero().as_bool();
887 static constexpr bool A_is_minus_3 = (A == FieldElement::constant(-3)).as_bool();
888
889 using Self = ProjectiveCurvePoint<FieldElement, Params>;
890 using AffinePoint = AffineCurvePoint<FieldElement, Params>;
891
892 /**
893 * Convert a point from affine to projective form
894 */
895 static constexpr Self from_affine(const AffinePoint& pt) {
896 if(pt.is_identity().as_bool()) {
897 return Self::identity();
898 } else {
899 return ProjectiveCurvePoint(pt.x(), pt.y());
900 }
901 }
902
903 /**
904 * Return the identity element
905 */
906 static constexpr Self identity() { return Self(FieldElement::zero(), FieldElement::one(), FieldElement::zero()); }
907
908 /**
909 * Default constructor: the identity element
910 */
911 constexpr ProjectiveCurvePoint() :
912 m_x(FieldElement::zero()), m_y(FieldElement::one()), m_z(FieldElement::zero()) {}
913
914 /**
915 * Affine constructor: take x/y coordinates
916 */
917 constexpr ProjectiveCurvePoint(const FieldElement& x, const FieldElement& y) :
918 m_x(x), m_y(y), m_z(FieldElement::one()) {}
919
920 /**
921 * Projective constructor: take x/y/z coordinates
922 */
923 constexpr ProjectiveCurvePoint(const FieldElement& x, const FieldElement& y, const FieldElement& z) :
924 m_x(x), m_y(y), m_z(z) {}
925
926 ProjectiveCurvePoint(const Self& other) = default;
927 ProjectiveCurvePoint(Self&& other) = default;
928 ProjectiveCurvePoint& operator=(const Self& other) = default;
929 ProjectiveCurvePoint& operator=(Self&& other) = default;
930
931 friend constexpr Self operator+(const Self& a, const Self& b) { return Self::add(a, b); }
932
933 friend constexpr Self operator+(const Self& a, const AffinePoint& b) { return Self::add_mixed(a, b); }
934
935 friend constexpr Self operator+(const AffinePoint& a, const Self& b) { return Self::add_mixed(b, a); }
936
937 constexpr Self& operator+=(const Self& other) {
938 (*this) = (*this) + other;
939 return (*this);
940 }
941
942 constexpr Self& operator+=(const AffinePoint& other) {
943 (*this) = (*this) + other;
944 return (*this);
945 }
946
947 friend constexpr Self operator-(const Self& a, const Self& b) { return a + b.negate(); }
948
949 constexpr CT::Choice is_identity() const { return z().is_zero(); }
950
951 constexpr void conditional_assign(CT::Choice cond, const Self& pt) {
952 FieldElement::conditional_assign(m_x, m_y, m_z, cond, pt.x(), pt.y(), pt.z());
953 }
954
955 /**
956 * Mixed (projective + affine) point addition
957 */
958 constexpr static Self add_mixed(const Self& a, const AffinePoint& b) {
959 const auto a_is_identity = a.is_identity();
960 const auto b_is_identity = b.is_identity();
961 if((a_is_identity && b_is_identity).as_bool()) {
962 return Self::identity();
963 }
964
965 /*
966 https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
967
968 Cost: 8M + 3S + 6add + 1*2
969 */
970
971 const auto Z1Z1 = a.z().square();
972 const auto U2 = b.x() * Z1Z1;
973 const auto S2 = b.y() * a.z() * Z1Z1;
974 const auto H = U2 - a.x();
975 const auto r = S2 - a.y();
976
977 // If r == H == 0 then we are in the doubling case
978 // For a == -b we compute the correct result because
979 // H will be zero, leading to Z3 being zero also
980 if((r.is_zero() && H.is_zero()).as_bool()) {
981 return a.dbl();
982 }
983
984 const auto HH = H.square();
985 const auto HHH = H * HH;
986 const auto V = a.x() * HH;
987 const auto t2 = r.square();
988 const auto t3 = V + V;
989 const auto t4 = t2 - HHH;
990 auto X3 = t4 - t3;
991 const auto t5 = V - X3;
992 const auto t6 = a.y() * HHH;
993 const auto t7 = r * t5;
994 auto Y3 = t7 - t6;
995 auto Z3 = a.z() * H;
996
997 // if a is identity then return b
998 FieldElement::conditional_assign(X3, Y3, Z3, a_is_identity, b.x(), b.y(), FieldElement::one());
999
1000 // if b is identity then return a
1001 FieldElement::conditional_assign(X3, Y3, Z3, b_is_identity, a.x(), a.y(), a.z());
1002
1003 return Self(X3, Y3, Z3);
1004 }
1005
1006 /**
1007 * Projective point addition
1008 */
1009 constexpr static Self add(const Self& a, const Self& b) {
1010 const auto a_is_identity = a.is_identity();
1011 const auto b_is_identity = b.is_identity();
1012
1013 if((a_is_identity && b_is_identity).as_bool()) {
1014 return Self::identity();
1015 }
1016
1017 /*
1018 https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-1998-cmo-2
1019
1020 Cost: 12M + 4S + 6add + 1*2
1021 */
1022
1023 const auto Z1Z1 = a.z().square();
1024 const auto Z2Z2 = b.z().square();
1025 const auto U1 = a.x() * Z2Z2;
1026 const auto U2 = b.x() * Z1Z1;
1027 const auto S1 = a.y() * b.z() * Z2Z2;
1028 const auto S2 = b.y() * a.z() * Z1Z1;
1029 const auto H = U2 - U1;
1030 const auto r = S2 - S1;
1031
1032 // If a == -b then H == 0 && r != 0, in which case
1033 // at the end we'll set z = a.z * b.z * H = 0, resulting
1034 // in the correct output (point at infinity)
1035 if((r.is_zero() && H.is_zero()).as_bool()) {
1036 return a.dbl();
1037 }
1038
1039 const auto HH = H.square();
1040 const auto HHH = H * HH;
1041 const auto V = U1 * HH;
1042 const auto t2 = r.square();
1043 const auto t3 = V + V;
1044 const auto t4 = t2 - HHH;
1045 auto X3 = t4 - t3;
1046 const auto t5 = V - X3;
1047 const auto t6 = S1 * HHH;
1048 const auto t7 = r * t5;
1049 auto Y3 = t7 - t6;
1050 const auto t8 = b.z() * H;
1051 auto Z3 = a.z() * t8;
1052
1053 // if a is identity then return b
1054 FieldElement::conditional_assign(X3, Y3, Z3, a_is_identity, b.x(), b.y(), b.z());
1055
1056 // if b is identity then return a
1057 FieldElement::conditional_assign(X3, Y3, Z3, b_is_identity, a.x(), a.y(), a.z());
1058
1059 return Self(X3, Y3, Z3);
1060 }
1061
1062 /**
1063 * Iterated point doubling
1064 */
1065 constexpr Self dbl_n(size_t n) const {
1066 /*
1067 Repeated doubling using an adaptation of Algorithm 3.23 in
1068 "Guide To Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone)
1069
1070 Curiously the book gives the algorithm only for A == -3, but
1071 the largest gains come from applying it to the generic A case,
1072 where it saves 2 squarings per iteration.
1073
1074 For A == 0
1075 Pay 1*2 + 1half to save n*(1*4 + 1*8)
1076
1077 For A == -3:
1078 Pay 2S + 1*2 + 1half to save n*(1A + 1*4 + 1*8) + 1M
1079
1080 For generic A:
1081 Pay 2S + 1*2 + 1half to save n*(2S + 1*4 + 1*8)
1082 */
1083
1084 if constexpr(Self::A_is_zero) {
1085 auto nx = x();
1086 auto ny = y().mul2();
1087 auto nz = z();
1088
1089 while(n > 0) {
1090 const auto ny2 = ny.square();
1091 const auto ny4 = ny2.square();
1092 const auto t1 = nx.square().mul3();
1093 const auto t2 = nx * ny2;
1094 nx = t1.square() - t2.mul2();
1095 nz *= ny;
1096 ny = t1 * (t2 - nx).mul2() - ny4;
1097 n--;
1098 }
1099 return Self(nx, ny.div2(), nz);
1100 } else {
1101 auto nx = x();
1102 auto ny = y().mul2();
1103 auto nz = z();
1104 auto w = nz.square().square();
1105
1106 if constexpr(!Self::A_is_minus_3) {
1107 w *= A;
1108 }
1109
1110 while(n > 0) {
1111 const auto ny2 = ny.square();
1112 const auto ny4 = ny2.square();
1113 FieldElement t1;
1114 if constexpr(Self::A_is_minus_3) {
1115 t1 = (nx.square() - w).mul3();
1116 } else {
1117 t1 = nx.square().mul3() + w;
1118 }
1119 const auto t2 = nx * ny2;
1120 nx = t1.square() - t2.mul2();
1121 nz *= ny;
1122 ny = t1 * (t2 - nx).mul2() - ny4;
1123 n--;
1124 if(n > 0) {
1125 w *= ny4;
1126 }
1127 }
1128 return Self(nx, ny.div2(), nz);
1129 }
1130 }
1131
1132 /**
1133 * Point doubling
1134 */
1135 constexpr Self dbl() const {
1136 /*
1137 Using https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-1998-cmo-2
1138
1139 Cost (generic A): 4M + 6S + 4A + 2*2 + 1*3 + 1*4 + 1*8
1140 Cost (A == -3): 4M + 4S + 5A + 2*2 + 1*3 + 1*4 + 1*8
1141 Cost (A == 0): 3M + 4S + 3A + 2*2 + 1*3 + 1*4 + 1*8
1142 */
1143
1144 FieldElement m = FieldElement::zero();
1145
1146 if constexpr(Self::A_is_minus_3) {
1147 /*
1148 if a == -3 then
1149 3*x^2 + a*z^4 == 3*x^2 - 3*z^4 == 3*(x^2-z^4) == 3*(x-z^2)*(x+z^2)
1150
1151 Cost: 1M + 1S + 2A + 1*3
1152 */
1153 const auto z2 = z().square();
1154 m = (x() - z2).mul3() * (x() + z2);
1155 } else if constexpr(Self::A_is_zero) {
1156 // If a == 0 then 3*x^2 + a*z^4 == 3*x^2
1157 // Cost: 1S + 1*3
1158 m = x().square().mul3();
1159 } else {
1160 // Cost: 1M + 3S + 1A + 1*3
1161 const auto z2 = z().square();
1162 m = x().square().mul3() + A * z2.square();
1163 }
1164
1165 // Remaining cost: 3M + 3S + 3A + 2*2 + 1*4 + 1*8
1166 const auto y2 = y().square();
1167 const auto s = x().mul4() * y2;
1168 const auto nx = m.square() - s.mul2();
1169 const auto ny = m * (s - nx) - y2.square().mul8();
1170 const auto nz = y().mul2() * z();
1171
1172 return Self(nx, ny, nz);
1173 }
1174
1175 /**
1176 * Point negation
1177 */
1178 constexpr Self negate() const { return Self(x(), y().negate(), z()); }
1179
1180 /**
1181 * Randomize the point representation
1182 *
1183 * Projective coordinates are redundant; if (x,y,z) is a projective
1184 * point then so is (x*r^2,y*r^3,z*r) for any non-zero r.
1185 */
1186 void randomize_rep(RandomNumberGenerator& rng) {
1187 // In certain contexts we may be called with a Null_RNG; in that case the
1188 // caller is accepting that randomization will not occur
1189
1190 if(rng.is_seeded()) {
1191 auto r = FieldElement::random(rng);
1192
1193 auto r2 = r.square();
1194 auto r3 = r2 * r;
1195
1196 m_x *= r2;
1197 m_y *= r3;
1198 m_z *= r;
1199 }
1200 }
1201
1202 /**
1203 * Return the projective x coordinate
1204 */
1205 constexpr const FieldElement& x() const { return m_x; }
1206
1207 /**
1208 * Return the projective y coordinate
1209 */
1210 constexpr const FieldElement& y() const { return m_y; }
1211
1212 /**
1213 * Return the projective z coordinate
1214 */
1215 constexpr const FieldElement& z() const { return m_z; }
1216
1217 constexpr void _const_time_poison() const { CT::poison_all(m_x, m_y, m_z); }
1218
1219 constexpr void _const_time_unpoison() const { CT::unpoison_all(m_x, m_y, m_z); }
1220
1221 private:
1222 FieldElement m_x;
1223 FieldElement m_y;
1224 FieldElement m_z;
1225};
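The justification for randomize_rep is the Jacobian convention used throughout this file: the affine point corresponding to (X, Y, Z) is (X/Z^2, Y/Z^3), so scaling to (X*r^2, Y*r^3, Z*r) changes the representation but not the point. A standalone numeric check over a toy prime field (the coordinates are arbitrary; the identity does not depend on the curve equation):

    #include <cassert>
    #include <cstdint>

    namespace {
    const uint64_t p = 97;  // toy prime field

    uint64_t modpow(uint64_t b, uint64_t e) {
       uint64_t r = 1;
       b %= p;
       while(e > 0) {
          if(e & 1) {
             r = (r * b) % p;
          }
          b = (b * b) % p;
          e >>= 1;
       }
       return r;
    }

    uint64_t inv(uint64_t x) { return modpow(x, p - 2); }  // Fermat inversion
    }  // namespace

    int main() {
       const uint64_t X = 5, Y = 7, Z = 2;
       const uint64_t ax = X * inv(Z * Z % p) % p;          // X / Z^2
       const uint64_t ay = Y * inv(Z * Z % p * Z % p) % p;  // Y / Z^3

       for(uint64_t r = 1; r != p; ++r) {
          const uint64_t Xr = X * r % p * r % p;          // X * r^2
          const uint64_t Yr = Y * r % p * r % p * r % p;  // Y * r^3
          const uint64_t Zr = Z * r % p;                  // Z * r
          assert(Xr * inv(Zr * Zr % p) % p == ax);
          assert(Yr * inv(Zr * Zr % p * Zr % p) % p == ay);
       }
       return 0;
    }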
1226
1227/**
1228* Elliptic Curve Parameters
1229*
1230* These are constructed using compile time strings which contain the relevant values
1231* (P, A, B, the group order, and the group generator x/y coordinates)
1232*/
1233template <StringLiteral PS,
1234 StringLiteral AS,
1235 StringLiteral BS,
1236 StringLiteral NS,
1237 StringLiteral GXS,
1238 StringLiteral GYS,
1239 int8_t ZI = 0>
1240class EllipticCurveParameters {
1241 public:
1242 typedef word W;
1243
1244 static constexpr auto PW = hex_to_words<W>(PS.value);
1245 static constexpr auto NW = hex_to_words<W>(NS.value);
1246 static constexpr auto AW = hex_to_words<W>(AS.value);
1247 static constexpr auto BW = hex_to_words<W>(BS.value);
1248 static constexpr auto GXW = hex_to_words<W>(GXS.value);
1249 static constexpr auto GYW = hex_to_words<W>(GYS.value);
1250
1251 static constexpr int8_t Z = ZI;
1252};
1253
1254/**
1255* This exists solely as a hack which somewhat reduces symbol lengths
1256*/
1257template <WordType WI, size_t NI, std::array<WI, NI> PI>
1258struct IntParams {
1259 public:
1260 typedef WI W;
1261 static constexpr size_t N = NI;
1262 static constexpr auto P = PI;
1263};
1264
1265/**
1266* Elliptic Curve
1267*
1268* Takes as input a set of parameters, and instantiates the elliptic curve
1269*/
1270template <typename Params, template <typename FieldParamsT> typename FieldRep = MontgomeryRep>
1271class EllipticCurve {
1272 public:
1273 typedef typename Params::W W;
1274
1275 static constexpr auto PW = Params::PW;
1276 static constexpr auto NW = Params::NW;
1277 static constexpr auto AW = Params::AW;
1278
1279 // Simplifying assumption
1280 static_assert(PW.size() == NW.size());
1281
1282 class ScalarParams final : public IntParams<W, NW.size(), NW> {};
1283
1284 using Scalar = IntMod<MontgomeryRep<ScalarParams>>;
1285
1286 class FieldParams final : public IntParams<W, PW.size(), PW> {};
1287
1288 using FieldElement = IntMod<FieldRep<FieldParams>>;
1289
1290 using AffinePoint = AffineCurvePoint<FieldElement, Params>;
1291 using ProjectivePoint = ProjectiveCurvePoint<FieldElement, Params>;
1292
1293 static constexpr size_t OrderBits = Scalar::BITS;
1294 static constexpr size_t PrimeFieldBits = FieldElement::BITS;
1295
1296 static constexpr FieldElement A = FieldElement::from_words(Params::AW);
1297 static constexpr FieldElement B = FieldElement::from_words(Params::BW);
1298
1299 static_assert(B.is_nonzero().as_bool(), "B must be non-zero");
1300
1301 static constexpr AffinePoint G =
1302 AffinePoint(FieldElement::from_words(Params::GXW), FieldElement::from_words(Params::GYW));
1303
1304 static constexpr FieldElement SSWU_Z = FieldElement::constant(Params::Z);
1305
1306 static constexpr bool ValidForSswuHash =
1307 (Params::Z != 0 && A.is_nonzero().as_bool() && B.is_nonzero().as_bool() && FieldElement::P_MOD_4 == 3);
1308
1309 static constexpr bool OrderIsLessThanField = bigint_cmp(NW.data(), NW.size(), PW.data(), PW.size()) == -1;
1310};
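Downstream code touches a curve only through these nested types. As a rough illustration (a hypothetical helper, not part of this header), a fixed-base multiplication of the standard generator could be assembled from the pieces defined later in this file like so:

    // Hypothetical sketch; C is any instantiation of EllipticCurve<...>.
    // PrecomputedBaseMulTable and to_affine are defined further below, and the
    // window width 4 is an arbitrary choice for the example.
    template <typename C>
    typename C::AffinePoint mul_by_g_example(const typename C::Scalar& s, RandomNumberGenerator& rng) {
       // In real use the table would be computed once and cached
       const PrecomputedBaseMulTable<C, 4> table(C::G);
       return to_affine<C>(table.mul(s, rng));
    }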
1311
1312/**
1313* Field inversion concept
1314*
1315* This concept checks if the FieldElement supports fe_invert2
1316*/
1317template <typename C>
1318concept curve_supports_fe_invert2 = requires(const typename C::FieldElement& fe) {
1319 { C::fe_invert2(fe) } -> std::same_as<typename C::FieldElement>;
1320};
1321
1322/**
1323* Field inversion
1324*
1325* Uses the specialized fe_invert2 (which computes x^-2; multiplying by x then
1326* yields x^-1) if available, or otherwise the standard (FLT-based) inversion.
1327*/
1328template <typename C>
1329inline constexpr auto invert_field_element(const typename C::FieldElement& fe) {
1330 if constexpr(curve_supports_fe_invert2<C>) {
1331 return C::fe_invert2(fe) * fe;
1332 } else {
1333 return fe.invert();
1334 }
1335}
1336
1337/**
1338* Convert a projective point into affine
1339*/
1340template <typename C>
1341auto to_affine(const typename C::ProjectivePoint& pt) {
1342 // Not strictly required right? - default should work as long
1343 // as (0,0) is identity and invert returns 0 on 0
1344 if(pt.is_identity().as_bool()) {
1345 return C::AffinePoint::identity();
1346 }
1347
1348 if constexpr(curve_supports_fe_invert2<C>) {
1349 const auto z2_inv = C::fe_invert2(pt.z());
1350 const auto z3_inv = z2_inv.square() * pt.z();
1351 return typename C::AffinePoint(pt.x() * z2_inv, pt.y() * z3_inv);
1352 } else {
1353 const auto z_inv = invert_field_element<C>(pt.z());
1354 const auto z2_inv = z_inv.square();
1355 const auto z3_inv = z_inv * z2_inv;
1356 return typename C::AffinePoint(pt.x() * z2_inv, pt.y() * z3_inv);
1357 }
1358}
1359
1360/**
1361* Convert a projective point into affine and return x coordinate only
1362*/
1363template <typename C>
1364auto to_affine_x(const typename C::ProjectivePoint& pt) {
1365 if constexpr(curve_supports_fe_invert2<C>) {
1366 return pt.x() * C::fe_invert2(pt.z());
1367 } else {
1368 const auto z_inv = invert_field_element<C>(pt.z());
1369 const auto z2_inv = z_inv.square();
1370 return pt.x() * z2_inv;
1371 }
1372}
1373
1374/**
1375* Batch projective->affine conversion
1376*/
1377template <typename C>
1378auto to_affine_batch(std::span<const typename C::ProjectivePoint> projective) {
1379 typedef typename C::AffinePoint AffinePoint;
1380 typedef typename C::FieldElement FieldElement;
1381
1382 const size_t N = projective.size();
1383 std::vector<AffinePoint> affine(N, AffinePoint::identity());
1384
1385 CT::Choice any_identity = CT::Choice::no();
1386
1387 for(const auto& pt : projective) {
1388 any_identity = any_identity || pt.is_identity();
1389 }
1390
1391 if(N <= 2 || any_identity.as_bool()) {
1392 // If there are identity elements, using the batch inversion gets
1393 // tricky. It can be done, but this should be a rare situation so
1394 // just punt to the serial conversion if it occurs
1395 for(size_t i = 0; i != N; ++i) {
1396 affine[i] = to_affine<C>(projective[i]);
1397 }
1398 } else {
1399 std::vector<FieldElement> c(N);
1400
1401 /*
1402 Batch projective->affine using Montgomery's trick
1403
1404 See Algorithm 2.26 in "Guide to Elliptic Curve Cryptography"
1405 (Hankerson, Menezes, Vanstone)
1406 */
1407
1408 c[0] = projective[0].z();
1409 for(size_t i = 1; i != N; ++i) {
1410 c[i] = c[i - 1] * projective[i].z();
1411 }
1412
1413 auto s_inv = invert_field_element<C>(c[N - 1]);
1414
1415 for(size_t i = N - 1; i > 0; --i) {
1416 const auto& p = projective[i];
1417
1418 const auto z_inv = s_inv * c[i - 1];
1419 const auto z2_inv = z_inv.square();
1420 const auto z3_inv = z_inv * z2_inv;
1421
1422 s_inv = s_inv * p.z();
1423
1424 affine[i] = AffinePoint(p.x() * z2_inv, p.y() * z3_inv);
1425 }
1426
1427 const auto z2_inv = s_inv.square();
1428 const auto z3_inv = s_inv * z2_inv;
1429 affine[0] = AffinePoint(projective[0].x() * z2_inv, projective[0].y() * z3_inv);
1430 }
1431
1432 return affine;
1433}
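The else branch above is Montgomery's simultaneous-inversion trick: one field inversion plus a few multiplications per element, instead of one inversion per element. The trick itself can be shown in isolation over a toy prime field (standalone sketch, Fermat exponentiation standing in for invert_field_element):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    namespace {
    const uint64_t p = 1009;  // toy prime

    uint64_t modpow(uint64_t b, uint64_t e) {
       uint64_t r = 1;
       b %= p;
       while(e > 0) {
          if(e & 1) {
             r = (r * b) % p;
          }
          b = (b * b) % p;
          e >>= 1;
       }
       return r;
    }
    }  // namespace

    int main() {
       const std::vector<uint64_t> z = {3, 17, 500, 998, 42};  // nonzero elements to invert
       const size_t N = z.size();

       // Prefix products c[i] = z[0] * z[1] * ... * z[i]
       std::vector<uint64_t> c(N);
       c[0] = z[0];
       for(size_t i = 1; i != N; ++i) {
          c[i] = c[i - 1] * z[i] % p;
       }

       // A single inversion of the full product ...
       uint64_t s_inv = modpow(c[N - 1], p - 2);

       // ... then peel off one element at a time, back to front
       std::vector<uint64_t> z_inv(N);
       for(size_t i = N - 1; i > 0; --i) {
          z_inv[i] = s_inv * c[i - 1] % p;  // equals 1 / z[i]
          s_inv = s_inv * z[i] % p;         // now equals 1 / c[i-1]
       }
       z_inv[0] = s_inv;

       for(size_t i = 0; i != N; ++i) {
          assert(z[i] * z_inv[i] % p == 1);
       }
       return 0;
    }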
1434
1435/**
1436* Blinded Scalar
1437*
1438* This randomizes the scalar representation by computing s + n*k,
1439* where n is the group order and k is a random value
1440*
1441* Note that the field arithmetic and point multiplication algorithms
1442* implemented in this file are already constant time; blinding is used here as
1443* an additional precaution to guard against compilers introducing conditional
1444* jumps where not expected.
1445*
1446* If you would like a "go faster" button, change the BlindingEnabled variable
1447* below to false.
1448*/
1449template <typename C, size_t WindowBits>
1450class BlindedScalarBits final {
1451 private:
1452 typedef typename C::W W;
1453
1454 static constexpr bool BlindingEnabled = true;
1455
1456 // Decide size of scalar blinding factor based on bitlength of the scalar
1457 //
1458 // This can return any value between 0 and the scalar bit length, as long
1459 // as it is a multiple of the word size.
1460 static constexpr size_t blinding_bits(size_t sb) {
1461 constexpr size_t wb = WordInfo<W>::bits;
1462
1463 static_assert(wb == 32 || wb == 64, "Unexpected W size");
1464
1465 if(sb == 521) {
1466 /*
1467 Treat P-521 as if it was a 512 bit field; otherwise it is penalized
1468 by the below computation, using either 160 or 192 bits of blinding
1469 (depending on wb), vs 128 bits used for 512 bit groups.
1470 */
1471 return blinding_bits(512);
1472 } else {
1473 // For blinding use 1/4 the order, rounded up to the next word
1474 return ((sb / 4 + wb - 1) / wb) * wb;
1475 }
1476 }
1477
1478 static constexpr size_t BlindingBits = blinding_bits(C::OrderBits);
1479
1480 static_assert(BlindingBits % WordInfo<W>::bits == 0);
1481 static_assert(BlindingBits < C::Scalar::BITS);
1482
1483 public:
1484 static constexpr size_t Bits = C::Scalar::BITS + (BlindingEnabled ? BlindingBits : 0);
1485 static constexpr size_t Bytes = (Bits + 7) / 8;
1486
1487 BlindedScalarBits(const typename C::Scalar& scalar, RandomNumberGenerator& rng) {
1488 if constexpr(BlindingEnabled) {
1489 constexpr size_t mask_words = BlindingBits / WordInfo<W>::bits;
1490 constexpr size_t mask_bytes = mask_words * WordInfo<W>::bytes;
1491
1492 constexpr size_t n_words = C::NW.size();
1493
1494 uint8_t maskb[mask_bytes] = {0};
1495 if(rng.is_seeded()) {
1496 rng.randomize(maskb, mask_bytes);
1497 } else {
1498 // If we don't have an RNG we don't have many good options. We
1499 // could just omit the blinding entirely, but this changes the
1500 // size of the blinded scalar, which we otherwise expect to be
1501 // knowable at compile time. So generate a mask by XORing the
1502 // bytes of the scalar together. At worst, it's equivalent to
1503 // omitting the blinding entirely.
1504
1505 std::array<uint8_t, C::Scalar::BYTES> sbytes;
1506 scalar.serialize_to(sbytes);
1507 for(size_t i = 0; i != sbytes.size(); ++i) {
1508 maskb[i % mask_bytes] ^= sbytes[i];
1509 }
1510 }
1511
1512 W mask[n_words] = {0};
1513 load_le(mask, maskb, mask_words);
1514 mask[mask_words - 1] |= WordInfo<W>::top_bit;
1515 mask[0] |= 1;
1516
1517 W mask_n[2 * n_words] = {0};
1518
1519 const auto sw = scalar.to_words();
1520
1521 // Compute masked scalar s + k*n
1522 comba_mul<n_words>(mask_n, mask, C::NW.data());
1523 bigint_add2_nc(mask_n, 2 * n_words, sw.data(), sw.size());
1524
1525 std::reverse(mask_n, mask_n + 2 * n_words);
1526 m_bytes = store_be<std::vector<uint8_t>>(mask_n);
1527 } else {
1528 static_assert(Bytes == C::Scalar::BYTES);
1529 m_bytes.resize(Bytes);
1530 scalar.serialize_to(std::span{m_bytes}.template first<Bytes>());
1531 }
1532
1533 CT::poison(m_bytes.data(), m_bytes.size());
1534 }
1535
1536 size_t get_window(size_t offset) const {
1537 // Extract a WindowBits sized window out of s, depending on offset.
1538 return read_window_bits<WindowBits>(std::span{m_bytes}, offset);
1539 }
1540
1541 ~BlindedScalarBits() {
1542 secure_scrub_memory(m_bytes.data(), m_bytes.size());
1543 CT::unpoison(m_bytes.data(), m_bytes.size());
1544 }
1545
1546 private:
1547 // TODO this could be a fixed size array
1548 std::vector<uint8_t> m_bytes;
1549};
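The reason the masked scalar can be used directly is that the extra k*n term is a multiple of the group order, so (s + k*n)*P = s*P for every point P; only the bit pattern fed to the window loop changes from call to call (with the top and bottom bits of k forced to 1, as above). A toy integer illustration, with a stand-in order n replacing the curve group:

    #include <cassert>
    #include <cstdint>

    int main() {
       const uint64_t n = 1000003;  // stand-in for the group order
       const uint64_t s = 123456;   // the scalar being blinded

       for(uint64_t k = 1; k != 50; ++k) {
          const uint64_t blinded = s + k * n;
          // In any group of order n, multiplication by `blinded` and by `s`
          // give the same result, because blinded == s (mod n). The real code
          // simply walks the bits of `blinded` instead of the bits of `s`.
          assert(blinded % n == s);
       }
       return 0;
    }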
1550
1551template <typename C, size_t WindowBits>
1552class UnblindedScalarBits final {
1553 public:
1554 static constexpr size_t Bits = C::Scalar::BITS;
1555
1556 UnblindedScalarBits(const typename C::Scalar& scalar) { scalar.serialize_to(std::span{m_bytes}); }
1557
1558 size_t get_window(size_t offset) const {
1559 // Extract a WindowBits sized window out of s, depending on offset.
1560 return read_window_bits<WindowBits>(std::span{m_bytes}, offset);
1561 }
1562
1563 private:
1564 std::array<uint8_t, C::Scalar::BYTES> m_bytes;
1565};
1566
1567/**
1568* Base point precomputation table
1569*
1570* This algorithm works by precomputing a set of points such that
1571* the online phase of the point multiplication can be effected by
1572* a sequence of point additions.
1573*
1574* The tables, even for W = 1, are large and costly to precompute, so
1575* this is only used for the base point.
1576*
1577* The online phase of the algorithm uses `ceil(SB/W)` additions,
1578* and no point doublings. The table is of size
1579* `ceil(SB/W) * ((1 << W) - 1)`
1580* where SB is the bit length of the (blinded) scalar.
1581*
1582* Each window of the scalar is associated with a window in the table.
1583* The table windows are unique to that offset within the scalar.
1584*
1585* The simplest version to understand is when W = 1. There the table
1586* consists of [P, 2*P, 4*P, ..., 2^N*P] where N is the bit length of
1587* the group order. The online phase consists of conditionally adding
1588* table[i] depending on if bit i of the scalar is set or not.
1589*
1590* When W = 2, the scalar is examined 2 bits at a time, and the table
1591* for the window starting at bit `I` is [(2^I)*P, (2^(I+1))*P, (2^I+2^(I+1))*P].
1592*
1593* This extends similarly for larger W
1594*
1595* At a certain point, the side channel silent table lookup becomes the
1596* dominating cost
1597*
1598* For all W, each window in the table has an implicit entry for
1599* the identity element, which is used if the scalar bits were all zero.
1600* This is omitted to save space; AffinePoint::ct_select is designed
1601* to assist in this by returning the identity element if its index
1602* argument is zero, or otherwise it returns table[idx - 1]
1603*/
1604template <typename C, size_t W>
1605class PrecomputedBaseMulTable final {
1606 public:
1607 typedef typename C::Scalar Scalar;
1608 typedef typename C::AffinePoint AffinePoint;
1609 typedef typename C::ProjectivePoint ProjectivePoint;
1610
1611 static constexpr size_t WindowBits = W;
1612 static_assert(WindowBits >= 1 && WindowBits <= 8);
1613
1614 using BlindedScalar = BlindedScalarBits<C, WindowBits>;
1615
1616 static constexpr size_t Windows = (BlindedScalar::Bits + WindowBits - 1) / WindowBits;
1617
1618 static_assert(Windows > 1);
1619
1620 // 2^W elements, less the identity element
1621 static constexpr size_t WindowElements = (1 << WindowBits) - 1;
1622
1623 static constexpr size_t TableSize = Windows * WindowElements;
1624
1625 PrecomputedBaseMulTable(const AffinePoint& p) : m_table{} {
1626 std::vector<ProjectivePoint> table;
1627 table.reserve(TableSize);
1628
1629 auto accum = ProjectivePoint::from_affine(p);
1630
1631 for(size_t i = 0; i != TableSize; i += WindowElements) {
1632 table.push_back(accum);
1633
1634 for(size_t j = 1; j != WindowElements; ++j) {
1635 if(j % 2 == 1) {
1636 table.emplace_back(table[i + j / 2].dbl());
1637 } else {
1638 table.emplace_back(table[i + j - 1] + table[i]);
1639 }
1640 }
1641
1642 accum = table[i + (WindowElements / 2)].dbl();
1643 }
1644
1645 m_table = to_affine_batch<C>(table);
1646 }
1647
1648 ProjectivePoint mul(const Scalar& s, RandomNumberGenerator& rng) const {
1649 const BlindedScalar bits(s, rng);
1650
1651 // TODO: C++23 - use std::mdspan to access m_table
1652 auto table = std::span{m_table};
1653
1654 auto accum = [&]() {
1655 const size_t w_0 = bits.get_window(0);
1656 const auto tbl_0 = table.first(WindowElements);
1657 auto pt = ProjectivePoint::from_affine(AffinePoint::ct_select(tbl_0, w_0));
1658 CT::poison(pt);
1659 pt.randomize_rep(rng);
1660 return pt;
1661 }();
1662
1663 for(size_t i = 1; i != Windows; ++i) {
1664 const size_t w_i = bits.get_window(WindowBits * i);
1665 const auto tbl_i = table.subspan(WindowElements * i, WindowElements);
1666
1667 /*
1668 None of these additions can be doublings, because in each iteration, the
1669 discrete logarithms of the points we're selecting out of the table are
1670 larger than the largest possible dlog of accum.
1671 */
1672 accum += AffinePoint::ct_select(tbl_i, w_i);
1673
1674 if(i <= 3) {
1675 accum.randomize_rep(rng);
1676 }
1677 }
1678
1679 CT::unpoison(accum);
1680 return accum;
1681 }
1682
1683 private:
1684 std::vector<AffinePoint> m_table;
1685};
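The structure is easiest to see in a toy model where the curve group is replaced by the integers modulo a small order n, so that a "point" is just a uint64_t and point addition is modular addition. The table layout and the additions-only online phase below mirror the class above (all names and constants are toy values):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
       const uint64_t n = 9973;  // toy group order
       const uint64_t g = 5;     // toy "base point"

       const size_t W = 2;                    // window width in bits
       const size_t WinElems = (1 << W) - 1;  // per-window entries (identity omitted)
       const size_t Bits = 16;                // toy scalar bit length
       const size_t Windows = (Bits + W - 1) / W;

       // table[w * WinElems + (j - 1)] = j * 2^(W*w) * g   for j = 1 .. 2^W - 1
       std::vector<uint64_t> table(Windows * WinElems);
       uint64_t base = g;
       for(size_t w = 0; w != Windows; ++w) {
          for(size_t j = 1; j <= WinElems; ++j) {
             table[w * WinElems + (j - 1)] = j * base % n;
          }
          base = (base << W) % n;  // next window's base is 2^W times the previous one
       }

       // Online phase: one lookup per window (index 0 meaning the implicit
       // identity) and additions only, no doublings
       for(uint64_t k = 0; k < (uint64_t(1) << Bits); k += 997) {
          uint64_t accum = 0;
          for(size_t w = 0; w != Windows; ++w) {
             const size_t digit = (k >> (W * w)) & ((1 << W) - 1);
             if(digit > 0) {  // the real code performs this selection in constant time
                accum = (accum + table[w * WinElems + (digit - 1)]) % n;
             }
          }
          assert(accum == k % n * g % n);
       }
       return 0;
    }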
1686
1687/**
1688* Precomputed point multiplication table
1689*
1690* This is a standard fixed window multiplication using W-bit wide window.
1691*/
1692template <typename C, size_t W>
1693class WindowedMulTable final {
1694 public:
1695 typedef typename C::Scalar Scalar;
1696 typedef typename C::AffinePoint AffinePoint;
1697 typedef typename C::ProjectivePoint ProjectivePoint;
1698
1699 static constexpr size_t WindowBits = W;
1700 static_assert(WindowBits >= 1 && WindowBits <= 8);
1701
1702 using BlindedScalar = BlindedScalarBits<C, WindowBits>;
1703
1704 static constexpr size_t Windows = (BlindedScalar::Bits + WindowBits - 1) / WindowBits;
1705
1706 static_assert(Windows > 1);
1707
1708 // 2^W elements, less the identity element
1709 static constexpr size_t TableSize = (1 << WindowBits) - 1;
1710
1711 WindowedMulTable(const AffinePoint& p) : m_table{} {
1712 std::vector<ProjectivePoint> table;
1713 table.reserve(TableSize);
1714
1715 table.push_back(ProjectivePoint::from_affine(p));
1716 for(size_t i = 1; i != TableSize; ++i) {
1717 if(i % 2 == 1) {
1718 table.push_back(table[i / 2].dbl());
1719 } else {
1720 table.push_back(table[i - 1] + p);
1721 }
1722 }
1723
1724 m_table = to_affine_batch<C>(table);
1725 }
1726
1727 ProjectivePoint mul(const Scalar& s, RandomNumberGenerator& rng) const {
1728 const BlindedScalar bits(s, rng);
1729
1730 auto accum = [&]() {
1731 const size_t w_0 = bits.get_window((Windows - 1) * WindowBits);
1732 // Guaranteed because we set the high bit of the randomizer
1733 BOTAN_DEBUG_ASSERT(w_0 != 0);
1734 auto pt = ProjectivePoint::from_affine(AffinePoint::ct_select(m_table, w_0));
1735 CT::poison(pt);
1736 pt.randomize_rep(rng);
1737 return pt;
1738 }();
1739
1740 for(size_t i = 1; i != Windows; ++i) {
1741 accum = accum.dbl_n(WindowBits);
1742 const size_t w_i = bits.get_window((Windows - i - 1) * WindowBits);
1743
1744 /*
1745 This point addition cannot be a doubling (except once)
1746
1747 Consider the sequence of points that are operated on, and specifically
1748 their discrete logarithms. We start out at the point at infinity
1749 (dlog 0) and then add the initial window which is precisely P*w_0
1750
1751 We then perform WindowBits doublings, so accum's dlog at the point
1752 of the addition in the first iteration of the loop (when i == 1) is
1753 at least 2^W * w_0.
1754
1755 Since we know w_0 > 0, in every iteration of the loop accum's
1756 dlog will always be greater than the dlog of the table element we
1757 just looked up (something between 0 and 2^W-1), and thus the
1758 addition into accum cannot be a doubling.
1759
1760 However due to blinding this argument fails, since we perform
1761 multiplications using a scalar that is larger than the group
1762 order. In this case it's possible that the dlog of accum becomes
1763 `order + x` (or, effectively, `x`) and `x` is smaller than 2^W.
1764 In this case, a doubling may occur. Future iterations of the loop
1765 cannot be doublings by the same argument above. Since the blinding
1766 factor is always less than the group order (substantially so),
1767 it is not possible for the dlog of accum to overflow a second time.
1768 */
1769 accum += AffinePoint::ct_select(m_table, w_i);
1770
1771 if(i <= 3) {
1772 accum.randomize_rep(rng);
1773 }
1774 }
1775
1776 CT::unpoison(accum);
1777 return accum;
1778 }
1779
1780 private:
1781 std::vector<AffinePoint> m_table;
1782};
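/*
A minimal sketch (illustrative only, not code from this header) of the fixed
window technique used by WindowedMulTable, written over plain 64-bit integers
so that integer addition stands in for point addition and a left shift stands
in for W doublings. The function name is hypothetical, and the constant-time
machinery the real code exists to provide (ct_select, scalar blinding,
randomize_rep) is deliberately omitted; only the table layout
(table[i] = (i+1)*p) and the most-significant-first window loop are shown.
*/
#include <cstddef>
#include <cstdint>
#include <vector>

template <size_t W>
uint64_t fixed_window_mul_sketch(uint64_t scalar, uint64_t p) {
   static_assert(W >= 1 && W <= 8);

   // table[i] holds (i + 1) * p; 2^W - 1 entries, as in WindowedMulTable
   constexpr size_t TableSize = (1 << W) - 1;
   std::vector<uint64_t> table;
   table.reserve(TableSize);
   table.push_back(p);
   for(size_t i = 1; i != TableSize; ++i) {
      if(i % 2 == 1) {
         table.push_back(2 * table[i / 2]);  // "doubling"
      } else {
         table.push_back(table[i - 1] + p);  // "addition"
      }
   }

   constexpr size_t Windows = (64 + W - 1) / W;

   uint64_t accum = 0;
   for(size_t i = 0; i != Windows; ++i) {
      if(i > 0) {
         accum <<= W;  // W "doublings" between windows
      }
      // Extract the i-th window, most significant window first
      const size_t shift = (Windows - i - 1) * W;
      const uint64_t w_i = (scalar >> shift) & ((uint64_t(1) << W) - 1);
      if(w_i > 0) {
         accum += table[w_i - 1];
      }
   }
   return accum;  // equals scalar * p modulo 2^64
}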
1783
1784/**
1785* Effect 2-ary multiplication, i.e. x*G + y*H
1786*
1787* This is done using a windowed variant of what is usually called
1788* Shamir's trick.
1789*
1790* The W = 1 case is simple; we precompute an extra point GH = G + H,
1791* and then examine 1 bit in each of x and y. If only one of the two bits
1792* is set, add G or H respectively. If both bits are set, add GH.
1793*
1794* The example below is a precomputed table for W=2. The flattened table
1795* begins at (x_i,y_i) = (1,0), i.e. the identity element is omitted.
1796* The indices in each cell refer to the cell's location in m_table.
1797*
1798*  x->          0          1          2          3
1799*       0  |/ (ident) |0  x      |1  2x     |2  3x     |
1800*       1  |3  y      |4  x+y    |5  2x+y   |6  3x+y   |
1801*  y =  2  |7  2y     |8  x+2y   |9  2(x+y) |10 3x+2y  |
1802*       3  |11 3y     |12 x+3y   |13 2x+3y  |14 3x+3y  |
1803*/
1804template <typename C, size_t W>
1805class WindowedMul2Table final {
1806 public:
1807 // We look at W bits of each scalar per iteration
1808 static_assert(W >= 1 && W <= 4);
1809
1810 typedef typename C::Scalar Scalar;
1811 typedef typename C::AffinePoint AffinePoint;
1812 typedef typename C::ProjectivePoint ProjectivePoint;
1813
1814 static constexpr size_t WindowBits = W;
1815
1816 static constexpr size_t WindowSize = (1 << WindowBits);
1817
1818 // 2^(2*W) elements, less the identity element
1819 static constexpr size_t TableSize = (1 << (2 * WindowBits)) - 1;
1820
1821 WindowedMul2Table(const AffinePoint& x, const AffinePoint& y) {
1822 std::vector<ProjectivePoint> table;
1823 table.reserve(TableSize);
1824
1825 for(size_t i = 0; i != TableSize; ++i) {
1826 const size_t t_i = (i + 1);
1827 const size_t x_i = t_i % WindowSize;
1828 const size_t y_i = (t_i >> WindowBits) % WindowSize;
1829
1830 // Returns x_i * x + y_i * y
1831 auto next_tbl_e = [&]() {
1832 if(x_i % 2 == 0 && y_i % 2 == 0) {
1833                  // Where possible, build the entry by doubling (e.g. indices
1834                  // 1, 7, 9 in the table above)
1835 return table[(t_i / 2) - 1].dbl();
1836 } else if(x_i > 0 && y_i > 0) {
1837 // A combination of x and y
1838 if(x_i == 1) {
1839 return x + table[(y_i << WindowBits) - 1];
1840 } else if(y_i == 1) {
1841 return table[x_i - 1] + y;
1842 } else {
1843 return table[x_i - 1] + table[(y_i << WindowBits) - 1];
1844 }
1845 } else if(x_i > 0 && y_i == 0) {
1846 // A multiple of x without a y component
1847 if(x_i == 1) {
1848 // Just x
1849 return ProjectivePoint::from_affine(x);
1850 } else {
1851                     // x_i * x, computed as x + (x_i - 1) * x
1852 return x + table[x_i - 1 - 1];
1853 }
1854 } else if(x_i == 0 && y_i > 0) {
1855 if(y_i == 1) {
1856 // Just y
1857 return ProjectivePoint::from_affine(y);
1858 } else {
1859                     // y_i * y, computed as y + (y_i - 1) * y
1860 return y + table[((y_i - 1) << WindowBits) - 1];
1861 }
1862               } else {
1863                  BOTAN_ASSERT_UNREACHABLE();  // t_i >= 1, so x_i and y_i cannot both be zero
1864               }
1865 };
1866
1867 table.emplace_back(next_tbl_e());
1868 }
1869
1870 m_table = to_affine_batch<C>(table);
1871 }
1872
1873 /**
1874 * Constant time 2-ary multiplication
1875 */
1876 ProjectivePoint mul2(const Scalar& s1, const Scalar& s2, RandomNumberGenerator& rng) const {
1877 using BlindedScalar = BlindedScalarBits<C, WindowBits>;
1878
1879 BlindedScalar bits1(s1, rng);
1880 BlindedScalar bits2(s2, rng);
1881
1882 constexpr size_t Windows = (BlindedScalar::Bits + WindowBits - 1) / WindowBits;
1883
1884 auto accum = ProjectivePoint::identity();
1885
1886 for(size_t i = 0; i != Windows; ++i) {
1887 if(i > 0) {
1888 accum = accum.dbl_n(WindowBits);
1889 }
1890
1891 const size_t w_1 = bits1.get_window((Windows - i - 1) * WindowBits);
1892 const size_t w_2 = bits2.get_window((Windows - i - 1) * WindowBits);
1893 const size_t window = w_1 + (w_2 << WindowBits);
1894 accum += AffinePoint::ct_select(m_table, window);
1895
1896 if(i <= 3) {
1897 accum.randomize_rep(rng);
1898 }
1899 }
1900
1901 return accum;
1902 }
1903
1904 /**
1905 * Variable time 2-ary multiplication
1906 *
1907 * A common use of 2-ary multiplication is when verifying the commitments
1908 * of an elliptic curve signature. Since in this case the inputs are all
1909 * public, there is no problem with variable time computation.
1910 *
1911 * TODO in the future we could use joint sparse form here.
1912 */
1913 ProjectivePoint mul2_vartime(const Scalar& s1, const Scalar& s2) const {
1914 constexpr size_t Windows = (Scalar::BITS + WindowBits - 1) / WindowBits;
1915
1916 const UnblindedScalarBits<C, W> bits1(s1);
1917 const UnblindedScalarBits<C, W> bits2(s2);
1918
1919 auto accum = ProjectivePoint::identity();
1920
1921 for(size_t i = 0; i != Windows; ++i) {
1922 if(i > 0) {
1923 accum = accum.dbl_n(WindowBits);
1924 }
1925
1926 const size_t w_1 = bits1.get_window((Windows - i - 1) * WindowBits);
1927 const size_t w_2 = bits2.get_window((Windows - i - 1) * WindowBits);
1928
1929 const size_t window = w_1 + (w_2 << WindowBits);
1930
1931 if(window > 0) {
1932 accum += m_table[window - 1];
1933 }
1934 }
1935
1936 return accum;
1937 }
1938
1939 private:
1940 std::vector<AffinePoint> m_table;
1941};
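/*
A sketch (illustrative only, not code from this header) of the windowed
Shamir's trick implemented by WindowedMul2Table, again over plain 64-bit
integers with addition standing in for point addition. It shows the flattened
table indexing (entry t-1 holds x_i*x + y_i*y with x_i = t % 2^W and
y_i = t / 2^W, matching the W=2 diagram above) and the combined window index
w_1 + (w_2 << W) used by mul2 and mul2_vartime. The function name is
hypothetical; blinding and constant-time selection are omitted.
*/
#include <cstddef>
#include <cstdint>
#include <vector>

template <size_t W>
uint64_t windowed_mul2_sketch(uint64_t s1, uint64_t s2, uint64_t x, uint64_t y) {
   static_assert(W >= 1 && W <= 4);

   constexpr size_t WindowSize = (1 << W);
   constexpr size_t TableSize = (1 << (2 * W)) - 1;

   // Entry t-1 holds x_i*x + y_i*y where x_i = t % 2^W and y_i = t / 2^W
   std::vector<uint64_t> table(TableSize);
   for(size_t t = 1; t <= TableSize; ++t) {
      const size_t x_i = t % WindowSize;
      const size_t y_i = t >> W;
      table[t - 1] = x_i * x + y_i * y;
   }

   constexpr size_t Windows = (64 + W - 1) / W;

   uint64_t accum = 0;
   for(size_t i = 0; i != Windows; ++i) {
      if(i > 0) {
         accum <<= W;  // W "doublings"
      }
      const size_t shift = (Windows - i - 1) * W;
      const size_t w_1 = static_cast<size_t>(s1 >> shift) & (WindowSize - 1);
      const size_t w_2 = static_cast<size_t>(s2 >> shift) & (WindowSize - 1);
      // Same combined index as in mul2/mul2_vartime above
      const size_t window = w_1 + (w_2 << W);
      if(window > 0) {
         accum += table[window - 1];
      }
   }
   return accum;  // equals s1*x + s2*y modulo 2^64
}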
1942
1943/**
1944* SSWU constant C2 - (B / (Z * A))
1945*
1946* See RFC 9380 section 6.6.2
1947*/
1948template <typename C>
1949const auto& SSWU_C2()
1950 requires C::ValidForSswuHash
1951{
1952 // TODO(Botan4) Make this a constexpr once compilers have caught up
1953 static const typename C::FieldElement C2 = C::B * invert_field_element<C>(C::SSWU_Z * C::A);
1954 return C2;
1955}
1956
1957/**
1958* SSWU constant C1 - (-B / A)
1959*
1960* See RFC 9380 section 6.6.2
1961*/
1962template <typename C>
1963const auto& SSWU_C1()
1964 requires C::ValidForSswuHash
1965{
1966 // TODO(Botan4) Make this a constexpr
1967 // We derive it from C2 to avoid a second inversion
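   // C1 = -(C2 * Z) = -((B / (Z * A)) * Z) = -B / A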
1968 static const typename C::FieldElement C1 = (SSWU_C2<C>() * C::SSWU_Z).negate();
1969 return C1;
1970}
1971
1972/**
1973* Map to curve (SSWU)
1974*
1975* See RFC 9380 ("Hashing to Elliptic Curves") section 6.6.2
1976*/
1977template <typename C>
1978inline auto map_to_curve_sswu(const typename C::FieldElement& u) -> typename C::AffinePoint {
1979 CT::poison(u);
1980 const auto z_u2 = C::SSWU_Z * u.square(); // z * u^2
1981 const auto z2_u4 = z_u2.square();
1982 const auto tv1 = invert_field_element<C>(z2_u4 + z_u2);
1983 auto x1 = SSWU_C1<C>() * (C::FieldElement::one() + tv1);
1984 C::FieldElement::conditional_assign(x1, tv1.is_zero(), SSWU_C2<C>());
1985 const auto gx1 = C::AffinePoint::x3_ax_b(x1);
1986
1987 const auto x2 = z_u2 * x1;
1988 const auto gx2 = C::AffinePoint::x3_ax_b(x2);
1989
1990 // Will be zero if gx1 is not a square
1991 const auto [gx1_sqrt, gx1_is_square] = gx1.sqrt();
1992
1993 auto x = x2;
1994 // By design one of gx1 and gx2 must be a quadratic residue
1995 auto y = gx2.sqrt().first;
1996
1997 C::FieldElement::conditional_assign(x, y, gx1_is_square, x1, gx1_sqrt);
1998
1999 const auto flip_y = y.is_even() != u.is_even();
2000 C::FieldElement::conditional_assign(y, flip_y, y.negate());
2001
2002 auto pt = typename C::AffinePoint(x, y);
2003
2004 CT::unpoison(pt);
2005 return pt;
2006}
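/*
For reference, the straight-line form of the simplified SWU map from RFC 9380
section 6.6.2, which the constant-time code above follows (C1 and C2 are the
precomputed constants -B/A and B/(Z*A) from SSWU_C1/SSWU_C2):

   tv1 = inv0(Z^2 * u^4 + Z * u^2)
   x1  = C1 * (1 + tv1);  if tv1 == 0 then x1 = C2
   gx1 = x1^3 + A*x1 + B
   x2  = Z * u^2 * x1
   gx2 = x2^3 + A*x2 + B
   if gx1 is a square: (x, y) = (x1, sqrt(gx1)), else (x, y) = (x2, sqrt(gx2))
   if sgn0(u) != sgn0(y): y = -y   (sgn0 here is the parity of the element)
   return (x, y)
*/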
2007
2008/**
2009* Hash to curve (SSWU); RFC 9380
2010*
2011* Hashes the input using XMD with the specified hash function, producing either one
2012* field element (`u`) or two (`u0`, `u1`). These are then mapped to curve point(s)
2013* using SSWU; if two points were generated, they are combined using point
2014* addition.
2015*/
2016template <typename C, bool RO>
2017 requires C::ValidForSswuHash
2018inline auto hash_to_curve_sswu(std::string_view hash, std::span<const uint8_t> pw, std::span<const uint8_t> dst)
2019 -> std::conditional_t<RO, typename C::ProjectivePoint, typename C::AffinePoint> {
2020#if defined(BOTAN_HAS_XMD)
2021 constexpr size_t SecurityLevel = (C::OrderBits + 1) / 2;
2022 constexpr size_t L = (C::PrimeFieldBits + SecurityLevel + 7) / 8;
2023 constexpr size_t Cnt = RO ? 2 : 1;
2024
2025 std::array<uint8_t, L * Cnt> xmd;
2026
2027 expand_message_xmd(hash, xmd, pw, dst);
2028
2029 if constexpr(RO) {
2030 const auto u0 = C::FieldElement::from_wide_bytes(std::span<const uint8_t, L>(xmd.data(), L));
2031 const auto u1 = C::FieldElement::from_wide_bytes(std::span<const uint8_t, L>(xmd.data() + L, L));
2032
2033 auto accum = C::ProjectivePoint::from_affine(map_to_curve_sswu<C>(u0));
2034 accum += map_to_curve_sswu<C>(u1);
2035 return accum;
2036 } else {
2037 const auto u = C::FieldElement::from_wide_bytes(std::span<const uint8_t, L>(xmd.data(), L));
2038 return map_to_curve_sswu<C>(u);
2039 }
2040#else
2041 BOTAN_UNUSED(hash, pw, dst);
2042 throw Not_Implemented("Hash to curve not available due to missing XMD");
2043#endif
2044}
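/*
The RO parameter selects between the two constructions of RFC 9380: RO = true
corresponds to hash_to_curve (two field elements are derived and the two mapped
points are added), while RO = false corresponds to encode_to_curve (a single
field element, with a nonuniform output distribution per the RFC). A
hypothetical call, assuming some instantiated curve type Curve and spans
message/domain_sep, might look like:

   auto pt = hash_to_curve_sswu<Curve, true>("SHA-256", message, domain_sep);
*/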
2045
2046} // namespace
2047
2048} // namespace Botan
2049
2050#endif