35#include <botan/curve25519.h>
36#include <botan/internal/mul128.h>
37#include <botan/internal/ct_utils.h>
38#include <botan/internal/donna128.h>
39#include <botan/internal/loadstor.h>
45#if !defined(BOTAN_TARGET_HAS_NATIVE_UINT128)
46typedef donna128 uint128_t;
// Field-element addition in the 5 x 51-bit limb representation.
// NOTE(review): the body is not visible in this extract -- presumably a
// limb-wise out[i] += in[i] with no carry propagation; confirm against
// the full source.
50inline void fsum(uint64_t out[5],
                 const uint64_t in[5])
/*
* Compute out = in - out, limb by limb, without a carry/reduce pass.
*
* Before subtracting, each limb is biased by a limb of a multiple of the
* prime p = 2^255-19 (2^54-152 for the low limb, since 152 == 19 << 3,
* and 2^54-8 for the others) so every per-limb difference stays
* non-negative in plain uint64_t arithmetic.
*/
inline void fdifference_backwards(uint64_t out[5],
                                  const uint64_t in[5])
   {
   // 152 == 19 << 3: the low-limb bias absorbs the -19 term of 8*p
   const uint64_t bias_low  = (static_cast<uint64_t>(1) << 54) - 152;
   const uint64_t bias_high = (static_cast<uint64_t>(1) << 54) - 8;

   out[0] = in[0] + bias_low - out[0];

   for(size_t i = 1; i != 5; ++i)
      out[i] = in[i] + bias_high - out[i];
   }
// Combined add/subtract step used by the Montgomery ladder.
// NOTE(review): only part of the body is visible in this extract -- the
// visible call writes the backwards difference into x via a temporary
// `tmp`, whose declaration/copy (and the matching fsum into the second
// argument) are missing here; confirm against the full source.
78inline void fadd_sub(uint64_t x[5],
 85   fdifference_backwards(x, tmp);
89inline void fscalar_product(uint64_t out[5],
const uint64_t in[5],
const uint64_t scalar)
91 uint128_t a = uint128_t(in[0]) * scalar;
92 out[0] = a & 0x7ffffffffffff;
95 out[1] = a & 0x7ffffffffffff;
98 out[2] = a & 0x7ffffffffffff;
100 a = uint128_t(in[3]) * scalar +
carry_shift(a, 51);
101 out[3] = a & 0x7ffffffffffff;
103 a = uint128_t(in[4]) * scalar +
carry_shift(a, 51);
104 out[4] = a & 0x7ffffffffffff;
/*
* Field multiplication: out = in * in2 on 5 x 51-bit limbs (the
* 0x7ffffffffffff masks below). Products whose limb indices sum to 5 or
* more wrap around multiplied by 19, since 2^255 == 19 (mod 2^255-19).
* NOTE(review): this extract is missing the r0..r4 limb loads from in[],
* the r *= 19 pre-scaling lines, the `t3 += r4 * s4` term, and the final
* out[] stores -- confirm against the full source.
*/
117inline void fmul(uint64_t out[5],
                 const uint64_t in[5],
                 const uint64_t in2[5])
// Widen the second operand's limbs so limb-by-limb products cannot overflow
 119   const uint128_t s0 = in2[0];
 120   const uint128_t s1 = in2[1];
 121   const uint128_t s2 = in2[2];
 122   const uint128_t s3 = in2[3];
 123   const uint128_t s4 = in2[4];
// Schoolbook multiply: t[k] gathers all limb pairs whose indices sum to k
 131   uint128_t t0 = r0 * s0;
 132   uint128_t t1 = r0 * s1 + r1 * s0;
 133   uint128_t t2 = r0 * s2 + r2 * s0 + r1 * s1;
 134   uint128_t t3 = r0 * s3 + r3 * s0 + r1 * s2 + r2 * s1;
 135   uint128_t t4 = r0 * s4 + r4 * s0 + r3 * s1 + r1 * s3 + r2 * s2;
// Wrap-around terms (index sums >= 5); the r limbs used here have
// presumably been pre-multiplied by 19 on the missing lines -- verify
 142   t0 += r4 * s1 + r1 * s4 + r2 * s3 + r3 * s2;
 143   t1 += r4 * s2 + r2 * s4 + r3 * s3;
 144   t2 += r4 * s3 + r3 * s4;
// Carry chain: keep the low 51 bits of each t, push the excess upward
 147   r0 = t0 & 0x7ffffffffffff; t1 +=
carry_shift(t0, 51);
 148   r1 = t1 & 0x7ffffffffffff; t2 +=
carry_shift(t1, 51);
 149   r2 = t2 & 0x7ffffffffffff; t3 +=
carry_shift(t2, 51);
 150   r3 = t3 & 0x7ffffffffffff; t4 +=
carry_shift(t3, 51);
 151   r4 = t4 & 0x7ffffffffffff; uint64_t c =
carry_shift(t4, 51);
// The carry out of the top limb re-enters at the bottom scaled by 19
 153   r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
 154   r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
/*
* Repeated field squaring: out = in^(2^count), performing `count`
* squarings in place (default 1). Squaring exploits symmetry: cross
* terms appear twice, so doubled limbs (d0, d1, ...) are precomputed,
* and wrap-around terms carry the factor 19 (2^255 == 19 mod 2^255-19).
* NOTE(review): this extract is missing the initial r0..r4 loads from
* in[] and the final out[] stores -- confirm against the full source.
*/
164inline void fsquare(uint64_t out[5],
                    const uint64_t in[5],
                    size_t count = 1)
 172   for(
size_t i = 0; i != count; ++i)
// Doubled / 19-scaled limbs shared by several products below
 174      const uint64_t d0 = r0 * 2;
 175      const uint64_t d1 = r1 * 2;
 176      const uint64_t d2 = r2 * 2 * 19;
 177      const uint64_t d419 = r4 * 19;
 178      const uint64_t d4 = d419 * 2;
// t[k] = sum of r[i]*r[j] with i+j == k (mod 5, wrapped terms times 19)
 180      uint128_t t0 = uint128_t(r0) * r0 + uint128_t(d4) * r1 + uint128_t(d2) * (r3 );
 181      uint128_t t1 = uint128_t(d0) * r1 + uint128_t(d4) * r2 + uint128_t(r3) * (r3 * 19);
 182      uint128_t t2 = uint128_t(d0) * r2 + uint128_t(r1) * r1 + uint128_t(d4) * (r3 );
 183      uint128_t t3 = uint128_t(d0) * r3 + uint128_t(d1) * r2 + uint128_t(r4) * (d419 );
 184      uint128_t t4 = uint128_t(d0) * r4 + uint128_t(d1) * r3 + uint128_t(r2) * (r2 );
// Carry chain: retain 51 bits per limb, propagate the excess upward
 186      r0 = t0 & 0x7ffffffffffff; t1 +=
carry_shift(t0, 51);
 187      r1 = t1 & 0x7ffffffffffff; t2 +=
carry_shift(t1, 51);
 188      r2 = t2 & 0x7ffffffffffff; t3 +=
carry_shift(t2, 51);
 189      r3 = t3 & 0x7ffffffffffff; t4 +=
carry_shift(t3, 51);
 190      r4 = t4 & 0x7ffffffffffff; uint64_t c =
carry_shift(t4, 51);
// Top-limb carry folds back into the bottom limb scaled by 19
 192      r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
 193      r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
// Unpack a 32-byte little-endian encoded field element into five 51-bit
// limbs in out[0..4]. NOTE(review): the body is not visible in this
// extract -- confirm the load/shift/mask details against the full source.
205inline void fexpand(uint64_t *out,
                    const uint8_t *in)
/*
* Contract five 51-bit limbs into the canonical 32-byte little-endian
* encoding, fully reduced mod 2^255-19.
* NOTE(review): this extract is missing some interior lines (apparently
* an added +19 offset with its carry pass) and the final byte stores --
* confirm against the full source.
*/
217inline void fcontract(uint8_t *out,
                      const uint64_t input[5])
// Work in 128-bit temporaries so carries cannot overflow
 219   uint128_t t0 = input[0];
 220   uint128_t t1 = input[1];
 221   uint128_t t2 = input[2];
 222   uint128_t t3 = input[3];
 223   uint128_t t4 = input[4];
// Two carry-propagation passes; the top-limb carry wraps times 19
 225   for(
size_t i = 0; i != 2; ++i)
 227      t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
 228      t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
 229      t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
 230      t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
 231      t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;
// Another full carry pass (presumably after a +19 offset on a missing line)
 239   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
 240   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
 241   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
 242   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
 243   t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;
// Bias each limb by 2^51 minus the corresponding limb of p = 2^255-19,
// so the following carries subtract p exactly when the value is >= p
 247   t0 += 0x8000000000000 - 19;
 248   t1 += 0x8000000000000 - 1;
 249   t2 += 0x8000000000000 - 1;
 250   t3 += 0x8000000000000 - 1;
 251   t4 += 0x8000000000000 - 1;
// Final carry pass; the borrow out of t4 (bit 51) is discarded by the mask
 255   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
 256   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
 257   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
 258   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
 259   t4 &= 0x7ffffffffffff;
/*
* One differential add-and-double step of the Montgomery ladder:
* given Q, Q' and Q-Q' (x-coordinate only, projective X/Z), produce
* 2Q and Q+Q'. Inputs in_q_* and in_q_dash_* are clobbered as scratch.
* NOTE(review): this extract is missing the declarations of the xx, zz,
* zzz, xxprime, zzprime working arrays and the fsquare calls that
* produce xx/zz -- confirm against the full source.
*/
277void fmonty(uint64_t result_two_q_x[5],
 278             uint64_t result_two_q_z[5],
 279             uint64_t result_q_plus_q_dash_x[5],
 280             uint64_t result_q_plus_q_dash_z[5],
 283             uint64_t in_q_dash_x[5],
 284             uint64_t in_q_dash_z[5],
 285             const uint64_t q_minus_q_dash[5])
 292   uint64_t zzzprime[5];
// Form sums/differences of the input coordinates in place
 294   fadd_sub(in_q_z, in_q_x);
 295   fadd_sub(in_q_dash_z, in_q_dash_x);
// Cross products for the differential addition
 297   fmul(xxprime, in_q_dash_x, in_q_z);
 298   fmul(zzprime, in_q_dash_z, in_q_x);
 300   fadd_sub(zzprime, xxprime);
// (Q+Q') coordinates; Z uses the known difference Q-Q'
 302   fsquare(result_q_plus_q_dash_x, xxprime);
 303   fsquare(zzzprime, zzprime);
 304   fmul(result_q_plus_q_dash_z, zzzprime, q_minus_q_dash);
// Doubling: 2Q.X = xx*zz; 2Q.Z = (xx-zz) * (zz + 121665*(xx-zz))
 308   fmul(result_two_q_x, xx, zz);
 310   fdifference_backwards(zz, xx);
// 121665 = (486662 - 2) / 4, the curve constant for curve25519
 311   fscalar_product(zzz, zz, 121665);
 314   fmul(result_two_q_z, zz, zzz);
/*
* Constant-time conditional swap of (a,b) and (c,d): swap happens
* exactly when iswap is 1, with no data-dependent branches.
* NOTE(review): the iswap parameter declaration and the lines applying
* x0/x1 back into a/b/c/d are missing from this extract -- confirm
* against the full source.
*/
324inline void swap_conditional(uint64_t a[5], uint64_t b[5],
 325                             uint64_t c[5], uint64_t d[5],
// Mask is all-ones when iswap == 1, all-zeros when iswap == 0
 328   const uint64_t swap = 0 - iswap;
 330   for(
size_t i = 0; i < 5; ++i)
// xor-swap trick: x is the masked difference of the two limbs
 332      const uint64_t x0 = swap & (a[i] ^ b[i]);
 333      const uint64_t x1 = swap & (c[i] ^ d[i]);
/*
* Montgomery-ladder scalar multiplication: compute the x-coordinate
* (projective X/Z in resultx/resultz) of n*Q, processing the 256-bit
* scalar n most-significant byte first, 8 bits per iteration. Each bit
* costs one conditional swap plus one fmonty step; the swap condition is
* the XOR of adjacent bits, so the running point pair is only swapped
* when the bit value changes (constant-time ladder).
* NOTE(review): the declarations of the ladder state arrays a..h and the
* final copy into resultx/resultz are missing from this extract --
* confirm against the full source.
*/
347void cmult(uint64_t resultx[5], uint64_t resultz[5],
           const uint8_t n[32],
           const uint64_t q[5])
 360   for(
size_t i = 0; i < 32; ++i)
// Extract the 8 bits of this byte, MSB first
 362      const uint64_t bit0 = (n[31 - i] >> 7) & 1;
 363      const uint64_t bit1 = (n[31 - i] >> 6) & 1;
 364      const uint64_t bit2 = (n[31 - i] >> 5) & 1;
 365      const uint64_t bit3 = (n[31 - i] >> 4) & 1;
 366      const uint64_t bit4 = (n[31 - i] >> 3) & 1;
 367      const uint64_t bit5 = (n[31 - i] >> 2) & 1;
 368      const uint64_t bit6 = (n[31 - i] >> 1) & 1;
 369      const uint64_t bit7 = (n[31 - i] >> 0) & 1;
// Two fmonty calls per pair of bits, ping-ponging between the
// (a..d) and (e..h) state arrays; swaps keyed on bit transitions
 371      swap_conditional(c, a, d, b, bit0);
 372      fmonty(g, h, e, f, c, d, a, b, q);
 374      swap_conditional(g, e, h, f, bit0 ^ bit1);
 375      fmonty(c, d, a, b, g, h, e, f, q);
 377      swap_conditional(c, a, d, b, bit1 ^ bit2);
 378      fmonty(g, h, e, f, c, d, a, b, q);
 380      swap_conditional(g, e, h, f, bit2 ^ bit3);
 381      fmonty(c, d, a, b, g, h, e, f, q);
 383      swap_conditional(c, a, d, b, bit3 ^ bit4);
 384      fmonty(g, h, e, f, c, d, a, b, q);
 386      swap_conditional(g, e, h, f, bit4 ^ bit5);
 387      fmonty(c, d, a, b, g, h, e, f, q);
 389      swap_conditional(c, a, d, b, bit5 ^ bit6);
 390      fmonty(g, h, e, f, c, d, a, b, q);
 392      swap_conditional(g, e, h, f, bit6 ^ bit7);
 393      fmonty(c, d, a, b, g, h, e, f, q);
// Undo the last speculative swap so state reflects the final bit
 395      swap_conditional(c, a, d, b, bit7);
// Field inversion: out = z^-1 mod 2^255-19. NOTE(review): the body is
// not visible in this extract -- presumably a fixed fsquare/fmul
// addition chain computing z^(p-2) (Fermat); confirm against the
// full source.
406void crecip(uint64_t out[5],
            const uint64_t z[5])
/*
* X25519 scalar multiplication entry point: mypublic = secret * basepoint
* (32-byte little-endian encodings). Expands the base point, runs the
* Montgomery ladder, and contracts the affine x-coordinate result.
* NOTE(review): this extract is missing the return type / opening lines,
* the secret clamping, the cmult/crecip calls and the final z = x * zmone
* computation -- confirm against the full source.
*/
440curve25519_donna(uint8_t mypublic[32],
                 const uint8_t secret[32],
                 const uint8_t basepoint[32])
// bp: expanded base point; x/z: ladder output; zmone: z^-1
 445   uint64_t bp[5], x[5], z[5], zmone[5];
 453   fexpand(bp, basepoint);
// Serialize the reduced affine x-coordinate into the output buffer
 457   fcontract(mypublic, z);
void poison(const T *p, size_t n)
void unpoison(const T *p, size_t n)
constexpr void store_le(uint16_t in, uint8_t out[2])
constexpr void copy_mem(T *out, const T *in, size_t n)
uint64_t carry_shift(const donna128 &a, size_t shift)
uint64_t combine_lower(const donna128 &a, size_t s1, const donna128 &b, size_t s2)
void curve25519_donna(uint8_t mypublic[32], const uint8_t secret[32], const uint8_t basepoint[32])
constexpr uint64_t load_le< uint64_t >(const uint8_t in[], size_t off)