Botan 3.6.1
Crypto and TLS for C++
donna.cpp
/*
* Based on curve25519-donna-c64.c from https://github.com/agl/curve25519-donna
* revision 80ad9b9930c9baef5829dd2a235b6b7646d32a8e
*
* Further changes
* (C) 2014,2018 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

/* Copyright 2008, Google Inc.
* All rights reserved.
*
* Code released into the public domain.
*
* curve25519-donna: Curve25519 elliptic curve, public key function
*
* https://code.google.com/p/curve25519-donna/
*
* Adam Langley <agl@imperialviolet.org>
*
* Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
*
* More information about curve25519 can be found here
* https://cr.yp.to/ecdh.html
*
* djb's sample implementation of curve25519 is written in a special assembly
* language called qhasm and uses the floating point registers.
*
* This is, almost, a clean room reimplementation from the curve25519 paper. It
* uses many of the tricks described therein. Only the crecip function is taken
* from the sample implementation.
*/

#include <botan/x25519.h>

#include <botan/internal/ct_utils.h>
#include <botan/internal/donna128.h>
#include <botan/internal/loadstor.h>
#include <botan/internal/mul128.h>

namespace Botan {

namespace {

#if !defined(BOTAN_TARGET_HAS_NATIVE_UINT128)
typedef donna128 uint128_t;
#endif

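/* Field elements mod p = 2^255 - 19 are held in "polynomial form": five 64-bit
* limbs of roughly 51 bits each, with limb i weighted by 2^(51*i). Limbs are
* allowed to grow somewhat beyond 51 bits between reductions.
*/
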
/* Sum two numbers: output += in */
inline void fsum(uint64_t out[5], const uint64_t in[5]) {
   out[0] += in[0];
   out[1] += in[1];
   out[2] += in[2];
   out[3] += in[3];
   out[4] += in[4];
}

/* Find the difference of two numbers: out = in - out
* (note the order of the arguments!)
*
* Assumes that out[i] < 2**52
* On return, out[i] < 2**55
*/
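/* The constants added below are the limbs of 8*p (with p = 2^255 - 19), which
* keeps every limb non-negative without changing the value mod p.
*/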
inline void fdifference_backwards(uint64_t out[5], const uint64_t in[5]) {
   /* 152 is 19 << 3 */
   const uint64_t two54m152 = (static_cast<uint64_t>(1) << 54) - 152;
   const uint64_t two54m8 = (static_cast<uint64_t>(1) << 54) - 8;

   out[0] = in[0] + two54m152 - out[0];
   out[1] = in[1] + two54m8 - out[1];
   out[2] = in[2] + two54m8 - out[2];
   out[3] = in[3] + two54m8 - out[3];
   out[4] = in[4] + two54m8 - out[4];
}

inline void fadd_sub(uint64_t x[5], uint64_t y[5]) {
   // TODO merge these and avoid the tmp array
   uint64_t tmp[5];
   copy_mem(tmp, y, 5);
   fsum(y, x);                      // y = x + y
   fdifference_backwards(x, tmp);   // x = y_orig - x
}

const uint64_t MASK_63 = 0x7ffffffffffff;  // 2^51 - 1: masks a limb to its low 51 bits

/* Multiply a number by a scalar: out = in * scalar */
inline void fscalar_product(uint64_t out[5], const uint64_t in[5], const uint64_t scalar) {
   uint128_t a = uint128_t(in[0]) * scalar;
   out[0] = a & MASK_63;

   a = uint128_t(in[1]) * scalar + carry_shift(a, 51);
   out[1] = a & MASK_63;

   a = uint128_t(in[2]) * scalar + carry_shift(a, 51);
   out[2] = a & MASK_63;

   a = uint128_t(in[3]) * scalar + carry_shift(a, 51);
   out[3] = a & MASK_63;

   a = uint128_t(in[4]) * scalar + carry_shift(a, 51);
   out[4] = a & MASK_63;

   out[0] += carry_shift(a, 51) * 19;
}

/* Multiply two numbers: out = in2 * in
*
* out must be distinct from both inputs. The inputs are in reduced coefficient
* form, the output is not.
*
* Assumes that in[i] < 2**55 and likewise for in2.
* On return, out[i] < 2**52
*/
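/* Schoolbook 5x5 limb multiplication; partial products that land at or above
* limb 5 are folded back into the low limbs using 2^255 == 19 (mod p), which is
* why the upper limbs of `in` are scaled by 19 before the second set of sums.
*/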
inline void fmul(uint64_t out[5], const uint64_t in[5], const uint64_t in2[5]) {
   const uint128_t s0 = in2[0];
   const uint128_t s1 = in2[1];
   const uint128_t s2 = in2[2];
   const uint128_t s3 = in2[3];
   const uint128_t s4 = in2[4];

   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   uint128_t t0 = r0 * s0;
   uint128_t t1 = r0 * s1 + r1 * s0;
   uint128_t t2 = r0 * s2 + r2 * s0 + r1 * s1;
   uint128_t t3 = r0 * s3 + r3 * s0 + r1 * s2 + r2 * s1;
   uint128_t t4 = r0 * s4 + r4 * s0 + r3 * s1 + r1 * s3 + r2 * s2;

   r4 *= 19;
   r1 *= 19;
   r2 *= 19;
   r3 *= 19;

   t0 += r4 * s1 + r1 * s4 + r2 * s3 + r3 * s2;
   t1 += r4 * s2 + r2 * s4 + r3 * s3;
   t2 += r4 * s3 + r3 * s4;
   t3 += r4 * s4;

   r0 = t0 & MASK_63;
   t1 += carry_shift(t0, 51);
   r1 = t1 & MASK_63;
   t2 += carry_shift(t1, 51);
   r2 = t2 & MASK_63;
   t3 += carry_shift(t2, 51);
   r3 = t3 & MASK_63;
   t4 += carry_shift(t3, 51);
   r4 = t4 & MASK_63;
   uint64_t c = carry_shift(t4, 51);

   r0 += c * 19;
   c = r0 >> 51;
   r0 = r0 & MASK_63;
   r1 += c;
   c = r1 >> 51;
   r1 = r1 & MASK_63;
   r2 += c;

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
}

inline void fsquare(uint64_t out[5], const uint64_t in[5], size_t count = 1) {
   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   for(size_t i = 0; i != count; ++i) {
      const uint64_t d0 = r0 * 2;
      const uint64_t d1 = r1 * 2;
      const uint64_t d2 = r2 * 2 * 19;
      const uint64_t d419 = r4 * 19;
      const uint64_t d4 = d419 * 2;

      uint128_t t0 = uint128_t(r0) * r0 + uint128_t(d4) * r1 + uint128_t(d2) * (r3);
      uint128_t t1 = uint128_t(d0) * r1 + uint128_t(d4) * r2 + uint128_t(r3) * (r3 * 19);
      uint128_t t2 = uint128_t(d0) * r2 + uint128_t(r1) * r1 + uint128_t(d4) * (r3);
      uint128_t t3 = uint128_t(d0) * r3 + uint128_t(d1) * r2 + uint128_t(r4) * (d419);
      uint128_t t4 = uint128_t(d0) * r4 + uint128_t(d1) * r3 + uint128_t(r2) * (r2);

      r0 = t0 & MASK_63;
      t1 += carry_shift(t0, 51);
      r1 = t1 & MASK_63;
      t2 += carry_shift(t1, 51);
      r2 = t2 & MASK_63;
      t3 += carry_shift(t2, 51);
      r3 = t3 & MASK_63;
      t4 += carry_shift(t3, 51);
      r4 = t4 & MASK_63;
      uint64_t c = carry_shift(t4, 51);

      r0 += c * 19;
      c = r0 >> 51;
      r0 = r0 & MASK_63;
      r1 += c;
      c = r1 >> 51;
      r1 = r1 & MASK_63;
      r2 += c;
   }

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
}

/* Take a little-endian, 32-byte number and expand it into polynomial form */
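/* Limb i of the output holds bits [51*i, 51*i + 51) of the input; the
* overlapping byte offsets and right shifts below line each 64-bit load up on
* the corresponding 51-bit boundary.
*/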
inline void fexpand(uint64_t* out, const uint8_t* in) {
   out[0] = load_le<uint64_t>(in, 0) & MASK_63;
   out[1] = (load_le<uint64_t>(in + 6, 0) >> 3) & MASK_63;
   out[2] = (load_le<uint64_t>(in + 12, 0) >> 6) & MASK_63;
   out[3] = (load_le<uint64_t>(in + 19, 0) >> 1) & MASK_63;
   out[4] = (load_le<uint64_t>(in + 24, 0) >> 12) & MASK_63;
}

/* Take a fully reduced polynomial form number and contract it into a
* little-endian, 32-byte array
*/
inline void fcontract(uint8_t* out, const uint64_t input[5]) {
   uint128_t t0 = input[0];
   uint128_t t1 = input[1];
   uint128_t t2 = input[2];
   uint128_t t3 = input[3];
   uint128_t t4 = input[4];

   for(size_t i = 0; i != 2; ++i) {
      t1 += t0 >> 51;
      t0 &= MASK_63;
      t2 += t1 >> 51;
      t1 &= MASK_63;
      t3 += t2 >> 51;
      t2 &= MASK_63;
      t4 += t3 >> 51;
      t3 &= MASK_63;
      t0 += (t4 >> 51) * 19;
      t4 &= MASK_63;
   }

   /* now t is between 0 and 2^255-1, properly carried. */
   /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */

   t0 += 19;

   t1 += t0 >> 51;
   t0 &= MASK_63;
   t2 += t1 >> 51;
   t1 &= MASK_63;
   t3 += t2 >> 51;
   t2 &= MASK_63;
   t4 += t3 >> 51;
   t3 &= MASK_63;
   t0 += (t4 >> 51) * 19;
   t4 &= MASK_63;

   /* now between 19 and 2^255-1 in both cases, and offset by 19. */

   t0 += 0x8000000000000 - 19;
   t1 += 0x8000000000000 - 1;
   t2 += 0x8000000000000 - 1;
   t3 += 0x8000000000000 - 1;
   t4 += 0x8000000000000 - 1;

   /* now between 2^255 and 2^256-20, and offset by 2^255. */

   t1 += t0 >> 51;
   t0 &= MASK_63;
   t2 += t1 >> 51;
   t1 &= MASK_63;
   t3 += t2 >> 51;
   t2 &= MASK_63;
   t4 += t3 >> 51;
   t3 &= MASK_63;
   t4 &= MASK_63;

   store_le(out,
            combine_lower(t0, 0, t1, 51),
            combine_lower(t1, 13, t2, 38),
            combine_lower(t2, 26, t3, 25),
            combine_lower(t3, 39, t4, 12));
}

/* Input: Q, Q', Q-Q'
* Out: 2Q, Q+Q'
*
* result.two_q (2*Q): long form
* result.q_plus_q_dash (Q + Q'): long form
* in_q: short form, destroyed
* in_q_dash: short form, destroyed
* in_q_minus_q_dash: short form, preserved
*/
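/* One step of the x-only Montgomery ladder: a combined point doubling and
* differential addition. The constant 121665 used below is (A - 2)/4 for the
* curve coefficient A = 486662.
*/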
void fmonty(uint64_t result_two_q_x[5],
            uint64_t result_two_q_z[5],
            uint64_t result_q_plus_q_dash_x[5],
            uint64_t result_q_plus_q_dash_z[5],
            uint64_t in_q_x[5],
            uint64_t in_q_z[5],
            uint64_t in_q_dash_x[5],
            uint64_t in_q_dash_z[5],
            const uint64_t q_minus_q_dash[5]) {
   uint64_t zzz[5];
   uint64_t xx[5];
   uint64_t zz[5];
   uint64_t xxprime[5];
   uint64_t zzprime[5];
   uint64_t zzzprime[5];

   fadd_sub(in_q_z, in_q_x);
   fadd_sub(in_q_dash_z, in_q_dash_x);

   fmul(xxprime, in_q_dash_x, in_q_z);
   fmul(zzprime, in_q_dash_z, in_q_x);

   fadd_sub(zzprime, xxprime);

   fsquare(result_q_plus_q_dash_x, xxprime);
   fsquare(zzzprime, zzprime);
   fmul(result_q_plus_q_dash_z, zzzprime, q_minus_q_dash);

   fsquare(xx, in_q_x);
   fsquare(zz, in_q_z);
   fmul(result_two_q_x, xx, zz);

   fdifference_backwards(zz, xx);  // does zz = xx - zz
   fscalar_product(zzz, zz, 121665);
   fsum(zzz, xx);

   fmul(result_two_q_z, zz, zzz);
}

/*
* Conditionally swap the contents of the arrays @a and @b, and likewise @c and
* @d, depending on the mask @swap (all-zero or all-one bits).
*
* This function performs the swap without leaking any side-channel
* information.
*/
inline void swap_conditional(uint64_t a[5], uint64_t b[5], uint64_t c[5], uint64_t d[5], CT::Mask<uint64_t> swap) {
   for(size_t i = 0; i < 5; ++i) {
      const uint64_t x0 = swap.if_set_return(a[i] ^ b[i]);
      a[i] ^= x0;
      b[i] ^= x0;

      const uint64_t x1 = swap.if_set_return(c[i] ^ d[i]);
      c[i] ^= x1;
      d[i] ^= x1;
   }
}

/* Calculates nQ where Q is the x-coordinate of a point on the curve
*
* resultx/resultz: the x/z coordinate of the resulting curve point (short form)
* n: a little endian, 32-byte number
* q: a point of the curve (short form)
*/
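/* The scalar is processed most-significant byte first; each loop iteration
* below runs eight Montgomery ladder steps (one per bit), with constant-time
* conditional swaps keyed on the differences of consecutive scalar bits rather
* than on the bits themselves.
*/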
void cmult(uint64_t resultx[5], uint64_t resultz[5], const uint8_t n[32], const uint64_t q[5]) {
   uint64_t a[5] = {0};  // nqpqx
   uint64_t b[5] = {1};  // nqpqz
   uint64_t c[5] = {1};  // nqx
   uint64_t d[5] = {0};  // nqz
   uint64_t e[5] = {0};  // nqpqx2
   uint64_t f[5] = {1};  // nqpqz2
   uint64_t g[5] = {0};  // nqx2
   uint64_t h[5] = {1};  // nqz2

   copy_mem(a, q, 5);

   for(size_t i = 0; i < 32; ++i) {
      const uint64_t si = n[31 - i];
      const auto bit0 = CT::Mask<uint64_t>::expand_bit(si, 7);
      const auto bit1 = CT::Mask<uint64_t>::expand_bit(si, 6);
      const auto bit2 = CT::Mask<uint64_t>::expand_bit(si, 5);
      const auto bit3 = CT::Mask<uint64_t>::expand_bit(si, 4);
      const auto bit4 = CT::Mask<uint64_t>::expand_bit(si, 3);
      const auto bit5 = CT::Mask<uint64_t>::expand_bit(si, 2);
      const auto bit6 = CT::Mask<uint64_t>::expand_bit(si, 1);
      const auto bit7 = CT::Mask<uint64_t>::expand_bit(si, 0);

      swap_conditional(c, a, d, b, bit0);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit0 ^ bit1);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit1 ^ bit2);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit2 ^ bit3);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit3 ^ bit4);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit4 ^ bit5);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit5 ^ bit6);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, h, f, bit6 ^ bit7);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, d, b, bit7);
   }

   copy_mem(resultx, c, 5);
   copy_mem(resultz, d, 5);
}

// -----------------------------------------------------------------------------
// Shamelessly copied from djb's code, tightened a little
// -----------------------------------------------------------------------------
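/* Computes out = z^(2^255 - 21) = z^(p - 2) mod p, i.e. the inverse of z by
* Fermat's little theorem, using a fixed square-and-multiply addition chain.
*/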
void crecip(uint64_t out[5], const uint64_t z[5]) {
   uint64_t a[5];
   uint64_t b[5];
   uint64_t c[5];
   uint64_t t0[5];

   fsquare(a, z);  // 2
   fsquare(t0, a, 2);  // 8
   fmul(b, t0, z);  // 9
   fmul(a, b, a);  // 11
   fsquare(t0, a);  // 22
   fmul(b, t0, b);  // 2^5 - 2^0 = 31
   fsquare(t0, b, 5);  // 2^10 - 2^5
   fmul(b, t0, b);  // 2^10 - 2^0
   fsquare(t0, b, 10);  // 2^20 - 2^10
   fmul(c, t0, b);  // 2^20 - 2^0
   fsquare(t0, c, 20);  // 2^40 - 2^20
   fmul(t0, t0, c);  // 2^40 - 2^0
   fsquare(t0, t0, 10);  // 2^50 - 2^10
   fmul(b, t0, b);  // 2^50 - 2^0
   fsquare(t0, b, 50);  // 2^100 - 2^50
   fmul(c, t0, b);  // 2^100 - 2^0
   fsquare(t0, c, 100);  // 2^200 - 2^100
   fmul(t0, t0, c);  // 2^200 - 2^0
   fsquare(t0, t0, 50);  // 2^250 - 2^50
   fmul(t0, t0, b);  // 2^250 - 2^0
   fsquare(t0, t0, 5);  // 2^255 - 2^5
   fmul(out, t0, a);  // 2^255 - 21
}

}  // namespace

void curve25519_donna(uint8_t mypublic[32], const uint8_t secret[32], const uint8_t basepoint[32]) {
   CT::poison(secret, 32);
   CT::poison(basepoint, 32);

   uint64_t bp[5], x[5], z[5], zmone[5];
   uint8_t e[32];

   copy_mem(e, secret, 32);
   // Clamp the scalar: clear the low 3 bits and the top bit, set bit 254
   e[0] &= 248;
   e[31] &= 127;
   e[31] |= 64;

   fexpand(bp, basepoint);
   cmult(x, z, e, bp);
   crecip(zmone, z);
   fmul(z, x, zmone);
   fcontract(mypublic, z);

   CT::unpoison(secret, 32);
   CT::unpoison(basepoint, 32);
   CT::unpoison(mypublic, 32);
}

}  // namespace Botan
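
For reference, a minimal usage sketch of the entry point above (an illustration, not part of this file). It assumes curve25519_donna is reachable through the public <botan/x25519.h> header that this file includes, and it uses a fixed placeholder secret where a real caller would supply 32 random bytes.

#include <botan/x25519.h>

#include <cstdint>
#include <cstdio>

int main() {
   uint8_t secret[32] = {1};            // placeholder; use a CSPRNG-generated secret in practice
   const uint8_t basepoint[32] = {9};   // the standard Curve25519 base point, x = 9
   uint8_t pub[32];

   // pub becomes the X25519 public value; the function clamps the secret internally
   Botan::curve25519_donna(pub, secret, basepoint);

   for(uint8_t byte : pub) {
      std::printf("%02x", byte);
   }
   std::printf("\n");
   return 0;
}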