Botan  2.6.0
Crypto and TLS for C++11
donna.cpp
/*
* Based on curve25519-donna-c64.c from github.com/agl/curve25519-donna
* revision 80ad9b9930c9baef5829dd2a235b6b7646d32a8e
*
* Further changes
* (C) 2014,2018 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

/* Copyright 2008, Google Inc.
* All rights reserved.
*
* Code released into the public domain.
*
* curve25519-donna: Curve25519 elliptic curve, public key function
*
* https://code.google.com/p/curve25519-donna/
*
* Adam Langley <agl@imperialviolet.org>
*
* Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
*
* More information about curve25519 can be found here
* https://cr.yp.to/ecdh.html
*
* djb's sample implementation of curve25519 is written in a special assembly
* language called qhasm and uses the floating point registers.
*
* This is, almost, a clean room reimplementation from the curve25519 paper. It
* uses many of the tricks described therein. Only the crecip function is taken
* from the sample implementation.
*/

#include <botan/curve25519.h>
#include <botan/mul128.h>
#include <botan/internal/ct_utils.h>
#include <botan/internal/donna128.h>
#include <botan/loadstor.h>

namespace Botan {

namespace {

#if !defined(BOTAN_TARGET_HAS_NATIVE_UINT128)
typedef donna128 uint128_t;
#endif

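/* Field elements mod p = 2^255 - 19 are kept in "polynomial" (reduced-degree)
* form as five 64-bit limbs, each nominally 51 bits wide:
*
*    x = x[0] + x[1]*2^51 + x[2]*2^102 + x[3]*2^153 + x[4]*2^204
*
* The mask 0x7ffffffffffff used throughout is 2^51 - 1. When a native unsigned
* 128-bit type is not available, donna128 emulates the 64x64->128 bit products
* and shifts that the limb arithmetic needs.
*/
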
/* Sum two numbers: output += in */
inline void fsum(uint64_t out[5], const uint64_t in[5])
   {
   out[0] += in[0];
   out[1] += in[1];
   out[2] += in[2];
   out[3] += in[3];
   out[4] += in[4];
   }

/* Find the difference of two numbers: out = in - out
* (note the order of the arguments!)
*
* Assumes that out[i] < 2**52
* On return, out[i] < 2**55
*/
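/* The bias constants below are 8*p expressed limb-wise: two54m152 = 8*(2^51 - 19)
* and two54m8 = 8*(2^51 - 1). Adding a multiple of p before subtracting keeps
* every limb non-negative without changing the value mod p.
*/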
inline void fdifference_backwards(uint64_t out[5], const uint64_t in[5])
   {
   /* 152 is 19 << 3 */
   const uint64_t two54m152 = (static_cast<uint64_t>(1) << 54) - 152;
   const uint64_t two54m8 = (static_cast<uint64_t>(1) << 54) - 8;

   out[0] = in[0] + two54m152 - out[0];
   out[1] = in[1] + two54m8 - out[1];
   out[2] = in[2] + two54m8 - out[2];
   out[3] = in[3] + two54m8 - out[3];
   out[4] = in[4] + two54m8 - out[4];
   }

inline void fadd_sub(uint64_t x[5],
                     uint64_t y[5])
   {
   // TODO merge these and avoid the tmp array
   uint64_t tmp[5];
   copy_mem(tmp, y, 5);
   fsum(y, x);                    // y = x + y
   fdifference_backwards(x, tmp); // x = y_orig - x
   }

/* Multiply a number by a scalar: out = in * scalar */
inline void fscalar_product(uint64_t out[5], const uint64_t in[5], const uint64_t scalar)
   {
   uint128_t a = uint128_t(in[0]) * scalar;
   out[0] = a & 0x7ffffffffffff;

   a = uint128_t(in[1]) * scalar + carry_shift(a, 51);
   out[1] = a & 0x7ffffffffffff;

   a = uint128_t(in[2]) * scalar + carry_shift(a, 51);
   out[2] = a & 0x7ffffffffffff;

   a = uint128_t(in[3]) * scalar + carry_shift(a, 51);
   out[3] = a & 0x7ffffffffffff;

   a = uint128_t(in[4]) * scalar + carry_shift(a, 51);
   out[4] = a & 0x7ffffffffffff;

   out[0] += carry_shift(a, 51) * 19;
   }

/* Multiply two numbers: out = in2 * in
*
* out must be distinct from both inputs. The inputs are in reduced coefficient
* form, the output is not.
*
* Assumes that in[i] < 2**55 and likewise for in2.
* On return, out[i] < 2**52
*/
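/* Schoolbook multiplication over the limbs. Since 2^255 = 19 (mod p), any
* product term r_i * s_j with i + j >= 5 wraps around with a factor of 19;
* that is what the r1..r4 *= 19 adjustments implement. For example, r4*s1
* contributes at weight 2^(51*5) = 2^255, i.e. 19*r4*s1 at weight 1.
*/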
inline void fmul(uint64_t out[5], const uint64_t in[5], const uint64_t in2[5])
   {
   const uint128_t s0 = in2[0];
   const uint128_t s1 = in2[1];
   const uint128_t s2 = in2[2];
   const uint128_t s3 = in2[3];
   const uint128_t s4 = in2[4];

   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   uint128_t t0 = r0 * s0;
   uint128_t t1 = r0 * s1 + r1 * s0;
   uint128_t t2 = r0 * s2 + r2 * s0 + r1 * s1;
   uint128_t t3 = r0 * s3 + r3 * s0 + r1 * s2 + r2 * s1;
   uint128_t t4 = r0 * s4 + r4 * s0 + r3 * s1 + r1 * s3 + r2 * s2;

   r4 *= 19;
   r1 *= 19;
   r2 *= 19;
   r3 *= 19;

   t0 += r4 * s1 + r1 * s4 + r2 * s3 + r3 * s2;
   t1 += r4 * s2 + r2 * s4 + r3 * s3;
   t2 += r4 * s3 + r3 * s4;
   t3 += r4 * s4;

   r0 = t0 & 0x7ffffffffffff; t1 += carry_shift(t0, 51);
   r1 = t1 & 0x7ffffffffffff; t2 += carry_shift(t1, 51);
   r2 = t2 & 0x7ffffffffffff; t3 += carry_shift(t2, 51);
   r3 = t3 & 0x7ffffffffffff; t4 += carry_shift(t3, 51);
   r4 = t4 & 0x7ffffffffffff; uint64_t c = carry_shift(t4, 51);

   r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
   r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
   r2 += c;

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
   }

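/* Square a number 'count' times, specializing the general multiply. The
* doubled terms d0, d1, d2, d4 exploit the symmetry of squaring (r_i*r_j
* appears twice for i != j); the carry chain is the same as in fmul.
*/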
inline void fsquare_times(uint64_t out[5], const uint64_t in[5], size_t count)
   {
   uint64_t r0 = in[0];
   uint64_t r1 = in[1];
   uint64_t r2 = in[2];
   uint64_t r3 = in[3];
   uint64_t r4 = in[4];

   for(size_t i = 0; i != count; ++i)
      {
      const uint64_t d0 = r0 * 2;
      const uint64_t d1 = r1 * 2;
      const uint64_t d2 = r2 * 2 * 19;
      const uint64_t d419 = r4 * 19;
      const uint64_t d4 = d419 * 2;

      uint128_t t0 = uint128_t(r0) * r0 + uint128_t(d4) * r1 + uint128_t(d2) * r3;
      uint128_t t1 = uint128_t(d0) * r1 + uint128_t(d4) * r2 + uint128_t(r3) * (r3 * 19);
      uint128_t t2 = uint128_t(d0) * r2 + uint128_t(r1) * r1 + uint128_t(d4) * r3;
      uint128_t t3 = uint128_t(d0) * r3 + uint128_t(d1) * r2 + uint128_t(r4) * d419;
      uint128_t t4 = uint128_t(d0) * r4 + uint128_t(d1) * r3 + uint128_t(r2) * r2;

      r0 = t0 & 0x7ffffffffffff; t1 += carry_shift(t0, 51);
      r1 = t1 & 0x7ffffffffffff; t2 += carry_shift(t1, 51);
      r2 = t2 & 0x7ffffffffffff; t3 += carry_shift(t2, 51);
      r3 = t3 & 0x7ffffffffffff; t4 += carry_shift(t3, 51);
      r4 = t4 & 0x7ffffffffffff; uint64_t c = carry_shift(t4, 51);

      r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffff;
      r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffff;
      r2 += c;
      }

   out[0] = r0;
   out[1] = r1;
   out[2] = r2;
   out[3] = r3;
   out[4] = r4;
   }

inline void fsquare(uint64_t out[5], const uint64_t in[5])
   {
   fsquare_times(out, in, 1);
   }

/* Take a little-endian, 32-byte number and expand it into polynomial form */
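/* Each load reads 8 little-endian bytes at byte offsets 0, 6, 12, 19 and 24;
* the shifts by 0, 3, 6, 1 and 12 then align bits 0, 51, 102, 153 and 204 of
* the 255-bit input to bit 0 of the respective limb before masking to 51 bits.
*/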
inline void fexpand(uint64_t *out, const uint8_t *in)
   {
   out[0] = load_le<uint64_t>(in, 0) & 0x7ffffffffffff;
   out[1] = (load_le<uint64_t>(in+6, 0) >> 3) & 0x7ffffffffffff;
   out[2] = (load_le<uint64_t>(in+12, 0) >> 6) & 0x7ffffffffffff;
   out[3] = (load_le<uint64_t>(in+19, 0) >> 1) & 0x7ffffffffffff;
   out[4] = (load_le<uint64_t>(in+24, 0) >> 12) & 0x7ffffffffffff;
   }

/* Take a fully reduced polynomial form number and contract it into a
* little-endian, 32-byte array
*/
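/* After the final reduction the five 51-bit limbs are packed into four
* little-endian 64-bit words: output word k holds bits 64*k .. 64*k + 63 of
* the 255-bit value, which is what the shift pairs passed to combine_lower
* select.
*/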
inline void fcontract(uint8_t *out, const uint64_t input[5])
   {
   uint128_t t0 = input[0];
   uint128_t t1 = input[1];
   uint128_t t2 = input[2];
   uint128_t t3 = input[3];
   uint128_t t4 = input[4];

   for(size_t i = 0; i != 2; ++i)
      {
      t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
      t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
      t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
      t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
      t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;
      }

   /* now t is between 0 and 2^255-1, properly carried. */
   /* case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. */

   t0 += 19;

   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
   t0 += (t4 >> 51) * 19; t4 &= 0x7ffffffffffff;

   /* now between 19 and 2^255-1 in both cases, and offset by 19. */

   t0 += 0x8000000000000 - 19;
   t1 += 0x8000000000000 - 1;
   t2 += 0x8000000000000 - 1;
   t3 += 0x8000000000000 - 1;
   t4 += 0x8000000000000 - 1;

   /* now between 2^255 and 2^256-20, and offset by 2^255. */

   t1 += t0 >> 51; t0 &= 0x7ffffffffffff;
   t2 += t1 >> 51; t1 &= 0x7ffffffffffff;
   t3 += t2 >> 51; t2 &= 0x7ffffffffffff;
   t4 += t3 >> 51; t3 &= 0x7ffffffffffff;
   t4 &= 0x7ffffffffffff;

   store_le(out,
            combine_lower(t0, 0, t1, 51),
            combine_lower(t1, 13, t2, 38),
            combine_lower(t2, 26, t3, 25),
            combine_lower(t3, 39, t4, 12));
   }

/* Input: Q, Q', Q-Q'
* Out: 2Q, Q+Q'
*
* result.two_q (2*Q): long form
* result.q_plus_q_dash (Q + Q'): long form
* in_q: short form, destroyed
* in_q_dash: short form, destroyed
* in_q_minus_q_dash: short form, preserved
*/

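/* This is the combined doubling and differential-addition step of the
* Montgomery ladder, operating on projective (X : Z) coordinates only. The
* constant 121665 used below is (A - 2)/4 for the curve coefficient A = 486662.
*/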
void fmonty(uint64_t result_two_q_x[5],
            uint64_t result_two_q_z[5],
            uint64_t result_q_plus_q_dash_x[5],
            uint64_t result_q_plus_q_dash_z[5],
            uint64_t in_q_x[5],
            uint64_t in_q_z[5],
            uint64_t in_q_dash_x[5],
            uint64_t in_q_dash_z[5],
            const uint64_t q_minus_q_dash[5])
   {
   uint64_t zzz[5];
   uint64_t xx[5];
   uint64_t zz[5];
   uint64_t xxprime[5];
   uint64_t zzprime[5];
   uint64_t zzzprime[5];

   fadd_sub(in_q_z, in_q_x);
   fadd_sub(in_q_dash_z, in_q_dash_x);

   fmul(xxprime, in_q_dash_x, in_q_z);
   fmul(zzprime, in_q_dash_z, in_q_x);

   fadd_sub(zzprime, xxprime);

   fsquare(result_q_plus_q_dash_x, xxprime);
   fsquare(zzzprime, zzprime);
   fmul(result_q_plus_q_dash_z, zzzprime, q_minus_q_dash);

   fsquare(xx, in_q_x);
   fsquare(zz, in_q_z);
   fmul(result_two_q_x, xx, zz);

   fdifference_backwards(zz, xx); // does zz = xx - zz
   fscalar_product(zzz, zz, 121665);
   fsum(zzz, xx);

   fmul(result_two_q_z, zz, zzz);
   }

/*
* Conditionally swap the contents of two uint64_t arrays (@a and @b).
* Param @iswap is assumed to be either 0 or 1.
*
* This function performs the swap without leaking any side-channel
* information.
*/
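/* The mask trick used below: swap is 0 when iswap == 0 and all-ones when
* iswap == 1, so x = swap & (a[i] ^ b[i]) is either zero (no change) or the
* XOR difference, and the two XORs exchange the limbs without any branch.
*/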
void swap_conditional(uint64_t a[5], uint64_t b[5], uint64_t iswap)
   {
   const uint64_t swap = static_cast<uint64_t>(-static_cast<int64_t>(iswap));

   for(size_t i = 0; i < 5; ++i)
      {
      const uint64_t x = swap & (a[i] ^ b[i]);
      a[i] ^= x;
      b[i] ^= x;
      }
   }

/* Calculates nQ where Q is the x-coordinate of a point on the curve
*
* resultx/resultz: the x/z coordinate of the resulting curve point (short form)
* n: a little-endian, 32-byte number
* q: a point on the curve (short form)
*/
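/* Montgomery ladder over the scalar, processed from the most significant
* byte (n[31]) downwards, eight ladder steps per byte. Rather than swapping
* before and after every step, each conditional swap is keyed on the XOR of
* adjacent scalar bits, so the working (X : Z) pairs are exchanged only when
* the key bit actually changes.
*/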
void cmult(uint64_t resultx[5], uint64_t resultz[5], const uint8_t n[32], const uint64_t q[5])
   {
   uint64_t a[5] = {0}; // nqpqx
   uint64_t b[5] = {1}; // nqpqz
   uint64_t c[5] = {1}; // nqx
   uint64_t d[5] = {0}; // nqz
   uint64_t e[5] = {0}; // nqpqx2
   uint64_t f[5] = {1}; // nqpqz2
   uint64_t g[5] = {0}; // nqx2
   uint64_t h[5] = {1}; // nqz2

   copy_mem(a, q, 5);

   for(size_t i = 0; i < 32; ++i)
      {
      const uint64_t bit0 = (n[31 - i] >> 7) & 1;
      const uint64_t bit1 = (n[31 - i] >> 6) & 1;
      const uint64_t bit2 = (n[31 - i] >> 5) & 1;
      const uint64_t bit3 = (n[31 - i] >> 4) & 1;
      const uint64_t bit4 = (n[31 - i] >> 3) & 1;
      const uint64_t bit5 = (n[31 - i] >> 2) & 1;
      const uint64_t bit6 = (n[31 - i] >> 1) & 1;
      const uint64_t bit7 = (n[31 - i] >> 0) & 1;

      swap_conditional(c, a, bit0);
      swap_conditional(d, b, bit0);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, bit0 ^ bit1);
      swap_conditional(h, f, bit0 ^ bit1);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, bit1 ^ bit2);
      swap_conditional(d, b, bit1 ^ bit2);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, bit2 ^ bit3);
      swap_conditional(h, f, bit2 ^ bit3);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, bit3 ^ bit4);
      swap_conditional(d, b, bit3 ^ bit4);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, bit4 ^ bit5);
      swap_conditional(h, f, bit4 ^ bit5);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, bit5 ^ bit6);
      swap_conditional(d, b, bit5 ^ bit6);
      fmonty(g, h, e, f, c, d, a, b, q);

      swap_conditional(g, e, bit6 ^ bit7);
      swap_conditional(h, f, bit6 ^ bit7);
      fmonty(c, d, a, b, g, h, e, f, q);

      swap_conditional(c, a, bit7);
      swap_conditional(d, b, bit7);
      }

   copy_mem(resultx, c, 5);
   copy_mem(resultz, d, 5);
   }


// -----------------------------------------------------------------------------
// Shamelessly copied from djb's code, tightened a little
// -----------------------------------------------------------------------------
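/* Computes out = z^(p-2) = z^(2^255 - 21) mod p, which by Fermat's little
* theorem is the inverse of z. The comments on each line track the exponent
* accumulated so far in the addition chain.
*/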
void crecip(uint64_t out[5], const uint64_t z[5])
   {
   uint64_t a[5];
   uint64_t b[5];
   uint64_t c[5];
   uint64_t t0[5];

   /* 2 */ fsquare(a, z); // a = 2
   /* 8 */ fsquare_times(t0, a, 2);
   /* 9 */ fmul(b, t0, z); // b = 9
   /* 11 */ fmul(a, b, a); // a = 11
   /* 22 */ fsquare(t0, a);
   /* 2^5 - 2^0 = 31 */ fmul(b, t0, b);
   /* 2^10 - 2^5 */ fsquare_times(t0, b, 5);
   /* 2^10 - 2^0 */ fmul(b, t0, b);
   /* 2^20 - 2^10 */ fsquare_times(t0, b, 10);
   /* 2^20 - 2^0 */ fmul(c, t0, b);
   /* 2^40 - 2^20 */ fsquare_times(t0, c, 20);
   /* 2^40 - 2^0 */ fmul(t0, t0, c);
   /* 2^50 - 2^10 */ fsquare_times(t0, t0, 10);
   /* 2^50 - 2^0 */ fmul(b, t0, b);
   /* 2^100 - 2^50 */ fsquare_times(t0, b, 50);
   /* 2^100 - 2^0 */ fmul(c, t0, b);
   /* 2^200 - 2^100 */ fsquare_times(t0, c, 100);
   /* 2^200 - 2^0 */ fmul(t0, t0, c);
   /* 2^250 - 2^50 */ fsquare_times(t0, t0, 50);
   /* 2^250 - 2^0 */ fmul(t0, t0, b);
   /* 2^255 - 2^5 */ fsquare_times(t0, t0, 5);
   /* 2^255 - 21 */ fmul(out, t0, a);
   }

}

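/*
* X25519 scalar multiplication: mypublic = x-coordinate of secret * basepoint.
* The secret is first clamped in the standard curve25519 way (clear the low
* three bits and the top bit, set bit 254), then the Montgomery ladder gives
* the result in projective form, and a field inversion recovers the affine x.
* The CT::poison/unpoison calls are annotations for Botan's constant-time
* analysis tooling and do nothing in ordinary builds.
*/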
void
curve25519_donna(uint8_t *mypublic, const uint8_t *secret, const uint8_t *basepoint)
   {
   CT::poison(secret, 32);
   CT::poison(basepoint, 32);

   uint64_t bp[5], x[5], z[5], zmone[5];
   uint8_t e[32];

   copy_mem(e, secret, 32);
   e[ 0] &= 248;
   e[31] &= 127;
   e[31] |= 64;

   fexpand(bp, basepoint);
   cmult(x, z, e, bp);
   crecip(zmone, z);
   fmul(z, x, zmone);
   fcontract(mypublic, z);

   CT::unpoison(secret, 32);
   CT::unpoison(basepoint, 32);
   CT::unpoison(mypublic, 32);
   }
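/* A minimal usage sketch from a caller's perspective (illustrative only, not
* part of this file; the buffer names are hypothetical):
*
*    uint8_t secret[32];                   // 32 random bytes; clamped internally
*    uint8_t pub[32];
*    const uint8_t basepoint[32] = { 9 };  // the standard generator, x = 9
*    Botan::curve25519_donna(pub, secret, basepoint);
*
* A shared secret with a peer follows from the same call with basepoint
* replaced by the peer's 32-byte public value.
*/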

}