1 /******************************************************************************
2 * Copyright © 2014-2018 The SuperNET Developers. *
4 * See the AUTHORS, DEVELOPER-AGREEMENT and LICENSE files at *
5 * the top-level directory of this distribution for the individual copyright *
6 * holder information and the developer policies on copyright and licensing. *
8 * Unless otherwise agreed in a custom licensing agreement, no part of the *
9 * SuperNET software, including this file may be copied, modified, propagated *
10 * or distributed except according to the terms contained in the LICENSE file *
12 * Removal or modification of this copyright notice is prohibited. *
14 ******************************************************************************/
16 #ifndef H_KOMODO25519_H
17 #define H_KOMODO25519_H
18 // derived from curve25519_donna
// Forward declarations of the field-arithmetic primitives defined below.
// bits320 holds a Curve25519 field element in polynomial (limb) form;
// bits256 is the packed little-endian 32-byte encoding.
26 bits320 fmul(const bits320 in2,const bits320 in);
27 bits320 fexpand(bits256 basepoint);
28 bits256 fcontract(const bits320 input);
29 void cmult(bits320 *resultx,bits320 *resultz,bits256 secret,const bits320 q);
30 bits320 crecip(const bits320 z);
31 bits256 curve25519(bits256 mysecret,bits256 basepoint);
33 // Sum two numbers: output += in
// Limb-wise addition of two field elements. No carry propagation happens
// here; the limbs have headroom and later reductions absorb the carries.
34 static inline bits320 fsum(bits320 output,bits320 in)
38 output.ulongs[i] += in.ulongs[i];
// Computes out = in - out (note the reversed operands). The bias constants
// below sum to a multiple of 2^255-19 across the limbs, so adding them
// before subtracting keeps every limb non-negative without changing the
// value mod p (standard curve25519-donna trick).
42 static inline void fdifference_backwards(uint64_t *out,const uint64_t *in)
44 static const uint64_t two54m152 = (((uint64_t)1) << 54) - 152; // 152 is 19 << 3
45 static const uint64_t two54m8 = (((uint64_t)1) << 54) - 8;
47 out[0] = in[0] + two54m152 - out[0];
49 out[i] = in[i] + two54m8 - out[i];
// Serialize a 64-bit value to 8 little-endian bytes (least significant first).
52 void store_limb(uint8_t *out,uint64_t in)
55 for (i=0; i<8; i++,in>>=8)
// Read 8 bytes as a little-endian 64-bit value (in[0] is least significant).
59 static inline uint64_t load_limb(uint8_t *in)
63 (((uint64_t)in[1]) << 8) |
64 (((uint64_t)in[2]) << 16) |
65 (((uint64_t)in[3]) << 24) |
66 (((uint64_t)in[4]) << 32) |
67 (((uint64_t)in[5]) << 40) |
68 (((uint64_t)in[6]) << 48) |
69 (((uint64_t)in[7]) << 56);
72 // Take a little-endian, 32-byte number and expand it into polynomial form
// Splits the 255-bit value into five 51-bit limbs (mask 0x7ffffffffffff);
// the staggered byte offsets and shifts pick out consecutive 51-bit windows.
73 bits320 fexpand(bits256 basepoint)
76 out.ulongs[0] = load_limb(basepoint.bytes) & 0x7ffffffffffffLL;
77 out.ulongs[1] = (load_limb(basepoint.bytes+6) >> 3) & 0x7ffffffffffffLL;
78 out.ulongs[2] = (load_limb(basepoint.bytes+12) >> 6) & 0x7ffffffffffffLL;
79 out.ulongs[3] = (load_limb(basepoint.bytes+19) >> 1) & 0x7ffffffffffffLL;
80 out.ulongs[4] = (load_limb(basepoint.bytes+24) >> 12) & 0x7ffffffffffffLL;
84 #if defined(__amd64) || defined(__aarch64__)
85 // donna: special gcc mode for 128-bit integers. It's implemented on 64-bit platforms only as far as I know.
86 typedef unsigned uint128_t __attribute__((mode(TI)));
88 // Multiply a number by a scalar: output = in * scalar
// 64-bit path: 128-bit intermediates; each carry (a >> 51) ripples into the
// next limb, and the final carry wraps into limb 0 scaled by 19 because
// 2^255 = 19 (mod 2^255-19).
89 static inline bits320 fscalar_product(const bits320 in,const uint64_t scalar)
91 int32_t i; uint128_t a = 0; bits320 output;
92 a = ((uint128_t)in.ulongs[0]) * scalar;
93 output.ulongs[0] = ((uint64_t)a) & 0x7ffffffffffffLL;
96 a = ((uint128_t)in.ulongs[i]) * scalar + ((uint64_t) (a >> 51));
97 output.ulongs[i] = ((uint64_t)a) & 0x7ffffffffffffLL;
99 output.ulongs[0] += (a >> 51) * 19;
103 // Multiply two numbers: output = in2 * in
104 // output must be distinct to both inputs. The inputs are reduced coefficient form, the output is not.
105 // Assumes that in[i] < 2**55 and likewise for in2. On return, output[i] < 2**52
// Schoolbook 5x5 limb multiply with 128-bit accumulators. Cross terms whose
// limb index would exceed 4 are folded back multiplied by 19 (2^255 = 19 mod p),
// then a single carry chain reduces each limb to 51 bits.
106 bits320 fmul(const bits320 in2,const bits320 in)
108 uint128_t t[5]; uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c; bits320 out;
109 r0 = in.ulongs[0], r1 = in.ulongs[1], r2 = in.ulongs[2], r3 = in.ulongs[3], r4 = in.ulongs[4];
110 s0 = in2.ulongs[0], s1 = in2.ulongs[1], s2 = in2.ulongs[2], s3 = in2.ulongs[3], s4 = in2.ulongs[4];
111 t[0] = ((uint128_t) r0) * s0;
112 t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0;
113 t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1;
114 t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1;
115 t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2;
// Pre-scale the high limbs by 19 so the wrapped-around cross terms are cheap.
116 r4 *= 19, r1 *= 19, r2 *= 19, r3 *= 19;
117 t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2;
118 t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3;
119 t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4;
120 t[3] += ((uint128_t) r4) * s4;
// Carry chain: mask each limb to 51 bits and push the carry upward.
121 r0 = (uint64_t)t[0] & 0x7ffffffffffffLL; c = (uint64_t)(t[0] >> 51);
122 t[1] += c; r1 = (uint64_t)t[1] & 0x7ffffffffffffLL; c = (uint64_t)(t[1] >> 51);
123 t[2] += c; r2 = (uint64_t)t[2] & 0x7ffffffffffffLL; c = (uint64_t)(t[2] >> 51);
124 t[3] += c; r3 = (uint64_t)t[3] & 0x7ffffffffffffLL; c = (uint64_t)(t[3] >> 51);
125 t[4] += c; r4 = (uint64_t)t[4] & 0x7ffffffffffffLL; c = (uint64_t)(t[4] >> 51);
126 r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffLL;
127 r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffLL;
129 out.ulongs[0] = r0, out.ulongs[1] = r1, out.ulongs[2] = r2, out.ulongs[3] = r3, out.ulongs[4] = r4;
// Repeatedly square a field element: out = in^(2^count). Squaring exploits
// symmetry (d0..d4/d419 are pre-doubled/pre-scaled limbs) so it needs fewer
// 128-bit multiplies than fmul; same 51-bit carry chain afterwards.
133 bits320 fsquare_times(const bits320 in,uint64_t count)
135 uint128_t t[5]; uint64_t r0,r1,r2,r3,r4,c,d0,d1,d2,d4,d419; bits320 out;
136 r0 = in.ulongs[0], r1 = in.ulongs[1], r2 = in.ulongs[2], r3 = in.ulongs[3], r4 = in.ulongs[4];
144 t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
145 t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
146 t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
147 t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
148 t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));
150 r0 = (uint64_t)t[0] & 0x7ffffffffffffLL; c = (uint64_t)(t[0] >> 51);
151 t[1] += c; r1 = (uint64_t)t[1] & 0x7ffffffffffffLL; c = (uint64_t)(t[1] >> 51);
152 t[2] += c; r2 = (uint64_t)t[2] & 0x7ffffffffffffLL; c = (uint64_t)(t[2] >> 51);
// NOTE(review): this mask uses an L suffix while every sibling uses LL.
// Harmless (the constant's value forces a 64-bit type anyway) but inconsistent.
153 t[3] += c; r3 = (uint64_t)t[3] & 0x7ffffffffffffL; c = (uint64_t)(t[3] >> 51);
154 t[4] += c; r4 = (uint64_t)t[4] & 0x7ffffffffffffLL; c = (uint64_t)(t[4] >> 51);
155 r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffLL;
156 r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffLL;
159 out.ulongs[0] = r0, out.ulongs[1] = r1, out.ulongs[2] = r2, out.ulongs[3] = r3, out.ulongs[4] = r4;
// One full carry pass over the 5x51-bit limbs: propagate carries upward,
// then fold the top carry back into limb 0 multiplied by 19. The `flag`
// parameter's use is outside this view — TODO confirm against full source.
163 static inline void fcontract_iter(uint128_t t[5],int32_t flag)
165 int32_t i; uint64_t mask = 0x7ffffffffffffLL;
167 t[i+1] += t[i] >> 51, t[i] &= mask;
169 t[0] += 19 * (t[4] >> 51); t[4] &= mask;
172 // donna: Take a fully reduced polynomial form number and contract it into a little-endian, 32-byte array
// Canonicalizes the value mod 2^255-19 (the +19 / offset-by-2^255 steps force
// a unique representative) and packs the five 51-bit limbs into 32 bytes.
173 bits256 fcontract(const bits320 input)
175 uint128_t t[5]; int32_t i; bits256 out;
177 t[i] = input.ulongs[i];
178 fcontract_iter(t,1), fcontract_iter(t,1);
179 // donna: now t is between 0 and 2^255-1, properly carried.
180 // donna: case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1.
181 t[0] += 19, fcontract_iter(t,1);
182 // now between 19 and 2^255-1 in both cases, and offset by 19.
183 t[0] += 0x8000000000000 - 19;
185 t[i] += 0x8000000000000 - 1;
186 // now between 2^255 and 2^256-20, and offset by 2^255.
// Pack 51-bit limbs into four little-endian 64-bit words (51*5 = 255 bits).
188 store_limb(out.bytes,t[0] | (t[1] << 51));
189 store_limb(out.bytes+8,(t[1] >> 13) | (t[2] << 38));
190 store_limb(out.bytes+16,(t[2] >> 26) | (t[3] << 25));
191 store_limb(out.bytes+24,(t[3] >> 39) | (t[4] << 12));
// Scalar multiplication on Curve25519 (64-bit build). Clamps the secret
// scalar (clear low 3 bits, clear top bit, set bit 254 — X25519 clamping),
// runs the Montgomery ladder, then returns x * (1/z) contracted to 32 bytes.
195 bits256 curve25519(bits256 mysecret,bits256 basepoint)
198 mysecret.bytes[0] &= 0xf8, mysecret.bytes[31] &= 0x7f, mysecret.bytes[31] |= 0x40;
199 bp = fexpand(basepoint);
200 cmult(&x,&z,mysecret,bp);
201 return(fcontract(fmul(x,crecip(z))));
205 // from curve25519-donna.c
// 32-bit fallback path: a limb is a signed 64-bit container holding a
// 25/26-bit coefficient (ten limbs per field element).
208 typedef int64_t limb;
210 /* Multiply a number by a scalar: output = in * scalar */
211 static void fscalar_product32(limb *output, const limb *in, const limb scalar) {
213 for (i = 0; i < 10; ++i) {
214 output[i] = in[i] * scalar;
218 /* Multiply two numbers: output = in2 * in
220 * output must be distinct to both inputs. The inputs are reduced coefficient
221 * form, the output is not.
223 * output[x] <= 14 * the largest product of the input limbs.
// Schoolbook 10x10 limb multiply producing 19 output limbs. The factors of 2
// compensate for the alternating 26/25-bit limb sizes (odd-limb products lose
// a bit of weight). Each product is done s32*s32 -> s64 to avoid overflow.
224 static void fproduct(limb *output, const limb *in2, const limb *in) {
225 output[0] = ((limb) ((s32) in2[0])) * ((s32) in[0]);
226 output[1] = ((limb) ((s32) in2[0])) * ((s32) in[1]) +
227 ((limb) ((s32) in2[1])) * ((s32) in[0]);
228 output[2] = 2 * ((limb) ((s32) in2[1])) * ((s32) in[1]) +
229 ((limb) ((s32) in2[0])) * ((s32) in[2]) +
230 ((limb) ((s32) in2[2])) * ((s32) in[0]);
231 output[3] = ((limb) ((s32) in2[1])) * ((s32) in[2]) +
232 ((limb) ((s32) in2[2])) * ((s32) in[1]) +
233 ((limb) ((s32) in2[0])) * ((s32) in[3]) +
234 ((limb) ((s32) in2[3])) * ((s32) in[0]);
235 output[4] = ((limb) ((s32) in2[2])) * ((s32) in[2]) +
236 2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
237 ((limb) ((s32) in2[3])) * ((s32) in[1])) +
238 ((limb) ((s32) in2[0])) * ((s32) in[4]) +
239 ((limb) ((s32) in2[4])) * ((s32) in[0]);
240 output[5] = ((limb) ((s32) in2[2])) * ((s32) in[3]) +
241 ((limb) ((s32) in2[3])) * ((s32) in[2]) +
242 ((limb) ((s32) in2[1])) * ((s32) in[4]) +
243 ((limb) ((s32) in2[4])) * ((s32) in[1]) +
244 ((limb) ((s32) in2[0])) * ((s32) in[5]) +
245 ((limb) ((s32) in2[5])) * ((s32) in[0]);
246 output[6] = 2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
247 ((limb) ((s32) in2[1])) * ((s32) in[5]) +
248 ((limb) ((s32) in2[5])) * ((s32) in[1])) +
249 ((limb) ((s32) in2[2])) * ((s32) in[4]) +
250 ((limb) ((s32) in2[4])) * ((s32) in[2]) +
251 ((limb) ((s32) in2[0])) * ((s32) in[6]) +
252 ((limb) ((s32) in2[6])) * ((s32) in[0]);
253 output[7] = ((limb) ((s32) in2[3])) * ((s32) in[4]) +
254 ((limb) ((s32) in2[4])) * ((s32) in[3]) +
255 ((limb) ((s32) in2[2])) * ((s32) in[5]) +
256 ((limb) ((s32) in2[5])) * ((s32) in[2]) +
257 ((limb) ((s32) in2[1])) * ((s32) in[6]) +
258 ((limb) ((s32) in2[6])) * ((s32) in[1]) +
259 ((limb) ((s32) in2[0])) * ((s32) in[7]) +
260 ((limb) ((s32) in2[7])) * ((s32) in[0]);
261 output[8] = ((limb) ((s32) in2[4])) * ((s32) in[4]) +
262 2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
263 ((limb) ((s32) in2[5])) * ((s32) in[3]) +
264 ((limb) ((s32) in2[1])) * ((s32) in[7]) +
265 ((limb) ((s32) in2[7])) * ((s32) in[1])) +
266 ((limb) ((s32) in2[2])) * ((s32) in[6]) +
267 ((limb) ((s32) in2[6])) * ((s32) in[2]) +
268 ((limb) ((s32) in2[0])) * ((s32) in[8]) +
269 ((limb) ((s32) in2[8])) * ((s32) in[0]);
270 output[9] = ((limb) ((s32) in2[4])) * ((s32) in[5]) +
271 ((limb) ((s32) in2[5])) * ((s32) in[4]) +
272 ((limb) ((s32) in2[3])) * ((s32) in[6]) +
273 ((limb) ((s32) in2[6])) * ((s32) in[3]) +
274 ((limb) ((s32) in2[2])) * ((s32) in[7]) +
275 ((limb) ((s32) in2[7])) * ((s32) in[2]) +
276 ((limb) ((s32) in2[1])) * ((s32) in[8]) +
277 ((limb) ((s32) in2[8])) * ((s32) in[1]) +
278 ((limb) ((s32) in2[0])) * ((s32) in[9]) +
279 ((limb) ((s32) in2[9])) * ((s32) in[0]);
280 output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
281 ((limb) ((s32) in2[3])) * ((s32) in[7]) +
282 ((limb) ((s32) in2[7])) * ((s32) in[3]) +
283 ((limb) ((s32) in2[1])) * ((s32) in[9]) +
284 ((limb) ((s32) in2[9])) * ((s32) in[1])) +
285 ((limb) ((s32) in2[4])) * ((s32) in[6]) +
286 ((limb) ((s32) in2[6])) * ((s32) in[4]) +
287 ((limb) ((s32) in2[2])) * ((s32) in[8]) +
288 ((limb) ((s32) in2[8])) * ((s32) in[2]);
289 output[11] = ((limb) ((s32) in2[5])) * ((s32) in[6]) +
290 ((limb) ((s32) in2[6])) * ((s32) in[5]) +
291 ((limb) ((s32) in2[4])) * ((s32) in[7]) +
292 ((limb) ((s32) in2[7])) * ((s32) in[4]) +
293 ((limb) ((s32) in2[3])) * ((s32) in[8]) +
294 ((limb) ((s32) in2[8])) * ((s32) in[3]) +
295 ((limb) ((s32) in2[2])) * ((s32) in[9]) +
296 ((limb) ((s32) in2[9])) * ((s32) in[2]);
297 output[12] = ((limb) ((s32) in2[6])) * ((s32) in[6]) +
298 2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
299 ((limb) ((s32) in2[7])) * ((s32) in[5]) +
300 ((limb) ((s32) in2[3])) * ((s32) in[9]) +
301 ((limb) ((s32) in2[9])) * ((s32) in[3])) +
302 ((limb) ((s32) in2[4])) * ((s32) in[8]) +
303 ((limb) ((s32) in2[8])) * ((s32) in[4]);
304 output[13] = ((limb) ((s32) in2[6])) * ((s32) in[7]) +
305 ((limb) ((s32) in2[7])) * ((s32) in[6]) +
306 ((limb) ((s32) in2[5])) * ((s32) in[8]) +
307 ((limb) ((s32) in2[8])) * ((s32) in[5]) +
308 ((limb) ((s32) in2[4])) * ((s32) in[9]) +
309 ((limb) ((s32) in2[9])) * ((s32) in[4]);
310 output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
311 ((limb) ((s32) in2[5])) * ((s32) in[9]) +
312 ((limb) ((s32) in2[9])) * ((s32) in[5])) +
313 ((limb) ((s32) in2[6])) * ((s32) in[8]) +
314 ((limb) ((s32) in2[8])) * ((s32) in[6]);
315 output[15] = ((limb) ((s32) in2[7])) * ((s32) in[8]) +
316 ((limb) ((s32) in2[8])) * ((s32) in[7]) +
317 ((limb) ((s32) in2[6])) * ((s32) in[9]) +
318 ((limb) ((s32) in2[9])) * ((s32) in[6]);
319 output[16] = ((limb) ((s32) in2[8])) * ((s32) in[8]) +
320 2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
321 ((limb) ((s32) in2[9])) * ((s32) in[7]));
322 output[17] = ((limb) ((s32) in2[8])) * ((s32) in[9]) +
323 ((limb) ((s32) in2[9])) * ((s32) in[8]);
324 output[18] = 2 * ((limb) ((s32) in2[9])) * ((s32) in[9]);
327 /* Reduce a long form to a short form by taking the input mod 2^255 - 19.
329 * On entry: |output[i]| < 14*2^54
330 * On exit: |output[0..8]| < 280*2^54 */
// Folds limbs 10..18 back into limbs 0..8. (x<<4) + (x<<1) + x == 19*x,
// valid because limb 10+i has weight 2^255 * (weight of limb i) and
// 2^255 = 19 mod p.
331 static void freduce_degree(limb *output) {
332 /* Each of these shifts and adds ends up multiplying the value by 19.
334 * For output[0..8], the absolute entry value is < 14*2^54 and we add, at
335 * most, 19*14*2^54 thus, on exit, |output[0..8]| < 280*2^54. */
336 output[8] += output[18] << 4;
337 output[8] += output[18] << 1;
338 output[8] += output[18];
339 output[7] += output[17] << 4;
340 output[7] += output[17] << 1;
341 output[7] += output[17];
342 output[6] += output[16] << 4;
343 output[6] += output[16] << 1;
344 output[6] += output[16];
345 output[5] += output[15] << 4;
346 output[5] += output[15] << 1;
347 output[5] += output[15];
348 output[4] += output[14] << 4;
349 output[4] += output[14] << 1;
350 output[4] += output[14];
351 output[3] += output[13] << 4;
352 output[3] += output[13] << 1;
353 output[3] += output[13];
354 output[2] += output[12] << 4;
355 output[2] += output[12] << 1;
356 output[2] += output[12];
357 output[1] += output[11] << 4;
358 output[1] += output[11] << 1;
359 output[1] += output[11];
360 output[0] += output[10] << 4;
361 output[0] += output[10] << 1;
362 output[0] += output[10];
// Guard (condition elided in this view): the rounding trick below relies on
// two's-complement representation and arithmetic right shift.
366 #error "This code only works on a two's complement system"
369 /* return v / 2^26, using only shifts and adds.
371 * On entry: v can take any value. */
// Truncating (round-toward-zero) signed division by 2^26 without a division:
// for negative v, add 2^26-1 before shifting so the shift rounds like C's `/`.
373 div_by_2_26(const limb v)
375 /* High word of v; no shift needed. */
376 const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
377 /* Set to all 1s if v was negative; else set to 0s. */
378 const int32_t sign = ((int32_t) highword) >> 31;
379 /* Set to 0x3ffffff if v was negative; else set to 0. */
380 const int32_t roundoff = ((uint32_t) sign) >> 6;
381 /* Should return v / (1<<26) */
382 return (v + roundoff) >> 26;
385 /* return v / (2^25), using only shifts and adds.
387 * On entry: v can take any value. */
// Same trick as div_by_2_26 but for the 25-bit limbs (roundoff = 2^25-1).
389 div_by_2_25(const limb v)
391 /* High word of v; no shift needed*/
392 const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
393 /* Set to all 1s if v was negative; else set to 0s. */
394 const int32_t sign = ((int32_t) highword) >> 31;
395 /* Set to 0x1ffffff if v was negative; else set to 0. */
396 const int32_t roundoff = ((uint32_t) sign) >> 7;
397 /* Should return v / (1<<25) */
398 return (v + roundoff) >> 25;
401 /* Reduce all coefficients of the short form input so that |x| < 2^26.
403 * On entry: |output[i]| < 280*2^54 */
// Carry-propagation over the ten 26/25-bit limbs; overflow past limb 9 is
// folded back into limb 0 times 19, then one extra pass cleans up limb 0.
404 static void freduce_coefficients(limb *output) {
409 for (i = 0; i < 10; i += 2) {
410 limb over = div_by_2_26(output[i]);
411 /* The entry condition (that |output[i]| < 280*2^54) means that over is, at
412 * most, 280*2^28 in the first iteration of this loop. This is added to the
413 * next limb and we can approximate the resulting bound of that limb by
415 output[i] -= over << 26;
418 /* For the first iteration, |output[i+1]| < 281*2^54, thus |over| <
419 * 281*2^29. When this is added to the next limb, the resulting bound can
420 * be approximated as 281*2^54.
422 * For subsequent iterations of the loop, 281*2^54 remains a conservative
423 * bound and no overflow occurs. */
424 over = div_by_2_25(output[i+1]);
425 output[i+1] -= over << 25;
428 /* Now |output[10]| < 281*2^29 and all other coefficients are reduced. */
// (x<<4)+(x<<1)+x == 19*x: fold limb 10 back into limb 0 (2^255 = 19 mod p).
429 output[0] += output[10] << 4;
430 output[0] += output[10] << 1;
431 output[0] += output[10];
435 /* Now output[1..9] are reduced, and |output[0]| < 2^26 + 19*281*2^29
436 * So |over| will be no more than 2^16. */
438 limb over = div_by_2_26(output[0]);
439 output[0] -= over << 26;
443 /* Now output[0,2..9] are reduced, and |output[1]| < 2^25 + 2^16 < 2^26. The
444 * bound on |output[1]| is sufficient to meet our needs. */
447 /* A helpful wrapper around fproduct: output = in * in2.
449 * On entry: |in[i]| < 2^27 and |in2[i]| < 2^27.
451 * output must be distinct to both inputs. The output is reduced degree
452 * (indeed, one need only provide storage for 10 limbs) and |output[i]| < 2^26.
// Multiply into a 19-limb temporary, reduce degree and coefficients, then
// copy the low 10 limbs out.
453 static void fmul32(limb *output, const limb *in, const limb *in2)
456 fproduct(t, in, in2);
459 freduce_coefficients(t);
461 memcpy(output, t, sizeof(limb) * 10);
464 /* Square a number: output = in**2
466 * output must be distinct from the input. The inputs are reduced coefficient
467 * form, the output is not.
469 * output[x] <= 14 * the largest product of the input limbs. */
// Specialized fproduct(in, in): symmetric cross terms are merged (hence the
// pervasive factors of 2 and 4), roughly halving the multiply count.
470 static void fsquare_inner(limb *output, const limb *in) {
471 output[0] = ((limb) ((s32) in[0])) * ((s32) in[0]);
472 output[1] = 2 * ((limb) ((s32) in[0])) * ((s32) in[1]);
473 output[2] = 2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
474 ((limb) ((s32) in[0])) * ((s32) in[2]));
475 output[3] = 2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
476 ((limb) ((s32) in[0])) * ((s32) in[3]));
477 output[4] = ((limb) ((s32) in[2])) * ((s32) in[2]) +
478 4 * ((limb) ((s32) in[1])) * ((s32) in[3]) +
479 2 * ((limb) ((s32) in[0])) * ((s32) in[4]);
480 output[5] = 2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
481 ((limb) ((s32) in[1])) * ((s32) in[4]) +
482 ((limb) ((s32) in[0])) * ((s32) in[5]));
483 output[6] = 2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
484 ((limb) ((s32) in[2])) * ((s32) in[4]) +
485 ((limb) ((s32) in[0])) * ((s32) in[6]) +
486 2 * ((limb) ((s32) in[1])) * ((s32) in[5]));
487 output[7] = 2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
488 ((limb) ((s32) in[2])) * ((s32) in[5]) +
489 ((limb) ((s32) in[1])) * ((s32) in[6]) +
490 ((limb) ((s32) in[0])) * ((s32) in[7]));
491 output[8] = ((limb) ((s32) in[4])) * ((s32) in[4]) +
492 2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
493 ((limb) ((s32) in[0])) * ((s32) in[8]) +
494 2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
495 ((limb) ((s32) in[3])) * ((s32) in[5])));
496 output[9] = 2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
497 ((limb) ((s32) in[3])) * ((s32) in[6]) +
498 ((limb) ((s32) in[2])) * ((s32) in[7]) +
499 ((limb) ((s32) in[1])) * ((s32) in[8]) +
500 ((limb) ((s32) in[0])) * ((s32) in[9]));
501 output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
502 ((limb) ((s32) in[4])) * ((s32) in[6]) +
503 ((limb) ((s32) in[2])) * ((s32) in[8]) +
504 2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
505 ((limb) ((s32) in[1])) * ((s32) in[9])));
506 output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
507 ((limb) ((s32) in[4])) * ((s32) in[7]) +
508 ((limb) ((s32) in[3])) * ((s32) in[8]) +
509 ((limb) ((s32) in[2])) * ((s32) in[9]));
510 output[12] = ((limb) ((s32) in[6])) * ((s32) in[6]) +
511 2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
512 2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
513 ((limb) ((s32) in[3])) * ((s32) in[9])));
514 output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
515 ((limb) ((s32) in[5])) * ((s32) in[8]) +
516 ((limb) ((s32) in[4])) * ((s32) in[9]));
517 output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
518 ((limb) ((s32) in[6])) * ((s32) in[8]) +
519 2 * ((limb) ((s32) in[5])) * ((s32) in[9]));
520 output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
521 ((limb) ((s32) in[6])) * ((s32) in[9]));
522 output[16] = ((limb) ((s32) in[8])) * ((s32) in[8]) +
523 4 * ((limb) ((s32) in[7])) * ((s32) in[9]);
524 output[17] = 2 * ((limb) ((s32) in[8])) * ((s32) in[9]);
525 output[18] = 2 * ((limb) ((s32) in[9])) * ((s32) in[9]);
528 /* fsquare sets output = in^2.
530 * On entry: The |in| argument is in reduced coefficients form and |in[i]| <
533 * On exit: The |output| argument is in reduced coefficients form (indeed, one
534 * need only provide storage for 10 limbs) and |out[i]| < 2^26. */
// Wrapper mirroring fmul32: square into a temporary, reduce, copy out.
536 fsquare32(limb *output, const limb *in) {
538 fsquare_inner(t, in);
539 /* |t[i]| < 14*2^54 because the largest product of two limbs will be <
540 * 2^(27+27) and fsquare_inner adds together, at most, 14 of those
543 freduce_coefficients(t);
545 memcpy(output, t, sizeof(limb) * 10);
// Guard: the constant-time comparisons below require arithmetic (sign
// extending) right shift on signed values, which C leaves implementation-defined.
548 #if (-32 >> 1) != -16
549 #error "This code only works when >> does sign-extension on negative numbers"
552 /* s32_eq returns 0xffffffff iff a == b and zero otherwise. */
553 static s32 s32_eq(s32 a, s32 b) {
563 /* s32_gte returns 0xffffffff if a >= b and zero otherwise, where a and b are
564 * both non-negative. */
// Constant-time >= : produces an all-ones/all-zero mask, no branches.
565 static s32 s32_gte(s32 a, s32 b) {
567 /* a >= 0 iff a >= b. */
571 /* Take a fully reduced polynomial form number and contract it into a
572 * little-endian, 32-byte array.
574 * On entry: |input_limbs[i]| < 2^26 */
// 32-bit counterpart of fcontract: make every limb non-negative (borrow
// passes), normalize limb widths (carry passes), conditionally subtract
// 2^255-19 in constant time, then pack into bytes.
575 static void fcontract32(u8 *output, limb *input_limbs)
582 /* |input_limbs[i]| < 2^26, so it's valid to convert to an s32. */
583 for (i = 0; i < 10; i++)
584 input[i] = (s32)input_limbs[i];
586 for (j = 0; j < 2; ++j) {
587 for (i = 0; i < 9; ++i) {
589 /* This calculation is a time-invariant way to make input[i]
590 * non-negative by borrowing from the next-larger limb. */
591 const s32 mask = input[i] >> 31;
592 const s32 carry = -((input[i] & mask) >> 25);
593 input[i] = input[i] + (carry << 25);
594 input[i+1] = input[i+1] - carry;
596 const s32 mask = input[i] >> 31;
597 const s32 carry = -((input[i] & mask) >> 26);
598 input[i] = input[i] + (carry << 26);
599 input[i+1] = input[i+1] - carry;
603 /* There's no greater limb for input[9] to borrow from, but we can multiply
604 * by 19 and borrow from input[0], which is valid mod 2^255-19. */
606 const s32 mask = input[9] >> 31;
607 const s32 carry = -((input[9] & mask) >> 25);
608 input[9] = input[9] + (carry << 25);
609 input[0] = input[0] - (carry * 19);
612 /* After the first iteration, input[1..9] are non-negative and fit within
613 * 25 or 26 bits, depending on position. However, input[0] may be
617 /* The first borrow-propagation pass above ended with every limb
618 except (possibly) input[0] non-negative.
620 If input[0] was negative after the first pass, then it was because of a
621 carry from input[9]. On entry, input[9] < 2^26 so the carry was, at most,
622 one, since (2**26-1) >> 25 = 1. Thus input[0] >= -19.
624 In the second pass, each limb is decreased by at most one. Thus the second
625 borrow-propagation pass could only have wrapped around to decrease
626 input[0] again if the first pass left input[0] negative *and* input[1]
627 through input[9] were all zero. In that case, input[1] is now 2^25 - 1,
628 and this last borrow-propagation step will leave input[1] non-negative. */
630 const s32 mask = input[0] >> 31;
631 const s32 carry = -((input[0] & mask) >> 26);
632 input[0] = input[0] + (carry << 26);
633 input[1] = input[1] - carry;
636 /* All input[i] are now non-negative. However, there might be values between
637 * 2^25 and 2^26 in a limb which is, nominally, 25 bits wide. */
638 for (j = 0; j < 2; j++) {
639 for (i = 0; i < 9; i++) {
641 const s32 carry = input[i] >> 25;
642 input[i] &= 0x1ffffff;
645 const s32 carry = input[i] >> 26;
646 input[i] &= 0x3ffffff;
652 const s32 carry = input[9] >> 25;
653 input[9] &= 0x1ffffff;
654 input[0] += 19*carry;
658 /* If the first carry-chain pass, just above, ended up with a carry from
659 * input[9], and that caused input[0] to be out-of-bounds, then input[0] was
660 * < 2^26 + 2*19, because the carry was, at most, two.
662 * If the second pass carried from input[9] again then input[0] is < 2*19 and
663 * the input[9] -> input[0] carry didn't push input[0] out of bounds. */
665 /* It still remains the case that input might be between 2^255-19 and 2^255.
666 * In this case, input[1..9] must take their maximum value and input[0] must
667 * be >= (2^255-19) & 0x3ffffff, which is 0x3ffffed. */
668 mask = s32_gte(input[0], 0x3ffffed);
669 for (i = 1; i < 10; i++) {
671 mask &= s32_eq(input[i], 0x1ffffff);
673 mask &= s32_eq(input[i], 0x3ffffff);
677 /* mask is either 0xffffffff (if input >= 2^255-19) and zero otherwise. Thus
678 * this conditionally subtracts 2^255-19. */
679 input[0] -= mask & 0x3ffffed;
681 for (i = 1; i < 10; i++) {
683 input[i] -= mask & 0x1ffffff;
685 input[i] -= mask & 0x3ffffff;
// Byte-packing macro body (the surrounding #define is elided in this view):
// emits one limb's contribution as four little-endian bytes at offset s.
698 output[s+0] |= input[i] & 0xff; \
699 output[s+1] = (input[i] >> 8) & 0xff; \
700 output[s+2] = (input[i] >> 16) & 0xff; \
701 output[s+3] = (input[i] >> 24) & 0xff;
// Pack ten 32-bit-valued limbs into a bits320 (via its uints[] view).
717 bits320 bits320_limbs(limb limbs[10])
719 bits320 output; int32_t i;
721 output.uints[i] = limbs[i];
// 32-bit build: adapt bits320 <-> limb arrays around fscalar_product32.
725 static inline bits320 fscalar_product(const bits320 in,const uint64_t scalar)
727 limb output[10],input[10]; int32_t i;
729 input[i] = in.uints[i];
730 fscalar_product32(output,input,scalar);
731 return(bits320_limbs(output));
// 32-bit build: out = in^(2^count) by repeated fsquare32 calls.
// NOTE(review): loop index i is reused for both the unpacking loop and the
// squaring loop; count is uint64_t compared against int32_t i — fine for the
// small counts used by crecip, but worth confirming against full source.
734 static inline bits320 fsquare_times(const bits320 in,uint64_t count)
736 limb output[10],input[10]; int32_t i;
738 input[i] = in.uints[i];
739 for (i=0; i<count; i++)
741 fsquare32(output,input);
742 memcpy(input,output,sizeof(input));
744 return(bits320_limbs(output));
// External donna helpers used by the 32-bit build (defined elsewhere).
747 bits256 fmul_donna(bits256 a,bits256 b);
748 bits256 crecip_donna(bits256 a);
// 32-bit build: contract via fcontract32 after unpacking to limb form.
750 bits256 fcontract(const bits320 in)
752 bits256 contracted; limb input[10]; int32_t i;
754 input[i] = in.uints[i];
755 fcontract32(contracted.bytes,input);
// 32-bit build: multiply by round-tripping through the packed representation
// and fmul_donna, rather than calling fmul32 directly (the direct path is
// kept below, commented out).
759 bits320 fmul(const bits320 in,const bits320 in2)
761 /*limb output[11],input[10],input2[10]; int32_t i;
764 input[i] = in.uints[i];
765 input2[i] = in2.uints[i];
767 fmul32(output,input,input2);
768 return(bits320_limbs(output));*/
770 mulval = fmul_donna(fcontract(in),fcontract(in2));
771 return(fexpand(mulval));
// 32-bit build of curve25519: clamp the secret (X25519 clamping) and defer
// the actual scalar multiplication to curve25519_donna.
774 bits256 curve25519(bits256 mysecret,bits256 theirpublic)
776 int32_t curve25519_donna(uint8_t *mypublic,const uint8_t *secret,const uint8_t *basepoint);
778 mysecret.bytes[0] &= 0xf8, mysecret.bytes[31] &= 0x7f, mysecret.bytes[31] |= 0x40;
779 curve25519_donna(&rawkey.bytes[0],&mysecret.bytes[0],&theirpublic.bytes[0]);
786 // Input: Q, Q', Q-Q' -> Output: 2Q, Q+Q'
787 // x2 z2: long form && x3 z3: long form
788 // x z: short form, destroyed && xprime zprime: short form, destroyed
789 // qmqp: short form, preserved
// One step of the Montgomery ladder: simultaneous point doubling (x2,z2) and
// differential addition (x3,z3) in projective x-only coordinates.
// 121665 = (A-2)/4 for Curve25519's A = 486662.
791 fmonty(bits320 *x2, bits320 *z2, // output 2Q
792 bits320 *x3, bits320 *z3, // output Q + Q'
793 bits320 *x, bits320 *z, // input Q
794 bits320 *xprime, bits320 *zprime, // input Q'
795 const bits320 qmqp) // input Q - Q'
797 bits320 origx,origxprime,zzz,xx,zz,xxprime,zzprime;
799 *x = fsum(*x,*z), fdifference_backwards(z->ulongs,origx.ulongs); // does x - z
800 origxprime = *xprime;
801 *xprime = fsum(*xprime,*zprime), fdifference_backwards(zprime->ulongs,origxprime.ulongs);
802 xxprime = fmul(*xprime,*z), zzprime = fmul(*x,*zprime);
803 origxprime = xxprime;
804 xxprime = fsum(xxprime,zzprime), fdifference_backwards(zzprime.ulongs,origxprime.ulongs);
805 *x3 = fsquare_times(xxprime,1), *z3 = fmul(fsquare_times(zzprime,1),qmqp);
806 xx = fsquare_times(*x,1), zz = fsquare_times(*z,1);
808 fdifference_backwards(zz.ulongs,xx.ulongs); // does zz = xx - zz
809 zzz = fscalar_product(zz,121665);
810 *z2 = fmul(zz,fsum(zzz,xx));
813 // -----------------------------------------------------------------------------
814 // Maybe swap the contents of two limb arrays (@a and @b), each @len elements
815 // long. Perform the swap iff @swap is non-zero.
816 // This function performs the swap without leaking any side-channel information.
817 // -----------------------------------------------------------------------------
// iswap must be 0 or 1: -iswap yields an all-zero or all-one mask, and the
// XOR dance swaps (or not) without a data-dependent branch.
818 static inline void swap_conditional(bits320 *a,bits320 *b,uint64_t iswap)
820 int32_t i; const uint64_t swap = -iswap;
823 const uint64_t x = swap & (a->ulongs[i] ^ b->ulongs[i]);
824 a->ulongs[i] ^= x, b->ulongs[i] ^= x;
828 // Calculates nQ where Q is the x-coordinate of a point on the curve
829 // resultx/resultz: the x coordinate of the resulting curve point (short form)
830 // n: a little endian, 32-byte number
831 // q: a point of the curve (short form)
// Montgomery ladder: process the scalar bit-by-bit (MSB first), using
// constant-time conditional swaps so the memory access pattern does not
// depend on secret bits. (nq,nqpq) start as (identity, q).
832 void cmult(bits320 *resultx,bits320 *resultz,bits256 secret,const bits320 q)
834 int32_t i,j; bits320 a,b,c,d,e,f,g,h,*t;
835 bits320 Zero320bits,One320bits, *nqpqx = &a,*nqpqz = &b,*nqx = &c,*nqz = &d,*nqpqx2 = &e,*nqpqz2 = &f,*nqx2 = &g,*nqz2 = &h;
836 memset(&Zero320bits,0,sizeof(Zero320bits));
837 memset(&One320bits,0,sizeof(One320bits)), One320bits.ulongs[0] = 1;
838 a = d = e = g = Zero320bits, b = c = f = h = One320bits;
842 uint8_t byte = secret.bytes[31 - i];
845 const uint64_t bit = byte >> 7;
846 swap_conditional(nqx,nqpqx,bit), swap_conditional(nqz,nqpqz,bit);
847 fmonty(nqx2,nqz2,nqpqx2,nqpqz2,nqx,nqz,nqpqx,nqpqz,q);
848 swap_conditional(nqx2,nqpqx2,bit), swap_conditional(nqz2,nqpqz2,bit);
// Ping-pong the input/output buffers instead of copying limbs.
849 t = nqx, nqx = nqx2, nqx2 = t;
850 t = nqz, nqz = nqz2, nqz2 = t;
851 t = nqpqx, nqpqx = nqpqx2, nqpqx2 = t;
852 t = nqpqz, nqpqz = nqpqz2, nqpqz2 = t;
856 *resultx = *nqx, *resultz = *nqz;
859 // Shamelessly copied from donna's code that copied djb's code, changed a little
// Field inversion via Fermat's little theorem: z^-1 = z^(p-2) = z^(2^255-21),
// computed with a fixed square-and-multiply addition chain (the exponent
// reached so far is tracked in the margin comments).
860 inline bits320 crecip(const bits320 z)
863 /* 2 */ a = fsquare_times(z, 1); // a = 2
864 /* 8 */ t0 = fsquare_times(a, 2);
865 /* 9 */ b = fmul(t0, z); // b = 9
866 /* 11 */ a = fmul(b, a); // a = 11
867 /* 22 */ t0 = fsquare_times(a, 1);
868 /* 2^5 - 2^0 = 31 */ b = fmul(t0, b);
869 /* 2^10 - 2^5 */ t0 = fsquare_times(b, 5);
870 /* 2^10 - 2^0 */ b = fmul(t0, b);
871 /* 2^20 - 2^10 */ t0 = fsquare_times(b, 10);
872 /* 2^20 - 2^0 */ c = fmul(t0, b);
873 /* 2^40 - 2^20 */ t0 = fsquare_times(c, 20);
874 /* 2^40 - 2^0 */ t0 = fmul(t0, c);
875 /* 2^50 - 2^10 */ t0 = fsquare_times(t0, 10);
876 /* 2^50 - 2^0 */ b = fmul(t0, b);
877 /* 2^100 - 2^50 */ t0 = fsquare_times(b, 50);
878 /* 2^100 - 2^0 */ c = fmul(t0, b);
879 /* 2^200 - 2^100 */ t0 = fsquare_times(c, 100);
880 /* 2^200 - 2^0 */ t0 = fmul(t0, c);
881 /* 2^250 - 2^50 */ t0 = fsquare_times(t0, 50);
882 /* 2^250 - 2^0 */ t0 = fmul(t0, b);
883 /* 2^255 - 2^5 */ t0 = fsquare_times(t0, 5);
884 /* 2^255 - 21 */ return(fmul(t0, a));
888 void OS_randombytes(unsigned char *x,long xlen);
// Generate 256 random bits from the platform RNG (OS_randombytes or, on the
// other build path, libsodium's randombytes_buf). If privkeyflag is set,
// apply X25519 clamping so the result is a valid curve25519 private key.
891 bits256 rand256(int32_t privkeyflag)
895 OS_randombytes(randval.bytes,sizeof(randval));
897 randombytes_buf(randval.bytes,sizeof(randval));
899 if ( privkeyflag != 0 )
900 randval.bytes[0] &= 0xf8, randval.bytes[31] &= 0x7f, randval.bytes[31] |= 0x40;
// Return the standard Curve25519 base point: x = 9, encoded little-endian.
904 bits256 curve25519_basepoint9()
907 memset(&basepoint,0,sizeof(basepoint));
908 basepoint.bytes[0] = 9;
// Generate a fresh keypair: random clamped private key, public key =
// privkey * basepoint(9). Returns the private key, writes pubkey to *pubkeyp.
912 bits256 curve25519_keypair(bits256 *pubkeyp)
915 privkey = rand256(1);
916 *pubkeyp = curve25519(privkey,curve25519_basepoint9());
917 //printf("[%llx %llx] ",privkey.txid,(*pubkeyp).txid);
// ECDH: shared secret = SHA-256(privkey * otherpub). Hashing the raw DH
// output before use is the usual key-derivation step.
921 bits256 curve25519_shared(bits256 privkey,bits256 otherpub)
924 shared = curve25519(privkey,otherpub);
925 vcalc_sha256(0,hash.bytes,shared.bytes,sizeof(shared));
926 //printf("priv.%llx pub.%llx shared.%llx -> hash.%llx\n",privkey.txid,pubkey.txid,shared.txid,hash.txid);
927 //hash.bytes[0] &= 0xf8, hash.bytes[31] &= 0x7f, hash.bytes[31] |= 64;
// Raw-byte-pointer adapter over curve25519(): copies the 32-byte inputs into
// bits256 values and writes the 32-byte result to mypublic.
931 int32_t curve25519_donna(uint8_t *mypublic,const uint8_t *secret,const uint8_t *basepoint)
934 memcpy(p.bytes,secret,sizeof(p));
935 memcpy(bp.bytes,basepoint,sizeof(bp));
936 val = curve25519(p,bp);
937 memcpy(mypublic,val.bytes,sizeof(val));
// NXT-style address derivation: mysecret = SHA-256(pass) (clamped), mypublic
// = curve25519(mysecret, 9), and the returned 64-bit address is the first
// 8 bytes of SHA-256(mypublic). If pass is NULL/empty, mysecret is used as
// provided by the caller (only re-clamped).
943 uint64_t conv_NXTpassword(unsigned char *mysecret,unsigned char *mypublic,uint8_t *pass,int32_t passlen)
945 static uint8_t basepoint[32] = {9};
946 uint64_t addr; uint8_t hash[32];
947 if ( pass != 0 && passlen != 0 )
948 vcalc_sha256(0,mysecret,pass,passlen);
// X25519 clamping on the derived secret.
949 mysecret[0] &= 248, mysecret[31] &= 127, mysecret[31] |= 64;
950 curve25519_donna(mypublic,mysecret,basepoint);
951 vcalc_sha256(0,hash,mypublic,32);
952 memcpy(&addr,hash,sizeof(addr));
// Derive the KV-store private key (returned) and public key (*pubkeyp)
// deterministically from a passphrase via conv_NXTpassword.
957 uint256 komodo_kvprivkey(uint256 *pubkeyp,char *passphrase)
960 conv_NXTpassword((uint8_t *)&privkey,(uint8_t *)pubkeyp,(uint8_t *)passphrase,(int32_t)strlen(passphrase));
// KV-store "signature": sig = DH-shared(privkey, H(buf)*G), which the
// verifier can recompute as DH-shared(H(buf), pubkey) — see
// komodo_kvsigverify. checksig is computed here only for the (commented-out)
// debug comparison.
963 uint256 komodo_kvsig(uint8_t *buf,int32_t len,uint256 _privkey)
965 bits256 sig,hash,otherpub,checksig,pubkey,privkey; uint256 usig;
966 memcpy(&privkey,&_privkey,sizeof(privkey));
967 vcalc_sha256(0,hash.bytes,buf,len);
968 otherpub = curve25519(hash,curve25519_basepoint9());
969 pubkey = curve25519(privkey,curve25519_basepoint9());
970 sig = curve25519_shared(privkey,otherpub);
971 checksig = curve25519_shared(hash,pubkey);
972 /*int32_t i; for (i=0; i<len; i++)
973 printf("%02x",buf[i]);
976 printf("%02x",((uint8_t *)&privkey)[i]);
979 printf("%02x",((uint8_t *)&pubkey)[i]);
980 printf(" pubkey\n");*/
981 memcpy(&usig,&sig,sizeof(usig));
983 int32_t komodo_kvsigverify(uint8_t *buf,int32_t len,uint256 _pubkey,uint256 sig)
985 bits256 hash,checksig,pubkey; static uint256 zeroes;
986 memcpy(&pubkey,&_pubkey,sizeof(pubkey));
987 if ( memcmp(&pubkey,&zeroes,sizeof(pubkey)) != 0 )
989 vcalc_sha256(0,hash.bytes,buf,len);
990 checksig = curve25519_shared(hash,pubkey);
991 /*int32_t i; for (i=0; i<len; i++)
992 printf("%02x",buf[i]);
995 printf("%02x",((uint8_t *)&hash)[i]);
998 printf("%02x",((uint8_t *)&pubkey)[i]);
999 printf(" verify pubkey\n");
1000 for (i=0; i<32; i++)
1001 printf("%02x",((uint8_t *)&sig)[i]);
1003 for (i=0; i<32; i++)
1004 printf("%02x",((uint8_t *)&checksig)[i]);
1005 printf(" checksig\n");*/
1006 if ( memcmp(&checksig,&sig,sizeof(sig)) != 0 )
1008 //else printf("VALIDATED\n");