/******************************************************************************
 * Copyright © 2014-2018 The SuperNET Developers.                             *
 *                                                                            *
 * See the AUTHORS, DEVELOPER-AGREEMENT and LICENSE files at                  *
 * the top-level directory of this distribution for the individual copyright  *
 * holder information and the developer policies on copyright and licensing.  *
 *                                                                            *
 * Unless otherwise agreed in a custom licensing agreement, no part of the    *
 * SuperNET software, including this file may be copied, modified, propagated *
 * or distributed except according to the terms contained in the LICENSE file *
 *                                                                            *
 * Removal or modification of this copyright notice is prohibited.            *
 *                                                                            *
 ******************************************************************************/

#ifndef H_KOMODO25519_H
#define H_KOMODO25519_H
// derived from curve25519_donna

#include <stdint.h>
#include <memory.h>
#include <string.h>
#ifdef _WIN32
#include <sodium.h>
#endif
bits320 fmul(const bits320 in2,const bits320 in);
bits320 fexpand(bits256 basepoint);
bits256 fcontract(const bits320 input);
void cmult(bits320 *resultx,bits320 *resultz,bits256 secret,const bits320 q);
bits320 crecip(const bits320 z);
bits256 curve25519(bits256 mysecret,bits256 basepoint);

// Sum two numbers: output += in
static inline bits320 fsum(bits320 output,bits320 in)
{
    int32_t i;
    for (i=0; i<5; i++)
        output.ulongs[i] += in.ulongs[i];
    return(output);
}

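// Compute out = in - out over the 51-bit limb representation (note the reversed
// operand order). The two 2^54-based constants are the limbs of 8*p (p = 2^255-19),
// added so no limb of the result can go negative.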
static inline void fdifference_backwards(uint64_t *out,const uint64_t *in)
{
    static const uint64_t two54m152 = (((uint64_t)1) << 54) - 152; // 152 is 19 << 3
    static const uint64_t two54m8 = (((uint64_t)1) << 54) - 8;
    int32_t i;
    out[0] = in[0] + two54m152 - out[0];
    for (i=1; i<5; i++)
        out[i] = in[i] + two54m8 - out[i];
}

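// store_limb / load_limb write and read a 64-bit limb as 8 little-endian bytes,
// independent of host byte order.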
void store_limb(uint8_t *out,uint64_t in)
{
    int32_t i;
    for (i=0; i<8; i++,in>>=8)
        out[i] = (in & 0xff);
}

static inline uint64_t load_limb(uint8_t *in)
{
    return
        ((uint64_t)in[0]) |
        (((uint64_t)in[1]) << 8) |
        (((uint64_t)in[2]) << 16) |
        (((uint64_t)in[3]) << 24) |
        (((uint64_t)in[4]) << 32) |
        (((uint64_t)in[5]) << 40) |
        (((uint64_t)in[6]) << 48) |
        (((uint64_t)in[7]) << 56);
}

// Take a little-endian, 32-byte number and expand it into polynomial form
bits320 fexpand(bits256 basepoint)
{
    bits320 out;
    out.ulongs[0] = load_limb(basepoint.bytes) & 0x7ffffffffffffLL;
    out.ulongs[1] = (load_limb(basepoint.bytes+6) >> 3) & 0x7ffffffffffffLL;
    out.ulongs[2] = (load_limb(basepoint.bytes+12) >> 6) & 0x7ffffffffffffLL;
    out.ulongs[3] = (load_limb(basepoint.bytes+19) >> 1) & 0x7ffffffffffffLL;
    out.ulongs[4] = (load_limb(basepoint.bytes+24) >> 12) & 0x7ffffffffffffLL;
    return(out);
}

#if defined(__amd64) || defined(__aarch64__)
// donna: special gcc mode for 128-bit integers. It's implemented on 64-bit platforms only as far as I know.
typedef unsigned uint128_t __attribute__((mode(TI)));

// Multiply a number by a scalar: output = in * scalar
static inline bits320 fscalar_product(const bits320 in,const uint64_t scalar)
{
    int32_t i; uint128_t a = 0; bits320 output;
    a = ((uint128_t)in.ulongs[0]) * scalar;
    output.ulongs[0] = ((uint64_t)a) & 0x7ffffffffffffLL;
    for (i=1; i<5; i++)
    {
        a = ((uint128_t)in.ulongs[i]) * scalar + ((uint64_t) (a >> 51));
        output.ulongs[i] = ((uint64_t)a) & 0x7ffffffffffffLL;
    }
    output.ulongs[0] += (a >> 51) * 19;
    return(output);
}

// Multiply two numbers: output = in2 * in
// output must be distinct to both inputs. The inputs are reduced coefficient form, the output is not.
// Assumes that in[i] < 2**55 and likewise for in2. On return, output[i] < 2**52
bits320 fmul(const bits320 in2,const bits320 in)
{
    uint128_t t[5]; uint64_t r0,r1,r2,r3,r4,s0,s1,s2,s3,s4,c; bits320 out;
    r0 = in.ulongs[0], r1 = in.ulongs[1], r2 = in.ulongs[2], r3 = in.ulongs[3], r4 = in.ulongs[4];
    s0 = in2.ulongs[0], s1 = in2.ulongs[1], s2 = in2.ulongs[2], s3 = in2.ulongs[3], s4 = in2.ulongs[4];
    t[0] = ((uint128_t) r0) * s0;
    t[1] = ((uint128_t) r0) * s1 + ((uint128_t) r1) * s0;
    t[2] = ((uint128_t) r0) * s2 + ((uint128_t) r2) * s0 + ((uint128_t) r1) * s1;
    t[3] = ((uint128_t) r0) * s3 + ((uint128_t) r3) * s0 + ((uint128_t) r1) * s2 + ((uint128_t) r2) * s1;
    t[4] = ((uint128_t) r0) * s4 + ((uint128_t) r4) * s0 + ((uint128_t) r3) * s1 + ((uint128_t) r1) * s3 + ((uint128_t) r2) * s2;
    r4 *= 19, r1 *= 19, r2 *= 19, r3 *= 19;
    t[0] += ((uint128_t) r4) * s1 + ((uint128_t) r1) * s4 + ((uint128_t) r2) * s3 + ((uint128_t) r3) * s2;
    t[1] += ((uint128_t) r4) * s2 + ((uint128_t) r2) * s4 + ((uint128_t) r3) * s3;
    t[2] += ((uint128_t) r4) * s3 + ((uint128_t) r3) * s4;
    t[3] += ((uint128_t) r4) * s4;
    r0 = (uint64_t)t[0] & 0x7ffffffffffffLL; c = (uint64_t)(t[0] >> 51);
    t[1] += c; r1 = (uint64_t)t[1] & 0x7ffffffffffffLL; c = (uint64_t)(t[1] >> 51);
    t[2] += c; r2 = (uint64_t)t[2] & 0x7ffffffffffffLL; c = (uint64_t)(t[2] >> 51);
    t[3] += c; r3 = (uint64_t)t[3] & 0x7ffffffffffffLL; c = (uint64_t)(t[3] >> 51);
    t[4] += c; r4 = (uint64_t)t[4] & 0x7ffffffffffffLL; c = (uint64_t)(t[4] >> 51);
    r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffLL;
    r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffLL;
    r2 += c;
    out.ulongs[0] = r0, out.ulongs[1] = r1, out.ulongs[2] = r2, out.ulongs[3] = r3, out.ulongs[4] = r4;
    return(out);
}

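// Repeated squaring: returns in^(2^count). Same limb layout and carry/reduce
// strategy as fmul, but exploits the symmetry of squaring.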
bits320 fsquare_times(const bits320 in,uint64_t count)
{
    uint128_t t[5]; uint64_t r0,r1,r2,r3,r4,c,d0,d1,d2,d4,d419; bits320 out;
    r0 = in.ulongs[0], r1 = in.ulongs[1], r2 = in.ulongs[2], r3 = in.ulongs[3], r4 = in.ulongs[4];
    do
    {
        d0 = r0 * 2;
        d1 = r1 * 2;
        d2 = r2 * 2 * 19;
        d419 = r4 * 19;
        d4 = d419 * 2;
        t[0] = ((uint128_t) r0) * r0 + ((uint128_t) d4) * r1 + (((uint128_t) d2) * (r3 ));
        t[1] = ((uint128_t) d0) * r1 + ((uint128_t) d4) * r2 + (((uint128_t) r3) * (r3 * 19));
        t[2] = ((uint128_t) d0) * r2 + ((uint128_t) r1) * r1 + (((uint128_t) d4) * (r3 ));
        t[3] = ((uint128_t) d0) * r3 + ((uint128_t) d1) * r2 + (((uint128_t) r4) * (d419 ));
        t[4] = ((uint128_t) d0) * r4 + ((uint128_t) d1) * r3 + (((uint128_t) r2) * (r2 ));

        r0 = (uint64_t)t[0] & 0x7ffffffffffffLL; c = (uint64_t)(t[0] >> 51);
        t[1] += c; r1 = (uint64_t)t[1] & 0x7ffffffffffffLL; c = (uint64_t)(t[1] >> 51);
        t[2] += c; r2 = (uint64_t)t[2] & 0x7ffffffffffffLL; c = (uint64_t)(t[2] >> 51);
        t[3] += c; r3 = (uint64_t)t[3] & 0x7ffffffffffffLL; c = (uint64_t)(t[3] >> 51);
        t[4] += c; r4 = (uint64_t)t[4] & 0x7ffffffffffffLL; c = (uint64_t)(t[4] >> 51);
        r0 += c * 19; c = r0 >> 51; r0 = r0 & 0x7ffffffffffffLL;
        r1 += c; c = r1 >> 51; r1 = r1 & 0x7ffffffffffffLL;
        r2 += c;
    } while( --count );
    out.ulongs[0] = r0, out.ulongs[1] = r1, out.ulongs[2] = r2, out.ulongs[3] = r3, out.ulongs[4] = r4;
    return(out);
}

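// One carry-propagation pass over the five 51-bit limbs. When flag is set, the
// carry out of the top limb is folded back into limb 0 as carry*19, since
// 2^255 == 19 (mod 2^255-19).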
static inline void fcontract_iter(uint128_t t[5],int32_t flag)
{
    int32_t i; uint64_t mask = 0x7ffffffffffffLL;
    for (i=0; i<4; i++)
        t[i+1] += t[i] >> 51, t[i] &= mask;
    if ( flag != 0 )
        t[0] += 19 * (t[4] >> 51);
    t[4] &= mask;
}
171 | ||
172 | // donna: Take a fully reduced polynomial form number and contract it into a little-endian, 32-byte array | |
173 | bits256 fcontract(const bits320 input) | |
174 | { | |
175 | uint128_t t[5]; int32_t i; bits256 out; | |
176 | for (i=0; i<5; i++) | |
177 | t[i] = input.ulongs[i]; | |
178 | fcontract_iter(t,1), fcontract_iter(t,1); | |
179 | // donna: now t is between 0 and 2^255-1, properly carried. | |
180 | // donna: case 1: between 0 and 2^255-20. case 2: between 2^255-19 and 2^255-1. | |
181 | t[0] += 19, fcontract_iter(t,1); | |
182 | // now between 19 and 2^255-1 in both cases, and offset by 19. | |
183 | t[0] += 0x8000000000000 - 19; | |
184 | for (i=1; i<5; i++) | |
185 | t[i] += 0x8000000000000 - 1; | |
186 | // now between 2^255 and 2^256-20, and offset by 2^255. | |
187 | fcontract_iter(t,0); | |
188 | store_limb(out.bytes,t[0] | (t[1] << 51)); | |
189 | store_limb(out.bytes+8,(t[1] >> 13) | (t[2] << 38)); | |
190 | store_limb(out.bytes+16,(t[2] >> 26) | (t[3] << 25)); | |
191 | store_limb(out.bytes+24,(t[3] >> 39) | (t[4] << 12)); | |
192 | return(out); | |
193 | } | |
194 | ||
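// X25519 scalar multiplication (64-bit path): clamp the secret per the
// curve25519 convention, run the Montgomery ladder (cmult) on the
// x-coordinate, then recover the affine result as x/z via one field inversion.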
bits256 curve25519(bits256 mysecret,bits256 basepoint)
{
    bits320 bp,x,z;
    mysecret.bytes[0] &= 0xf8, mysecret.bytes[31] &= 0x7f, mysecret.bytes[31] |= 0x40;
    bp = fexpand(basepoint);
    cmult(&x,&z,mysecret,bp);
    return(fcontract(fmul(x,crecip(z))));
}

#else
// from curve25519-donna.c
typedef uint8_t u8;
typedef int32_t s32;
typedef int64_t limb;

/* Multiply a number by a scalar: output = in * scalar */
static void fscalar_product32(limb *output, const limb *in, const limb scalar) {
    unsigned i;
    for (i = 0; i < 10; ++i) {
        output[i] = in[i] * scalar;
    }
}

/* Multiply two numbers: output = in2 * in
 *
 * output must be distinct to both inputs. The inputs are reduced coefficient
 * form, the output is not.
 *
 * output[x] <= 14 * the largest product of the input limbs.
static void fproduct(limb *output, const limb *in2, const limb *in) {
    output[0] = ((limb) ((s32) in2[0])) * ((s32) in[0]);
    output[1] = ((limb) ((s32) in2[0])) * ((s32) in[1]) +
                ((limb) ((s32) in2[1])) * ((s32) in[0]);
    output[2] = 2 * ((limb) ((s32) in2[1])) * ((s32) in[1]) +
                ((limb) ((s32) in2[0])) * ((s32) in[2]) +
                ((limb) ((s32) in2[2])) * ((s32) in[0]);
    output[3] = ((limb) ((s32) in2[1])) * ((s32) in[2]) +
                ((limb) ((s32) in2[2])) * ((s32) in[1]) +
                ((limb) ((s32) in2[0])) * ((s32) in[3]) +
                ((limb) ((s32) in2[3])) * ((s32) in[0]);
    output[4] = ((limb) ((s32) in2[2])) * ((s32) in[2]) +
                2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
                     ((limb) ((s32) in2[3])) * ((s32) in[1])) +
                ((limb) ((s32) in2[0])) * ((s32) in[4]) +
                ((limb) ((s32) in2[4])) * ((s32) in[0]);
    output[5] = ((limb) ((s32) in2[2])) * ((s32) in[3]) +
                ((limb) ((s32) in2[3])) * ((s32) in[2]) +
                ((limb) ((s32) in2[1])) * ((s32) in[4]) +
                ((limb) ((s32) in2[4])) * ((s32) in[1]) +
                ((limb) ((s32) in2[0])) * ((s32) in[5]) +
                ((limb) ((s32) in2[5])) * ((s32) in[0]);
    output[6] = 2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
                     ((limb) ((s32) in2[1])) * ((s32) in[5]) +
                     ((limb) ((s32) in2[5])) * ((s32) in[1])) +
                ((limb) ((s32) in2[2])) * ((s32) in[4]) +
                ((limb) ((s32) in2[4])) * ((s32) in[2]) +
                ((limb) ((s32) in2[0])) * ((s32) in[6]) +
                ((limb) ((s32) in2[6])) * ((s32) in[0]);
    output[7] = ((limb) ((s32) in2[3])) * ((s32) in[4]) +
                ((limb) ((s32) in2[4])) * ((s32) in[3]) +
                ((limb) ((s32) in2[2])) * ((s32) in[5]) +
                ((limb) ((s32) in2[5])) * ((s32) in[2]) +
                ((limb) ((s32) in2[1])) * ((s32) in[6]) +
                ((limb) ((s32) in2[6])) * ((s32) in[1]) +
                ((limb) ((s32) in2[0])) * ((s32) in[7]) +
                ((limb) ((s32) in2[7])) * ((s32) in[0]);
    output[8] = ((limb) ((s32) in2[4])) * ((s32) in[4]) +
                2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
                     ((limb) ((s32) in2[5])) * ((s32) in[3]) +
                     ((limb) ((s32) in2[1])) * ((s32) in[7]) +
                     ((limb) ((s32) in2[7])) * ((s32) in[1])) +
                ((limb) ((s32) in2[2])) * ((s32) in[6]) +
                ((limb) ((s32) in2[6])) * ((s32) in[2]) +
                ((limb) ((s32) in2[0])) * ((s32) in[8]) +
                ((limb) ((s32) in2[8])) * ((s32) in[0]);
    output[9] = ((limb) ((s32) in2[4])) * ((s32) in[5]) +
                ((limb) ((s32) in2[5])) * ((s32) in[4]) +
                ((limb) ((s32) in2[3])) * ((s32) in[6]) +
                ((limb) ((s32) in2[6])) * ((s32) in[3]) +
                ((limb) ((s32) in2[2])) * ((s32) in[7]) +
                ((limb) ((s32) in2[7])) * ((s32) in[2]) +
                ((limb) ((s32) in2[1])) * ((s32) in[8]) +
                ((limb) ((s32) in2[8])) * ((s32) in[1]) +
                ((limb) ((s32) in2[0])) * ((s32) in[9]) +
                ((limb) ((s32) in2[9])) * ((s32) in[0]);
    output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
                      ((limb) ((s32) in2[3])) * ((s32) in[7]) +
                      ((limb) ((s32) in2[7])) * ((s32) in[3]) +
                      ((limb) ((s32) in2[1])) * ((s32) in[9]) +
                      ((limb) ((s32) in2[9])) * ((s32) in[1])) +
                 ((limb) ((s32) in2[4])) * ((s32) in[6]) +
                 ((limb) ((s32) in2[6])) * ((s32) in[4]) +
                 ((limb) ((s32) in2[2])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[2]);
    output[11] = ((limb) ((s32) in2[5])) * ((s32) in[6]) +
                 ((limb) ((s32) in2[6])) * ((s32) in[5]) +
                 ((limb) ((s32) in2[4])) * ((s32) in[7]) +
                 ((limb) ((s32) in2[7])) * ((s32) in[4]) +
                 ((limb) ((s32) in2[3])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[3]) +
                 ((limb) ((s32) in2[2])) * ((s32) in[9]) +
                 ((limb) ((s32) in2[9])) * ((s32) in[2]);
    output[12] = ((limb) ((s32) in2[6])) * ((s32) in[6]) +
                 2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
                      ((limb) ((s32) in2[7])) * ((s32) in[5]) +
                      ((limb) ((s32) in2[3])) * ((s32) in[9]) +
                      ((limb) ((s32) in2[9])) * ((s32) in[3])) +
                 ((limb) ((s32) in2[4])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[4]);
    output[13] = ((limb) ((s32) in2[6])) * ((s32) in[7]) +
                 ((limb) ((s32) in2[7])) * ((s32) in[6]) +
                 ((limb) ((s32) in2[5])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[5]) +
                 ((limb) ((s32) in2[4])) * ((s32) in[9]) +
                 ((limb) ((s32) in2[9])) * ((s32) in[4]);
    output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
                      ((limb) ((s32) in2[5])) * ((s32) in[9]) +
                      ((limb) ((s32) in2[9])) * ((s32) in[5])) +
                 ((limb) ((s32) in2[6])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[6]);
    output[15] = ((limb) ((s32) in2[7])) * ((s32) in[8]) +
                 ((limb) ((s32) in2[8])) * ((s32) in[7]) +
                 ((limb) ((s32) in2[6])) * ((s32) in[9]) +
                 ((limb) ((s32) in2[9])) * ((s32) in[6]);
    output[16] = ((limb) ((s32) in2[8])) * ((s32) in[8]) +
                 2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
                      ((limb) ((s32) in2[9])) * ((s32) in[7]));
    output[17] = ((limb) ((s32) in2[8])) * ((s32) in[9]) +
                 ((limb) ((s32) in2[9])) * ((s32) in[8]);
    output[18] = 2 * ((limb) ((s32) in2[9])) * ((s32) in[9]);
}*/

/* Reduce a long form to a short form by taking the input mod 2^255 - 19.
 *
 * On entry: |output[i]| < 14*2^54
 * On exit: |output[0..8]| < 280*2^54 */
static void freduce_degree(limb *output) {
    /* Each of these shifts and adds ends up multiplying the value by 19.
     *
     * For output[0..8], the absolute entry value is < 14*2^54 and we add, at
     * most, 19*14*2^54 thus, on exit, |output[0..8]| < 280*2^54. */
    output[8] += output[18] << 4;
    output[8] += output[18] << 1;
    output[8] += output[18];
    output[7] += output[17] << 4;
    output[7] += output[17] << 1;
    output[7] += output[17];
    output[6] += output[16] << 4;
    output[6] += output[16] << 1;
    output[6] += output[16];
    output[5] += output[15] << 4;
    output[5] += output[15] << 1;
    output[5] += output[15];
    output[4] += output[14] << 4;
    output[4] += output[14] << 1;
    output[4] += output[14];
    output[3] += output[13] << 4;
    output[3] += output[13] << 1;
    output[3] += output[13];
    output[2] += output[12] << 4;
    output[2] += output[12] << 1;
    output[2] += output[12];
    output[1] += output[11] << 4;
    output[1] += output[11] << 1;
    output[1] += output[11];
    output[0] += output[10] << 4;
    output[0] += output[10] << 1;
    output[0] += output[10];
}

#if (-1 & 3) != 3
#error "This code only works on a two's complement system"
#endif

/* return v / 2^26, using only shifts and adds.
 *
 * On entry: v can take any value. */
static inline limb
div_by_2_26(const limb v)
{
    /* High word of v; no shift needed. */
    const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
    /* Set to all 1s if v was negative; else set to 0s. */
    const int32_t sign = ((int32_t) highword) >> 31;
    /* Set to 0x3ffffff if v was negative; else set to 0. */
    const int32_t roundoff = ((uint32_t) sign) >> 6;
    /* Should return v / (1<<26) */
    return (v + roundoff) >> 26;
}

/* return v / (2^25), using only shifts and adds.
 *
 * On entry: v can take any value. */
static inline limb
div_by_2_25(const limb v)
{
    /* High word of v; no shift needed. */
    const uint32_t highword = (uint32_t) (((uint64_t) v) >> 32);
    /* Set to all 1s if v was negative; else set to 0s. */
    const int32_t sign = ((int32_t) highword) >> 31;
    /* Set to 0x1ffffff if v was negative; else set to 0. */
    const int32_t roundoff = ((uint32_t) sign) >> 7;
    /* Should return v / (1<<25) */
    return (v + roundoff) >> 25;
}


/* Reduce all coefficients of the short form input so that |x| < 2^26.
 *
 * On entry: |output[i]| < 280*2^54 */
static void freduce_coefficients(limb *output) {
    unsigned i;

    output[10] = 0;

    for (i = 0; i < 10; i += 2) {
        limb over = div_by_2_26(output[i]);
        /* The entry condition (that |output[i]| < 280*2^54) means that over is, at
         * most, 280*2^28 in the first iteration of this loop. This is added to the
         * next limb and we can approximate the resulting bound of that limb by
         * 281*2^54. */
        output[i] -= over << 26;
        output[i+1] += over;

        /* For the first iteration, |output[i+1]| < 281*2^54, thus |over| <
         * 281*2^29. When this is added to the next limb, the resulting bound can
         * be approximated as 281*2^54.
         *
         * For subsequent iterations of the loop, 281*2^54 remains a conservative
         * bound and no overflow occurs. */
        over = div_by_2_25(output[i+1]);
        output[i+1] -= over << 25;
        output[i+2] += over;
    }
    /* Now |output[10]| < 281*2^29 and all other coefficients are reduced. */
    output[0] += output[10] << 4;
    output[0] += output[10] << 1;
    output[0] += output[10];

    output[10] = 0;

    /* Now output[1..9] are reduced, and |output[0]| < 2^26 + 19*281*2^29
     * So |over| will be no more than 2^16. */
    {
        limb over = div_by_2_26(output[0]);
        output[0] -= over << 26;
        output[1] += over;
    }

    /* Now output[0,2..9] are reduced, and |output[1]| < 2^25 + 2^16 < 2^26. The
     * bound on |output[1]| is sufficient to meet our needs. */
}

/* A helpful wrapper around fproduct: output = in * in2.
 *
 * On entry: |in[i]| < 2^27 and |in2[i]| < 2^27.
 *
 * output must be distinct to both inputs. The output is reduced degree
 * (indeed, one need only provide storage for 10 limbs) and |output[i]| < 2^26.
static void fmul32(limb *output, const limb *in, const limb *in2)
{
    limb t[19];
    fproduct(t, in, in2);
    // |t[i]| < 14*2^54
    freduce_degree(t);
    freduce_coefficients(t);
    // |t[i]| < 2^26
    memcpy(output, t, sizeof(limb) * 10);
}*/

/* Square a number: output = in**2
 *
 * output must be distinct from the input. The inputs are reduced coefficient
 * form, the output is not.
 *
 * output[x] <= 14 * the largest product of the input limbs. */
static void fsquare_inner(limb *output, const limb *in) {
    output[0] = ((limb) ((s32) in[0])) * ((s32) in[0]);
    output[1] = 2 * ((limb) ((s32) in[0])) * ((s32) in[1]);
    output[2] = 2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
                     ((limb) ((s32) in[0])) * ((s32) in[2]));
    output[3] = 2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
                     ((limb) ((s32) in[0])) * ((s32) in[3]));
    output[4] = ((limb) ((s32) in[2])) * ((s32) in[2]) +
                4 * ((limb) ((s32) in[1])) * ((s32) in[3]) +
                2 * ((limb) ((s32) in[0])) * ((s32) in[4]);
    output[5] = 2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
                     ((limb) ((s32) in[1])) * ((s32) in[4]) +
                     ((limb) ((s32) in[0])) * ((s32) in[5]));
    output[6] = 2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
                     ((limb) ((s32) in[2])) * ((s32) in[4]) +
                     ((limb) ((s32) in[0])) * ((s32) in[6]) +
                     2 * ((limb) ((s32) in[1])) * ((s32) in[5]));
    output[7] = 2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
                     ((limb) ((s32) in[2])) * ((s32) in[5]) +
                     ((limb) ((s32) in[1])) * ((s32) in[6]) +
                     ((limb) ((s32) in[0])) * ((s32) in[7]));
    output[8] = ((limb) ((s32) in[4])) * ((s32) in[4]) +
                2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
                     ((limb) ((s32) in[0])) * ((s32) in[8]) +
                     2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
                          ((limb) ((s32) in[3])) * ((s32) in[5])));
    output[9] = 2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
                     ((limb) ((s32) in[3])) * ((s32) in[6]) +
                     ((limb) ((s32) in[2])) * ((s32) in[7]) +
                     ((limb) ((s32) in[1])) * ((s32) in[8]) +
                     ((limb) ((s32) in[0])) * ((s32) in[9]));
    output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
                      ((limb) ((s32) in[4])) * ((s32) in[6]) +
                      ((limb) ((s32) in[2])) * ((s32) in[8]) +
                      2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
                           ((limb) ((s32) in[1])) * ((s32) in[9])));
    output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
                      ((limb) ((s32) in[4])) * ((s32) in[7]) +
                      ((limb) ((s32) in[3])) * ((s32) in[8]) +
                      ((limb) ((s32) in[2])) * ((s32) in[9]));
    output[12] = ((limb) ((s32) in[6])) * ((s32) in[6]) +
                 2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
                      2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
                           ((limb) ((s32) in[3])) * ((s32) in[9])));
    output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
                      ((limb) ((s32) in[5])) * ((s32) in[8]) +
                      ((limb) ((s32) in[4])) * ((s32) in[9]));
    output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
                      ((limb) ((s32) in[6])) * ((s32) in[8]) +
                      2 * ((limb) ((s32) in[5])) * ((s32) in[9]));
    output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
                      ((limb) ((s32) in[6])) * ((s32) in[9]));
    output[16] = ((limb) ((s32) in[8])) * ((s32) in[8]) +
                 4 * ((limb) ((s32) in[7])) * ((s32) in[9]);
    output[17] = 2 * ((limb) ((s32) in[8])) * ((s32) in[9]);
    output[18] = 2 * ((limb) ((s32) in[9])) * ((s32) in[9]);
}

/* fsquare sets output = in^2.
 *
 * On entry: The |in| argument is in reduced coefficients form and |in[i]| <
 * 2^27.
 *
 * On exit: The |output| argument is in reduced coefficients form (indeed, one
 * need only provide storage for 10 limbs) and |out[i]| < 2^26. */
static void
fsquare32(limb *output, const limb *in) {
    limb t[19];
    fsquare_inner(t, in);
    /* |t[i]| < 14*2^54 because the largest product of two limbs will be <
     * 2^(27+27) and fsquare_inner adds together, at most, 14 of those
     * products. */
    freduce_degree(t);
    freduce_coefficients(t);
    /* |t[i]| < 2^26 */
    memcpy(output, t, sizeof(limb) * 10);
}

#if (-32 >> 1) != -16
#error "This code only works when >> does sign-extension on negative numbers"
#endif

/* s32_eq returns 0xffffffff iff a == b and zero otherwise. */
static s32 s32_eq(s32 a, s32 b) {
    a = ~(a ^ b);
    a &= a << 16;
    a &= a << 8;
    a &= a << 4;
    a &= a << 2;
    a &= a << 1;
    return a >> 31;
}

/* s32_gte returns 0xffffffff if a >= b and zero otherwise, where a and b are
 * both non-negative. */
static s32 s32_gte(s32 a, s32 b) {
    a -= b;
    /* a >= 0 iff a >= b. */
    return ~(a >> 31);
}

/* Take a fully reduced polynomial form number and contract it into a
 * little-endian, 32-byte array.
 *
 * On entry: |input_limbs[i]| < 2^26 */
static void fcontract32(u8 *output, limb *input_limbs)
{
    int i;
    int j;
    s32 input[10];
    s32 mask;

    /* |input_limbs[i]| < 2^26, so it's valid to convert to an s32. */
    for (i = 0; i < 10; i++)
        input[i] = (s32)input_limbs[i];

    for (j = 0; j < 2; ++j) {
        for (i = 0; i < 9; ++i) {
            if ((i & 1) == 1) {
                /* This calculation is a time-invariant way to make input[i]
                 * non-negative by borrowing from the next-larger limb. */
                const s32 mask = input[i] >> 31;
                const s32 carry = -((input[i] & mask) >> 25);
                input[i] = input[i] + (carry << 25);
                input[i+1] = input[i+1] - carry;
            } else {
                const s32 mask = input[i] >> 31;
                const s32 carry = -((input[i] & mask) >> 26);
                input[i] = input[i] + (carry << 26);
                input[i+1] = input[i+1] - carry;
            }
        }

        /* There's no greater limb for input[9] to borrow from, but we can multiply
         * by 19 and borrow from input[0], which is valid mod 2^255-19. */
        {
            const s32 mask = input[9] >> 31;
            const s32 carry = -((input[9] & mask) >> 25);
            input[9] = input[9] + (carry << 25);
            input[0] = input[0] - (carry * 19);
        }

        /* After the first iteration, input[1..9] are non-negative and fit within
         * 25 or 26 bits, depending on position. However, input[0] may be
         * negative. */
    }

    /* The first borrow-propagation pass above ended with every limb
       except (possibly) input[0] non-negative.

       If input[0] was negative after the first pass, then it was because of a
       carry from input[9]. On entry, input[9] < 2^26 so the carry was, at most,
       one, since (2**26-1) >> 25 = 1. Thus input[0] >= -19.

       In the second pass, each limb is decreased by at most one. Thus the second
       borrow-propagation pass could only have wrapped around to decrease
       input[0] again if the first pass left input[0] negative *and* input[1]
       through input[9] were all zero. In that case, input[1] is now 2^25 - 1,
       and this last borrow-propagation step will leave input[1] non-negative. */
    {
        const s32 mask = input[0] >> 31;
        const s32 carry = -((input[0] & mask) >> 26);
        input[0] = input[0] + (carry << 26);
        input[1] = input[1] - carry;
    }

    /* All input[i] are now non-negative. However, there might be values between
     * 2^25 and 2^26 in a limb which is, nominally, 25 bits wide. */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 9; i++) {
            if ((i & 1) == 1) {
                const s32 carry = input[i] >> 25;
                input[i] &= 0x1ffffff;
                input[i+1] += carry;
            } else {
                const s32 carry = input[i] >> 26;
                input[i] &= 0x3ffffff;
                input[i+1] += carry;
            }
        }

        {
            const s32 carry = input[9] >> 25;
            input[9] &= 0x1ffffff;
            input[0] += 19*carry;
        }
    }

    /* If the first carry-chain pass, just above, ended up with a carry from
     * input[9], and that caused input[0] to be out-of-bounds, then input[0] was
     * < 2^26 + 2*19, because the carry was, at most, two.
     *
     * If the second pass carried from input[9] again then input[0] is < 2*19 and
     * the input[9] -> input[0] carry didn't push input[0] out of bounds. */

    /* It still remains the case that input might be between 2^255-19 and 2^255.
     * In this case, input[1..9] must take their maximum value and input[0] must
     * be >= (2^255-19) & 0x3ffffff, which is 0x3ffffed. */
    mask = s32_gte(input[0], 0x3ffffed);
    for (i = 1; i < 10; i++) {
        if ((i & 1) == 1) {
            mask &= s32_eq(input[i], 0x1ffffff);
        } else {
            mask &= s32_eq(input[i], 0x3ffffff);
        }
    }

    /* mask is either 0xffffffff (if input >= 2^255-19) and zero otherwise. Thus
     * this conditionally subtracts 2^255-19. */
    input[0] -= mask & 0x3ffffed;

    for (i = 1; i < 10; i++) {
        if ((i & 1) == 1) {
            input[i] -= mask & 0x1ffffff;
        } else {
            input[i] -= mask & 0x3ffffff;
        }
    }

    input[1] <<= 2;
    input[2] <<= 3;
    input[3] <<= 5;
    input[4] <<= 6;
    input[6] <<= 1;
    input[7] <<= 3;
    input[8] <<= 4;
    input[9] <<= 6;
#define F(i, s) \
    output[s+0] |= input[i] & 0xff; \
    output[s+1] = (input[i] >> 8) & 0xff; \
    output[s+2] = (input[i] >> 16) & 0xff; \
    output[s+3] = (input[i] >> 24) & 0xff;
    output[0] = 0;
    output[16] = 0;
    F(0,0);
    F(1,3);
    F(2,6);
    F(3,9);
    F(4,12);
    F(5,16);
    F(6,19);
    F(7,22);
    F(8,25);
    F(9,28);
#undef F
}

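// 32-bit fallback wrappers: the ten 26/25-bit donna limbs are carried in the
// uints[] slots of the bits320 union so the rest of the code can stay
// representation-agnostic.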
bits320 bits320_limbs(limb limbs[10])
{
    bits320 output; int32_t i;
    for (i=0; i<10; i++)
        output.uints[i] = limbs[i];
    return(output);
}

static inline bits320 fscalar_product(const bits320 in,const uint64_t scalar)
{
    limb output[10],input[10]; int32_t i;
    for (i=0; i<10; i++)
        input[i] = in.uints[i];
    fscalar_product32(output,input,scalar);
    return(bits320_limbs(output));
}

static inline bits320 fsquare_times(const bits320 in,uint64_t count)
{
    limb output[10],input[10]; int32_t i;
    for (i=0; i<10; i++)
        input[i] = in.uints[i];
    for (i=0; i<count; i++)
    {
        fsquare32(output,input);
        memcpy(input,output,sizeof(input));
    }
    return(bits320_limbs(output));
}

bits256 fmul_donna(bits256 a,bits256 b);
bits256 crecip_donna(bits256 a);

bits256 fcontract(const bits320 in)
{
    bits256 contracted; limb input[10]; int32_t i;
    for (i=0; i<10; i++)
        input[i] = in.uints[i];
    fcontract32(contracted.bytes,input);
    return(contracted);
}

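// 32-bit fmul: rather than using the (commented-out) limb-level fproduct path,
// both operands are contracted to 32-byte form, multiplied with fmul_donna and
// re-expanded, at the cost of the extra contract/expand work.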
bits320 fmul(const bits320 in,const bits320 in2)
{
    /*limb output[11],input[10],input2[10]; int32_t i;
    for (i=0; i<10; i++)
    {
        input[i] = in.uints[i];
        input2[i] = in2.uints[i];
    }
    fmul32(output,input,input2);
    return(bits320_limbs(output));*/
    bits256 mulval;
    mulval = fmul_donna(fcontract(in),fcontract(in2));
    return(fexpand(mulval));
}

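// 32-bit X25519 entry point: clamp the secret and hand the scalar
// multiplication to curve25519_donna.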
bits256 curve25519(bits256 mysecret,bits256 theirpublic)
{
    int32_t curve25519_donna(uint8_t *mypublic,const uint8_t *secret,const uint8_t *basepoint);
    bits256 rawkey;
    mysecret.bytes[0] &= 0xf8, mysecret.bytes[31] &= 0x7f, mysecret.bytes[31] |= 0x40;
    curve25519_donna(&rawkey.bytes[0],&mysecret.bytes[0],&theirpublic.bytes[0]);
    return(rawkey);
}

#endif


// Input: Q, Q', Q-Q' -> Output: 2Q, Q+Q'
// x2 z2: long form && x3 z3: long form
// x z: short form, destroyed && xprime zprime: short form, destroyed
// qmqp: short form, preserved
static inline void
fmonty(bits320 *x2, bits320 *z2,         // output 2Q
       bits320 *x3, bits320 *z3,         // output Q + Q'
       bits320 *x, bits320 *z,           // input Q
       bits320 *xprime, bits320 *zprime, // input Q'
       const bits320 qmqp)               // input Q - Q'
{
    bits320 origx,origxprime,zzz,xx,zz,xxprime,zzprime;
    origx = *x;
    *x = fsum(*x,*z), fdifference_backwards(z->ulongs,origx.ulongs); // does x - z
    origxprime = *xprime;
    *xprime = fsum(*xprime,*zprime), fdifference_backwards(zprime->ulongs,origxprime.ulongs);
    xxprime = fmul(*xprime,*z), zzprime = fmul(*x,*zprime);
    origxprime = xxprime;
    xxprime = fsum(xxprime,zzprime), fdifference_backwards(zzprime.ulongs,origxprime.ulongs);
    *x3 = fsquare_times(xxprime,1), *z3 = fmul(fsquare_times(zzprime,1),qmqp);
    xx = fsquare_times(*x,1), zz = fsquare_times(*z,1);
    *x2 = fmul(xx,zz);
    fdifference_backwards(zz.ulongs,xx.ulongs); // does zz = xx - zz
    zzz = fscalar_product(zz,121665);
    *z2 = fmul(zz,fsum(zzz,xx));
}

// -----------------------------------------------------------------------------
// Maybe swap the contents of two limb arrays (@a and @b), each @len elements
// long. Perform the swap iff @swap is non-zero.
// This function performs the swap without leaking any side-channel information.
// -----------------------------------------------------------------------------
static inline void swap_conditional(bits320 *a,bits320 *b,uint64_t iswap)
{
    int32_t i; const uint64_t swap = -iswap;
    for (i=0; i<5; ++i)
    {
        const uint64_t x = swap & (a->ulongs[i] ^ b->ulongs[i]);
        a->ulongs[i] ^= x, b->ulongs[i] ^= x;
    }
}

// Calculates nQ where Q is the x-coordinate of a point on the curve
// resultx/resultz: the x coordinate of the resulting curve point (short form)
// n: a little endian, 32-byte number
// q: a point of the curve (short form)
void cmult(bits320 *resultx,bits320 *resultz,bits256 secret,const bits320 q)
{
    int32_t i,j; bits320 a,b,c,d,e,f,g,h,*t;
    bits320 Zero320bits,One320bits, *nqpqx = &a,*nqpqz = &b,*nqx = &c,*nqz = &d,*nqpqx2 = &e,*nqpqz2 = &f,*nqx2 = &g,*nqz2 = &h;
    memset(&Zero320bits,0,sizeof(Zero320bits));
    memset(&One320bits,0,sizeof(One320bits)), One320bits.ulongs[0] = 1;
    a = d = e = g = Zero320bits, b = c = f = h = One320bits;
    *nqpqx = q;
    for (i=0; i<32; i++)
    {
        uint8_t byte = secret.bytes[31 - i];
        for (j=0; j<8; j++)
        {
            const uint64_t bit = byte >> 7;
            swap_conditional(nqx,nqpqx,bit), swap_conditional(nqz,nqpqz,bit);
            fmonty(nqx2,nqz2,nqpqx2,nqpqz2,nqx,nqz,nqpqx,nqpqz,q);
            swap_conditional(nqx2,nqpqx2,bit), swap_conditional(nqz2,nqpqz2,bit);
            t = nqx, nqx = nqx2, nqx2 = t;
            t = nqz, nqz = nqz2, nqz2 = t;
            t = nqpqx, nqpqx = nqpqx2, nqpqx2 = t;
            t = nqpqz, nqpqz = nqpqz2, nqpqz2 = t;
            byte <<= 1;
        }
    }
    *resultx = *nqx, *resultz = *nqz;
}

// Shamelessly copied from donna's code that copied djb's code, changed a little
inline bits320 crecip(const bits320 z)
{
    bits320 a,t0,b,c;
    /* 2 */ a = fsquare_times(z, 1); // a = 2
    /* 8 */ t0 = fsquare_times(a, 2);
    /* 9 */ b = fmul(t0, z); // b = 9
    /* 11 */ a = fmul(b, a); // a = 11
    /* 22 */ t0 = fsquare_times(a, 1);
    /* 2^5 - 2^0 = 31 */ b = fmul(t0, b);
    /* 2^10 - 2^5 */ t0 = fsquare_times(b, 5);
    /* 2^10 - 2^0 */ b = fmul(t0, b);
    /* 2^20 - 2^10 */ t0 = fsquare_times(b, 10);
    /* 2^20 - 2^0 */ c = fmul(t0, b);
    /* 2^40 - 2^20 */ t0 = fsquare_times(c, 20);
    /* 2^40 - 2^0 */ t0 = fmul(t0, c);
    /* 2^50 - 2^10 */ t0 = fsquare_times(t0, 10);
    /* 2^50 - 2^0 */ b = fmul(t0, b);
    /* 2^100 - 2^50 */ t0 = fsquare_times(b, 50);
    /* 2^100 - 2^0 */ c = fmul(t0, b);
    /* 2^200 - 2^100 */ t0 = fsquare_times(c, 100);
    /* 2^200 - 2^0 */ t0 = fmul(t0, c);
    /* 2^250 - 2^50 */ t0 = fsquare_times(t0, 50);
    /* 2^250 - 2^0 */ t0 = fmul(t0, b);
    /* 2^255 - 2^5 */ t0 = fsquare_times(t0, 5);
    /* 2^255 - 21 */ return(fmul(t0, a));
}

#ifndef _WIN32
void OS_randombytes(unsigned char *x,long xlen);
#endif

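// Fill 32 bytes from the OS random source (libsodium's randombytes_buf on
// Windows); with privkeyflag set, clamp the result into a valid curve25519
// scalar.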
bits256 rand256(int32_t privkeyflag)
{
    bits256 randval;
#ifndef _WIN32
    OS_randombytes(randval.bytes,sizeof(randval));
#else
    randombytes_buf(randval.bytes,sizeof(randval));
#endif
    if ( privkeyflag != 0 )
        randval.bytes[0] &= 0xf8, randval.bytes[31] &= 0x7f, randval.bytes[31] |= 0x40;
    return(randval);
}

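// The standard curve25519 base point: x = 9, all other bytes zero.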
bits256 curve25519_basepoint9()
{
    bits256 basepoint;
    memset(&basepoint,0,sizeof(basepoint));
    basepoint.bytes[0] = 9;
    return(basepoint);
}

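// Generate a keypair: a clamped random scalar and the matching public key
// priv*G, where G is the base point above.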
bits256 curve25519_keypair(bits256 *pubkeyp)
{
    bits256 privkey;
    privkey = rand256(1);
    *pubkeyp = curve25519(privkey,curve25519_basepoint9());
    //printf("[%llx %llx] ",privkey.txid,(*pubkeyp).txid);
    return(privkey);
}

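// Derive a shared key via ECDH: both sides compute the same curve point
// (privA*pubB == privB*pubA) and hash it with SHA-256. Illustrative sketch,
// not part of the API, assuming two freshly generated keypairs:
//
//     bits256 pubA,pubB,privA,privB,keyA,keyB;
//     privA = curve25519_keypair(&pubA);
//     privB = curve25519_keypair(&pubB);
//     keyA = curve25519_shared(privA,pubB);
//     keyB = curve25519_shared(privB,pubA);   // memcmp(&keyA,&keyB,32) == 0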
bits256 curve25519_shared(bits256 privkey,bits256 otherpub)
{
    bits256 shared,hash;
    shared = curve25519(privkey,otherpub);
    vcalc_sha256(0,hash.bytes,shared.bytes,sizeof(shared));
    //printf("priv.%llx pub.%llx shared.%llx -> hash.%llx\n",privkey.txid,pubkey.txid,shared.txid,hash.txid);
    //hash.bytes[0] &= 0xf8, hash.bytes[31] &= 0x7f, hash.bytes[31] |= 64;
    return(hash);
}

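// donna-style wrapper over the bits256 API; always returns 0, matching the
// curve25519_donna calling convention.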
int32_t curve25519_donna(uint8_t *mypublic,const uint8_t *secret,const uint8_t *basepoint)
{
    bits256 val,p,bp;
    memcpy(p.bytes,secret,sizeof(p));
    memcpy(bp.bytes,basepoint,sizeof(bp));
    val = curve25519(p,bp);
    memcpy(mypublic,val.bytes,sizeof(val));
    return(0);
}

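// NXT-style account derivation: mysecret = SHA-256(passphrase) clamped to a
// valid scalar, mypublic = curve25519(mysecret,9), and the returned address is
// the first 8 bytes of SHA-256(mypublic).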
uint64_t conv_NXTpassword(unsigned char *mysecret,unsigned char *mypublic,uint8_t *pass,int32_t passlen)
{
    static uint8_t basepoint[32] = {9};
    uint64_t addr; uint8_t hash[32];
    if ( pass != 0 && passlen != 0 )
        vcalc_sha256(0,mysecret,pass,passlen);
    mysecret[0] &= 248, mysecret[31] &= 127, mysecret[31] |= 64;
    curve25519_donna(mypublic,mysecret,basepoint);
    vcalc_sha256(0,hash,mypublic,32);
    memcpy(&addr,hash,sizeof(addr));
    return(addr);
}

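// Derive the KV keypair from a passphrase using the NXT password convention
// above; the public key is written through pubkeyp.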
uint256 komodo_kvprivkey(uint256 *pubkeyp,char *passphrase)
{
    uint256 privkey;
    conv_NXTpassword((uint8_t *)&privkey,(uint8_t *)pubkeyp,(uint8_t *)passphrase,(int32_t)strlen(passphrase));
    return(privkey);
}

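// KV "signature": with h = SHA-256(buf), returns sig = SHA-256(privkey*(h*G)).
// By commutativity this equals SHA-256(h*(privkey*G)), which is what
// komodo_kvsigverify recomputes from the public key.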
uint256 komodo_kvsig(uint8_t *buf,int32_t len,uint256 _privkey)
{
    bits256 sig,hash,otherpub,checksig,pubkey,privkey; uint256 usig;
    memcpy(&privkey,&_privkey,sizeof(privkey));
    vcalc_sha256(0,hash.bytes,buf,len);
    otherpub = curve25519(hash,curve25519_basepoint9());
    pubkey = curve25519(privkey,curve25519_basepoint9());
    sig = curve25519_shared(privkey,otherpub);
    checksig = curve25519_shared(hash,pubkey);
    /*int32_t i; for (i=0; i<len; i++)
        printf("%02x",buf[i]);
    printf(" -> ");
    for (i=0; i<32; i++)
        printf("%02x",((uint8_t *)&privkey)[i]);
    printf(" -> ");
    for (i=0; i<32; i++)
        printf("%02x",((uint8_t *)&pubkey)[i]);
    printf(" pubkey\n");*/
    memcpy(&usig,&sig,sizeof(usig));
    return(usig);
}

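// Verify a KV signature: recompute SHA-256(h*pubkey) for h = SHA-256(buf) and
// compare against sig. Returns 0 on match (or when pubkey is all zeroes), -1
// on mismatch.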
int32_t komodo_kvsigverify(uint8_t *buf,int32_t len,uint256 _pubkey,uint256 sig)
{
    bits256 hash,checksig,pubkey; static uint256 zeroes;
    memcpy(&pubkey,&_pubkey,sizeof(pubkey));
    if ( memcmp(&pubkey,&zeroes,sizeof(pubkey)) != 0 )
    {
        vcalc_sha256(0,hash.bytes,buf,len);
        checksig = curve25519_shared(hash,pubkey);
        /*int32_t i; for (i=0; i<len; i++)
            printf("%02x",buf[i]);
        printf(" -> ");
        for (i=0; i<32; i++)
            printf("%02x",((uint8_t *)&hash)[i]);
        printf(" -> ");
        for (i=0; i<32; i++)
            printf("%02x",((uint8_t *)&pubkey)[i]);
        printf(" verify pubkey\n");
        for (i=0; i<32; i++)
            printf("%02x",((uint8_t *)&sig)[i]);
        printf(" sig vs");
        for (i=0; i<32; i++)
            printf("%02x",((uint8_t *)&checksig)[i]);
        printf(" checksig\n");*/
        if ( memcmp(&checksig,&sig,sizeof(sig)) != 0 )
            return(-1);
        //else printf("VALIDATED\n");
    }
    return(0);
}

#endif