/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying  *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
#define _SECP256K1_FIELD_REPR_IMPL_H_

#include "util.h"
#include "num.h"
#include "field.h"
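
/* This file implements field arithmetic modulo the secp256k1 prime
 * p = 2^256 - 0x1000003D1. A field element is stored as ten uint32_t limbs
 * n[0..9], representing the value sum(n[i] * 2^(26*i)): limbs n[0..8] hold up
 * to 26 bits and n[9] holds up to 22 bits. Under VERIFY, each element also
 * carries a 'magnitude' bound (how far the limbs may exceed their normalized
 * size) and a 'normalized' flag (value fully reduced modulo p). */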
#ifdef VERIFY
static void secp256k1_fe_verify(const secp256k1_fe_t *a) {
    const uint32_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
    r &= (d[0] <= 0x3FFFFFFUL * m);
    r &= (d[1] <= 0x3FFFFFFUL * m);
    r &= (d[2] <= 0x3FFFFFFUL * m);
    r &= (d[3] <= 0x3FFFFFFUL * m);
    r &= (d[4] <= 0x3FFFFFFUL * m);
    r &= (d[5] <= 0x3FFFFFFUL * m);
    r &= (d[6] <= 0x3FFFFFFUL * m);
    r &= (d[7] <= 0x3FFFFFFUL * m);
    r &= (d[8] <= 0x3FFFFFFUL * m);
    r &= (d[9] <= 0x03FFFFFUL * m);
    r &= (a->magnitude >= 0);
    r &= (a->magnitude <= 32);
    if (a->normalized) {
        r &= (a->magnitude <= 1);
        if (r && (d[9] == 0x03FFFFFUL)) {
            uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
            if (mid == 0x3FFFFFFUL) {
                r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
            }
        }
    }
    VERIFY_CHECK(r == 1);
}
#else
static void secp256k1_fe_verify(const secp256k1_fe_t *a) {
    (void)a;
}
#endif
static void secp256k1_fe_normalize(secp256k1_fe_t *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
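
    /* Note: 2^256 == 0x1000003D1 (mod p), and 0x1000003D1 == (0x40 << 26) + 0x3D1,
     * so a carry x out of bit 256 folds back in as x*0x3D1 into limb 0 and
     * x*0x40 (i.e. x << 6) into limb 1. */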
    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;
    /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
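
    /* The second operand of the OR above is 1 exactly when limbs 2..9 are all ones
     * (m is the AND of limbs 2..8) and the low two limbs are large enough that adding
     * 0x1000003D1 would carry past bit 52, i.e. when the magnitude-1 value lies in [p, 2^256). */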
    /* Apply the final reduction (for constant-time behaviour, we do it always) */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
    /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
    VERIFY_CHECK(t9 >> 22 == x);

    /* Mask off the possible multiple of 2^256 from the final reduction */
    t9 &= 0x03FFFFFUL;
    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
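
/* Weak normalization: brings the magnitude down to 1 by folding the top carry back in,
 * but skips the final conditional subtraction of p, so the result may still be an
 * unreduced representative of its residue class. */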
static void secp256k1_fe_normalize_weak(secp256k1_fe_t *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

    /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;
#ifdef VERIFY
    r->magnitude = 1;
    secp256k1_fe_verify(r);
#endif
}
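
/* Variable-time variant of secp256k1_fe_normalize: identical first pass, but the final
 * reduction is only performed when actually needed, so timing may depend on the value. */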
static void secp256k1_fe_normalize_var(secp256k1_fe_t *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t m;
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;

    /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
        & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
    if (x) {
        t0 += 0x3D1UL; t1 += (x << 6);
        t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
        t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
        t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
        t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
        t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
        t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
        t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
        t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
        t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;

        /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */
        VERIFY_CHECK(t9 >> 22 == x);

        /* Mask off the possible multiple of 2^256 from the final reduction */
        t9 &= 0x03FFFFFUL;
    }
    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9;

#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
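
/* Returns whether the element would normalize to zero, i.e. whether its raw value is
 * congruent to 0 mod p, without writing the normalized result back. */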
static int secp256k1_fe_normalizes_to_zero(secp256k1_fe_t *r) {
    uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4],
             t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    uint32_t z0, z1;

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x3D1UL; t1 += (x << 6);
    t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0 = t0; z1 = t0 ^ 0x3D0UL;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);
    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
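
/* Variable-time variant: only limbs 0 and 9 are reduced up front, and a fast path
 * rejects most nonzero inputs before the full carry chain is run. */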
static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe_t *r) {
    uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
    uint32_t z0, z1;
    uint32_t x;

    t0 = r->n[0];
    t9 = r->n[9];

    /* Reduce t9 at the start so there will be at most a single carry from the first pass */
    x = t9 >> 22;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x3D1UL;

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    z0 = t0 & 0x3FFFFFFUL;
    z1 = z0 ^ 0x3D0UL;

    /* Fast return path should catch the majority of cases */
    if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
        return 0;
    }

    t1 = r->n[1];
    t2 = r->n[2];
    t3 = r->n[3];
    t4 = r->n[4];
    t5 = r->n[5];
    t6 = r->n[6];
    t7 = r->n[7];
    t8 = r->n[8];

    t9 &= 0x03FFFFFUL;
    t1 += (x << 6);
    t1 += (t0 >> 26); t0 = z0;
    t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
    t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
    t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
    t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
    t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
    t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
    t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3C00000UL;

    /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t9 >> 23 == 0);
    return (z0 == 0) | (z1 == 0x3FFFFFFUL);
}
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe_t *r, int a) {
    r->n[0] = a;
    r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe_t *a) {
    const uint32_t *t = a->n;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
}
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe_t *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    return a->n[0] & 1;
}
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe_t *a) {
    int i;
#ifdef VERIFY
    a->magnitude = 0;
    a->normalized = 1;
#endif
    for (i=0; i<10; i++) {
        a->n[i] = 0;
    }
}
static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
    int i;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    VERIFY_CHECK(b->normalized);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
#endif
    for (i = 9; i >= 0; i--) {
        if (a->n[i] > b->n[i]) {
            return 1;
        }
        if (a->n[i] < b->n[i]) {
            return -1;
        }
    }
    return 0;
}
static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
    int i;
    r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
    r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0;
    for (i=0; i<32; i++) {
        int j;
        for (j=0; j<4; j++) {
            int limb = (8*i+2*j)/26;
            int shift = (8*i+2*j)%26;
            r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift;
        }
    }
    /* Reject encodings that are not below the field characteristic */
    if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) {
        return 0;
    }
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
    secp256k1_fe_verify(r);
#endif
    return 1;
}
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe_t *a) {
    int i;
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
    secp256k1_fe_verify(a);
#endif
    for (i=0; i<32; i++) {
        int j;
        int c = 0;
        for (j=0; j<4; j++) {
            int limb = (8*i+2*j)/26;
            int shift = (8*i+2*j)%26;
            c |= ((a->n[limb] >> shift) & 0x3) << (2 * j);
        }
        r[31-i] = c;
    }
}
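
/* Negation is computed as 2*(m+1)*p - a, limb by limb: the per-limb constants below are
 * the limbs of p (0x3FFFC2F, 0x3FFFFBF, then 0x3FFFFFF..., top limb 0x03FFFFF), scaled so
 * that no limb can underflow for an input of magnitude at most m. The result has magnitude m+1. */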
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe_t *r, const secp256k1_fe_t *a, int m) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= m);
    secp256k1_fe_verify(a);
#endif
    r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0];
    r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1];
    r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2];
    r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3];
    r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4];
    r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5];
    r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6];
    r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7];
    r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8];
    r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9];
#ifdef VERIFY
    r->magnitude = m + 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe_t *r, int a) {
    r->n[0] *= a;
    r->n[1] *= a;
    r->n[2] *= a;
    r->n[3] *= a;
    r->n[4] *= a;
    r->n[5] *= a;
    r->n[6] *= a;
    r->n[7] *= a;
    r->n[8] *= a;
    r->n[9] *= a;
#ifdef VERIFY
    r->magnitude *= a;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
#ifdef VERIFY
    secp256k1_fe_verify(a);
#endif
    r->n[0] += a->n[0];
    r->n[1] += a->n[1];
    r->n[2] += a->n[2];
    r->n[3] += a->n[3];
    r->n[4] += a->n[4];
    r->n[5] += a->n[5];
    r->n[6] += a->n[6];
    r->n[7] += a->n[7];
    r->n[8] += a->n[8];
    r->n[9] += a->n[9];
#ifdef VERIFY
    r->magnitude += a->magnitude;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#else
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
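
    /* R0 and R1 encode the reduction of the tenth limb position: since
     * 2^260 == 0x1000003D10 == (R1 << 26) + R0 (mod p), a term u at limb
     * position 10 folds back in as u*R0 at position 0 and u*R1 at position 1. */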
    VERIFY_BITS(a[0], 30);
    VERIFY_BITS(a[1], 30);
    VERIFY_BITS(a[2], 30);
    VERIFY_BITS(a[3], 30);
    VERIFY_BITS(a[4], 30);
    VERIFY_BITS(a[5], 30);
    VERIFY_BITS(a[6], 30);
    VERIFY_BITS(a[7], 30);
    VERIFY_BITS(a[8], 30);
    VERIFY_BITS(a[9], 26);
    VERIFY_BITS(b[0], 30);
    VERIFY_BITS(b[1], 30);
    VERIFY_BITS(b[2], 30);
    VERIFY_BITS(b[3], 30);
    VERIFY_BITS(b[4], 30);
    VERIFY_BITS(b[5], 30);
    VERIFY_BITS(b[6], 30);
    VERIFY_BITS(b[7], 30);
    VERIFY_BITS(b[8], 30);
    VERIFY_BITS(b[9], 26);
    /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
     *  px is a shorthand for sum(a[i]*b[x-i], i=0..x).
     *  Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].
     */
    d = (uint64_t)a[0] * b[9]
      + (uint64_t)a[1] * b[8]
      + (uint64_t)a[2] * b[7]
      + (uint64_t)a[3] * b[6]
      + (uint64_t)a[4] * b[5]
      + (uint64_t)a[5] * b[4]
      + (uint64_t)a[6] * b[3]
      + (uint64_t)a[7] * b[2]
      + (uint64_t)a[8] * b[1]
      + (uint64_t)a[9] * b[0];
    /* VERIFY_BITS(d, 64); */
    /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
    t9 = d & M; d >>= 26;
    /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */

    c = (uint64_t)a[0] * b[0];
    /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */
    d += (uint64_t)a[1] * b[9]
       + (uint64_t)a[2] * b[8]
       + (uint64_t)a[3] * b[7]
       + (uint64_t)a[4] * b[6]
       + (uint64_t)a[5] * b[5]
       + (uint64_t)a[6] * b[4]
       + (uint64_t)a[7] * b[3]
       + (uint64_t)a[8] * b[2]
       + (uint64_t)a[9] * b[1];
    /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    u0 = d & M; d >>= 26; c += u0 * R0;
    /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    t0 = c & M; c >>= 26; c += u0 * R1;
    /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */

    c += (uint64_t)a[0] * b[1]
       + (uint64_t)a[1] * b[0];
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */
    d += (uint64_t)a[2] * b[9]
       + (uint64_t)a[3] * b[8]
       + (uint64_t)a[4] * b[7]
       + (uint64_t)a[5] * b[6]
       + (uint64_t)a[6] * b[5]
       + (uint64_t)a[7] * b[4]
       + (uint64_t)a[8] * b[3]
       + (uint64_t)a[9] * b[2];
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    u1 = d & M; d >>= 26; c += u1 * R0;
    /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    t1 = c & M; c >>= 26; c += u1 * R1;
    /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */

    c += (uint64_t)a[0] * b[2]
       + (uint64_t)a[1] * b[1]
       + (uint64_t)a[2] * b[0];
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    d += (uint64_t)a[3] * b[9]
       + (uint64_t)a[4] * b[8]
       + (uint64_t)a[5] * b[7]
       + (uint64_t)a[6] * b[6]
       + (uint64_t)a[7] * b[5]
       + (uint64_t)a[8] * b[4]
       + (uint64_t)a[9] * b[3];
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    u2 = d & M; d >>= 26; c += u2 * R0;
    /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    t2 = c & M; c >>= 26; c += u2 * R1;
    /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */

    c += (uint64_t)a[0] * b[3]
       + (uint64_t)a[1] * b[2]
       + (uint64_t)a[2] * b[1]
       + (uint64_t)a[3] * b[0];
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    d += (uint64_t)a[4] * b[9]
       + (uint64_t)a[5] * b[8]
       + (uint64_t)a[6] * b[7]
       + (uint64_t)a[7] * b[6]
       + (uint64_t)a[8] * b[5]
       + (uint64_t)a[9] * b[4];
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    u3 = d & M; d >>= 26; c += u3 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    t3 = c & M; c >>= 26; c += u3 * R1;
    /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */

    c += (uint64_t)a[0] * b[4]
       + (uint64_t)a[1] * b[3]
       + (uint64_t)a[2] * b[2]
       + (uint64_t)a[3] * b[1]
       + (uint64_t)a[4] * b[0];
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[5] * b[9]
       + (uint64_t)a[6] * b[8]
       + (uint64_t)a[7] * b[7]
       + (uint64_t)a[8] * b[6]
       + (uint64_t)a[9] * b[5];
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    u4 = d & M; d >>= 26; c += u4 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    t4 = c & M; c >>= 26; c += u4 * R1;
    /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */

    c += (uint64_t)a[0] * b[5]
       + (uint64_t)a[1] * b[4]
       + (uint64_t)a[2] * b[3]
       + (uint64_t)a[3] * b[2]
       + (uint64_t)a[4] * b[1]
       + (uint64_t)a[5] * b[0];
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[6] * b[9]
       + (uint64_t)a[7] * b[8]
       + (uint64_t)a[8] * b[7]
       + (uint64_t)a[9] * b[6];
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    u5 = d & M; d >>= 26; c += u5 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    t5 = c & M; c >>= 26; c += u5 * R1;
    /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)a[0] * b[6]
       + (uint64_t)a[1] * b[5]
       + (uint64_t)a[2] * b[4]
       + (uint64_t)a[3] * b[3]
       + (uint64_t)a[4] * b[2]
       + (uint64_t)a[5] * b[1]
       + (uint64_t)a[6] * b[0];
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[7] * b[9]
       + (uint64_t)a[8] * b[8]
       + (uint64_t)a[9] * b[7];
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    u6 = d & M; d >>= 26; c += u6 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    t6 = c & M; c >>= 26; c += u6 * R1;
    /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)a[0] * b[7]
       + (uint64_t)a[1] * b[6]
       + (uint64_t)a[2] * b[5]
       + (uint64_t)a[3] * b[4]
       + (uint64_t)a[4] * b[3]
       + (uint64_t)a[5] * b[2]
       + (uint64_t)a[6] * b[1]
       + (uint64_t)a[7] * b[0];
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x8000007C00000007ULL);
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[8] * b[9]
       + (uint64_t)a[9] * b[8];
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    u7 = d & M; d >>= 26; c += u7 * R0;
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL);
    /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    t7 = c & M; c >>= 26; c += u7 * R1;
    /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)a[0] * b[8]
       + (uint64_t)a[1] * b[7]
       + (uint64_t)a[2] * b[6]
       + (uint64_t)a[3] * b[5]
       + (uint64_t)a[4] * b[4]
       + (uint64_t)a[5] * b[3]
       + (uint64_t)a[6] * b[2]
       + (uint64_t)a[7] * b[1]
       + (uint64_t)a[8] * b[0];
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x9000007B80000008ULL);
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[9] * b[9];
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    u8 = d & M; d >>= 26; c += u8 * R0;
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[3] = t3;
    VERIFY_BITS(r[3], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[4] = t4;
    VERIFY_BITS(r[4], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[5] = t5;
    VERIFY_BITS(r[5], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[6] = t6;
    VERIFY_BITS(r[6], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[7] = t7;
    VERIFY_BITS(r[7], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[8] = c & M; c >>= 26; c += u8 * R1;
    VERIFY_BITS(r[8], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    c += d * R0 + t9;
    /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
    VERIFY_BITS(r[9], 22);
    /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

    d = c * (R0 >> 4) + t0;
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[0] = d & M; d >>= 26;
    VERIFY_BITS(r[0], 26);
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += c * (R1 >> 4) + t1;
    VERIFY_CHECK(d <= 0x10000003FFFFBFULL);
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[1] = d & M; d >>= 26;
    VERIFY_BITS(r[1], 26);
    VERIFY_CHECK(d <= 0x4000000ULL);
    /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += t2;
    /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[2] = d;
    VERIFY_BITS(r[2], 27);
    /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
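
/* secp256k1_fe_sqr_inner follows the same schedule as secp256k1_fe_mul_inner with b == a;
 * symmetric cross products a[i]*a[j] (i != j) are folded together as (a[i]*2)*a[j]. */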
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) {
    uint64_t c, d;
    uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
    uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
    const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
    VERIFY_BITS(a[0], 30);
    VERIFY_BITS(a[1], 30);
    VERIFY_BITS(a[2], 30);
    VERIFY_BITS(a[3], 30);
    VERIFY_BITS(a[4], 30);
    VERIFY_BITS(a[5], 30);
    VERIFY_BITS(a[6], 30);
    VERIFY_BITS(a[7], 30);
    VERIFY_BITS(a[8], 30);
    VERIFY_BITS(a[9], 26);
    /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
     *  px is a shorthand for sum(a[i]*a[x-i], i=0..x).
     *  Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].
     */
    d = (uint64_t)(a[0]*2) * a[9]
      + (uint64_t)(a[1]*2) * a[8]
      + (uint64_t)(a[2]*2) * a[7]
      + (uint64_t)(a[3]*2) * a[6]
      + (uint64_t)(a[4]*2) * a[5];
    /* VERIFY_BITS(d, 64); */
    /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
    t9 = d & M; d >>= 26;
    /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */

    c = (uint64_t)a[0] * a[0];
    /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */
    d += (uint64_t)(a[1]*2) * a[9]
       + (uint64_t)(a[2]*2) * a[8]
       + (uint64_t)(a[3]*2) * a[7]
       + (uint64_t)(a[4]*2) * a[6]
       + (uint64_t)a[5] * a[5];
    /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    u0 = d & M; d >>= 26; c += u0 * R0;
    /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    t0 = c & M; c >>= 26; c += u0 * R1;
    /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */

    c += (uint64_t)(a[0]*2) * a[1];
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */
    d += (uint64_t)(a[2]*2) * a[9]
       + (uint64_t)(a[3]*2) * a[8]
       + (uint64_t)(a[4]*2) * a[7]
       + (uint64_t)(a[5]*2) * a[6];
    /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    u1 = d & M; d >>= 26; c += u1 * R0;
    /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    t1 = c & M; c >>= 26; c += u1 * R1;
    /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[2]
       + (uint64_t)a[1] * a[1];
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    d += (uint64_t)(a[3]*2) * a[9]
       + (uint64_t)(a[4]*2) * a[8]
       + (uint64_t)(a[5]*2) * a[7]
       + (uint64_t)a[6] * a[6];
    /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    u2 = d & M; d >>= 26; c += u2 * R0;
    /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    t2 = c & M; c >>= 26; c += u2 * R1;
    /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[3]
       + (uint64_t)(a[1]*2) * a[2];
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    d += (uint64_t)(a[4]*2) * a[9]
       + (uint64_t)(a[5]*2) * a[8]
       + (uint64_t)(a[6]*2) * a[7];
    /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    u3 = d & M; d >>= 26; c += u3 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    t3 = c & M; c >>= 26; c += u3 * R1;
    /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[4]
       + (uint64_t)(a[1]*2) * a[3]
       + (uint64_t)a[2] * a[2];
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    d += (uint64_t)(a[5]*2) * a[9]
       + (uint64_t)(a[6]*2) * a[8]
       + (uint64_t)a[7] * a[7];
    /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    u4 = d & M; d >>= 26; c += u4 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    t4 = c & M; c >>= 26; c += u4 * R1;
    /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[5]
       + (uint64_t)(a[1]*2) * a[4]
       + (uint64_t)(a[2]*2) * a[3];
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)(a[6]*2) * a[9]
       + (uint64_t)(a[7]*2) * a[8];
    /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    u5 = d & M; d >>= 26; c += u5 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    t5 = c & M; c >>= 26; c += u5 * R1;
    /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[6]
       + (uint64_t)(a[1]*2) * a[5]
       + (uint64_t)(a[2]*2) * a[4]
       + (uint64_t)a[3] * a[3];
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)(a[7]*2) * a[9]
       + (uint64_t)a[8] * a[8];
    /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    u6 = d & M; d >>= 26; c += u6 * R0;
    /* VERIFY_BITS(c, 64); */
    /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    t6 = c & M; c >>= 26; c += u6 * R1;
    /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[7]
       + (uint64_t)(a[1]*2) * a[6]
       + (uint64_t)(a[2]*2) * a[5]
       + (uint64_t)(a[3]*2) * a[4];
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x8000007C00000007ULL);
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)(a[8]*2) * a[9];
    /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    u7 = d & M; d >>= 26; c += u7 * R0;
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL);
    /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    t7 = c & M; c >>= 26; c += u7 * R1;
    /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */

    c += (uint64_t)(a[0]*2) * a[8]
       + (uint64_t)(a[1]*2) * a[7]
       + (uint64_t)(a[2]*2) * a[6]
       + (uint64_t)(a[3]*2) * a[5]
       + (uint64_t)a[4] * a[4];
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x9000007B80000008ULL);
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += (uint64_t)a[9] * a[9];
    /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    u8 = d & M; d >>= 26; c += u8 * R0;
    /* VERIFY_BITS(c, 64); */
    VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[3] = t3;
    VERIFY_BITS(r[3], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[4] = t4;
    VERIFY_BITS(r[4], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[5] = t5;
    VERIFY_BITS(r[5], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[6] = t6;
    VERIFY_BITS(r[6], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[7] = t7;
    VERIFY_BITS(r[7], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[8] = c & M; c >>= 26; c += u8 * R1;
    VERIFY_BITS(r[8], 26);
    /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    c += d * R0 + t9;
    /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
    VERIFY_BITS(r[9], 22);
    /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

    d = c * (R0 >> 4) + t0;
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[0] = d & M; d >>= 26;
    VERIFY_BITS(r[0], 26);
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += c * (R1 >> 4) + t1;
    VERIFY_CHECK(d <= 0x10000003FFFFBFULL);
    /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[1] = d & M; d >>= 26;
    VERIFY_BITS(r[1], 26);
    VERIFY_CHECK(d <= 0x4000000ULL);
    /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    d += t2;
    /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    r[2] = d;
    VERIFY_BITS(r[2], 27);
    /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
}
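
/* Wrapper around secp256k1_fe_mul_inner. Inputs may have magnitude up to 8; the output
 * has magnitude 1 but is not normalized. r may alias a, but not b (hence the r != b check). */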
static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    VERIFY_CHECK(b->magnitude <= 8);
    secp256k1_fe_verify(a);
    secp256k1_fe_verify(b);
    VERIFY_CHECK(r != b);
#endif
    secp256k1_fe_mul_inner(r->n, a->n, b->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->magnitude <= 8);
    secp256k1_fe_verify(a);
#endif
    secp256k1_fe_sqr_inner(r->n, a->n);
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 0;
    secp256k1_fe_verify(r);
#endif
}
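
/* Constant-time conditional move: mask0 = flag - 1 is all ones when flag is 0 and all
 * zeros when flag is 1, so each limb of r is either kept or replaced by a's limb without
 * any data-dependent branch. */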
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
    uint32_t mask0, mask1;
    mask0 = flag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
    r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1);
    r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1);
#ifdef VERIFY
    if (a->magnitude > r->magnitude) {
        r->magnitude = a->magnitude;
    }
    r->normalized &= a->normalized;
#endif
}
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage_t *r, const secp256k1_fe_storage_t *a, int flag) {
    uint32_t mask0, mask1;
    mask0 = flag + ~((uint32_t)0);
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1);
    r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1);
    r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1);
}
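
/* Packs a normalized 10x26 field element into the 8x32 storage representation by
 * concatenating the 26-bit limbs into eight 32-bit words. */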
static void secp256k1_fe_to_storage(secp256k1_fe_storage_t *r, const secp256k1_fe_t *a) {
#ifdef VERIFY
    VERIFY_CHECK(a->normalized);
#endif
    r->n[0] = a->n[0] | a->n[1] << 26;
    r->n[1] = a->n[1] >> 6 | a->n[2] << 20;
    r->n[2] = a->n[2] >> 12 | a->n[3] << 14;
    r->n[3] = a->n[3] >> 18 | a->n[4] << 8;
    r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28;
    r->n[5] = a->n[6] >> 4 | a->n[7] << 22;
    r->n[6] = a->n[7] >> 10 | a->n[8] << 16;
    r->n[7] = a->n[8] >> 16 | a->n[9] << 10;
}
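
/* Inverse of secp256k1_fe_to_storage: splits the eight 32-bit words back into ten
 * 26-bit limbs. The result is fully reduced (magnitude 1) by construction. */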
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe_t *r, const secp256k1_fe_storage_t *a) {
    r->n[0] = a->n[0] & 0x3FFFFFFUL;
    r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
    r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
    r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
    r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
    r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
    r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
    r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
    r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
    r->n[9] = a->n[7] >> 10;
#ifdef VERIFY
    r->magnitude = 1;
    r->normalized = 1;
#endif
}

#endif