/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_GROUP_IMPL_H_
#define _SECP256K1_GROUP_IMPL_H_

#include "num.h"
#include "field.h"
#include "group.h"

/** Generator for secp256k1, value 'g' defined in
 *  "Standards for Efficient Cryptography" (SEC2) 2.7.1.
 */
static const secp256k1_ge_t secp256k1_ge_const_g = SECP256K1_GE_CONST(
    0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL,
    0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL,
    0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL,
    0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL
);

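/* Set r to the affine coordinates obtained by scaling a's x and y by zi^2 and zi^3
 * respectively; with zi = 1/a->z this converts the Jacobian point a to affine form. */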
static void secp256k1_ge_set_gej_zinv(secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zi) {
    secp256k1_fe_t zi2;
    secp256k1_fe_t zi3;
    secp256k1_fe_sqr(&zi2, zi);
    secp256k1_fe_mul(&zi3, &zi2, zi);
    secp256k1_fe_mul(&r->x, &a->x, &zi2);
    secp256k1_fe_mul(&r->y, &a->y, &zi3);
    r->infinity = a->infinity;
}

static void secp256k1_ge_set_infinity(secp256k1_ge_t *r) {
    r->infinity = 1;
}

static void secp256k1_ge_set_xy(secp256k1_ge_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y) {
    r->infinity = 0;
    r->x = *x;
    r->y = *y;
}

static int secp256k1_ge_is_infinity(const secp256k1_ge_t *a) {
    return a->infinity;
}

static void secp256k1_ge_neg(secp256k1_ge_t *r, const secp256k1_ge_t *a) {
    *r = *a;
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_negate(&r->y, &r->y, 1);
}

static void secp256k1_ge_set_gej(secp256k1_ge_t *r, secp256k1_gej_t *a) {
    secp256k1_fe_t z2, z3;
    r->infinity = a->infinity;
    secp256k1_fe_inv(&a->z, &a->z);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_mul(&z3, &a->z, &z2);
    secp256k1_fe_mul(&a->x, &a->x, &z2);
    secp256k1_fe_mul(&a->y, &a->y, &z3);
    secp256k1_fe_set_int(&a->z, 1);
    r->x = a->x;
    r->y = a->y;
}

static void secp256k1_ge_set_gej_var(secp256k1_ge_t *r, secp256k1_gej_t *a) {
    secp256k1_fe_t z2, z3;
    r->infinity = a->infinity;
    if (a->infinity) {
        return;
    }
    secp256k1_fe_inv_var(&a->z, &a->z);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_mul(&z3, &a->z, &z2);
    secp256k1_fe_mul(&a->x, &a->x, &z2);
    secp256k1_fe_mul(&a->y, &a->y, &z3);
    secp256k1_fe_set_int(&a->z, 1);
    r->x = a->x;
    r->y = a->y;
}

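/* Set the len affine points in r to the Jacobian points in a, sharing the cost of the
 * field inversions via a single batched inversion (not constant time). */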
static void secp256k1_ge_set_all_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const callback_t *cb) {
    secp256k1_fe_t *az;
    secp256k1_fe_t *azi;
    size_t i;
    size_t count = 0;
    az = (secp256k1_fe_t *)checked_malloc(cb, sizeof(secp256k1_fe_t) * len);
    for (i = 0; i < len; i++) {
        if (!a[i].infinity) {
            az[count++] = a[i].z;
        }
    }

    azi = (secp256k1_fe_t *)checked_malloc(cb, sizeof(secp256k1_fe_t) * count);
    secp256k1_fe_inv_all_var(count, azi, az);
    free(az);

    count = 0;
    for (i = 0; i < len; i++) {
        r[i].infinity = a[i].infinity;
        if (!a[i].infinity) {
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]);
        }
    }
    free(azi);
}

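/* Set the len affine points in r to the Jacobian points in a, using only a single field
 * inversion. Assumes zr[i] equals a[i].z / a[i-1].z for every i > 0. */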
static void secp256k1_ge_set_table_gej_var(size_t len, secp256k1_ge_t *r, const secp256k1_gej_t *a, const secp256k1_fe_t *zr) {
    size_t i = len - 1;
    secp256k1_fe_t zi;

    if (len > 0) {
        /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */
        secp256k1_fe_inv(&zi, &a[i].z);
        secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);

        /* Work our way backwards, using the z-ratios to scale the x/y values. */
        while (i > 0) {
            secp256k1_fe_mul(&zi, &zi, &zr[i]);
            i--;
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi);
        }
    }
}

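/* Bring a batch of Jacobian points (with known z-ratios zr, as above) to the same global
 * z "denominator": afterwards each a[i] equals (r[i].x : r[i].y : *globalz). */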
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge_t *r, secp256k1_fe_t *globalz, const secp256k1_gej_t *a, const secp256k1_fe_t *zr) {
    size_t i = len - 1;
    secp256k1_fe_t zs;

    if (len > 0) {
        /* The z of the final point gives us the "global Z" for the table. */
        r[i].x = a[i].x;
        r[i].y = a[i].y;
        *globalz = a[i].z;
        r[i].infinity = 0;
        zs = zr[i];

        /* Work our way backwards, using the z-ratios to scale the x/y values. */
        while (i > 0) {
            if (i != len - 1) {
                secp256k1_fe_mul(&zs, &zs, &zr[i]);
            }
            i--;
            secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs);
        }
    }
}

static void secp256k1_gej_set_infinity(secp256k1_gej_t *r) {
    r->infinity = 1;
    secp256k1_fe_set_int(&r->x, 0);
    secp256k1_fe_set_int(&r->y, 0);
    secp256k1_fe_set_int(&r->z, 0);
}

static void secp256k1_gej_set_xy(secp256k1_gej_t *r, const secp256k1_fe_t *x, const secp256k1_fe_t *y) {
    r->infinity = 0;
    r->x = *x;
    r->y = *y;
    secp256k1_fe_set_int(&r->z, 1);
}

static void secp256k1_gej_clear(secp256k1_gej_t *r) {
    r->infinity = 0;
    secp256k1_fe_clear(&r->x);
    secp256k1_fe_clear(&r->y);
    secp256k1_fe_clear(&r->z);
}

static void secp256k1_ge_clear(secp256k1_ge_t *r) {
    r->infinity = 0;
    secp256k1_fe_clear(&r->x);
    secp256k1_fe_clear(&r->y);
}

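/* Set r to the curve point with the given x coordinate and a y coordinate of the given
 * parity. Returns 1 if such a point exists (i.e. x^3 + 7 is a square), 0 otherwise. */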
static int secp256k1_ge_set_xo_var(secp256k1_ge_t *r, const secp256k1_fe_t *x, int odd) {
    secp256k1_fe_t x2, x3, c;
    r->x = *x;
    secp256k1_fe_sqr(&x2, x);
    secp256k1_fe_mul(&x3, x, &x2);
    r->infinity = 0;
    secp256k1_fe_set_int(&c, 7);
    secp256k1_fe_add(&c, &x3);
    if (!secp256k1_fe_sqrt_var(&r->y, &c)) {
        return 0;
    }
    secp256k1_fe_normalize_var(&r->y);
    if (secp256k1_fe_is_odd(&r->y) != odd) {
        secp256k1_fe_negate(&r->y, &r->y, 1);
    }
    return 1;
}

static void secp256k1_gej_set_ge(secp256k1_gej_t *r, const secp256k1_ge_t *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    secp256k1_fe_set_int(&r->z, 1);
}

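/* Check whether the x coordinate of the Jacobian point a equals the field element x,
 * without computing an inverse: compares x*Z^2 against X. */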
static int secp256k1_gej_eq_x_var(const secp256k1_fe_t *x, const secp256k1_gej_t *a) {
    secp256k1_fe_t r, r2;
    VERIFY_CHECK(!a->infinity);
    secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
    r2 = a->x; secp256k1_fe_normalize_weak(&r2);
    return secp256k1_fe_equal_var(&r, &r2);
}

static void secp256k1_gej_neg(secp256k1_gej_t *r, const secp256k1_gej_t *a) {
    r->infinity = a->infinity;
    r->x = a->x;
    r->y = a->y;
    r->z = a->z;
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_negate(&r->y, &r->y, 1);
}

static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a) {
    return a->infinity;
}

static int secp256k1_gej_is_valid_var(const secp256k1_gej_t *a) {
    secp256k1_fe_t y2, x3, z2, z6;
    if (a->infinity) {
        return 0;
    }
    /** y^2 = x^3 + 7
     *  (Y/Z^3)^2 = (X/Z^2)^3 + 7
     *  Y^2 / Z^6 = X^3 / Z^6 + 7
     *  Y^2 = X^3 + 7*Z^6
     */
    secp256k1_fe_sqr(&y2, &a->y);
    secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
    secp256k1_fe_sqr(&z2, &a->z);
    secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2);
    secp256k1_fe_mul_int(&z6, 7);
    secp256k1_fe_add(&x3, &z6);
    secp256k1_fe_normalize_weak(&x3);
    return secp256k1_fe_equal_var(&y2, &x3);
}

static int secp256k1_ge_is_valid_var(const secp256k1_ge_t *a) {
    secp256k1_fe_t y2, x3, c;
    if (a->infinity) {
        return 0;
    }
    /* y^2 = x^3 + 7 */
    secp256k1_fe_sqr(&y2, &a->y);
    secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
    secp256k1_fe_set_int(&c, 7);
    secp256k1_fe_add(&x3, &c);
    secp256k1_fe_normalize_weak(&x3);
    return secp256k1_fe_equal_var(&y2, &x3);
}

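/* Set r to twice the Jacobian point a. If rzr is non-NULL, *rzr is set to the factor by
 * which a->z was multiplied to obtain r->z (i.e. r->z == *rzr * a->z). */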
static void secp256k1_gej_double_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr) {
    /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate */
    secp256k1_fe_t t1,t2,t3,t4;
    /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
     *  Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
     *  y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p.
     */
    r->infinity = a->infinity;
    if (r->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        return;
    }

    if (rzr != NULL) {
        *rzr = a->y;
        secp256k1_fe_normalize_weak(rzr);
        secp256k1_fe_mul_int(rzr, 2);
    }

    secp256k1_fe_mul(&r->z, &a->z, &a->y);
    secp256k1_fe_mul_int(&r->z, 2);       /* Z' = 2*Y*Z (2) */
    secp256k1_fe_sqr(&t1, &a->x);
    secp256k1_fe_mul_int(&t1, 3);         /* T1 = 3*X^2 (3) */
    secp256k1_fe_sqr(&t2, &t1);           /* T2 = 9*X^4 (1) */
    secp256k1_fe_sqr(&t3, &a->y);
    secp256k1_fe_mul_int(&t3, 2);         /* T3 = 2*Y^2 (2) */
    secp256k1_fe_sqr(&t4, &t3);
    secp256k1_fe_mul_int(&t4, 2);         /* T4 = 8*Y^4 (2) */
    secp256k1_fe_mul(&t3, &t3, &a->x);    /* T3 = 2*X*Y^2 (1) */
    r->x = t3;
    secp256k1_fe_mul_int(&r->x, 4);       /* X' = 8*X*Y^2 (4) */
    secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */
    secp256k1_fe_add(&r->x, &t2);         /* X' = 9*X^4 - 8*X*Y^2 (6) */
    secp256k1_fe_negate(&t2, &t2, 1);     /* T2 = -9*X^4 (2) */
    secp256k1_fe_mul_int(&t3, 6);         /* T3 = 12*X*Y^2 (6) */
    secp256k1_fe_add(&t3, &t2);           /* T3 = 12*X*Y^2 - 9*X^4 (8) */
    secp256k1_fe_mul(&r->y, &t1, &t3);    /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */
    secp256k1_fe_negate(&t2, &t4, 2);     /* T2 = -8*Y^4 (3) */
    secp256k1_fe_add(&r->y, &t2);         /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */
}

static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej_t *r, const secp256k1_gej_t *a, secp256k1_fe_t *rzr) {
    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));
    secp256k1_gej_double_var(r, a, rzr);
}

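/* Set r to the sum of the Jacobian points a and b. If rzr is non-NULL, a may not be the
 * point at infinity, and *rzr is set to the ratio between the output and input z
 * coordinates (r->z == a->z * *rzr; it is set to 0 when the result is infinity). */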
static void secp256k1_gej_add_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_gej_t *b, secp256k1_fe_t *rzr) {
    /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */
    secp256k1_fe_t z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        *r = *b;
        return;
    }

    if (b->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }

    r->infinity = 0;
    secp256k1_fe_sqr(&z22, &b->z);
    secp256k1_fe_sqr(&z12, &a->z);
    secp256k1_fe_mul(&u1, &a->x, &z22);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                secp256k1_fe_set_int(rzr, 0);
            }
            r->infinity = 1;
        }
        return;
    }

    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    secp256k1_fe_mul(&h, &h, &b->z);
    if (rzr != NULL) {
        *rzr = h;
    }
    secp256k1_fe_mul(&r->z, &a->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

static void secp256k1_gej_add_ge_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, secp256k1_fe_t *rzr) {
    /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    secp256k1_fe_t z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;
    if (a->infinity) {
        VERIFY_CHECK(rzr == NULL);
        secp256k1_gej_set_ge(r, b);
        return;
    }
    if (b->infinity) {
        if (rzr != NULL) {
            secp256k1_fe_set_int(rzr, 1);
        }
        *r = *a;
        return;
    }
    r->infinity = 0;

    secp256k1_fe_sqr(&z12, &a->z);
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, rzr);
        } else {
            if (rzr != NULL) {
                secp256k1_fe_set_int(rzr, 0);
            }
            r->infinity = 1;
        }
        return;
    }

    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    if (rzr != NULL) {
        *rzr = h;
    }
    secp256k1_fe_mul(&r->z, &a->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

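/* Set r to the sum of the Jacobian point a and the point whose affine coordinates are
 * (b->x, b->y) relative to an implicit z coordinate with known inverse bzinv, i.e. b
 * represents the Jacobian point (b->x : b->y : 1/bzinv). */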
static void secp256k1_gej_add_zinv_var(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b, const secp256k1_fe_t *bzinv) {
    /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */
    secp256k1_fe_t az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t;

    if (b->infinity) {
        *r = *a;
        return;
    }
    if (a->infinity) {
        secp256k1_fe_t bzinv2, bzinv3;
        r->infinity = b->infinity;
        secp256k1_fe_sqr(&bzinv2, bzinv);
        secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv);
        secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
        secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
        secp256k1_fe_set_int(&r->z, 1);
        return;
    }
    r->infinity = 0;

    /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to
     *  secp256k1's isomorphism we can multiply the Z coordinates on both sides
     *  by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1).
     *  This means that (rx,ry,rz) can be calculated as
     *  (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz.
     *  The variable az below holds the modified Z coordinate for a, which is used
     *  for the computation of rx and ry, but not for rz.
     */
    secp256k1_fe_mul(&az, &a->z, bzinv);

    secp256k1_fe_sqr(&z12, &az);
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
    secp256k1_fe_mul(&u2, &b->x, &z12);
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
    secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
    secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2);
    if (secp256k1_fe_normalizes_to_zero_var(&h)) {
        if (secp256k1_fe_normalizes_to_zero_var(&i)) {
            secp256k1_gej_double_var(r, a, NULL);
        } else {
            r->infinity = 1;
        }
        return;
    }

    secp256k1_fe_sqr(&i2, &i);
    secp256k1_fe_sqr(&h2, &h);
    secp256k1_fe_mul(&h3, &h, &h2);
    r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h);
    secp256k1_fe_mul(&t, &u1, &h2);
    r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2);
    secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i);
    secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1);
    secp256k1_fe_add(&r->y, &h3);
}

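/* Set r to the sum of the Jacobian point a and the affine point b. b may not be the
 * point at infinity. The computation uses no data-dependent branches, so it is suitable
 * where constant-time behaviour is required. */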
static void secp256k1_gej_add_ge(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_ge_t *b) {
    /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */
    static const secp256k1_fe_t fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
    secp256k1_fe_t zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
    secp256k1_fe_t m_alt, rr_alt;
    int infinity, degenerate;
    VERIFY_CHECK(!b->infinity);
    VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);

    /** In:
     *    Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
     *    In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002.
     *  we find as solution for a unified addition/doubling formula:
     *    lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation.
     *    x3 = lambda^2 - (x1 + x2)
     *    2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2).
     *
     *  Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives:
     *    U1 = X1*Z2^2, U2 = X2*Z1^2
     *    S1 = Y1*Z2^3, S2 = Y2*Z1^3
     *    Z = Z1*Z2
     *    T = U1+U2
     *    M = S1+S2
     *    Q = T*M^2
     *    R = T^2-U1*U2
     *    X3 = 4*(R^2-Q)
     *    Y3 = 4*(R*(3*Q-2*R^2)-M^4)
     *    Z3 = 2*M*Z
     *  (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.)
     *
     *  This formula has the benefit of being the same for both addition
     *  of distinct points and doubling. However, it breaks down in the
     *  case that either point is infinity, or that y1 = -y2. We handle
     *  these cases in the following ways:
     *
     *    - If b is infinity we simply bail by means of a VERIFY_CHECK.
     *
     *    - If a is infinity, we detect this, and at the end of the
     *      computation replace the result (which will be meaningless,
     *      but we compute to be constant-time) with b.x : b.y : 1.
     *
     *    - If a = -b, we have y1 = -y2, which is a degenerate case.
     *      But here the answer is infinity, so we simply set the
     *      infinity flag of the result, overriding the computed values
     *      without even needing to cmov.
     *
     *    - If y1 = -y2 but x1 != x2, which does occur thanks to certain
     *      properties of our curve (specifically, 1 has nontrivial cube
     *      roots in our field, and the curve equation has no x coefficient)
     *      then the answer is not infinity but also not given by the above
     *      equation. In this case, we cmov in place an alternate expression
     *      for lambda. Specifically (y1 - y2)/(x1 - x2). Where both these
     *      expressions for lambda are defined, they are equal, and can be
     *      obtained from each other by multiplication by (y1 + y2)/(y1 + y2)
     *      then substitution of x^3 + 7 for y^2 (using the curve equation).
     *      For all pairs of nonzero points (a, b) at least one is defined,
     *      so this covers everything.
     */

    secp256k1_fe_sqr(&zz, &a->z);                       /* z = Z1^2 */
    u1 = a->x; secp256k1_fe_normalize_weak(&u1);        /* u1 = U1 = X1*Z2^2 (1) */
    secp256k1_fe_mul(&u2, &b->x, &zz);                  /* u2 = U2 = X2*Z1^2 (1) */
    s1 = a->y; secp256k1_fe_normalize_weak(&s1);        /* s1 = S1 = Y1*Z2^3 (1) */
    secp256k1_fe_mul(&s2, &b->y, &zz);                  /* s2 = Y2*Z1^2 (1) */
    secp256k1_fe_mul(&s2, &s2, &a->z);                  /* s2 = S2 = Y2*Z1^3 (1) */
    t = u1; secp256k1_fe_add(&t, &u2);                  /* t = T = U1+U2 (2) */
    m = s1; secp256k1_fe_add(&m, &s2);                  /* m = M = S1+S2 (2) */
    secp256k1_fe_sqr(&rr, &t);                          /* rr = T^2 (1) */
    secp256k1_fe_negate(&m_alt, &u2, 1);                /* Malt = -X2*Z1^2 */
    secp256k1_fe_mul(&tt, &u1, &m_alt);                 /* tt = -U1*U2 (2) */
    secp256k1_fe_add(&rr, &tt);                         /* rr = R = T^2-U1*U2 (3) */
    /** If lambda = R/M = 0/0 we have a problem (except in the "trivial"
     *  case that Z = z1z2 = 0, and this is special-cased later on). */
    degenerate = secp256k1_fe_normalizes_to_zero(&m) &
                 secp256k1_fe_normalizes_to_zero(&rr);
    /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2.
     * This means either x1 == beta*x2 or beta*x1 == x2, where beta is
     * a nontrivial cube root of one. In either case, an alternate
     * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
     * so we set R/M equal to this. */
    rr_alt = s1;
    secp256k1_fe_mul_int(&rr_alt, 2);                   /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
    secp256k1_fe_add(&m_alt, &u1);                      /* Malt = X1*Z2^2 - X2*Z1^2 */

    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
    secp256k1_fe_cmov(&m_alt, &m, !degenerate);
    /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0.
     * From here on out Ralt and Malt represent the numerator
     * and denominator of lambda; R and M represent the explicit
     * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
    secp256k1_fe_sqr(&n, &m_alt);                       /* n = Malt^2 (1) */
    secp256k1_fe_mul(&q, &n, &t);                       /* q = Q = T*Malt^2 (1) */
    /* These two lines use the observation that either M == Malt or M == 0,
     * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
     * zero (which is "computed" by cmov). So the cost is one squaring
     * versus two multiplications. */
    secp256k1_fe_sqr(&n, &n);
    secp256k1_fe_cmov(&n, &m, degenerate);              /* n = M^3 * Malt (2) */
    secp256k1_fe_sqr(&t, &rr_alt);                      /* t = Ralt^2 (1) */
    secp256k1_fe_mul(&r->z, &a->z, &m_alt);             /* r->z = Malt*Z (1) */
    infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity);
    secp256k1_fe_mul_int(&r->z, 2);                     /* r->z = Z3 = 2*Malt*Z (2) */
    secp256k1_fe_negate(&q, &q, 1);                     /* q = -Q (2) */
    secp256k1_fe_add(&t, &q);                           /* t = Ralt^2-Q (3) */
    secp256k1_fe_normalize_weak(&t);
    r->x = t;                                           /* r->x = Ralt^2-Q (1) */
    secp256k1_fe_mul_int(&t, 2);                        /* t = 2*x3 (2) */
    secp256k1_fe_add(&t, &q);                           /* t = 2*x3 - Q: (4) */
    secp256k1_fe_mul(&t, &t, &rr_alt);                  /* t = Ralt*(2*x3 - Q) (1) */
    secp256k1_fe_add(&t, &n);                           /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */
    secp256k1_fe_negate(&r->y, &t, 3);                  /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */
    secp256k1_fe_normalize_weak(&r->y);
    secp256k1_fe_mul_int(&r->x, 4);                     /* r->x = X3 = 4*(Ralt^2-Q) */
    secp256k1_fe_mul_int(&r->y, 4);                     /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */

    /** In case a->infinity == 1, replace r with (b->x, b->y, 1). */
    secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
    secp256k1_fe_cmov(&r->y, &b->y, a->infinity);
    secp256k1_fe_cmov(&r->z, &fe_1, a->infinity);
    r->infinity = infinity;
}

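/* Rescale the Jacobian point r in place so that its z coordinate is multiplied by the
 * non-zero factor s; the point it represents is unchanged. */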
static void secp256k1_gej_rescale(secp256k1_gej_t *r, const secp256k1_fe_t *s) {
    /* Operations: 4 mul, 1 sqr */
    secp256k1_fe_t zz;
    VERIFY_CHECK(!secp256k1_fe_is_zero(s));
    secp256k1_fe_sqr(&zz, s);
    secp256k1_fe_mul(&r->x, &r->x, &zz);  /* r->x *= s^2 */
    secp256k1_fe_mul(&r->y, &r->y, &zz);
    secp256k1_fe_mul(&r->y, &r->y, s);    /* r->y *= s^3 */
    secp256k1_fe_mul(&r->z, &r->z, s);    /* r->z *= s */
}

static void secp256k1_ge_to_storage(secp256k1_ge_storage_t *r, const secp256k1_ge_t *a) {
    secp256k1_fe_t x, y;
    VERIFY_CHECK(!a->infinity);
    x = a->x;
    secp256k1_fe_normalize(&x);
    y = a->y;
    secp256k1_fe_normalize(&y);
    secp256k1_fe_to_storage(&r->x, &x);
    secp256k1_fe_to_storage(&r->y, &y);
}

static void secp256k1_ge_from_storage(secp256k1_ge_t *r, const secp256k1_ge_storage_t *a) {
    secp256k1_fe_from_storage(&r->x, &a->x);
    secp256k1_fe_from_storage(&r->y, &a->y);
    r->infinity = 0;
}

static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage_t *r, const secp256k1_ge_storage_t *a, int flag) {
    secp256k1_fe_storage_cmov(&r->x, &a->x, flag);
    secp256k1_fe_storage_cmov(&r->y, &a->y, flag);
}

#ifdef USE_ENDOMORPHISM
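/* Set r to a multiplied by the curve's efficiently-computable endomorphism lambda: in
 * affine coordinates, lambda * (x, y) = (beta * x, y), where beta is a non-trivial cube
 * root of one modulo the field prime. */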
static void secp256k1_ge_mul_lambda(secp256k1_ge_t *r, const secp256k1_ge_t *a) {
    static const secp256k1_fe_t beta = SECP256K1_FE_CONST(
        0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul,
        0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul
    );
    *r = *a;
    secp256k1_fe_mul(&r->x, &r->x, &beta);
}
#endif

#endif