/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#ifndef SECP256K1_SCALAR_IMPL_H
#define SECP256K1_SCALAR_IMPL_H

#include "scalar.h"
#include "util.h"

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT128)
#include "scalar_4x64_impl.h"
#elif defined(SECP256K1_WIDEMUL_INT64)
#include "scalar_8x32_impl.h"
#else
#error "Please select wide multiplication implementation"
#endif

static const secp256k1_scalar secp256k1_scalar_one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
static const secp256k1_scalar secp256k1_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);

static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
    unsigned char c[32];
    secp256k1_scalar_get_b32(c, a);
    secp256k1_num_set_bin(r, c, 32);
}

/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
#if defined(EXHAUSTIVE_TEST_ORDER)
    static const unsigned char order[32] = {
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
    };
#else
    static const unsigned char order[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
        0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
        0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
    };
#endif
    secp256k1_num_set_bin(r, order, 32);
}

static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned char *bin) {
    int overflow;
    secp256k1_scalar_set_b32(r, bin, &overflow);
    return (!overflow) & (!secp256k1_scalar_is_zero(r));
}

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(EXHAUSTIVE_TEST_ORDER)
    int i;
    *r = 0;
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
        if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
            *r = i;
    /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
     * have a composite group order; fix it in exhaustive_tests.c). */
    VERIFY_CHECK(*r != 0);
#else
    secp256k1_scalar *t;
    int i;
    /* First compute xN as x ^ (2^N - 1) for some values of N,
     * and uM as x ^ M for some values of M. */
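    /* For example, u2 = x^2, x2 = x^3, u5 = x^5, x3 = x^7, u9 = x^9, u11 = x^11,
     * u13 = x^13, x6 = x^63, and x8 = x^255. */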
    secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
    secp256k1_scalar u2, u5, u9, u11, u13;
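
    /* The chain below raises x to the power n - 2, where n is the (prime) group order;
     * by Fermat's little theorem the result is the inverse of x modulo n. */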
    secp256k1_scalar_sqr(&u2, x);
    secp256k1_scalar_mul(&x2, &u2, x);
    secp256k1_scalar_mul(&u5, &u2, &x2);
    secp256k1_scalar_mul(&x3, &u5, &u2);
    secp256k1_scalar_mul(&u9, &x3, &u2);
    secp256k1_scalar_mul(&u11, &u9, &u2);
    secp256k1_scalar_mul(&u13, &u11, &u2);

    secp256k1_scalar_sqr(&x6, &u13);
    secp256k1_scalar_sqr(&x6, &x6);
    secp256k1_scalar_mul(&x6, &x6, &u11);

    secp256k1_scalar_sqr(&x8, &x6);
    secp256k1_scalar_sqr(&x8, &x8);
    secp256k1_scalar_mul(&x8, &x8, &x2);

    secp256k1_scalar_sqr(&x14, &x8);
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(&x14, &x14);
    }
    secp256k1_scalar_mul(&x14, &x14, &x6);

    secp256k1_scalar_sqr(&x28, &x14);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x28, &x28);
    }
    secp256k1_scalar_mul(&x28, &x28, &x14);

    secp256k1_scalar_sqr(&x56, &x28);
    for (i = 0; i < 27; i++) {
        secp256k1_scalar_sqr(&x56, &x56);
    }
    secp256k1_scalar_mul(&x56, &x56, &x28);

    secp256k1_scalar_sqr(&x112, &x56);
    for (i = 0; i < 55; i++) {
        secp256k1_scalar_sqr(&x112, &x112);
    }
    secp256k1_scalar_mul(&x112, &x112, &x56);

    secp256k1_scalar_sqr(&x126, &x112);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x126, &x126);
    }
    secp256k1_scalar_mul(&x126, &x126, &x14);

    /* Then accumulate the final result (t starts at x126). */
    t = &x126;
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 10; i++) { /* 0000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 9; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x2); /* 11 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 10; i++) { /* 000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, x); /* 1 */
    for (i = 0; i < 8; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(r, t, &x6); /* 111111 */
#endif
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
    secp256k1_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
    unsigned char b[32];
    secp256k1_num n, m;
    secp256k1_scalar t = *x;
    secp256k1_scalar_get_b32(b, &t);
    secp256k1_num_set_bin(&n, b, 32);
    secp256k1_scalar_order_get_num(&m);
    secp256k1_num_mod_inverse(&n, &n, &m);
    secp256k1_num_get_bin(b, 32, &n);
    secp256k1_scalar_set_b32(r, b, NULL);
    /* Verify that the inverse was computed correctly, without GMP code. */
    secp256k1_scalar_mul(&t, &t, r);
    CHECK(secp256k1_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
}

/* These parameters are generated using sage/gen_exhaustive_groups.sage. */
#if defined(EXHAUSTIVE_TEST_ORDER)
# if EXHAUSTIVE_TEST_ORDER == 13
# define EXHAUSTIVE_TEST_LAMBDA 9
# elif EXHAUSTIVE_TEST_ORDER == 199
# define EXHAUSTIVE_TEST_LAMBDA 92
# else
# error No known lambda for the specified exhaustive test group order.
# endif
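
/* Both values are primitive cube roots of unity modulo the group order:
 * 9^3 = 729 == 1 (mod 13) and 92^3 = 778688 == 1 (mod 199). */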

/**
 * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
 * full case we don't bother making k1 and k2 small, we just want them to be
 * nontrivial so the exhaustive tests get full coverage. We therefore
 * (arbitrarily) set k2 = k + 5 (mod n) and k1 = k - k2 * lambda (mod n).
 */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
    *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
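
/* For example, with EXHAUSTIVE_TEST_ORDER == 13 and EXHAUSTIVE_TEST_LAMBDA == 9,
 * k == 1 gives k2 = (1 + 5) % 13 = 6 and k1 = (1 + (13 - 6) * 9) % 13 = 12, and
 * indeed k1 + k2 * lambda = 12 + 6 * 9 = 66 == 1 (mod 13). */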
#else
/**
 * The secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
 * lambda is:
 */
static const secp256k1_scalar secp256k1_const_lambda = SECP256K1_SCALAR_CONST(
    0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
    0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
);

#ifdef VERIFY
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k);
#endif

/*
 * Both lambda and beta are primitive cube roots of unity. That is, lambda^3 == 1 mod n and
 * beta^3 == 1 mod p, where n is the curve order and p is the field order.
 *
 * Furthermore, because (X^3 - 1) = (X - 1)(X^2 + X + 1), the primitive cube roots of unity are
 * roots of X^2 + X + 1. Therefore lambda^2 + lambda == -1 mod n and beta^2 + beta == -1 mod p.
 * (The other primitive cube roots of unity are lambda^2 and beta^2 respectively.)
 *
 * Let l = -1/2 + i*sqrt(3)/2, the complex root of X^2 + X + 1. We can define a ring
 * homomorphism phi : Z[l] -> Z_n where phi(a + b*l) == a + b*lambda mod n. The kernel of phi
 * is a lattice over Z[l] (considering Z[l] as a Z-module). This lattice is generated by a
 * reduced basis {a1 + b1*l, a2 + b2*l} where
 *
 * - a1 =      {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 * - b1 =     -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
 * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
 * - b2 =      {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 *
 * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
 * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
 * and k2 are small in absolute value.
 *
 * The algorithm computes c1 = round(b2 * k / n) and c2 = round((-b1) * k / n), and gives
 * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
 * compute k - k2 * lambda (mod n), which is equivalent to k1 (mod n), avoiding the need for
 * the constants a1 and a2.
 *
 * g1, g2 are precomputed constants used to replace division with a rounded multiplication
 * when decomposing the scalar for an endomorphism-based point multiplication.
 *
 * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
 * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
 *
 * The derivation is described in the paper "Efficient Software Implementation of Public-Key
 * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
 * Section 4.3 (here we use a somewhat higher-precision estimate):
 * d = a1*b2 - b1*a2
 * g1 = round(2^384 * b2/d)
 * g2 = round(2^384 * (-b1)/d)
 *
 * (Note that d is also equal to the curve order, n, here because [a1,b1] and [a2,b2]
 * can be found as outputs of the Extended Euclidean Algorithm on inputs n and lambda).
 *
 * The function below splits k into r1 and r2, such that
 * - r1 + lambda * r2 == k (mod n)
 * - either r1 < 2^128 or -r1 mod n < 2^128
 * - either r2 < 2^128 or -r2 mod n < 2^128
 *
 * See the proof below.
 */
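
/* A quick sanity check on the constants used below: because n is just under 2^256,
 * g1 = round(2^384 * b2/d) and g2 = round(2^384 * (-b1)/d) have upper 128 bits that
 * agree with b2 and -b1 above (up to the final rounding carry). */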
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    secp256k1_scalar c1, c2;
    static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
        0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
        0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
    );
    static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
        0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
    );
    static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
        0x3086D221UL, 0xA7D46BCDUL, 0xE86C90E4UL, 0x9284EB15UL,
        0x3DAA8A14UL, 0x71E8CA7FUL, 0xE893209AUL, 0x45DBB031UL
    );
    static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
        0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
        0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
    );
    VERIFY_CHECK(r1 != k);
    VERIFY_CHECK(r2 != k);
    /* these _var calls are constant time since the shift amount is constant */
    secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
    secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
    secp256k1_scalar_mul(&c1, &c1, &minus_b1);
    secp256k1_scalar_mul(&c2, &c2, &minus_b2);
    secp256k1_scalar_add(r2, &c1, &c2);
    secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
    secp256k1_scalar_negate(r1, r1);
    secp256k1_scalar_add(r1, r1, k);

#ifdef VERIFY
    secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif
}
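
/* Illustrative sketch (not part of the library API): the decomposition computed above
 * can be checked using only functions from this file. Assuming k is a secp256k1_scalar:
 *
 *     secp256k1_scalar r1, r2, check;
 *     secp256k1_scalar_split_lambda(&r1, &r2, &k);
 *     secp256k1_scalar_mul(&check, &r2, &secp256k1_const_lambda);
 *     secp256k1_scalar_add(&check, &check, &r1);
 *
 * afterwards check == k (mod n), and r1 and r2 (or their negations mod n) are below 2^128.
 */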

#ifdef VERIFY
/*
 * Proof for secp256k1_scalar_split_lambda's bounds.
 *
 * Let
 *  - epsilon1 = 2^256 * |g1/2^384 - b2/d|
 *  - epsilon2 = 2^256 * |g2/2^384 - (-b1)/d|
 *  - c1 = round(k*g1/2^384)
 *  - c2 = round(k*g2/2^384)
 *
 * Lemma 1: |c1 - k*b2/d| < 2^-1 + epsilon1
 *
 *    |c1 - k*b2/d|
 *  =
 *    |c1 - k*g1/2^384 + k*g1/2^384 - k*b2/d|
 * <=   {triangle inequality}
 *    |c1 - k*g1/2^384| + |k*g1/2^384 - k*b2/d|
 *  =
 *    |c1 - k*g1/2^384| + k*|g1/2^384 - b2/d|
 *  <   {rounding in c1 and 0 <= k < 2^256}
 *    2^-1 + 2^256 * |g1/2^384 - b2/d|
 *  =   {definition of epsilon1}
 *    2^-1 + epsilon1
 *
 * Lemma 2: |c2 - k*(-b1)/d| < 2^-1 + epsilon2
 *
 *    |c2 - k*(-b1)/d|
 *  =
 *    |c2 - k*g2/2^384 + k*g2/2^384 - k*(-b1)/d|
 * <=   {triangle inequality}
 *    |c2 - k*g2/2^384| + |k*g2/2^384 - k*(-b1)/d|
 *  =
 *    |c2 - k*g2/2^384| + k*|g2/2^384 - (-b1)/d|
 *  <   {rounding in c2 and 0 <= k < 2^256}
 *    2^-1 + 2^256 * |g2/2^384 - (-b1)/d|
 *  =   {definition of epsilon2}
 *    2^-1 + epsilon2
 *
 * Let
 *  - k1 = k - c1*a1 - c2*a2
 *  - k2 = - c1*b1 - c2*b2
 *
 * Lemma 3: |k1| < (a1 + a2 + 1)/2 < 2^128
 *
 *    |k1|
 *  =   {definition of k1}
 *    |k - c1*a1 - c2*a2|
 *  =   {(a1*b2 - b1*a2)/n = 1}
 *    |k*(a1*b2 - b1*a2)/n - c1*a1 - c2*a2|
 *  =
 *    |a1*(k*b2/n - c1) + a2*(k*(-b1)/n - c2)|
 * <=   {triangle inequality}
 *    a1*|k*b2/n - c1| + a2*|k*(-b1)/n - c2|
 *  <   {Lemma 1 and Lemma 2}
 *    a1*(2^-1 + epsilon1) + a2*(2^-1 + epsilon2)
 *  <   {rounding up to an integer}
 *    (a1 + a2 + 1)/2
 *  <   {rounding up to a power of 2}
 *    2^128
 *
 * Lemma 4: |k2| < (-b1 + b2)/2 + 1 < 2^128
 *
 *    |k2|
 *  =   {definition of k2}
 *    |- c1*b1 - c2*b2|
 *  =   {(b1*b2 - b1*b2)/n = 0}
 *    |k*(b1*b2 - b1*b2)/n - c1*b1 - c2*b2|
 *  =
 *    |b1*(k*b2/n - c1) + b2*(k*(-b1)/n - c2)|
 * <=   {triangle inequality}
 *    (-b1)*|k*b2/n - c1| + b2*|k*(-b1)/n - c2|
 *  <   {Lemma 1 and Lemma 2}
 *    (-b1)*(2^-1 + epsilon1) + b2*(2^-1 + epsilon2)
 *  <   {rounding up to an integer}
 *    (-b1 + b2)/2 + 1
 *  <   {rounding up to a power of 2}
 *    2^128
 *
 * Let
 *  - r2 = k2 mod n
 *  - r1 = k - r2*lambda mod n.
 *
 * Notice that r1 is defined such that r1 + r2 * lambda == k (mod n).
 *
 * Lemma 5: r1 == k1 mod n.
 *
 *    r1
 * ==   {definition of r1 and r2}
 *    k - k2*lambda
 * ==   {definition of k2}
 *    k - (- c1*b1 - c2*b2)*lambda
 * ==
 *    k + c1*b1*lambda + c2*b2*lambda
 * ==   {a1 + b1*lambda == 0 mod n and a2 + b2*lambda == 0 mod n}
 *    k - c1*a1 - c2*a2
 * ==   {definition of k1}
 *    k1
 *
 * From Lemma 3, Lemma 4, Lemma 5 and the definition of r2, we can conclude that
 *
 *  - either r1 < 2^128 or -r1 mod n < 2^128
 *  - either r2 < 2^128 or -r2 mod n < 2^128.
 *
 * Q.E.D.
 */
static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, const secp256k1_scalar *r2, const secp256k1_scalar *k) {
    secp256k1_scalar s;
    unsigned char buf1[32];
    unsigned char buf2[32];

    /* (a1 + a2 + 1)/2 is 0xa2a8918ca85bafe22016d0b917e4dd77 */
    static const unsigned char k1_bound[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0xa2, 0xa8, 0x91, 0x8c, 0xa8, 0x5b, 0xaf, 0xe2, 0x20, 0x16, 0xd0, 0xb9, 0x17, 0xe4, 0xdd, 0x77
    };

    /* (-b1 + b2)/2 + 1 is 0x8a65287bd47179fb2be08846cea267ed */
    static const unsigned char k2_bound[32] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x8a, 0x65, 0x28, 0x7b, 0xd4, 0x71, 0x79, 0xfb, 0x2b, 0xe0, 0x88, 0x46, 0xce, 0xa2, 0x67, 0xed
    };

    secp256k1_scalar_mul(&s, &secp256k1_const_lambda, r2);
    secp256k1_scalar_add(&s, &s, r1);
    VERIFY_CHECK(secp256k1_scalar_eq(&s, k));

    secp256k1_scalar_negate(&s, r1);
    secp256k1_scalar_get_b32(buf1, r1);
    secp256k1_scalar_get_b32(buf2, &s);
    VERIFY_CHECK(secp256k1_memcmp_var(buf1, k1_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k1_bound, 32) < 0);

    secp256k1_scalar_negate(&s, r2);
    secp256k1_scalar_get_b32(buf1, r2);
    secp256k1_scalar_get_b32(buf2, &s);
    VERIFY_CHECK(secp256k1_memcmp_var(buf1, k2_bound, 32) < 0 || secp256k1_memcmp_var(buf2, k2_bound, 32) < 0);
}
#endif

#endif /* !defined(EXHAUSTIVE_TEST_ORDER) */

#endif /* SECP256K1_SCALAR_IMPL_H */