/*****************************************************************************
 * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
 * Distributed under the MIT software license, see the accompanying          *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.       *
 *****************************************************************************/
#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H
#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need to lower these values for exhaustive tests because
 * the tables cannot have infinities in them (this breaks the
 * affine-isomorphism stuff which tracks z-ratios) */
# if EXHAUSTIVE_TEST_ORDER > 128
# elif EXHAUSTIVE_TEST_ORDER > 8
/* optimal for 128-bit and 256-bit exponents. */
#define WINDOW_A 5

/** larger numbers may result in slightly better performance, at the cost of
    exponentially larger precomputed tables. */
#ifdef USE_ENDOMORPHISM
/** Two tables for window size 15: 1.375 MiB. */
#define WINDOW_G 15
#else
/** One table for window size 16: 1.375 MiB. */
#define WINDOW_G 16
#endif

#ifdef USE_ENDOMORPHISM
#define WNAF_BITS 128
#else
#define WNAF_BITS 256
#endif
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

/** The number of entries a table with precomputed multiples needs to have. */
#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2))
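
/* Illustrative example (not part of the original source): with window size
 * w == 5, ECMULT_TABLE_SIZE(5) == (1 << 3) == 8, so a table holds the eight
 * odd multiples [1*A, 3*A, 5*A, ..., 15*A]. The ECMULT_TABLE_GET_GE macros
 * further below map an odd wnaf digit n onto these slots: n > 0 reads
 * pre[(n-1)/2] (e.g. n == 7 reads pre[3]) and n < 0 reads pre[(-n-1)/2]
 * with the y-coordinate negated, exploiting that -P differs from P only
 * in the sign of y. */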

/* The number of objects allocated on the scratch space for ecmult_multi algorithms */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 6

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#ifdef USE_ENDOMORPHISM
#define ECMULT_PIPPENGER_THRESHOLD 88
#else
#define ECMULT_PIPPENGER_THRESHOLD 160
#endif

#ifdef USE_ENDOMORPHISM
#define ECMULT_MAX_POINTS_PER_BATCH 5000000
#else
#define ECMULT_MAX_POINTS_PER_BATCH 10000000
#endif

/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
 *  the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will
 *  contain prej[0].z / a.z. Each subsequent zr[i] contains prej[i].z / prej[i-1].z.
 *  Prej's Z values are undefined, except for the last value.
 */
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) {
    secp256k1_gej d;
    secp256k1_ge a_ge, d_ge;
    int i;

    VERIFY_CHECK(!a->infinity);

    secp256k1_gej_double_var(&d, a, NULL);

    /*
     * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate
     * of 'd', and scale the 1P starting value's x/y coordinates without changing its z.
     */
    d_ge.x = d.x;
    d_ge.y = d.y;
    d_ge.infinity = 0;

    secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z);
    prej[0].x = a_ge.x;
    prej[0].y = a_ge.y;
    prej[0].z = a->z;
    prej[0].infinity = 0;

    zr[0] = d.z;

    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]);
    }

    /*
     * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only
     * the final point's z coordinate is actually used though, so just update that.
     */
    secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z);
}
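
/* Worked example (illustrative only): for n == 2 the table is prej = [1*A, 3*A]
 * with d = 2*A. On the isomorphic curve every intermediate z is short by a
 * factor of d.z, and the stored ratios satisfy
 *
 *     zr[0] = prej[0].z / a.z
 *     zr[1] = prej[1].z / prej[0].z
 *
 * so a consumer can walk the chain a.z -> prej[0].z -> prej[1].z with one
 * field multiplication per step instead of storing every z explicitly. */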

/** Fill a table 'pre' with precomputed odd multiples of a.
 *
 *  There are two versions of this function:
 *  - secp256k1_ecmult_odd_multiples_table_globalz_windowa, which brings its
 *    resulting point set to a single constant Z denominator, stores the X and Y
 *    coordinates as ge_storage points in pre, and stores the global Z in rz.
 *    It only operates on tables sized for WINDOW_A wnaf multiples.
 *  - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its
 *    resulting point set to actually affine points, and stores those in pre.
 *    It operates on tables of any size, but uses heap-allocated temporaries.
 *
 *  To compute a*P + b*G, we compute a table for P using the first function,
 *  and for G using the second (which requires an inverse, but it only needs to
 *  happen once).
 */
static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
    secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];

    /* Compute the odd multiples in Jacobian form. */
    secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a);
    /* Bring them to the same Z denominator. */
    secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr);
}
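
/* Minimal usage sketch (illustrative, not a verbatim caller): the table is
 * consumed by treating its entries as affine on the isomorphic curve and
 * correcting the result's Z exactly once at the end, as strauss_wnaf below
 * does:
 *
 *     secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
 *     secp256k1_fe Z;
 *     secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, &pj);
 *     ... additions using ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A) ...
 *     secp256k1_fe_mul(&r.z, &r.z, &Z);
 */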

static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) {
    secp256k1_gej d;
    secp256k1_ge d_ge, p_ge;
    secp256k1_gej pj;
    secp256k1_fe zi;
    secp256k1_fe zr;
    secp256k1_fe dx_over_dz_squared;
    int i;

    VERIFY_CHECK(!a->infinity);

    secp256k1_gej_double_var(&d, a, NULL);

    /* First, we perform all the additions in an isomorphic curve obtained by multiplying
     * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use
     * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store
     * the resulting y-coordinate and the z-ratio, since we only have enough memory to
     * store two field elements. These are sufficient to efficiently undo the isomorphism
     * and recompute all the `x`s.
     */
    d_ge.x = d.x;
    d_ge.y = d.y;
    d_ge.infinity = 0;

    secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z);
    pj.x = p_ge.x;
    pj.y = p_ge.y;
    pj.z = a->z;
    pj.infinity = 0;

    for (i = 0; i < (n - 1); i++) {
        secp256k1_fe_normalize_var(&pj.y);
        secp256k1_fe_to_storage(&pre[i].y, &pj.y);
        secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr);
        secp256k1_fe_normalize_var(&zr);
        secp256k1_fe_to_storage(&pre[i].x, &zr);
    }

    /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */
    secp256k1_fe_mul(&zi, &pj.z, &d.z);
    secp256k1_fe_inv_var(&zi, &zi);

    /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so
     * that we can combine it with the saved z-ratios to compute the other zs
     * without any more inversions. */
    secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi);
    secp256k1_ge_to_storage(&pre[n - 1], &p_ge);

    /* Compute the actual x-coordinate of D, which will be needed below. */
    secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */
    secp256k1_fe_sqr(&dx_over_dz_squared, &d.z);
    secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x);

    /* Going into the second loop, we have set `pre[n-1]` to its final affine
     * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We
     * have `zi = (p.z * d.z)^-1`, where
     *
     *     `p.z` is the z-coordinate of the point on the isomorphic curve
     *           which was ultimately assigned to `pre[n-1]`.
     *     `d.z` is the multiplier that must be applied to all z-coordinates
     *           to move from our isomorphic curve back to secp256k1; so the
     *           product `p.z * d.z` is the z-coordinate of the secp256k1
     *           point assigned to `pre[n-1]`.
     *
     * All subsequent inverse-z-coordinates can be obtained by multiplying this
     * factor by successive z-ratios, which is much more efficient than directly
     * computing each one.
     *
     * Importantly, these inverse-zs will be coordinates of points on secp256k1,
     * while our other stored values come from computations on the isomorphic
     * curve. So in the below loop, we will take care not to actually use `zi`
     * or any derived values until we're back on secp256k1.
     */
    i = n - 1;
    while (i > 0) {
        secp256k1_fe zi2, zi3;
        const secp256k1_fe *rzr;
        i--;

        secp256k1_ge_from_storage(&p_ge, &pre[i]);

        /* For each remaining point, we extract the z-ratio from the stored
         * x-coordinate, compute its z^-1 from that, and compute the full
         * point from that. */
        rzr = &p_ge.x;
        secp256k1_fe_mul(&zi, &zi, rzr);
        secp256k1_fe_sqr(&zi2, &zi);
        secp256k1_fe_mul(&zi3, &zi2, &zi);
        /* To compute the actual x-coordinate, we use the stored z ratio and
         * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var`
         * in the loop above, as well as the inverse of the square of its
         * z-coordinate. We store the latter in the `zi2` variable, which is
         * computed iteratively starting from the overall Z inverse then
         * multiplying by each z-ratio in turn.
         *
         * Denoting the z-ratio as `rzr`, we observe that it is equal to `h`
         * from the inside of the above `gej_add_ge_var` call. This satisfies
         *
         *     rzr = d_x * z^2 - x * d_z^2
         *
         * where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)`
         * are Jacobian coordinates of our desired point -- except both are on
         * the isomorphic curve that we were using when we called `gej_add_ge_var`.
         * To get back to secp256k1, we must multiply both `z`s by `d_z`, or
         * equivalently divide both `x`s by `d_z^2`. Our equation then becomes
         *
         *     rzr = d_x * z^2 / d_z^2 - x
         *
         * (The left-hand side, being a ratio of z-coordinates, is unaffected
         * by the isomorphism.)
         *
         * Rearranging to solve for `x`, we have
         *
         *     x = d_x * z^2 / d_z^2 - rzr
         *
         * But what we actually want is the affine coordinate `X = x/z^2`,
         * which is
         *
         *     X = d_x / d_z^2 - rzr / z^2
         *       = dx_over_dz_squared - rzr * zi2
         */
        secp256k1_fe_mul(&p_ge.x, rzr, &zi2);
        secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1);
        secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared);
        /* y is stored_y/z^3, as we expect */
        secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3);

        secp256k1_ge_to_storage(&pre[i], &p_ge);
    }
}

/** The following two macros retrieve a particular odd multiple from a table
 *  of precomputed multiples. */
#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    if ((n) > 0) { \
        *(r) = (pre)[((n)-1)/2]; \
    } else { \
        *(r) = (pre)[(-(n)-1)/2]; \
        secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
    } \
} while(0)

#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    if ((n) > 0) { \
        secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
    } else { \
        secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
        secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
    } \
} while(0)

static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
    ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
    ctx->pre_g_128 = NULL;
#endif
}

static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) {
    secp256k1_gej gj;

    if (ctx->pre_g != NULL) {
        return;
    }

    /* get the generator */
    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);

    ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));

    /* precompute the tables with odd multiples */
    secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);

#ifdef USE_ENDOMORPHISM
    {
        secp256k1_gej g_128j;
        int i;

        ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));

        /* calculate 2^128*generator */
        g_128j = gj;
        for (i = 0; i < 128; i++) {
            secp256k1_gej_double_var(&g_128j, &g_128j, NULL);
        }
        secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
    }
#endif
}

static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
                                           const secp256k1_ecmult_context *src, const secp256k1_callback *cb) {
    if (src->pre_g == NULL) {
        dst->pre_g = NULL;
    } else {
        size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
        dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
        memcpy(dst->pre_g, src->pre_g, size);
    }
#ifdef USE_ENDOMORPHISM
    if (src->pre_g_128 == NULL) {
        dst->pre_g_128 = NULL;
    } else {
        size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
        dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
        memcpy(dst->pre_g_128, src->pre_g_128, size);
    }
#endif
}

static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
    return ctx->pre_g != NULL;
}

static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
    free(ctx->pre_g);
#ifdef USE_ENDOMORPHISM
    free(ctx->pre_g_128);
#endif
    secp256k1_ecmult_context_init(ctx);
}

/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
 *  with the following guarantees:
 *  - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1)
 *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
 *  - the number of set values in wnaf is returned. This number is at most 256, and at most one more
 *    than the number of bits in the absolute value of the input.
 */
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
    secp256k1_scalar s = *a;
    int last_set_bit = -1;
    int bit = 0;
    int sign = 1;
    int carry = 0;

    VERIFY_CHECK(wnaf != NULL);
    VERIFY_CHECK(0 <= len && len <= 256);
    VERIFY_CHECK(a != NULL);
    VERIFY_CHECK(2 <= w && w <= 31);

    memset(wnaf, 0, len * sizeof(wnaf[0]));

    if (secp256k1_scalar_get_bits(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);
        sign = -1;
    }

    while (bit < len) {
        int now;
        int word;
        if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
            bit++;
            continue;
        }

        now = w;
        if (now > len - bit) {
            now = len - bit;
        }

        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit;

        bit += now;
    }
#ifdef VERIFY
    CHECK(carry == 0);
    while (bit < 256) {
        CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
    }
#endif
    return last_set_bit + 1;
}
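
/* Worked example (illustrative only): with w == 2 the scalar 7 becomes
 * wnaf = { -1, 0, 0, 1 }, i.e. 7 == 8 - 1. The non-zero digits are odd, lie
 * within +/-((1<<(w-1)) - 1) == +/-1, and are separated by at least
 * w-1 == 1 zero. The return value is 4: the index one past the highest set
 * entry, one more than the 3 bits of the input. */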

struct secp256k1_strauss_point_state {
#ifdef USE_ENDOMORPHISM
    secp256k1_scalar na_1, na_lam;
    int wnaf_na_1[130];
    int wnaf_na_lam[130];
    int bits_na_1;
    int bits_na_lam;
#else
    int wnaf_na[256];
    int bits_na;
#endif
    size_t input_pos;
};

struct secp256k1_strauss_state {
    secp256k1_gej* prej;
    secp256k1_fe* zr;
    secp256k1_ge* pre_a;
#ifdef USE_ENDOMORPHISM
    secp256k1_ge* pre_a_lam;
#endif
    struct secp256k1_strauss_point_state* ps;
};

static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_ge tmpa;
    secp256k1_fe Z;
#ifdef USE_ENDOMORPHISM
    /* Split G factors. */
    secp256k1_scalar ng_1, ng_128;
    int wnaf_ng_1[129];
    int bits_ng_1 = 0;
    int wnaf_ng_128[129];
    int bits_ng_128 = 0;
#else
    int wnaf_ng[256];
    int bits_ng = 0;
#endif
    int i;
    int bits = 0;
    int np;
    int no = 0;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
#ifdef USE_ENDOMORPHISM
        /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);

        /* build wnaf representation for na_1 and na_lam. */
        state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 130, &state->ps[no].na_1, WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A);
        VERIFY_CHECK(state->ps[no].bits_na_1 <= 130);
        VERIFY_CHECK(state->ps[no].bits_na_lam <= 130);
        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }
#else
        /* build wnaf representation for na. */
        state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
        if (state->ps[no].bits_na > bits) {
            bits = state->ps[no].bits_na;
        }
#endif
        ++no;
    }

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     * The exception is the precomputed G table points, which are actually
     * affine. Compared to the base used for other points, they have a Z ratio
     * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
     * isomorphism to efficiently add with a known Z inverse.
     */
    if (no > 0) {
        /* Compute the odd multiples in Jacobian form. */
        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]);
        for (np = 1; np < no; ++np) {
            secp256k1_gej tmp = a[state->ps[np].input_pos];
#ifdef VERIFY
            secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
#endif
            secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
            secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp);
            secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z));
        }
        /* Bring them to the same Z denominator. */
        secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr);
    } else {
        secp256k1_fe_set_int(&Z, 1);
    }

#ifdef USE_ENDOMORPHISM
    for (np = 0; np < no; ++np) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
        }
    }

    if (ng) {
        /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */
        secp256k1_scalar_split_128(&ng_1, &ng_128, ng);

        /* Build wnaf representation for ng_1 and ng_128 */
        bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
        bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }
#else
    if (ng) {
        bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
        if (bits_ng > bits) {
            bits = bits_ng;
        }
    }
#endif

    secp256k1_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        int n;
        secp256k1_gej_double_var(r, r, NULL);
#ifdef USE_ENDOMORPHISM
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
#else
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng && (n = wnaf_ng[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
#endif
    }

    if (!r->infinity) {
        secp256k1_fe_mul(&r->z, &r->z, &Z);
    }
}

static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    struct secp256k1_strauss_point_state ps[1];
#ifdef USE_ENDOMORPHISM
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
#endif
    struct secp256k1_strauss_state state;

    state.prej = prej;
    state.zr = zr;
    state.pre_a = pre_a;
#ifdef USE_ENDOMORPHISM
    state.pre_a_lam = pre_a_lam;
#endif
    state.ps = ps;
    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}

static size_t secp256k1_strauss_scratch_size(size_t n_points) {
#ifdef USE_ENDOMORPHISM
    static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#else
    static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#endif
    return n_points*point_size;
}

static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    secp256k1_gej* points;
    secp256k1_scalar* scalars;
    struct secp256k1_strauss_state state;
    size_t i;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) {
        return 0;
    }
    points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej));
    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar));
    state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
    state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
    state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state));

    for (i = 0; i < n_points; i++) {
        secp256k1_ge point;
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_deallocate_frame(scratch);
            return 0;
        }
        secp256k1_gej_set_ge(&points[i], &point);
    }
    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_deallocate_frame(scratch);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) {
    return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}

/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{w*i} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w)
 *  - the number of words set is always WNAF_SIZE(w)
 *  - the returned skew is 0 or 1
 */
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
    int skew = 0;
    int pos;
    int max_pos;
    int last_w;
    const secp256k1_scalar *work = s;

    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            wnaf[pos] = 0;
        }
        return 0;
    }

    if (secp256k1_scalar_is_even(s)) {
        skew = 1;
    }

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
    /* Compute last window size. Relevant when window size doesn't divide the
     * number of bits in the scalar */
    last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;

    /* Store the position of the first nonzero word in max_pos to allow
     * skipping leading zeros when calculating the wnaf. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if (val != 0) {
            break;
        }
        wnaf[pos] = 0;
    }
    max_pos = pos;
    pos = 1;

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        } else {
            wnaf[pos] = val;
        }
        /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
         * is strictly negative or strictly positive respectively. Only change
         * coefficients at previous positions because the above code assumes
         * that wnaf[pos - 1] is odd.
         */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            wnaf[pos - 1] = 0;
        }
        ++pos;
    }

    return skew;
}
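
/* Worked example (illustrative only): with w == 2 and s == 6 (even), the
 * skew is 1 and the digits encode s + skew == 7: wnaf[0] == 3 and
 * wnaf[1] == 1, since 3*2^0 + 1*2^2 - skew == 6. A caller undoes the skew
 * by subtracting the point once, as the bucket[0] correction in
 * secp256k1_ecmult_pippenger_wnaf below does. */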

struct secp256k1_pippenger_point_state {
    int skew_na;
    size_t input_pos;
};

struct secp256k1_pippenger_state {
    int *wnaf_na;
    struct secp256k1_pippenger_point_state* ps;
};

/*
 * pippenger_wnaf computes the result of a multi-point multiplication as
 * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
 * for every i < n_wnaf, first each point is added to a "bucket" corresponding
 * to the point's wnaf[i]. Second, the buckets are added together such that
 * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
 */
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);
    size_t np;
    size_t no = 0;
    int i;
    int j;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        no++;
    }
    secp256k1_gej_set_infinity(r);

    if (no == 0) {
        return 1;
    }

    for (i = n_wnaf - 1; i >= 0; i--) {
        secp256k1_gej running_sum;

        for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
            secp256k1_gej_set_infinity(&buckets[j]);
        }

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            struct secp256k1_pippenger_point_state point_state = state->ps[np];
            secp256k1_ge tmp;
            int idx;

            if (i == 0) {
                /* correct for wnaf skew */
                int skew = point_state.skew_na;
                if (skew) {
                    secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                    secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                }
            }
            if (n > 0) {
                idx = (n - 1)/2;
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                idx = -(n + 1)/2;
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
            }
        }

        for(j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);
        }

        secp256k1_gej_set_infinity(&running_sum);
        /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
         * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ...
         *   + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
         * using an intermediate running sum:
         * running_sum = bucket[0] + bucket[1] + bucket[2] + ...
         *
         * The doubling is done implicitly by deferring the final window doubling (of 'r').
         */
        for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        }

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}
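
/* Illustrative trace (not part of the original source): with four buckets
 * b0..b3 the loop above computes
 *
 *     j == 3: running_sum = b3,          r = b3
 *     j == 2: running_sum = b3+b2,       r = 2*b3 + b2
 *     j == 1: running_sum = b3+b2+b1,    r = 3*b3 + 2*b2 + b1
 *
 * and the final double-and-add of running_sum + b0 yields
 * r = b0 + 3*b1 + 5*b2 + 7*b3, exactly the odd-weighted bucket sum. */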

/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int secp256k1_pippenger_bucket_window(size_t n) {
#ifdef USE_ENDOMORPHISM
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#else
    if (n <= 1) {
        return 1;
    } else if (n <= 11) {
        return 2;
    } else if (n <= 45) {
        return 3;
    } else if (n <= 100) {
        return 4;
    } else if (n <= 275) {
        return 5;
    } else if (n <= 625) {
        return 6;
    } else if (n <= 1850) {
        return 7;
    } else if (n <= 3400) {
        return 8;
    } else if (n <= 9630) {
        return 9;
    } else if (n <= 17900) {
        return 10;
    } else if (n <= 32800) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#endif
}

/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
#ifdef USE_ENDOMORPHISM
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#else
        case 1: return 1;
        case 2: return 11;
        case 3: return 45;
        case 4: return 100;
        case 5: return 275;
        case 6: return 625;
        case 7: return 1850;
        case 8: return 3400;
        case 9: return 9630;
        case 10: return 17900;
        case 11: return 32800;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#endif
    }
    return 0;
}

#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
    secp256k1_scalar tmp = *s1;
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
}
#endif
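
/* Background sketch (informal): the split relies on the GLV endomorphism
 * lambda*(x, y) == (beta*x, y) on secp256k1, giving
 *
 *     s*P == s1*P + s2*(lambda*P)    where s == s1 + s2*lambda (mod n)
 *
 * with s1 and s2 roughly 128 bits each. The conditional negations above keep
 * both halves in the low half of the scalar range, so their wnaf
 * representations stay short. */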

/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return ((1<<bucket_window) * sizeof(secp256k1_gej) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size);
}
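
/* Numeric example (illustrative only; exact sizes are platform-dependent):
 * with the endomorphism and n_points == 100, entries == 2*100 + 2 == 202, so
 * the scratch space must hold 202 (point, scalar, point-state, wnaf) records
 * plus 1 << bucket_window Jacobian bucket points and one state struct. */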

static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
     * sizes. The reason for +1 is that we add the G scalar to the list of
     * other scalars. */
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    secp256k1_ge *points;
    secp256k1_scalar *scalars;
    secp256k1_gej *buckets;
    struct secp256k1_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int i, j;
    int bucket_window;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    bucket_window = secp256k1_pippenger_bucket_window(n_points);
    if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points, bucket_window), PIPPENGER_SCRATCH_OBJECTS)) {
        return 0;
    }
    points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, (1<<bucket_window) * sizeof(*buckets));

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
#endif
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_deallocate_frame(scratch);
            return 0;
        }
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
#endif
        point_idx++;
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    /* Clear data */
    for(i = 0; (size_t)i < idx; i++) {
        secp256k1_scalar_clear(&scalars[i]);
        state_space->ps[i].skew_na = 0;
        for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
            state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
        }
    }
    for(i = 0; i < 1<<bucket_window; i++) {
        secp256k1_gej_clear(&buckets[i]);
    }
    secp256k1_scratch_deallocate_frame(scratch);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_pippenger_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space. The function ensures that fewer points may also be
 * used.
 */
static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) {
    size_t max_alloc = secp256k1_scratch_max_allocation(scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

#ifdef USE_ENDOMORPHISM
        entry_size = 2*entry_size;
#endif
        space_overhead = ((1<<bucket_window) * sizeof(secp256k1_gej) + entry_size + sizeof(struct secp256k1_pippenger_state));
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point. Does not
 * require a scratch space */
static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    secp256k1_scalar szero;
    secp256k1_gej tmpj;

    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        secp256k1_ge point;
        secp256k1_gej pointj;
        secp256k1_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t max_points;
    size_t n_batches;
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        secp256k1_scalar szero;
        secp256k1_scalar_set_int(&szero, 0);
        secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
    }

    max_points = secp256k1_pippenger_max_points(scratch);
    if (max_points == 0) {
        return 0;
    } else if (max_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    n_batches = (n+max_points-1)/max_points;
    n_batch_points = (n+n_batches-1)/n_batches;
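    /* Numeric example (illustrative only): for n == 5000 points and
     * max_points == 2000, n_batches == (5000+1999)/2000 == 3 and
     * n_batch_points == (5000+2)/3 == 1667, i.e. three nearly equal batches
     * rather than 2000 + 2000 + 1000. */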

    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        max_points = secp256k1_strauss_max_points(scratch);
        if (max_points == 0) {
            return 0;
        }
        n_batches = (n+max_points-1)/max_points;
        n_batch_points = (n+n_batches-1)/n_batches;
        f = secp256k1_ecmult_strauss_batch;
    }
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        secp256k1_gej tmp;
        if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}
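
/* Minimal usage sketch (illustrative; `term_t` and `my_cb` are hypothetical,
 * and the callback signature is assumed to match secp256k1_ecmult_multi_callback
 * as declared in ecmult.h):
 *
 *     typedef struct { secp256k1_scalar sc; secp256k1_ge pt; } term_t;
 *
 *     static int my_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
 *         term_t *terms = (term_t *)data;
 *         *sc = terms[idx].sc;
 *         *pt = terms[idx].pt;
 *         return 1;
 *     }
 *
 * Then secp256k1_ecmult_multi_var(ctx, scratch, &r, &g_sc, my_cb, terms, n)
 * computes r = g_sc*G + sum(terms[i].sc * terms[i].pt); passing scratch == NULL
 * falls back to the simple per-point variant. */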

#endif /* SECP256K1_ECMULT_IMPL_H */