/*****************************************************************************
 * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
 * Distributed under the MIT software license, see the accompanying          *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.       *
 *****************************************************************************/

#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

#include <string.h>
#include <stdint.h>

#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"

#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need to lower these values for exhaustive tests because
 * the tables cannot have infinities in them (this breaks the
 * affine-isomorphism stuff which tracks z-ratios) */
#  if EXHAUSTIVE_TEST_ORDER > 128
#    define WINDOW_A 5
#    define WINDOW_G 8
#  elif EXHAUSTIVE_TEST_ORDER > 8
#    define WINDOW_A 4
#    define WINDOW_G 4
#  else
#    define WINDOW_A 2
#    define WINDOW_G 2
#  endif
#else
/* optimal for 128-bit and 256-bit exponents. */
#  define WINDOW_A 5
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
 *  performance at the cost of an exponentially larger precomputed
 *  table. The exact table size is
 *      (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage)  bytes,
 *  where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
 *  be larger due to platform-specific padding and alignment.
 *  If the endomorphism optimization is enabled (USE_ENDOMORPHISM),
 *  two tables of this size are used instead of only one.
 */
#  define WINDOW_G ECMULT_WINDOW_SIZE
#endif
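
/* Example (illustrative, not part of the build): with WINDOW_G = 15 and the
 * typical 64-byte secp256k1_ge_storage, the G table takes
 *
 *     (1 << (15 - 2)) * 64 = 8192 * 64 = 524288 bytes (512 KiB),
 *
 * and twice that when USE_ENDOMORPHISM adds the second table. */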

/* No one will ever need more than a window size of 24. The code might
 * be correct for larger values of ECMULT_WINDOW_SIZE but this is not
 * tested.
 *
 * The following limitations are known, and there are probably more:
 * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect
 * because the size of the memory object that we allocate (in bytes)
 * will not fit in a size_t.
 * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect
 * because certain expressions will overflow.
 */
#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24
#  error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
#endif

#ifdef USE_ENDOMORPHISM
    #define WNAF_BITS 128
#else
    #define WNAF_BITS 256
#endif
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
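
/* Example (illustrative): without the endomorphism WNAF_BITS is 256, so a
 * window of w = 5 gives WNAF_SIZE(5) = (256 + 4) / 5 = 52 words; with the
 * endomorphism (WNAF_BITS = 128) it gives (128 + 4) / 5 = 26 words. */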

/** The number of entries a table with precomputed multiples needs to have. */
#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2))
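
/* Example (illustrative): a table stores only the odd multiples 1*P, 3*P, ...,
 * (2^(w-1) - 1)*P, so for WINDOW_A = 5 it has ECMULT_TABLE_SIZE(5) = 1 << 3 = 8
 * entries. */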

/* The number of objects allocated on the scratch space for ecmult_multi algorithms */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 6

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#ifdef USE_ENDOMORPHISM
    #define ECMULT_PIPPENGER_THRESHOLD 88
#else
    #define ECMULT_PIPPENGER_THRESHOLD 160
#endif

#ifdef USE_ENDOMORPHISM
    #define ECMULT_MAX_POINTS_PER_BATCH 5000000
#else
    #define ECMULT_MAX_POINTS_PER_BATCH 10000000
#endif

/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain
 *  the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0]
 *  will contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z.
 *  Prej's Z values are undefined, except for the last value.
 */
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) {
    secp256k1_gej d;
    secp256k1_ge a_ge, d_ge;
    int i;

    VERIFY_CHECK(!a->infinity);

    secp256k1_gej_double_var(&d, a, NULL);

    /*
     * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate
     * of 'd', and scale the 1P starting value's x/y coordinates without changing its z.
     */
    d_ge.x = d.x;
    d_ge.y = d.y;
    d_ge.infinity = 0;

    secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z);
    prej[0].x = a_ge.x;
    prej[0].y = a_ge.y;
    prej[0].z = a->z;
    prej[0].infinity = 0;

    zr[0] = d.z;
    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]);
    }

    /*
     * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only
     * the final point's z coordinate is actually used though, so just update that.
     */
    secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z);
}
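
/* Usage sketch (illustrative; 'a' stands for a caller-provided non-infinity
 * Jacobian point), mirroring the WINDOW_A-sized call made below:
 *
 *     secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
 *     secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
 *     secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, &a);
 *     // prej now holds 1*a, 3*a, ..., 15*a (for WINDOW_A == 5); only prej[n-1].z is valid.
 */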

/** Fill a table 'pre' with precomputed odd multiples of a.
 *
 *  There are two versions of this function:
 *  - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its
 *    resulting point set to a single constant Z denominator, stores the X and Y
 *    coordinates as ge_storage points in pre, and stores the global Z in rz.
 *    It only operates on tables sized for WINDOW_A wnaf multiples.
 *  - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its
 *    resulting point set to actually affine points, and stores those in pre.
 *    It operates on tables of any size, but uses heap-allocated temporaries.
 *
 *  To compute a*P + b*G, we compute a table for P using the first function,
 *  and for G using the second (which requires an inverse, but it only needs to
 *  happen once).
 */
static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
    secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];

    /* Compute the odd multiples in Jacobian form. */
    secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a);
    /* Bring them to the same Z denominator. */
    secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr);
}

static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) {
    secp256k1_gej d;
    secp256k1_ge d_ge, p_ge;
    secp256k1_gej pj;
    secp256k1_fe zi;
    secp256k1_fe zr;
    secp256k1_fe dx_over_dz_squared;
    int i;

    VERIFY_CHECK(!a->infinity);

    secp256k1_gej_double_var(&d, a, NULL);

    /* First, we perform all the additions in an isomorphic curve obtained by multiplying
     * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use
     * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store
     * the resulting y-coordinate and the z-ratio, since we only have enough memory to
     * store two field elements. These are sufficient to efficiently undo the isomorphism
     * and recompute all the `x`s.
     */
    d_ge.x = d.x;
    d_ge.y = d.y;
    d_ge.infinity = 0;

    secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z);
    pj.x = p_ge.x;
    pj.y = p_ge.y;
    pj.z = a->z;
    pj.infinity = 0;

    for (i = 0; i < (n - 1); i++) {
        secp256k1_fe_normalize_var(&pj.y);
        secp256k1_fe_to_storage(&pre[i].y, &pj.y);
        secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr);
        secp256k1_fe_normalize_var(&zr);
        secp256k1_fe_to_storage(&pre[i].x, &zr);
    }

    /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */
    secp256k1_fe_mul(&zi, &pj.z, &d.z);
    secp256k1_fe_inv_var(&zi, &zi);

    /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so
     * that we can combine it with the saved z-ratios to compute the other zs
     * without any more inversions. */
    secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi);
    secp256k1_ge_to_storage(&pre[n - 1], &p_ge);

    /* Compute the actual x-coordinate of D, which will be needed below. */
    secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */
    secp256k1_fe_sqr(&dx_over_dz_squared, &d.z);
    secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x);

    /* Going into the second loop, we have set `pre[n-1]` to its final affine
     * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We
     * have `zi = (p.z * d.z)^-1`, where
     *
     *     `p.z` is the z-coordinate of the point on the isomorphic curve
     *           which was ultimately assigned to `pre[n-1]`.
     *     `d.z` is the multiplier that must be applied to all z-coordinates
     *           to move from our isomorphic curve back to secp256k1; so the
     *           product `p.z * d.z` is the z-coordinate of the secp256k1
     *           point assigned to `pre[n-1]`.
     *
     * All subsequent inverse-z-coordinates can be obtained by multiplying this
     * factor by successive z-ratios, which is much more efficient than directly
     * computing each one.
     *
     * Importantly, these inverse-zs will be coordinates of points on secp256k1,
     * while our other stored values come from computations on the isomorphic
     * curve. So in the below loop, we will take care not to actually use `zi`
     * or any derived values until we're back on secp256k1.
     */
    i = n - 1;
    while (i > 0) {
        secp256k1_fe zi2, zi3;
        const secp256k1_fe *rzr;
        i--;

        secp256k1_ge_from_storage(&p_ge, &pre[i]);

        /* For each remaining point, we extract the z-ratio from the stored
         * x-coordinate, compute its z^-1 from that, and compute the full
         * point from that. */
        rzr = &p_ge.x;
        secp256k1_fe_mul(&zi, &zi, rzr);
        secp256k1_fe_sqr(&zi2, &zi);
        secp256k1_fe_mul(&zi3, &zi2, &zi);
        /* To compute the actual x-coordinate, we use the stored z ratio and
         * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var`
         * in the loop above, as well as the inverse of the square of its
         * z-coordinate. We store the latter in the `zi2` variable, which is
         * computed iteratively starting from the overall Z inverse then
         * multiplying by each z-ratio in turn.
         *
         * Denoting the z-ratio as `rzr`, we observe that it is equal to `h`
         * from the inside of the above `gej_add_ge_var` call. This satisfies
         *
         *     rzr = d_x * z^2 - x * d_z^2
         *
         * where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)`
         * are Jacobian coordinates of our desired point -- except both are on
         * the isomorphic curve that we were using when we called `gej_add_ge_var`.
         * To get back to secp256k1, we must multiply both `z`s by `d_z`, or
         * equivalently divide both `x`s by `d_z^2`. Our equation then becomes
         *
         *     rzr = d_x * z^2 / d_z^2 - x
         *
         * (The left-hand-side, being a ratio of z-coordinates, is unaffected
         * by the isomorphism.)
         *
         * Rearranging to solve for `x`, we have
         *
         *     x = d_x * z^2 / d_z^2 - rzr
         *
         * But what we actually want is the affine coordinate `X = x/z^2`,
         * which will satisfy
         *
         *     X = d_x / d_z^2 - rzr / z^2
         *       = dx_over_dz_squared - rzr * zi2
         */
        secp256k1_fe_mul(&p_ge.x, rzr, &zi2);
        secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1);
        secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared);
        /* y is stored_y/z^3, as we expect */
        secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3);
        /* Store */
        secp256k1_ge_to_storage(&pre[i], &p_ge);
    }
}

/** The following two macros retrieve a particular odd multiple from a table
 *  of precomputed multiples. */
#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    if ((n) > 0) { \
        *(r) = (pre)[((n)-1)/2]; \
    } else { \
        *(r) = (pre)[(-(n)-1)/2]; \
        secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
    } \
} while(0)

#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \
    VERIFY_CHECK(((n) & 1) == 1); \
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
    VERIFY_CHECK((n) <=  ((1 << ((w)-1)) - 1)); \
    if ((n) > 0) { \
        secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
    } else { \
        secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
        secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
    } \
} while(0)
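
/* Example (illustrative): with w = WINDOW_A = 5 the table 'pre' holds
 * 1*P, 3*P, ..., 15*P, so ECMULT_TABLE_GET_GE(&t, pre, 7, 5) copies pre[3]
 * (i.e. 7*P) into t, while ECMULT_TABLE_GET_GE(&t, pre, -7, 5) copies pre[3]
 * and negates its y coordinate, yielding -7*P. */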

static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
    ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#ifdef USE_ENDOMORPHISM
    + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
#endif
    ;

static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
    ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
    ctx->pre_g_128 = NULL;
#endif
}

static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
    secp256k1_gej gj;
    void* const base = *prealloc;
    size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;

    if (ctx->pre_g != NULL) {
        return;
    }

    /* get the generator */
    secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);

    {
        size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G));
        /* check for overflow */
        VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
        ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
    }

    /* precompute the tables with odd multiples */
    secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);

#ifdef USE_ENDOMORPHISM
    {
        secp256k1_gej g_128j;
        int i;

        size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G));
        /* check for overflow */
        VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
        ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);

        /* calculate 2^128*generator */
        g_128j = gj;
        for (i = 0; i < 128; i++) {
            secp256k1_gej_double_var(&g_128j, &g_128j, NULL);
        }
        secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j);
    }
#endif
}

static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
    if (src->pre_g != NULL) {
        /* We cast to void* first to suppress a -Wcast-align warning. */
        dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
    }
#ifdef USE_ENDOMORPHISM
    if (src->pre_g_128 != NULL) {
        dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
    }
#endif
}

static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) {
    return ctx->pre_g != NULL;
}

static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
    secp256k1_ecmult_context_init(ctx);
}

/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
 *  with the following guarantees:
 *  - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1)
 *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
 *  - the number of set values in wnaf is returned. This number is at most 256, and at most one more
 *    than the number of bits in the (absolute value) of the input.
 */
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
    secp256k1_scalar s = *a;
    int last_set_bit = -1;
    int bit = 0;
    int sign = 1;
    int carry = 0;

    VERIFY_CHECK(wnaf != NULL);
    VERIFY_CHECK(0 <= len && len <= 256);
    VERIFY_CHECK(a != NULL);
    VERIFY_CHECK(2 <= w && w <= 31);

    memset(wnaf, 0, len * sizeof(wnaf[0]));

    if (secp256k1_scalar_get_bits(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);
        sign = -1;
    }

    while (bit < len) {
        int now;
        int word;
        if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
            bit++;
            continue;
        }

        now = w;
        if (now > len - bit) {
            now = len - bit;
        }

        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit;

        bit += now;
    }
#ifdef VERIFY
    CHECK(carry == 0);
    while (bit < 256) {
        CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
    }
#endif
    return last_set_bit + 1;
}
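
/* Example (illustrative): with w = 4 the scalar 15 is rewritten as
 * 15 = -1 + 1*2^4, i.e. wnaf[0] = -1, wnaf[4] = 1 and all other entries 0;
 * the function returns 5 (one past the highest set index). */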

struct secp256k1_strauss_point_state {
#ifdef USE_ENDOMORPHISM
    secp256k1_scalar na_1, na_lam;
    int wnaf_na_1[130];
    int wnaf_na_lam[130];
    int bits_na_1;
    int bits_na_lam;
#else
    int wnaf_na[256];
    int bits_na;
#endif
    size_t input_pos;
};

struct secp256k1_strauss_state {
    secp256k1_gej* prej;
    secp256k1_fe* zr;
    secp256k1_ge* pre_a;
#ifdef USE_ENDOMORPHISM
    secp256k1_ge* pre_a_lam;
#endif
    struct secp256k1_strauss_point_state* ps;
};

static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, int num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_ge tmpa;
    secp256k1_fe Z;
#ifdef USE_ENDOMORPHISM
    /* Split G factors. */
    secp256k1_scalar ng_1, ng_128;
    int wnaf_ng_1[129];
    int bits_ng_1 = 0;
    int wnaf_ng_128[129];
    int bits_ng_128 = 0;
#else
    int wnaf_ng[256];
    int bits_ng = 0;
#endif
    int i;
    int bits = 0;
    int np;
    int no = 0;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
#ifdef USE_ENDOMORPHISM
        /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);

        /* build wnaf representation for na_1 and na_lam. */
        state->ps[no].bits_na_1   = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1,   130, &state->ps[no].na_1,   WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 130, &state->ps[no].na_lam, WINDOW_A);
        VERIFY_CHECK(state->ps[no].bits_na_1 <= 130);
        VERIFY_CHECK(state->ps[no].bits_na_lam <= 130);
        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }
#else
        /* build wnaf representation for na. */
        state->ps[no].bits_na = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na, 256, &na[np], WINDOW_A);
        if (state->ps[no].bits_na > bits) {
            bits = state->ps[no].bits_na;
        }
#endif
        ++no;
    }

    /* Calculate odd multiples of a.
     * All multiples are brought to the same Z 'denominator', which is stored
     * in Z. Due to secp256k1's isomorphism we can do all operations pretending
     * that the Z coordinate was 1, use affine addition formulae, and correct
     * the Z coordinate of the result once at the end.
     * The exception is the precomputed G table points, which are actually
     * affine. Compared to the base used for other points, they have a Z ratio
     * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
     * isomorphism to efficiently add with a known Z inverse.
     */
    if (no > 0) {
        /* Compute the odd multiples in Jacobian form. */
        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]);
        for (np = 1; np < no; ++np) {
            secp256k1_gej tmp = a[state->ps[np].input_pos];
#ifdef VERIFY
            secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
#endif
            secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
            secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp);
            secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z));
        }
        /* Bring them to the same Z denominator. */
        secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr);
    } else {
        secp256k1_fe_set_int(&Z, 1);
    }

#ifdef USE_ENDOMORPHISM
    for (np = 0; np < no; ++np) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
        }
    }

    if (ng) {
        /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */
        secp256k1_scalar_split_128(&ng_1, &ng_128, ng);

        /* Build wnaf representation for ng_1 and ng_128 */
        bits_ng_1   = secp256k1_ecmult_wnaf(wnaf_ng_1,   129, &ng_1,   WINDOW_G);
        bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }
#else
    if (ng) {
        bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G);
        if (bits_ng > bits) {
            bits = bits_ng;
        }
    }
#endif

    secp256k1_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        int n;
        secp256k1_gej_double_var(r, r, NULL);
#ifdef USE_ENDOMORPHISM
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
#else
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na && (n = state->ps[np].wnaf_na[i])) {
                ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng && (n = wnaf_ng[i])) {
            ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
#endif
    }

    if (!r->infinity) {
        secp256k1_fe_mul(&r->z, &r->z, &Z);
    }
}

static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    struct secp256k1_strauss_point_state ps[1];
#ifdef USE_ENDOMORPHISM
    secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
#endif
    struct secp256k1_strauss_state state;

    state.prej = prej;
    state.zr = zr;
    state.pre_a = pre_a;
#ifdef USE_ENDOMORPHISM
    state.pre_a_lam = pre_a_lam;
#endif
    state.ps = ps;
    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
}
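
/* Usage sketch (illustrative; 'ectx' stands for a built secp256k1_ecmult_context,
 * 'a' for a Jacobian point and 'na', 'ng' for scalars): computes r = na*a + ng*G.
 *
 *     secp256k1_gej r;
 *     secp256k1_ecmult(&ectx, &r, &a, &na, &ng);
 */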

static size_t secp256k1_strauss_scratch_size(size_t n_points) {
#ifdef USE_ENDOMORPHISM
    static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#else
    static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
#endif
    return n_points*point_size;
}

static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    secp256k1_gej* points;
    secp256k1_scalar* scalars;
    struct secp256k1_strauss_state state;
    size_t i;
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
    state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
    state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
#ifdef USE_ENDOMORPHISM
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
    state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A);
#else
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
#endif
    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));

    if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    for (i = 0; i < n_points; i++) {
        secp256k1_ge point;
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        secp256k1_gej_set_ge(&points[i], &point);
    }
    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}

/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w)
 *  - the number of words set is always WNAF_SIZE(w)
 *  - the returned skew is 0 or 1
 */
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
    int skew = 0;
    int pos;
    int max_pos;
    int last_w;
    const secp256k1_scalar *work = s;

    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            wnaf[pos] = 0;
        }
        return 0;
    }

    if (secp256k1_scalar_is_even(s)) {
        skew = 1;
    }

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
    /* Compute last window size. Relevant when window size doesn't divide the
     * number of bits in the scalar */
    last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;

    /* Store the position of the first nonzero word in max_pos to allow
     * skipping leading zeros when calculating the wnaf. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if (val != 0) {
            break;
        }
        wnaf[pos] = 0;
    }
    max_pos = pos;
    pos = 1;

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        } else {
            wnaf[pos] = val;
        }
        /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
         * is strictly negative or strictly positive respectively. Only change
         * coefficients at previous positions because above code assumes that
         * wnaf[pos - 1] is odd.
         */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            wnaf[pos - 1] = 0;
        }
        ++pos;
    }

    return skew;
}
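
/* Example (illustrative): for the even scalar s = 6 and w = 4, the skew is 1 and
 * wnaf[0] = 6 + 1 = 7 (all other words 0), so sum(wnaf[i]*2^(4*i)) - skew
 * gives back 7 - 1 = 6 while every nonzero word stays odd. */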

struct secp256k1_pippenger_point_state {
    int skew_na;
    size_t input_pos;
};

struct secp256k1_pippenger_state {
    int *wnaf_na;
    struct secp256k1_pippenger_point_state* ps;
};

/*
 * pippenger_wnaf computes the result of a multi-point multiplication as
 * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
 * for every i < n_wnaf, first each point is added to a "bucket" corresponding
 * to the point's wnaf[i]. Second, the buckets are added together such that
 * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
 */
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);
    size_t np;
    size_t no = 0;
    int i;
    int j;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        no++;
    }
    secp256k1_gej_set_infinity(r);

    if (no == 0) {
        return 1;
    }

    for (i = n_wnaf - 1; i >= 0; i--) {
        secp256k1_gej running_sum;

        for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
            secp256k1_gej_set_infinity(&buckets[j]);
        }

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            struct secp256k1_pippenger_point_state point_state = state->ps[np];
            secp256k1_ge tmp;
            int idx;

            if (i == 0) {
                /* correct for wnaf skew */
                int skew = point_state.skew_na;
                if (skew) {
                    secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                    secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                }
            }
            if (n > 0) {
                idx = (n - 1)/2;
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                idx = -(n + 1)/2;
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
            }
        }

        for(j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);
        }

        secp256k1_gej_set_infinity(&running_sum);
        /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
         *                   = bucket[0] +   bucket[1] +   bucket[2] +   bucket[3] + ...
         *                   + 2 *          (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
         * using an intermediate running sum:
         * running_sum = bucket[0] +   bucket[1] +   bucket[2] + ...
         *
         * The doubling is done implicitly by deferring the final window doubling (of 'r').
         */
        for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        }

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}
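
/* Worked example (illustrative) of the running-sum step above with three
 * buckets b0, b1, b2:
 *
 *     1*b0 + 3*b1 + 5*b2 = (b0 + b1 + b2) + 2*(b1 + 2*b2)
 *
 * After processing j = 2 and j = 1, r holds b1 + 2*b2; the final double of r
 * and the add of running_sum = b0 + b1 + b2 then yield b0 + 3*b1 + 5*b2. */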

/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int secp256k1_pippenger_bucket_window(size_t n) {
#ifdef USE_ENDOMORPHISM
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#else
    if (n <= 1) {
        return 1;
    } else if (n <= 11) {
        return 2;
    } else if (n <= 45) {
        return 3;
    } else if (n <= 100) {
        return 4;
    } else if (n <= 275) {
        return 5;
    } else if (n <= 625) {
        return 6;
    } else if (n <= 1850) {
        return 7;
    } else if (n <= 3400) {
        return 8;
    } else if (n <= 9630) {
        return 9;
    } else if (n <= 17900) {
        return 10;
    } else if (n <= 32800) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
#endif
}

/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
#ifdef USE_ENDOMORPHISM
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#else
        case 1: return 1;
        case 2: return 11;
        case 3: return 45;
        case 4: return 100;
        case 5: return 275;
        case 6: return 625;
        case 7: return 1850;
        case 8: return 3400;
        case 9: return 9630;
        case 10: return 17900;
        case 11: return 32800;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
#endif
    }
    return 0;
}

#ifdef USE_ENDOMORPHISM
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
    secp256k1_scalar tmp = *s1;
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
}
#endif

/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}

static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
    /* Use 2(n+1) with the endomorphism, n+1 without, when calculating batch
     * sizes. The reason for +1 is that we add the G scalar to the list of
     * other scalars. */
#ifdef USE_ENDOMORPHISM
    size_t entries = 2*n_points + 2;
#else
    size_t entries = n_points + 1;
#endif
    secp256k1_ge *points;
    secp256k1_scalar *scalars;
    secp256k1_gej *buckets;
    struct secp256k1_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int i, j;
    int bucket_window;

    (void)ctx;
    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    bucket_window = secp256k1_pippenger_bucket_window(n_points);
    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
#endif
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        idx++;
#ifdef USE_ENDOMORPHISM
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
#endif
        point_idx++;
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    /* Clear data */
    for(i = 0; (size_t)i < idx; i++) {
        secp256k1_scalar_clear(&scalars[i]);
        state_space->ps[i].skew_na = 0;
        for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
            state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
        }
    }
    for(i = 0; i < 1<<bucket_window; i++) {
        secp256k1_gej_clear(&buckets[i]);
    }
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_pippenger_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space. The function ensures that any smaller number of
 * points can be used with the same scratch space as well.
 */
static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

#ifdef USE_ENDOMORPHISM
        entry_size = 2*entry_size;
#endif
        space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point. Does not
 * require a scratch space */
static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    secp256k1_scalar szero;
    secp256k1_gej tmpj;

    secp256k1_scalar_set_int(&szero, 0);
    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        secp256k1_ge point;
        secp256k1_gej pointj;
        secp256k1_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

/* Compute the number of batches and the batch size given the maximum batch size and the
 * total number of points */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
    return 1;
}
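
/* Example (illustrative): for n = 19 points and max_n_batch_points = 5 this
 * computes n_batches = ceil(19/5) = 4 and n_batch_points = ceil(19/4) = 5, so
 * the batches process 5, 5, 5 and 4 points respectively. */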

typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t n_batches;
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        secp256k1_scalar szero;
        secp256k1_scalar_set_int(&szero, 0);
        secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
    }

    /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than
     * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
     * As a first step check if there's enough space for Pippenger's algo (which requires less space
     * than Strauss' algo) and if not, use the simple algorithm. */
    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
    }
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        secp256k1_gej tmp;
        if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}
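
/* Usage sketch (illustrative; the callback and the names below are the caller's,
 * not part of this file): secp256k1_ecmult_multi_var computes
 * r = inp_g_sc*G + sum_i scalar_i*point_i, pulling each (scalar_i, point_i)
 * pair from the callback.
 *
 *     static int my_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
 *         const struct my_terms *terms = (const struct my_terms *)data;  // hypothetical caller data
 *         *sc = terms->scalars[idx];
 *         *pt = terms->points[idx];
 *         return 1;                                                      // returning 0 aborts
 *     }
 *
 *     // r = g_scalar*G + sum of n callback-supplied terms; 'scratch' may be NULL:
 *     secp256k1_ecmult_multi_var(&error_cb, &ectx, scratch, &r, &g_scalar, my_cb, &terms, n);
 */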

#endif /* SECP256K1_ECMULT_IMPL_H */