/***********************************************************************
 * Copyright (c) 2016 Andrew Poelstra                                  *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include <stdio.h>
#include <stdlib.h>

#include <time.h>

#undef USE_ECMULT_STATIC_PRECOMPUTATION

#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
#define EXHAUSTIVE_TEST_LAMBDA 9   /* cube root of 1 mod 13 */
#endif
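/* Note: with EXHAUSTIVE_TEST_ORDER defined, the includes below pull in the
 * small-order test group from group_impl.h and the matching scalar
 * implementation in scalar_low_impl.h, which reduces scalars modulo the test
 * order. Every group element and scalar value can then be enumerated, which
 * is what the exhaustive loops in this file rely on. */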

#include "include/secp256k1.h"
#include "group.h"
#include "secp256k1.c"
#include "testrand_impl.h"

#ifdef ENABLE_MODULE_RECOVERY
#include "src/modules/recovery/main_impl.h"
#include "include/secp256k1_recovery.h"
#endif

/** stolen from tests.c */
void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
    CHECK(a->infinity == b->infinity);
    if (a->infinity) {
        return;
    }
    CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
    CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
}

void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
    secp256k1_fe z2s;
    secp256k1_fe u1, u2, s1, s2;
    CHECK(a->infinity == b->infinity);
    if (a->infinity) {
        return;
    }
    /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
    secp256k1_fe_sqr(&z2s, &b->z);
    secp256k1_fe_mul(&u1, &a->x, &z2s);
    u2 = b->x; secp256k1_fe_normalize_weak(&u2);
    secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
    s2 = b->y; secp256k1_fe_normalize_weak(&s2);
    CHECK(secp256k1_fe_equal_var(&u1, &u2));
    CHECK(secp256k1_fe_equal_var(&s1, &s2));
}

void random_fe(secp256k1_fe *x) {
    unsigned char bin[32];
    do {
        secp256k1_rand256(bin);
        if (secp256k1_fe_set_b32(x, bin)) {
            return;
        }
    } while(1);
}
/** END stolen from tests.c */

int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
                                      const unsigned char *key32, const unsigned char *algo16,
                                      void *data, unsigned int attempt) {
    secp256k1_scalar s;
    int *idata = data;
    (void)msg32;
    (void)key32;
    (void)algo16;
    /* Some nonces cannot be used because they'd cause s and/or r to be zero.
     * The signing function has retry logic here that just re-calls the nonce
     * function with an increased `attempt`. So if attempt > 0 this means we
     * need to change the nonce to avoid an infinite loop. */
    if (attempt > 0) {
        *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
    }
    secp256k1_scalar_set_int(&s, *idata);
    secp256k1_scalar_get_b32(nonce32, &s);
    return 1;
}
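
/* Note: the signing tests below pass a pointer to their loop counter `k` as
 * the `data` argument of this nonce function, so every signature is produced
 * with a known nonce. Since a retry bumps *idata in place, the caller's k can
 * change during signing; this is why those tests recompute expected_r only
 * after the call to sign. */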

#ifdef USE_ENDOMORPHISM
void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
    int i;
    for (i = 0; i < order; i++) {
        secp256k1_ge res;
        secp256k1_ge_mul_lambda(&res, &group[i]);
        ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
    }
}
#endif

void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
    int i, j;

    /* Sanity-check (and check infinity functions) */
    CHECK(secp256k1_ge_is_infinity(&group[0]));
    CHECK(secp256k1_gej_is_infinity(&groupj[0]));
    for (i = 1; i < order; i++) {
        CHECK(!secp256k1_ge_is_infinity(&group[i]));
        CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
    }

    /* Check all addition formulae */
    for (j = 0; j < order; j++) {
        secp256k1_fe fe_inv;
        secp256k1_fe_inv(&fe_inv, &groupj[j].z);
        for (i = 0; i < order; i++) {
            secp256k1_ge zless_gej;
            secp256k1_gej tmp;
            /* add_var */
            secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
            ge_equals_gej(&group[(i + j) % order], &tmp);
            /* add_ge */
            if (j > 0) {
                secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
                ge_equals_gej(&group[(i + j) % order], &tmp);
            }
            /* add_ge_var */
            secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
            ge_equals_gej(&group[(i + j) % order], &tmp);
            /* add_zinv_var */
            zless_gej.infinity = groupj[j].infinity;
            zless_gej.x = groupj[j].x;
            zless_gej.y = groupj[j].y;
            secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
            ge_equals_gej(&group[(i + j) % order], &tmp);
        }
    }

    /* Check doubling */
    for (i = 0; i < order; i++) {
        secp256k1_gej tmp;
        if (i > 0) {
            secp256k1_gej_double_nonzero(&tmp, &groupj[i]);
            ge_equals_gej(&group[(2 * i) % order], &tmp);
        }
        secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
        ge_equals_gej(&group[(2 * i) % order], &tmp);
    }

    /* Check negation */
    for (i = 1; i < order; i++) {
        secp256k1_ge tmp;
        secp256k1_gej tmpj;
        secp256k1_ge_neg(&tmp, &group[i]);
        ge_equals_ge(&group[order - i], &tmp);
        secp256k1_gej_neg(&tmpj, &groupj[i]);
        ge_equals_gej(&group[order - i], &tmpj);
    }
}

void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
    int i, j, r_log;
    for (r_log = 1; r_log < order; r_log++) {
        for (j = 0; j < order; j++) {
            for (i = 0; i < order; i++) {
                secp256k1_gej tmp;
                secp256k1_scalar na, ng;
                secp256k1_scalar_set_int(&na, i);
                secp256k1_scalar_set_int(&ng, j);

                secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
                ge_equals_gej(&group[(i * r_log + j) % order], &tmp);

                if (i > 0) {
                    secp256k1_ecmult_const(&tmp, &group[i], &ng, 256);
                    ge_equals_gej(&group[(i * j) % order], &tmp);
                }
            }
        }
    }
}

typedef struct {
    secp256k1_scalar sc[2];
    secp256k1_ge pt[2];
} ecmult_multi_data;

static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
    ecmult_multi_data *data = (ecmult_multi_data*) cbdata;
    *sc = data->sc[idx];
    *pt = data->pt[idx];
    return 1;
}

void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int i, j, k, x, y;
    secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
    for (i = 0; i < order; i++) {
        for (j = 0; j < order; j++) {
            for (k = 0; k < order; k++) {
                for (x = 0; x < order; x++) {
                    for (y = 0; y < order; y++) {
                        secp256k1_gej tmp;
                        secp256k1_scalar g_sc;
                        ecmult_multi_data data;

                        secp256k1_scalar_set_int(&data.sc[0], i);
                        secp256k1_scalar_set_int(&data.sc[1], j);
                        secp256k1_scalar_set_int(&g_sc, k);
                        data.pt[0] = group[x];
                        data.pt[1] = group[y];

                        secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
                        ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp);
                    }
                }
            }
        }
    }
    secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}

void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
    secp256k1_fe x;
    unsigned char x_bin[32];
    k %= EXHAUSTIVE_TEST_ORDER;
    x = group[k].x;
    secp256k1_fe_normalize(&x);
    secp256k1_fe_get_b32(x_bin, &x);
    secp256k1_scalar_set_b32(r, x_bin, NULL);
}
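
/* Note: the "verify by hand" loops below check the textbook ECDSA relation: a
 * signature (r, s) on message m under secret key d verifies iff there is some
 * nonce k with r == x(k*G) mod n and s*k == m + r*d (mod n), where n is the
 * group order. r_from_k above computes x(k*G) mod n, so the tests can simply
 * try every k. */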

void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int s, r, msg, key;
    for (s = 1; s < order; s++) {
        for (r = 1; r < order; r++) {
            for (msg = 1; msg < order; msg++) {
                for (key = 1; key < order; key++) {
                    secp256k1_ge nonconst_ge;
                    secp256k1_ecdsa_signature sig;
                    secp256k1_pubkey pk;
                    secp256k1_scalar sk_s, msg_s, r_s, s_s;
                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                    int k, should_verify;
                    unsigned char msg32[32];

                    secp256k1_scalar_set_int(&s_s, s);
                    secp256k1_scalar_set_int(&r_s, r);
                    secp256k1_scalar_set_int(&msg_s, msg);
                    secp256k1_scalar_set_int(&sk_s, key);

                    /* Verify by hand */
                    /* Run through every k value that gives us this r and check that *one* works.
                     * Note there could be none, there could be multiple, ECDSA is weird. */
                    should_verify = 0;
                    for (k = 0; k < order; k++) {
                        secp256k1_scalar check_x_s;
                        r_from_k(&check_x_s, group, k);
                        if (r_s == check_x_s) {
                            secp256k1_scalar_set_int(&s_times_k_s, k);
                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                        }
                    }
                    /* nb we have a "high s" rule */
                    should_verify &= !secp256k1_scalar_is_high(&s_s);

                    /* Verify by calling verify */
                    secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    secp256k1_pubkey_save(&pk, &nonconst_ge);
                    secp256k1_scalar_get_b32(msg32, &msg_s);
                    CHECK(should_verify ==
                          secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}

void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int i, j, k;

    /* Loop */
    for (i = 1; i < order; i++) {  /* message */
        for (j = 1; j < order; j++) {  /* key */
            for (k = 1; k < order; k++) {  /* nonce */
                const int starting_k = k;
                secp256k1_ecdsa_signature sig;
                secp256k1_scalar sk, msg, r, s, expected_r;
                unsigned char sk32[32], msg32[32];
                secp256k1_scalar_set_int(&msg, i);
                secp256k1_scalar_set_int(&sk, j);
                secp256k1_scalar_get_b32(sk32, &sk);
                secp256k1_scalar_get_b32(msg32, &msg);

                secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);

                secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                /* Note that we compute expected_r *after* signing -- this is important
                 * because our nonce-computing function might change k during
                 * signing. */
                r_from_k(&expected_r, group, k);
                CHECK(r == expected_r);
                CHECK((k * s) % order == (i + r * j) % order ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);

                /* Overflow means we've tried every possible nonce */
                if (k < starting_k) {
                    break;
                }
            }
        }
    }

    /* We would like to verify zero-knowledge here by counting how often every
     * possible (s, r) tuple appears, but because the group order is larger
     * than the field order, when coercing the x-values to scalar values, some
     * appear more often than others, so we are actually not zero-knowledge.
     * (This effect also appears in the real code, but the difference is on the
     * order of 1/2^128th the field order, so the deviation is not useful to a
     * computationally bounded attacker.)
     */
}

#ifdef ENABLE_MODULE_RECOVERY
void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    int i, j, k;

    /* Loop */
    for (i = 1; i < order; i++) {  /* message */
        for (j = 1; j < order; j++) {  /* key */
            for (k = 1; k < order; k++) {  /* nonce */
                const int starting_k = k;
                secp256k1_fe r_dot_y_normalized;
                secp256k1_ecdsa_recoverable_signature rsig;
                secp256k1_ecdsa_signature sig;
                secp256k1_scalar sk, msg, r, s, expected_r;
                unsigned char sk32[32], msg32[32];
                int expected_recid;
                int recid;
                secp256k1_scalar_set_int(&msg, i);
                secp256k1_scalar_set_int(&sk, j);
                secp256k1_scalar_get_b32(sk32, &sk);
                secp256k1_scalar_get_b32(msg32, &msg);

                secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);

                /* Check directly */
                secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
                r_from_k(&expected_r, group, k);
                CHECK(r == expected_r);
                CHECK((k * s) % order == (i + r * j) % order ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
                /* In computing the recid, there is an overflow condition that is disabled in
                 * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
                 * will exceed the group order, and our signing code always holds out for r
                 * values that don't overflow, so with a proper overflow check the tests would
                 * loop indefinitely. */
                r_dot_y_normalized = group[k].y;
                secp256k1_fe_normalize(&r_dot_y_normalized);
                /* Also the recovery id is flipped depending on whether we hit the low-s branch */
                if ((k * s) % order == (i + r * j) % order) {
                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
                } else {
                    expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
                }
                CHECK(recid == expected_recid);

                /* Convert to a standard sig then check */
                secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                /* Note that we compute expected_r *after* signing -- this is important
                 * because our nonce-computing function might change k during
                 * signing. */
                r_from_k(&expected_r, group, k);
                CHECK(r == expected_r);
                CHECK((k * s) % order == (i + r * j) % order ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);

                /* Overflow means we've tried every possible nonce */
                if (k < starting_k) {
                    break;
                }
            }
        }
    }
}

void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
    /* This is essentially a copy of test_exhaustive_verify, with recovery added */
    int s, r, msg, key;
    for (s = 1; s < order; s++) {
        for (r = 1; r < order; r++) {
            for (msg = 1; msg < order; msg++) {
                for (key = 1; key < order; key++) {
                    secp256k1_ge nonconst_ge;
                    secp256k1_ecdsa_recoverable_signature rsig;
                    secp256k1_ecdsa_signature sig;
                    secp256k1_pubkey pk;
                    secp256k1_scalar sk_s, msg_s, r_s, s_s;
                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                    int recid = 0;
                    int k, should_verify;
                    unsigned char msg32[32];

                    secp256k1_scalar_set_int(&s_s, s);
                    secp256k1_scalar_set_int(&r_s, r);
                    secp256k1_scalar_set_int(&msg_s, msg);
                    secp256k1_scalar_set_int(&sk_s, key);
                    secp256k1_scalar_get_b32(msg32, &msg_s);

                    /* Verify by hand */
                    /* Run through every k value that gives us this r and check that *one* works.
                     * Note there could be none, there could be multiple, ECDSA is weird. */
                    should_verify = 0;
                    for (k = 0; k < order; k++) {
                        secp256k1_scalar check_x_s;
                        r_from_k(&check_x_s, group, k);
                        if (r_s == check_x_s) {
                            secp256k1_scalar_set_int(&s_times_k_s, k);
                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                        }
                    }
                    /* nb we have a "high s" rule */
                    should_verify &= !secp256k1_scalar_is_high(&s_s);

                    /* We would like to try recovering the pubkey and checking that it matches,
                     * but pubkey recovery is impossible in the exhaustive tests (the reason
                     * being that there are 12 nonzero r values, 12 nonzero points, and no
                     * overlap between the sets, so there are no valid signatures). */

                    /* Verify by converting to a standard signature and calling verify */
                    secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
                    secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    secp256k1_pubkey_save(&pk, &nonconst_ge);
                    CHECK(should_verify ==
                          secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}
#endif

int main(void) {
    int i;
    secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
    secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];

    /* Build context */
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);

    /* TODO set z = 1, then do num_tests runs with random z values */

    /* Generate the entire group */
    secp256k1_gej_set_infinity(&groupj[0]);
    secp256k1_ge_set_gej(&group[0], &groupj[0]);
    for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
        /* Set a different random z-value for each Jacobian point */
        secp256k1_fe z;
        random_fe(&z);

        secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
        secp256k1_ge_set_gej(&group[i], &groupj[i]);
        secp256k1_gej_rescale(&groupj[i], &z);

        /* Verify against ecmult_gen */
        {
            secp256k1_scalar scalar_i;
            secp256k1_gej generatedj;
            secp256k1_ge generated;

            secp256k1_scalar_set_int(&scalar_i, i);
            secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
            secp256k1_ge_set_gej(&generated, &generatedj);

            CHECK(group[i].infinity == 0);
            CHECK(generated.infinity == 0);
            CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
            CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
        }
    }

    /* Run the tests */
#ifdef USE_ENDOMORPHISM
    test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
#endif
    test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
    test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
    test_exhaustive_ecmult_multi(ctx, group, EXHAUSTIVE_TEST_ORDER);
    test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
    test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);

#ifdef ENABLE_MODULE_RECOVERY
    test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
    test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
#endif

    secp256k1_context_destroy(ctx);
    return 0;
}