#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H
+#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif
+
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
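+ /* The expression below takes the size of the prec table itself (not of the
+  * pointer) without needing an object; ROUND_TO_ALIGN (util.h) rounds it up
+  * to the alignment used by the preallocated-memory allocator. */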
+ static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec));
+#else
+ static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
+#endif
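+
+/* Illustrative sketch, not part of this file: the top-level context code
+ * (secp256k1.c) is expected to size one flat buffer from constants like the
+ * one above, roughly
+ *
+ *   ROUND_TO_ALIGN(sizeof(secp256k1_context))
+ *       + SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE + ...
+ *
+ * and to pass a cursor into that buffer as the prealloc argument of the
+ * *_build functions below, which then allocate from it instead of the heap. */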
+
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
-static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- secp256k1_ge prec[1024];
+ secp256k1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G];
secp256k1_gej gj;
secp256k1_gej nums_gej;
int i, j;
+ size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
+ void* const base = *prealloc;
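+ /* The two locals above delimit this function's slice of the preallocated
+  * buffer; manual_alloc bound-checks each allocation against base + prealloc_size. */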
#endif
if (ctx->prec != NULL) {
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
+ ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
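+ /* manual_alloc (util.h) is a bump allocator over the caller-provided buffer:
+  * it returns the current *prealloc cursor and advances it by the aligned
+  * size of *ctx->prec. Unlike the old checked_malloc path it cannot fail,
+  * since the caller guarantees the buffer is large enough. */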
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
{
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
secp256k1_fe nums_x;
secp256k1_ge nums_ge;
int r;
r = secp256k1_fe_set_b32(&nums_x, nums_b32);
(void)r;
VERIFY_CHECK(r);
r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
(void)r;
VERIFY_CHECK(r);
secp256k1_gej_set_ge(&nums_gej, &nums_ge);
/* Add G to make the bits in x uniformly distributed. */
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
}
/* compute prec. */
{
- secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
+ secp256k1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */
secp256k1_gej gbase;
secp256k1_gej numsbase;
- gbase = gj; /* 16^j * G */
+ gbase = gj; /* PREC_G^j * G */
numsbase = nums_gej; /* 2^j * nums. */
- for (j = 0; j < 64; j++) {
- /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
- precj[j*16] = numsbase;
- for (i = 1; i < 16; i++) {
- secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
+ for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
+ /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */
+ precj[j*ECMULT_GEN_PREC_G] = numsbase;
+ for (i = 1; i < ECMULT_GEN_PREC_G; i++) {
+ secp256k1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL);
}
- /* Multiply gbase by 16. */
- for (i = 0; i < 4; i++) {
+ /* Multiply gbase by PREC_G. */
+ for (i = 0; i < ECMULT_GEN_PREC_B; i++) {
secp256k1_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numsbase by 2. */
secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
- if (j == 62) {
+ if (j == ECMULT_GEN_PREC_N - 2) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
secp256k1_gej_neg(&numsbase, &numsbase);
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
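+ /* Net effect of the offsets above: sum_{j=0}^{N-2} 2^j + (1 - 2^{N-1}) = 0,
+  * so adding one entry from each of the ECMULT_GEN_PREC_N rows cancels the
+  * nums term and leaves a pure multiple of G. */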
- secp256k1_ge_set_all_gej_var(prec, precj, 1024);
+ secp256k1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G);
}
- for (j = 0; j < 64; j++) {
- for (i = 0; i < 16; i++) {
- secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
+ for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
+ for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
+ secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]);
}
}
#else
- (void)cb;
- ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
+ (void)prealloc;
+ ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])secp256k1_ecmult_static_context;
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
}
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) {
return ctx->prec != NULL;
}
-static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
- const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) {
- if (src->prec == NULL) {
- dst->prec = NULL;
- } else {
+static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
- memcpy(dst->prec, src->prec, sizeof(*dst->prec));
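+ /* After the caller's flat memcpy of the whole context buffer, dst->prec
+  * still holds src's pointer value; recompute it at the same offset
+  * relative to dst. */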
+ if (src->prec != NULL) {
+ /* We cast to void* first to suppress a -Wcast-align warning. */
+ dst->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
+ }
#else
- (void)cb;
- dst->prec = src->prec;
+ (void)dst, (void)src;
#endif
- dst->initial = src->initial;
- dst->blind = src->blind;
- }
}
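+
+/* Usage sketch (the real caller is secp256k1_context_preallocated_clone in
+ * secp256k1.c; the names here are from that assumed context): cloning is a
+ * flat copy of the whole buffer followed by this pointer fix-up,
+ *
+ *   memcpy(dst_buf, src_buf, prealloc_size);
+ *   secp256k1_ecmult_gen_context_finalize_memcpy(&dst->ecmult_gen_ctx, &src->ecmult_gen_ctx);
+ */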
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
-#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- free(ctx->prec);
-#endif
secp256k1_scalar_clear(&ctx->blind);
secp256k1_gej_clear(&ctx->initial);
ctx->prec = NULL;
}
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) {
secp256k1_ge add;
secp256k1_ge_storage adds;
secp256k1_scalar gnb;
int bits;
int i, j;
memset(&adds, 0, sizeof(adds));
*r = ctx->initial;
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
secp256k1_scalar_add(&gnb, gn, &ctx->blind);
add.infinity = 0;
- for (j = 0; j < 64; j++) {
- bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
- for (i = 0; i < 16; i++) {
+ for (j = 0; j < ECMULT_GEN_PREC_N; j++) {
+ bits = secp256k1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B);
+ for (i = 0; i < ECMULT_GEN_PREC_G; i++) {
/** This uses a conditional move to avoid any secret data in array indexes.
* _Any_ use of secret indexes has been demonstrated to result in timing
* sidechannels, even when the cache-line access patterns are uniform. */
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
}
secp256k1_ge_from_storage(&add, &adds);
secp256k1_gej_add_ge(r, r, &add);
}
bits = 0;
secp256k1_ge_clear(&add);
secp256k1_scalar_clear(&gnb);
}
/* Setup blinding values for secp256k1_ecmult_gen. */
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
secp256k1_scalar b;
secp256k1_gej gb;
secp256k1_fe s;
unsigned char nonce32[32];
secp256k1_rfc6979_hmac_sha256 rng;
- int retry;
+ int overflow;
unsigned char keydata[64] = {0};
if (seed32 == NULL) {
/* When seed is NULL, reset the initial point and blinding value. */
secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
secp256k1_gej_neg(&ctx->initial, &ctx->initial);
secp256k1_scalar_set_int(&ctx->blind, 1);
}
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */
secp256k1_scalar_get_b32(nonce32, &ctx->blind);
memcpy(keydata, nonce32, 32);
if (seed32 != NULL) {
memcpy(keydata + 32, seed32, 32);
}
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
memset(keydata, 0, sizeof(keydata));
- /* Retry for out of range results to achieve uniformity. */
- do {
- secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
- retry = !secp256k1_fe_set_b32(&s, nonce32);
- retry |= secp256k1_fe_is_zero(&s);
- } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > Fp. */
+ /* Accept unobservably small non-uniformity. */
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ overflow = !secp256k1_fe_set_b32(&s, nonce32);
+ overflow |= secp256k1_fe_is_zero(&s);
+ secp256k1_fe_cmov(&s, &secp256k1_fe_one, overflow);
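+ /* Overflow or zero here requires the HMAC output to land outside the field,
+  * which is cryptographically unreachable; forcing s to 1 in that case keeps
+  * the code free of secret-dependent branches at no measurable cost to
+  * uniformity. */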
/* Randomize the projection to defend against multiplier sidechannels. */
secp256k1_gej_rescale(&ctx->initial, &s);
secp256k1_fe_clear(&s);
- do {
- secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
- secp256k1_scalar_set_b32(&b, nonce32, &retry);
- /* A blinding value of 0 works, but would undermine the projection hardening. */
- retry |= secp256k1_scalar_is_zero(&b);
- } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > order. */
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ secp256k1_scalar_set_b32(&b, nonce32, NULL);
+ /* A blinding value of 0 works, but would undermine the projection hardening. */
+ secp256k1_scalar_cmov(&b, &secp256k1_scalar_one, secp256k1_scalar_is_zero(&b));
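+ /* As with s above: b == 0 is unreachable in practice, and the constant-time
+  * cmov replaces the old retry loop without branching on secret data. */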
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
memset(nonce32, 0, 32);
secp256k1_ecmult_gen(ctx, &gb, &b);