for (i = 0; i < 20000; i++) {
secp256k1_scalar_t l, r;
- secp256k1_scalar_split_lambda_var(&l, &r, &data->scalar_x);
+ secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
#ifdef USE_ENDOMORPHISM
/* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
- secp256k1_scalar_split_lambda_var(&na_1, &na_lam, na);
+ secp256k1_scalar_split_lambda(&na_1, &na_lam, na);
/* build wnaf representation for na_1 and na_lam. */
bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, 130, &na_1, WINDOW_A);
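
The split is what makes the endomorphism useful here: lambda acts on the curve as lambda*(x, y) = (beta*x, y) for a fixed field constant beta, so

    na*G == na_1*G + na_lam*(lambda*G)

turns one 256-bit multiplication into two ~128-bit ones that share their doublings, with lambda*G obtained from G by a single field multiplication. The 130 passed to secp256k1_ecmult_wnaf appears to give the signed wnaf of each ~128-bit half a little headroom.
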
/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
-/** Add a power of two to a scalar. The result is not allowed to overflow. */
-static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit);
+/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag);
/** Multiply two scalars (modulo the group order). */
static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
/** Find r1 and r2 such that r1+r2*2^128 = a. */
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
-static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
+static void secp256k1_scalar_split_lambda(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
#endif
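
A caller-side sketch of the new interface (illustrative only, not part of this change; secp256k1_scalar_set_int is the existing setter declared elsewhere in this header): the flag chooses between adding 2^bit and doing nothing, without a secret-dependent branch.

/* Sketch: conditionally add 2^5 to a scalar. The caller must guarantee
 * that the addition cannot overflow. */
secp256k1_scalar_t r;
int secret_flag = 1;                      /* 1: add 2^5, 0: leave r untouched */
secp256k1_scalar_set_int(&r, 10);
secp256k1_scalar_cadd_bit(&r, 5, secret_flag);
/* r is 42 when secret_flag == 1 and still 10 when it is 0. */
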
/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
return overflow;
}
-static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag) {
uint128_t t;
VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
- if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) {
- secp256k1_scalar_add_bit(r, 0);
- }
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
#endif
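
The masking line is the heart of the constant-time change: with flag == 0, (uint32_t)flag - 1 wraps to 0xFFFFFFFF, the AND keeps 0x100, and bit gains 256, so every (bit >> 6) == k test fails and only zeros get added; with flag == 1 the mask is 0 and the addition proceeds as before. A standalone sketch of the same idiom on a plain uint64_t (illustrative only, not the library code):

#include <assert.h>
#include <stdint.h>

/* Same branch-free idiom applied to a plain uint64_t: conditionally add
 * 2^bit (bit < 64) to x depending on flag, without branching on flag. */
static uint64_t cadd_bit_u64(uint64_t x, unsigned int bit, int flag) {
    /* flag == 1: mask is 0 and bit is unchanged.
     * flag == 0: mask is 0x100, bit moves out of range, nothing is added. */
    bit += ((uint32_t)flag - 1) & 0x100;
    return x + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
}

int main(void) {
    assert(cadd_bit_u64(10, 5, 1) == 42); /* 10 + 2^5 */
    assert(cadd_bit_u64(10, 5, 0) == 10); /* unchanged when flag is 0 */
    return 0;
}
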
return overflow;
}
-static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag) {
uint64_t t;
VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
- if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) {
- secp256k1_scalar_add_bit(r, 0);
- }
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
}
#endif
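
In both backends the final cadd_bit implements the round-to-nearest step of mul_shift_var: after truncating the product by shift bits, the highest bit that was shifted out is added back, which rounds halves upward. The same arithmetic on a small integer, as an illustrative sketch:

#include <assert.h>
#include <stdint.h>

/* Round x / 2^shift to the nearest integer (halves round up) the way
 * mul_shift_var does it: truncate, then add the highest bit that was
 * shifted out. Valid here for 0 < shift < 64. */
static uint64_t round_shift(uint64_t x, unsigned int shift) {
    return (x >> shift) + ((x >> (shift - 1)) & 1);
}

int main(void) {
    assert(round_shift(9, 2) == 2);  /* 2.25 rounds down */
    assert(round_shift(10, 2) == 3); /* 2.50 rounds up   */
    assert(round_shift(11, 2) == 3); /* 2.75 rounds up   */
    return 0;
}
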
* The function below splits a into r1 and r2, such that r1 + lambda * r2 == a (mod order).
*/
-static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_split_lambda(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
secp256k1_scalar_t c1, c2;
static const secp256k1_scalar_t minus_lambda = SECP256K1_SCALAR_CONST(
0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
);
VERIFY_CHECK(r1 != a);
VERIFY_CHECK(r2 != a);
+ /* these _var calls are constant time since the shift amount is constant */
secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
secp256k1_scalar_mul(&c1, &c1, &minus_b1);
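
Since the implementation ends by computing r1 = a - lambda*r2 (via minus_lambda), the identity r1 + lambda*r2 == a holds exactly however c1 and c2 were rounded; the rounding only controls how short r1 and r2 come out. A hedged sketch of a check one could run against the split (the local lambda constant is written out here for illustration and is not part of the patch):

/* Illustrative check: for any scalar a, the split must satisfy
 * r1 + lambda * r2 == a (mod order). */
static void check_split_identity(const secp256k1_scalar_t *a) {
    /* lambda: the cube root of 1 modulo the group order used by the endomorphism. */
    static const secp256k1_scalar_t lambda = SECP256K1_SCALAR_CONST(
        0x5363AD4CUL, 0xC05C30E0UL, 0xA5261C02UL, 0x8812645AUL,
        0x122E22EAUL, 0x20816678UL, 0xDF02967CUL, 0x1B23BD72UL
    );
    secp256k1_scalar_t r1, r2, t;
    secp256k1_scalar_split_lambda(&r1, &r2, a);
    secp256k1_scalar_mul(&t, &r2, &lambda);   /* t = lambda * r2 */
    secp256k1_scalar_add(&t, &t, &r1);        /* t = r1 + lambda * r2 */
    VERIFY_CHECK(secp256k1_scalar_eq(&t, a)); /* must equal a */
}
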
r2 = s1;
if (!secp256k1_scalar_add(&r1, &r1, &b)) {
/* No overflow happened. */
- secp256k1_scalar_add_bit(&r2, bit);
+ secp256k1_scalar_cadd_bit(&r2, bit, 1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ /* cadd is a noop when flag is zero */
+ secp256k1_scalar_cadd_bit(&r2, bit, 0);
CHECK(secp256k1_scalar_eq(&r1, &r2));
}
}
unsigned char tmp[32];
random_scalar_order_test(&full);
- secp256k1_scalar_split_lambda_var(&s1, &slam, &full);
+ secp256k1_scalar_split_lambda(&s1, &slam, &full);
/* check that both are <= 128 bits in size */
if (secp256k1_scalar_is_high(&s1))