even if r was negative. */
static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m);
-/** Right-shift the passed number by bits bits, and return those bits. */
-static int secp256k1_num_shift(secp256k1_num_t *r, int bits);
+/** Right-shift the passed number by the specified number of bits. */
+static void secp256k1_num_shift(secp256k1_num_t *r, int bits);
/** Check whether a number is zero. */
static int secp256k1_num_is_zero(const secp256k1_num_t *a);
r->neg = a->neg ^ b->neg;
}
-static int secp256k1_num_shift(secp256k1_num_t *r, int bits) {
- VERIFY_CHECK(bits <= GMP_NUMB_BITS);
- mp_limb_t ret = mpn_rshift(r->data, r->data, r->limbs, bits);
- if (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--;
- ret >>= (GMP_NUMB_BITS - bits);
- return ret;
+static void secp256k1_num_shift(secp256k1_num_t *r, int bits) {
+ if (bits % GMP_NUMB_BITS) {
+        /* Shift within limbs. */
+ mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
+ }
+ if (bits >= GMP_NUMB_BITS) {
+        /* Shift full limbs. */
+ for (int i = 0; i < r->limbs; i++) {
+ int index = i + (bits / GMP_NUMB_BITS);
+ if (index < r->limbs && index < 2*NUM_LIMBS) {
+ r->data[i] = r->data[index];
+ } else {
+ r->data[i] = 0;
+ }
+ }
+ }
+ while (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--;
}
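The new secp256k1_num_shift handles arbitrarily large shifts in two phases: a sub-limb bit shift across all limbs (delegated to mpn_rshift), followed by a whole-limb move. A minimal standalone sketch of the same idea, using hypothetical fixed 32-bit limbs (LIMBS, LIMB_BITS, and num_shift_sketch are illustrative names, not part of the patch):

#include <inttypes.h>
#include <stdio.h>

#define LIMBS 4
#define LIMB_BITS 32

static void num_shift_sketch(uint32_t d[LIMBS], int bits) {
    int small = bits % LIMB_BITS;
    if (small) { /* Shift within limbs, carrying bits down (mpn_rshift's job). */
        for (int i = 0; i < LIMBS; i++) {
            d[i] >>= small;
            if (i + 1 < LIMBS) d[i] |= d[i + 1] << (LIMB_BITS - small);
        }
    }
    if (bits >= LIMB_BITS) { /* Shift full limbs down. */
        for (int i = 0; i < LIMBS; i++) {
            int index = i + bits / LIMB_BITS;
            d[i] = index < LIMBS ? d[index] : 0;
        }
    }
}

int main(void) {
    uint32_t d[LIMBS] = {0x89abcdefu, 0x01234567u, 0, 0}; /* 0x0123456789abcdef */
    num_shift_sketch(d, 36); /* one full limb plus 4 bits */
    printf("%08" PRIx32 " %08" PRIx32 "\n", d[1], d[0]); /* prints: 00000000 00123456 */
    return 0;
}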
static void secp256k1_num_negate(secp256k1_num_t *r) {
static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
#endif
+/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
+static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift);
+
#endif
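The round-to-nearest contract declared above amounts to the usual add-half-then-truncate identity: round(x / 2**shift) = floor((x + 2**(shift-1)) / 2**shift), so only the highest discarded bit of the product matters. A small sketch of that equivalence on plain uint64_t values (div_round_nearest is a hypothetical helper, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Round x / 2^shift to the nearest integer; requires shift >= 1. */
static uint64_t div_round_nearest(uint64_t x, unsigned int shift) {
    uint64_t truncated = x >> shift;
    /* The highest discarded bit tells us whether the remainder is >= 1/2. */
    return truncated + ((x >> (shift - 1)) & 1);
}

int main(void) {
    printf("%llu\n", (unsigned long long)div_round_nearest(13, 2)); /* 13/4 = 3.25 -> 3 */
    printf("%llu\n", (unsigned long long)div_round_nearest(14, 2)); /* 14/4 = 3.5  -> 4 */
    printf("%llu\n", (unsigned long long)div_round_nearest(15, 2)); /* 15/4 = 3.75 -> 4 */
    return 0;
}

Halves round up under this rule, matching the conditional secp256k1_scalar_add_bit step in the implementations below.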
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
/* 160 bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
- uint64_t l[8];
-
/* l[0..7] = a[0..3] * b[0..3]. */
muladd_fast(a->d[0], b->d[0]);
extract_fast(l[0]);
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) {
/* 160 bit accumulator. */
uint64_t c0 = 0, c1 = 0;
uint32_t c2 = 0;
- uint64_t l[8];
-
/* l[0..7] = a[0..3]^2. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
extract_fast(l[6]);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
#undef sumadd
#undef extract
#undef extract_fast
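+/* Note: the 512-bit product is split out of secp256k1_scalar_mul/sqr so that
+ * secp256k1_scalar_mul_shift_var below can use it before modular reduction. */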
+static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+ uint64_t l[8];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
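+/* Worked example (illustrative): shift = 272 gives shiftlimbs = 4 and
+ * shiftlow = 16, so r->d[0] gathers bits 272..335 of the product as
+ * (l[4] >> 16) | (l[5] << 48), and product bit 271 decides the rounding. */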
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+ VERIFY_CHECK(shift >= 256);
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ unsigned int shiftlimbs = shift >> 6;
+ unsigned int shiftlow = shift & 0x3F;
+ unsigned int shifthigh = 64 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
+ if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) {
+ secp256k1_scalar_add_bit(r, 0);
+ }
+}
+
#endif
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul_512(uint32_t l[16], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
/* 96 bit accumulator. */
uint32_t c0 = 0, c1 = 0, c2 = 0;
- uint32_t l[16];
-
/* l[0..15] = a[0..7] * b[0..7]. */
muladd_fast(a->d[0], b->d[0]);
extract_fast(l[0]);
extract_fast(l[14]);
VERIFY_CHECK(c1 == 0);
l[15] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr_512(uint32_t l[16], const secp256k1_scalar_t *a) {
/* 96 bit accumulator. */
uint32_t c0 = 0, c1 = 0, c2 = 0;
- uint32_t l[16];
-
/* l[0..15] = a[0..7]^2. */
muladd_fast(a->d[0], a->d[0]);
extract_fast(l[0]);
extract_fast(l[14]);
VERIFY_CHECK(c1 == 0);
l[15] = c0;
-
- secp256k1_scalar_reduce_512(r, l);
}
#undef sumadd
#undef extract
#undef extract_fast
+static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+ uint32_t l[16];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+ uint32_t l[16];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}
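+/* Worked example (illustrative): shift = 300 gives shiftlimbs = 9 and
+ * shiftlow = 12, so r->d[0] gathers bits 300..331 of the product as
+ * (l[9] >> 12) | (l[10] << 20), and product bit 299 decides the rounding. */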
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+ VERIFY_CHECK(shift >= 256);
+ uint32_t l[16];
+ secp256k1_scalar_mul_512(l, a, b);
+ unsigned int shiftlimbs = shift >> 5;
+ unsigned int shiftlow = shift & 0x1F;
+ unsigned int shifthigh = 32 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
+ if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) {
+ secp256k1_scalar_add_bit(r, 0);
+ }
+}
+
#endif
secp256k1_scalar_get_num(num, &sc);
}
-void test_num_get_set_bin(void) {
- secp256k1_num_t n1,n2;
- random_num_order_test(&n1);
- unsigned char c[32];
- secp256k1_num_get_bin(c, 32, &n1);
- secp256k1_num_set_bin(&n2, c, 32);
- CHECK(secp256k1_num_eq(&n1, &n2));
- for (int i=0; i<32; i++) {
- /* check whether the lower 8 bits correspond to the last byte */
- int low1 = secp256k1_num_shift(&n1, 8);
- int low2 = c[31];
- CHECK(low1 == low2);
- /* shift bits off the byte representation, and compare */
- memmove(c+1, c, 31);
- c[0] = 0;
- secp256k1_num_set_bin(&n2, c, 32);
- CHECK(secp256k1_num_eq(&n1, &n2));
- }
-}
-
void test_num_negate(void) {
secp256k1_num_t n1;
secp256k1_num_t n2;
void run_num_smalltests(void) {
for (int i=0; i<100*count; i++) {
- test_num_get_set_bin();
test_num_negate();
test_num_add_sub();
}
/* Negating zero should still result in zero. */
CHECK(secp256k1_scalar_is_zero(&neg));
}
+
+ {
+ /* Test secp256k1_scalar_mul_shift_var. */
+ secp256k1_scalar_t r;
+ unsigned int shift = 256 + (secp256k1_rand32() % 257);
+ secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
+ secp256k1_num_t rnum;
+ secp256k1_num_mul(&rnum, &s1num, &s2num);
+        /* Emulate round-to-nearest in num arithmetic: shifting off shift-1
+           bits, adding one, and shifting off the final bit is the same as
+           adding 2^(shift-1) before truncating. */
+        secp256k1_num_shift(&rnum, shift - 1);
+ secp256k1_num_t one;
+ unsigned char cone[1] = {0x01};
+ secp256k1_num_set_bin(&one, cone, 1);
+ secp256k1_num_add(&rnum, &rnum, &one);
+ secp256k1_num_shift(&rnum, 1);
+ secp256k1_num_t rnum2;
+ secp256k1_scalar_get_num(&rnum2, &r);
+ CHECK(secp256k1_num_eq(&rnum, &rnum2));
+ }
#endif
{
secp256k1_scalar_mul(&r2, &s1, &s1);
CHECK(secp256k1_scalar_eq(&r1, &r2));
}
+
}
void run_scalar_tests(void) {