* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
-#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
-#define _SECP256K1_SCALAR_REPR_IMPL_H_
+#ifndef SECP256K1_SCALAR_REPR_IMPL_H
+#define SECP256K1_SCALAR_REPR_IMPL_H
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
-SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar_t *r) {
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
r->d[0] = 0;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
-SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v) {
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
r->d[0] = v;
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;
}
-SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) {
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}
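/* Informal example: for offset = 70 and count = 4 the window lies
* entirely in limb d[1] (70 >> 6 == 1), starts at bit 6 (70 & 0x3F),
* and the mask (((uint64_t)1 << 4) - 1) == 0xF keeps exactly four bits;
* the VERIFY_CHECK above guarantees the window never straddles a limb
* boundary. */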
-SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar_t *a, unsigned int offset, unsigned int count) {
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
if ((offset + count - 1) >> 6 == offset >> 6) {
}
}
-SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) {
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
return yes;
}
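/* The yes/no pair is a branch-free lexicographic compare of
* (d[3], d[2], d[1], d[0]) against the limbs of the order n, scanning
* from the most significant limb down; once "no" records that a higher
* limb was strictly below its limb of n (the remaining tests are elided
* in this hunk), lower limbs can no longer raise "yes". */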
-SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsigned int overflow) {
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
uint128_t t;
VERIFY_CHECK(overflow <= 1);
t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
return overflow;
}
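/* Why adding reduces: the SECP256K1_N_C_* constants are the limbs of
* the complement 2^256 - n, so for an overflowed x with n <= x < 2^256
* the sum x + (2^256 - n) wraps modulo 2^256 to exactly x - n, the
* final carry out of d[3] being discarded by the wrap. (The middle
* limbs of the chain are elided in this hunk.) */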
-static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
uint128_t t = (uint128_t)a->d[0] + b->d[0];
r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
return overflow;
}
-static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag) {
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
uint128_t t;
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a no-op */
#endif
}
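/* The adjustment above is what keeps this constant time: when flag == 0,
* ((uint32_t)flag - 1) is 0xFFFFFFFF, bit gains 0x100, and (bit >> 6)
* lands outside the four limbs, so the (elided) limb additions all
* contribute zero and the call is a no-op without ever branching on the
* possibly secret flag. */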
-static void secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) {
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
int over;
r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
}
}
-static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) {
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
-SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar_t *a) {
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}
-static void secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
r->d[0] = t & nonzero; t >>= 64;
r->d[3] = t & nonzero;
}
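/* The negation computes n - a limbwise as ~a + n + 1, i.e. the two's
* complement of a added to the group order; the nonzero mask clears the
* result when a == 0 so that negating zero yields zero rather than n. */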
-SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar_t *a) {
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
-static int secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
no |= (a->d[3] < SECP256K1_N_H_3);
return yes;
}
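/* Same branch-free ladder as secp256k1_scalar_check_overflow, but the
* comparison (partly elided in this hunk) runs against the half order
* n/2, whose limbs are the SECP256K1_N_H_* constants defined above, so
* callers can pick the "low" of the two equivalent scalars. */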
-static int secp256k1_scalar_cond_negate(secp256k1_scalar_t *r, int flag) {
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
/* If flag = 0, mask = 00...00 and this is a no-op;
* if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
uint64_t mask = !flag - 1;
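/* !flag is 0 or 1, so !flag - 1 wraps to all ones exactly when flag is
* nonzero; the rest of the routine (elided in this hunk) XORs each limb
* with this mask, selecting between a limb and its complement without a
* secret-dependent branch. */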
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* overflow is handled on the next line */ \
- c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
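/* A standalone sketch of the carry idiom these macros rely on (not part
* of the library, names are illustrative): after the unsigned addition
* c0 += t, the sum has wrapped modulo 2^64 iff the result is smaller
* than the operand, so (c0 < t) is exactly the carry bit to feed into
* the next limb. */
#define carry_sketch_add64(c0, c1, t) { \
(c0) += (t); /* low limb; may wrap */ \
(c1) += ((c0) < (t)); /* carry out of the low limb */ \
}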
tl = t; \
} \
c0 += tl; /* overflow is handled on the next line */ \
- th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c1 += th; /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK(c1 >= th); \
}
tl = t; \
} \
th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
- c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (th2 < th); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
- th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
c0 += tl2; /* overflow is handled on the next line */ \
- th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
c1 += th2; /* overflow is handled on the next line */ \
- c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c2 += (c1 < th2); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}
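/* muladd2 folds the doubling needed for squaring's cross terms
* (2*a[i]*a[j]) into the accumulation: th and tl are doubled first, and
* every place a doubling or an addition can wrap (th2 < th, tl2 < tl,
* c0 < tl2) contributes exactly one carry to the limb above. */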
#define sumadd(a) { \
unsigned int over; \
c0 += (a); /* overflow is handled on the next line */ \
- over = (c0 < (a)) ? 1 : 0; \
+ over = (c0 < (a)); \
c1 += over; /* overflow is handled on the next line */ \
- c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+ c2 += (c1 < over); /* never overflows by contract */ \
}
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
c0 += (a); /* overflow is handled on the next line */ \
- c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ c1 += (c0 < (a)); /* never overflows by contract (verified in the next line) */ \
VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
VERIFY_CHECK(c2 == 0); \
}
VERIFY_CHECK(c2 == 0); \
}
-static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l) {
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
/* Reduce 512 bits into 385. */
uint64_t m0, m1, m2, m3, m4, m5, m6;
"movq 56(%%rsi), %%r14\n"
/* Initialize r8,r9,r10 */
"movq 0(%%rsi), %%r8\n"
- "movq $0, %%r9\n"
- "movq $0, %%r10\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9) += n0 * c0 */
"movq %8, %%rax\n"
"mulq %%r11\n"
"adcq %%rdx, %%r9\n"
/* extract m0 */
"movq %%r8, %q0\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10) += l1 */
"addq 8(%%rsi), %%r9\n"
"adcq $0, %%r10\n"
"adcq $0, %%r8\n"
/* extract m1 */
"movq %%r9, %q1\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r10,r8,r9) += l2 */
"addq 16(%%rsi), %%r10\n"
"adcq $0, %%r8\n"
"adcq $0, %%r9\n"
/* extract m2 */
"movq %%r10, %q2\n"
- "movq $0, %%r10\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9,r10) += l3 */
"addq 24(%%rsi), %%r8\n"
"adcq $0, %%r9\n"
"adcq $0, %%r10\n"
/* extract m3 */
"movq %%r8, %q3\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10,r8) += n3 * c1 */
"movq %9, %%rax\n"
"mulq %%r14\n"
/* extract m6 */
"movq %%r8, %q6\n"
: "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
- : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
/* Reduce 385 bits into 258. */
"movq %q11, %%r13\n"
/* Initialize (r8,r9,r10) */
"movq %q5, %%r8\n"
- "movq $0, %%r9\n"
- "movq $0, %%r10\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
/* (r8,r9) += m4 * c0 */
"movq %12, %%rax\n"
"mulq %%r11\n"
"adcq %%rdx, %%r9\n"
/* extract p0 */
"movq %%r8, %q0\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r10) += m1 */
"addq %q6, %%r9\n"
"adcq $0, %%r10\n"
"adcq $0, %%r8\n"
/* extract p1 */
"movq %%r9, %q1\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r10,r8,r9) += m2 */
"addq %q7, %%r10\n"
"adcq $0, %%r8\n"
/* extract p4 */
"movq %%r9, %q4\n"
: "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
- : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
/* Reduce 258 bits into 256. */
"movq %%rax, 0(%q6)\n"
/* Move to (r8,r9) */
"movq %%rdx, %%r8\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r8,r9) += p1 */
"addq %q2, %%r8\n"
"adcq $0, %%r9\n"
"adcq %%rdx, %%r9\n"
/* Extract r1 */
"movq %%r8, 8(%q6)\n"
- "movq $0, %%r8\n"
+ "xorq %%r8, %%r8\n"
/* (r9,r8) += p4 */
"addq %%r10, %%r9\n"
"adcq $0, %%r8\n"
"adcq $0, %%r8\n"
/* Extract r2 */
"movq %%r9, 16(%q6)\n"
- "movq $0, %%r9\n"
+ "xorq %%r9, %%r9\n"
/* (r8,r9) += p3 */
"addq %q4, %%r8\n"
"adcq $0, %%r9\n"
/* Extract c */
"movq %%r9, %q0\n"
: "=g"(c)
- : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
uint128_t c;
secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
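/* Both the assembly and the C fallback follow the same three phases:
* multiply the high limbs by n' = 2^256 - n and fold them into the low
* half (512 -> 385 bits), repeat once (385 -> 258), then let the final
* secp256k1_scalar_reduce perform the last conditional subtraction of n
* (258 -> 256 bits). */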
-static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
const uint64_t *pb = b->d;
__asm__ __volatile__(
extract(l[5]);
muladd_fast(a->d[3], b->d[3]);
extract_fast(l[6]);
- VERIFY_CHECK(c1 <= 0);
+ VERIFY_CHECK(c1 == 0);
l[7] = c0;
#endif
}
-static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
#ifdef USE_ASM_X86_64
__asm__ __volatile__(
/* Preload */
#undef extract
#undef extract_fast
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8];
secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);
}
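/* A full multiplication is thus a 256x256 -> 512 bit schoolbook product
* followed by the 512 -> 256 bit reduction above; secp256k1_scalar_sqr
* below reuses the same reduction. */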
-static int secp256k1_scalar_shr_int(secp256k1_scalar_t *r, int n) {
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
return ret;
}
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t l[8];
secp256k1_scalar_sqr_512(l, a);
secp256k1_scalar_reduce_512(r, l);
}
-static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
+#ifdef USE_ENDOMORPHISM
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
r1->d[0] = a->d[0];
r1->d[1] = a->d[1];
r1->d[2] = 0;
r2->d[2] = 0;
r2->d[3] = 0;
}
+#endif
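+/* The split above places the low 128 bits of a in r1 and the high 128
+ * bits in r2 (the r2->d[0], r2->d[1] assignments are elided in this
+ * hunk), so a == r1 + 2^128 * r2; it is only used by the endomorphism
+ * optimization, hence the new USE_ENDOMORPHISM guard. */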
-SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
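/* XOR-ing limb pairs and OR-ing the results tests all four limbs with a
* single final compare and no early exit, so the running time does not
* depend on which limb, if any, differs. */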
-SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
uint64_t l[8];
unsigned int shiftlimbs;
unsigned int shiftlow;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
-#endif
+static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
+ uint64_t mask0, mask1;
+ VG_CHECK_VERIFY(r->d, sizeof(r->d));
+ mask0 = flag + ~((uint64_t)0);
+ mask1 = ~mask0;
+ r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
+ r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
+ r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
+ r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
+}
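+/* mask0 = flag + ~((uint64_t)0) is flag - 1: all ones when flag == 0
+ * (r keeps its limbs) and all zeros when flag == 1 (r takes a's limbs),
+ * so the conditional move never branches on the secret flag. */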
+
+#endif /* SECP256K1_SCALAR_REPR_IMPL_H */