/**********************************************************************
 * Copyright (c) 2015 Andrew Poelstra                                 *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "scalar.h"

#include <string.h>

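/* In this reduced-width representation a secp256k1_scalar is a single
 * uint32_t, kept reduced modulo EXHAUSTIVE_TEST_ORDER. The group order is
 * small enough that the exhaustive tests can enumerate every scalar value. */
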
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(*a & 1);
}

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    if (offset < 32)
        return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
    else
        return 0;
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    return secp256k1_scalar_get_bits(a, offset, count);
}

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
    /* Both inputs are below the order, so the reduced sum is smaller than b exactly when the addition wrapped. */
    return *r < *b;
}

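/* Worked example, assuming EXHAUSTIVE_TEST_ORDER = 13 (one of the small
 * orders the exhaustive tests use): adding 9 and 7 gives (9 + 7) % 13 = 3,
 * and 3 < 7, so the wrap is reported. */
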
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    if (flag && bit < 32)
        *r += ((uint32_t)1 << bit);
#ifdef VERIFY
    VERIFY_CHECK(bit < 32);
    /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
    VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}

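/* For the small orders used here the middle check above always holds; e.g.
 * with EXHAUSTIVE_TEST_ORDER = 13, even bit = 31 gives
 * (1 << 31) - 1 = 0x7fffffff <= 0xfffffff2 = UINT32_MAX - 13. */
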
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int i;
    int over = 0;
    *r = 0;
    for (i = 0; i < 32; i++) {
        *r = (*r * 0x100) + b32[i];
        if (*r >= EXHAUSTIVE_TEST_ORDER) {
            over = 1;
            *r %= EXHAUSTIVE_TEST_ORDER;
        }
    }
    if (overflow) *overflow = over;
}

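/* Worked example, assuming EXHAUSTIVE_TEST_ORDER = 13: 31 zero bytes
 * followed by 0x0f parse as 15, which reduces to 2 with *overflow set. */
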
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    /* The scalar fits in a uint32_t, so only the last four bytes can be nonzero. */
    memset(bin, 0, 32);
    bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return *a == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    if (*a == 0) {
        *r = 0;
    } else {
        *r = EXHAUSTIVE_TEST_ORDER - *a;
    }
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return *a == 1;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    return *a > EXHAUSTIVE_TEST_ORDER / 2;
}

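/* With EXHAUSTIVE_TEST_ORDER = 13, for instance, the "high" scalars are
 * 7 through 12, matching the above-half-order convention of the full-width
 * scalar implementations. */
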
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    if (flag) secp256k1_scalar_negate(r, r);
    return flag ? -1 : 1;
}

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = *r & ((1 << n) - 1);
    *r >>= n;
    return ret;
}

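/* Example: *r = 13 (binary 1101) with n = 2 returns the shifted-out low
 * bits (01 = 1) and leaves *r = 3 (binary 11). */
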
static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
    *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
}

static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    /* The scalar is at most 32 bits wide, so its low 128 bits are the whole value and the high half is zero. */
    *r1 = *a;
    *r2 = 0;
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return *a == *b;
}

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint32_t mask0, mask1;
    VG_CHECK_VERIFY(r, sizeof(*r));
    mask0 = flag + ~((uint32_t)0); /* All-ones when flag is 0, all-zeros when flag is 1. */
    mask1 = ~mask0;
    *r = (*r & mask0) | (*a & mask1); /* Branchlessly select *a when flag is 1, keep *r otherwise. */
}

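/* A minimal, compiled-out usage sketch. It assumes EXHAUSTIVE_TEST_ORDER = 13;
 * the function name is illustrative and not part of the library. */
#if 0
static void secp256k1_scalar_low_demo(void) {
    secp256k1_scalar a, b, r;
    secp256k1_scalar_set_int(&a, 9);
    secp256k1_scalar_set_int(&b, 7);
    secp256k1_scalar_mul(&r, &a, &b);   /* r = (9 * 7) % 13 = 11 */
    secp256k1_scalar_negate(&r, &a);    /* r = 13 - 9 = 4 */
    secp256k1_scalar_cmov(&r, &b, 1);   /* flag set: r becomes b = 7 */
    VERIFY_CHECK(secp256k1_scalar_eq(&r, &b));
}
#endif
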
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */