/* (blame/table extraction residue removed) */
1 | /********************************************************************** |
2 | * Copyright (c) 2015 Andrew Poelstra * | |
3 | * Distributed under the MIT software license, see the accompanying * | |
4 | * file COPYING or http://www.opensource.org/licenses/mit-license.php.* | |
5 | **********************************************************************/ | |
6 | ||
abe2d3e8 DR |
7 | #ifndef SECP256K1_SCALAR_REPR_IMPL_H |
8 | #define SECP256K1_SCALAR_REPR_IMPL_H | |
83836a95 AP |
9 | |
10 | #include "scalar.h" | |
11 | ||
12 | #include <string.h> | |
13 | ||
14 | SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { | |
15 | return !(*a & 1); | |
16 | } | |
17 | ||
18 | SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; } | |
19 | SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; } | |
20 | ||
21 | SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { | |
22 | if (offset < 32) | |
23 | return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); | |
24 | else | |
25 | return 0; | |
26 | } | |
27 | ||
28 | SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { | |
29 | return secp256k1_scalar_get_bits(a, offset, count); | |
30 | } | |
31 | ||
32 | SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } | |
33 | ||
34 | static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { | |
35 | *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; | |
36 | return *r < *b; | |
37 | } | |
38 | ||
39 | static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { | |
40 | if (flag && bit < 32) | |
8fe63e56 | 41 | *r += ((uint32_t)1 << bit); |
83836a95 | 42 | #ifdef VERIFY |
0d82732a RC |
43 | VERIFY_CHECK(bit < 32); |
44 | /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */ | |
45 | VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER); | |
83836a95 AP |
46 | VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); |
47 | #endif | |
48 | } | |
49 | ||
50 | static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { | |
51 | const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; | |
52 | int i; | |
53 | *r = 0; | |
54 | for (i = 0; i < 32; i++) { | |
55 | *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER; | |
56 | } | |
57 | /* just deny overflow, it basically always happens */ | |
58 | if (overflow) *overflow = 0; | |
59 | } | |
60 | ||
61 | static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { | |
62 | memset(bin, 0, 32); | |
63 | bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; | |
64 | } | |
65 | ||
66 | SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { | |
67 | return *a == 0; | |
68 | } | |
69 | ||
70 | static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { | |
71 | if (*a == 0) { | |
72 | *r = 0; | |
73 | } else { | |
74 | *r = EXHAUSTIVE_TEST_ORDER - *a; | |
75 | } | |
76 | } | |
77 | ||
78 | SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { | |
79 | return *a == 1; | |
80 | } | |
81 | ||
82 | static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { | |
83 | return *a > EXHAUSTIVE_TEST_ORDER / 2; | |
84 | } | |
85 | ||
86 | static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { | |
87 | if (flag) secp256k1_scalar_negate(r, r); | |
88 | return flag ? -1 : 1; | |
89 | } | |
90 | ||
91 | static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { | |
92 | *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; | |
93 | } | |
94 | ||
95 | static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { | |
96 | int ret; | |
97 | VERIFY_CHECK(n > 0); | |
98 | VERIFY_CHECK(n < 16); | |
99 | ret = *r & ((1 << n) - 1); | |
100 | *r >>= n; | |
101 | return ret; | |
102 | } | |
103 | ||
104 | static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { | |
105 | *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; | |
106 | } | |
107 | ||
108 | static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { | |
109 | *r1 = *a; | |
110 | *r2 = 0; | |
111 | } | |
112 | ||
113 | SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { | |
114 | return *a == *b; | |
115 | } | |
116 | ||
/* Constant-time conditional move: *r = flag ? *a : *r.
 * Implemented with bit masks instead of a branch so that timing does not
 * depend on the (potentially secret) flag; do not "simplify" this into an
 * if/ternary. flag is expected to be 0 or 1 — the mask construction below
 * only yields an all-or-nothing select for those values. */
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint32_t mask0, mask1;
    VG_CHECK_VERIFY(r, sizeof(*r));
    mask0 = flag + ~((uint32_t)0); /* flag==1 -> 0x00000000, flag==0 -> 0xFFFFFFFF */
    mask1 = ~mask0;                /* complementary mask selecting *a */
    *r = (*r & mask0) | (*a & mask1);
}
124 | ||
abe2d3e8 | 125 | #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ |