language: c
-sudo: false
+os: linux
addons:
apt:
packages: libgmp-dev
- if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi
- if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi
- ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY --enable-jni=$JNI $EXTRAFLAGS $USE_HOST && make -j2 $BUILD
-os: linux
JNI_LIB =
endif
include_HEADERS = include/secp256k1.h
+include_HEADERS += include/secp256k1_preallocated.h
noinst_HEADERS =
noinst_HEADERS += src/scalar.h
noinst_HEADERS += src/scalar_4x64.h
if !ENABLE_COVERAGE
exhaustive_tests_CPPFLAGS += -DVERIFY
endif
-exhaustive_tests_LDADD = $(SECP_LIBS)
+exhaustive_tests_LDADD = $(SECP_LIBS) $(COMMON_LIB)
exhaustive_tests_LDFLAGS = -static
TESTS += exhaustive_tests
endif
])
AC_ARG_ENABLE(benchmark,
- AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is yes)]),
+ AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]),
[use_benchmark=$enableval],
[use_benchmark=yes])
AC_ARG_ENABLE(coverage,
- AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]),
+ AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]),
[enable_coverage=$enableval],
[enable_coverage=no])
AC_ARG_ENABLE(tests,
- AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]),
+ AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]),
[use_tests=$enableval],
[use_tests=yes])
AC_ARG_ENABLE(openssl_tests,
- AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests, if OpenSSL is available (default is auto)]),
+ AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests [default=auto]]),
[enable_openssl_tests=$enableval],
[enable_openssl_tests=auto])
AC_ARG_ENABLE(experimental,
- AS_HELP_STRING([--enable-experimental],[allow experimental configure options (default is no)]),
+ AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]),
[use_experimental=$enableval],
[use_experimental=no])
AC_ARG_ENABLE(exhaustive_tests,
- AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]),
+ AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]),
[use_exhaustive_tests=$enableval],
[use_exhaustive_tests=yes])
AC_ARG_ENABLE(endomorphism,
- AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]),
+ AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]),
[use_endomorphism=$enableval],
[use_endomorphism=no])
AC_ARG_ENABLE(ecmult_static_precomputation,
- AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]),
+ AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]),
[use_ecmult_static_precomputation=$enableval],
[use_ecmult_static_precomputation=auto])
[enable_module_ecdh=no])
AC_ARG_ENABLE(module_recovery,
- AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]),
+ AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]),
[enable_module_recovery=$enableval],
[enable_module_recovery=no])
AC_ARG_ENABLE(jni,
- AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni (default is no)]),
+ AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]),
[use_jni=$enableval],
[use_jni=no])
AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto],
-[Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto])
+[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto])
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
-[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
+[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto])
AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
-[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto])
+[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto])
-AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto]
-[Specify assembly optimizations to use. Default is auto (experimental: arm)])],[req_asm=$withval], [req_asm=auto])
+AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
+[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto])
+
+AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
+[window size for ecmult precomputation for verification, specified as an integer in range [2..24].]
+[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
+[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
+[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.]
+["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
+)],
+[req_ecmult_window=$withval], [req_ecmult_window=auto])
AC_CHECK_TYPES([__int128])
if test x"$enable_coverage" = x"yes"; then
AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
CFLAGS="$CFLAGS -O0 --coverage"
- LDFLAGS="--coverage"
+ LDFLAGS="$LDFLAGS --coverage"
else
CFLAGS="$CFLAGS -O3"
fi
;;
esac
+# set ecmult window size
+if test x"$req_ecmult_window" = x"auto"; then
+ set_ecmult_window=15
+else
+ set_ecmult_window=$req_ecmult_window
+fi
+
+error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"']
+case $set_ecmult_window in
+''|*[[!0-9]]*)
+ # no valid integer
+ AC_MSG_ERROR($error_window_size)
+ ;;
+*)
+ if test "$set_ecmult_window" -lt 2 -o "$set_ecmult_window" -gt 24 ; then
+ # not in range
+ AC_MSG_ERROR($error_window_size)
+ fi
+ AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation])
+ ;;
+esac
+
if test x"$use_tests" = x"yes"; then
SECP_OPENSSL_CHECK
if test x"$has_openssl_ec" = x"yes"; then
echo " bignum = $set_bignum"
echo " field = $set_field"
echo " scalar = $set_scalar"
+echo " ecmult window size = $set_ecmult_window"
echo
echo " CC = $CC"
echo " CFLAGS = $CFLAGS"
* verification).
*
* A constructed context can safely be used from multiple threads
- * simultaneously, but API call that take a non-const pointer to a context
+ * simultaneously, but API calls that take a non-const pointer to a context
* need exclusive access to it. In particular this is the case for
- * secp256k1_context_destroy and secp256k1_context_randomize.
+ * secp256k1_context_destroy, secp256k1_context_preallocated_destroy,
+ * and secp256k1_context_randomize.
*
* Regarding randomization, either do it once at creation time (in which case
* you do not need any locking for the other calls), or use a read-write lock.
#define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9)
#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8)
-/** Flags to pass to secp256k1_context_create. */
+/** Flags to pass to secp256k1_context_create, secp256k1_context_preallocated_size, and
+ * secp256k1_context_preallocated_create. */
#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY)
#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN)
#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT)
*/
SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp;
-/** Create a secp256k1 context object.
+/** Create a secp256k1 context object (in dynamically allocated memory).
+ *
+ * This function uses malloc to allocate memory. It is guaranteed that malloc is
+ * called at most once for every call of this function. If you need to avoid dynamic
+ * memory allocation entirely, see the functions in secp256k1_preallocated.h.
*
* Returns: a newly created context object.
* In: flags: which parts of the context to initialize.
unsigned int flags
) SECP256K1_WARN_UNUSED_RESULT;
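/* Illustrative usage (editor's sketch, not part of this header): a typical
 * dynamically allocated context, randomized once at creation time as the
 * threading notes earlier in this header recommend. The flag choice and the
 * seed handling are assumptions made for the example only.
 *
 *     secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
 *     unsigned char seed32[32];
 *     ... fill seed32 with 32 bytes of secure randomness ...
 *     if (ctx == NULL || !secp256k1_context_randomize(ctx, seed32)) {
 *         ... handle the error ...
 *     }
 *     ... use ctx, then ...
 *     secp256k1_context_destroy(ctx);
 */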
-/** Copies a secp256k1 context object.
+/** Copy a secp256k1 context object (into dynamically allocated memory).
+ *
+ * This function uses malloc to allocate memory. It is guaranteed that malloc is
+ * called at most once for every call of this function. If you need to avoid dynamic
+ * memory allocation entirely, see the functions in secp256k1_preallocated.h.
*
* Returns: a newly created context object.
* Args: ctx: an existing context to copy (cannot be NULL)
const secp256k1_context* ctx
) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
-/** Destroy a secp256k1 context object.
+/** Destroy a secp256k1 context object (created in dynamically allocated memory).
*
* The context pointer may not be used afterwards.
- * Args: ctx: an existing context to destroy (cannot be NULL)
+ *
+ * The context to destroy must have been created using secp256k1_context_create
+ * or secp256k1_context_clone. If the context has instead been created using
+ * secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone, the
+ * behaviour is undefined. In that case, secp256k1_context_preallocated_destroy must
+ * be used instead.
+ *
+ * Args: ctx: an existing context to destroy, constructed using
+ * secp256k1_context_create or secp256k1_context_clone
*/
SECP256K1_API void secp256k1_context_destroy(
secp256k1_context* ctx
* contexts not initialized for signing; then it will have no effect and return 1.
*
* You should call this after secp256k1_context_create or
- * secp256k1_context_clone, and may call this repeatedly afterwards.
+ * secp256k1_context_clone (and secp256k1_context_preallocated_create or
+ * secp256k1_context_preallocated_clone, resp.), and you may call this repeatedly afterwards.
*/
SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize(
secp256k1_context* ctx,
--- /dev/null
+#ifndef SECP256K1_PREALLOCATED_H
+#define SECP256K1_PREALLOCATED_H
+
+#include "secp256k1.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The module provided by this header file is intended for settings in which it
+ * is not possible or desirable to rely on dynamic memory allocation. It provides
+ * functions for creating, cloning, and destroying secp256k1 context objects in a
+ * contiguous fixed-size block of memory provided by the caller.
+ *
+ * Context objects created by functions in this module can be used like context
+ * objects created by functions in secp256k1.h, i.e., they can be passed to any
+ * API function that expects a context object (see secp256k1.h for details). The
+ * only exception is that context objects created by functions in this module
+ * must be destroyed using secp256k1_context_preallocated_destroy (in this
+ * module) instead of secp256k1_context_destroy (in secp256k1.h).
+ *
+ * It is guaranteed that functions in this module will not call malloc or its
+ * friends realloc, calloc, and free.
+ */
+
+/** Determine the memory size of a secp256k1 context object to be created in
+ * caller-provided memory.
+ *
+ * The purpose of this function is to determine how much memory must be provided
+ * to secp256k1_context_preallocated_create.
+ *
+ * Returns: the required size of the caller-provided memory block
+ * In: flags: which parts of the context to initialize.
+ */
+SECP256K1_API size_t secp256k1_context_preallocated_size(
+ unsigned int flags
+) SECP256K1_WARN_UNUSED_RESULT;
+
+/** Create a secp256k1 context object in caller-provided memory.
+ *
+ * The caller must provide a pointer to a rewritable contiguous block of memory
+ * of size at least secp256k1_context_preallocated_size(flags) bytes, suitably
+ * aligned to hold an object of any type.
+ *
+ * The block of memory is exclusively owned by the created context object during
+ * the lifetime of this context object, which begins with the call to this
+ * function and ends when a call to secp256k1_context_preallocated_destroy
+ * (which destroys the context object again) returns. During the lifetime of the
+ * context object, the caller is obligated not to access this block of memory,
+ * i.e., the caller may not read or write the memory, e.g., by copying the memory
+ * contents to a different location or trying to create a second context object
+ * in the memory. In simpler words, the prealloc pointer (or any pointer derived
+ * from it) should not be used during the lifetime of the context object.
+ *
+ * Returns: a newly created context object.
+ * In: prealloc: a pointer to a rewritable contiguous block of memory of
+ * size at least secp256k1_context_preallocated_size(flags)
+ * bytes, as detailed above (cannot be NULL)
+ * flags: which parts of the context to initialize.
+ *
+ * See also secp256k1_context_randomize (in secp256k1.h)
+ * and secp256k1_context_preallocated_destroy.
+ */
+SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create(
+ void* prealloc,
+ unsigned int flags
+) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
+
+/** Determine the memory size of a secp256k1 context object to be copied into
+ * caller-provided memory.
+ *
+ * Returns: the required size of the caller-provided memory block.
+ * In: ctx: an existing context to copy (cannot be NULL)
+ */
+SECP256K1_API size_t secp256k1_context_preallocated_clone_size(
+ const secp256k1_context* ctx
+) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT;
+
+/** Copy a secp256k1 context object into caller-provided memory.
+ *
+ * The caller must provide a pointer to a rewritable contiguous block of memory
+ * of size at least secp256k1_context_preallocated_clone_size(ctx) bytes, suitably
+ * aligned to hold an object of any type.
+ *
+ * The block of memory is exclusively owned by the created context object during
+ * the lifetime of this context object, see the description of
+ * secp256k1_context_preallocated_create for details.
+ *
+ * Returns: a newly created context object.
+ * Args: ctx: an existing context to copy (cannot be NULL)
+ * In: prealloc: a pointer to a rewritable contiguous block of memory of
+ *                 size at least secp256k1_context_preallocated_clone_size(ctx)
+ * bytes, as detailed above (cannot be NULL)
+ */
+SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone(
+ const secp256k1_context* ctx,
+ void* prealloc
+) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT;
+
+/** Destroy a secp256k1 context object that has been created in
+ * caller-provided memory.
+ *
+ * The context pointer may not be used afterwards.
+ *
+ * The context to destroy must have been created using
+ * secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone.
+ * If the context has instead been created using secp256k1_context_create or
+ * secp256k1_context_clone, the behaviour is undefined. In that case,
+ * secp256k1_context_destroy must be used instead.
+ *
+ * If required, it is the responsibility of the caller to deallocate the block
+ * of memory properly after this function returns, e.g., by calling free on the
+ * preallocated pointer given to secp256k1_context_preallocated_create or
+ * secp256k1_context_preallocated_clone.
+ *
+ * Args: ctx: an existing context to destroy, constructed using
+ * secp256k1_context_preallocated_create or
+ * secp256k1_context_preallocated_clone (cannot be NULL)
+ */
+SECP256K1_API void secp256k1_context_preallocated_destroy(
+ secp256k1_context* ctx
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SECP256K1_PREALLOCATED_H */
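/* Illustrative usage sketch (editor's example, not part of the diff): one way a
 * caller might drive the preallocated-context API declared above. The use of
 * malloc for the backing block, the flag choice, and the include path are
 * assumptions made for the example only; any suitably aligned block of the
 * required size works. */

#include <stdlib.h>
#include "secp256k1_preallocated.h"

static secp256k1_context* example_ctx_create(void **block_out) {
    size_t size = secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    void *block = malloc(size);   /* stand-in for any caller-provided memory */
    secp256k1_context *ctx;
    if (block == NULL) {
        return NULL;
    }
    ctx = secp256k1_context_preallocated_create(block, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    *block_out = block;           /* the caller keeps ownership of the block */
    return ctx;
}

static void example_ctx_destroy(secp256k1_context *ctx, void *block) {
    /* A preallocated context must not be passed to secp256k1_context_destroy. */
    secp256k1_context_preallocated_destroy(ctx);
    free(block);                  /* deallocating the block is the caller's job */
}

/* Cloning into caller-provided memory follows the same pattern, sizing the block
 * with secp256k1_context_preallocated_clone_size and copying with
 * secp256k1_context_preallocated_clone. */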
*/
.syntax unified
- .arch armv7-a
@ eabi attributes - see readelf -A
- .eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes
- .eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no
- .eabi_attribute 10, 0 @ Tag_FP_arch = none
.eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte
.eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP
- .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Aggressive Speed
- .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6
.text
@ Field constants
#ifdef USE_BASIC_CONFIG
#undef USE_ASM_X86_64
+#undef USE_ECMULT_STATIC_PRECOMPUTATION
#undef USE_ENDOMORPHISM
#undef USE_FIELD_10X26
#undef USE_FIELD_5X52
#define USE_SCALAR_INV_BUILTIN 1
#define USE_FIELD_10X26 1
#define USE_SCALAR_8X32 1
+#define ECMULT_WINDOW_SIZE 15
#endif /* USE_BASIC_CONFIG */
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < 20000; i++) {
- secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A, 256);
+ secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256);
secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y);
}
}
#endif
} secp256k1_ecmult_context;
+static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx);
-static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb);
-static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
- const secp256k1_ecmult_context *src, const secp256k1_callback *cb);
+static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc);
+static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src);
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx);
static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx);
*
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
*/
-static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
+static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
int global_sign;
int skew = 0;
int word = 0;
int flip;
int bit;
- secp256k1_scalar neg_s;
+ secp256k1_scalar s;
int not_neg_one;
+
+ VERIFY_CHECK(w > 0);
+ VERIFY_CHECK(size > 0);
+
/* Note that we cannot handle even numbers by negating them to be odd, as is
* done in other implementations, since if our scalars were specified to have
* width < 256 for performance reasons, their negations would have width 256
* {1, 2} we want to add to the scalar when ensuring that it's odd. Further
* complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
* we need to special-case it in this logic. */
- flip = secp256k1_scalar_is_high(&s);
+ flip = secp256k1_scalar_is_high(scalar);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
- bit = flip ^ !secp256k1_scalar_is_even(&s);
+ bit = flip ^ !secp256k1_scalar_is_even(scalar);
/* We check for negative one, since adding 2 to it will cause an overflow */
- secp256k1_scalar_negate(&neg_s, &s);
- not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+ secp256k1_scalar_negate(&s, scalar);
+ not_neg_one = !secp256k1_scalar_is_one(&s);
+ s = *scalar;
secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
/* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
* that we added two to it and flipped it. In fact for -1 these operations are
/* 4 */
u_last = secp256k1_scalar_shr_int(&s, w);
- while (word * w < size) {
+ do {
int sign;
int even;
wnaf[word++] = u_last * global_sign;
u_last = u;
- }
+ } while (word * w < size);
wnaf[word] = u * global_sign;
VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
- secp256k1_scalar sc = *scalar;
/* build wnaf representation for q. */
int rsize = size;
if (size > 128) {
rsize = 128;
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
- secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
- skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
- skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
+ secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
+ skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
+ skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
} else
#endif
{
- skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
+ skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
#ifdef USE_ENDOMORPHISM
skew_lam = 0;
#endif
secp256k1_gej initial;
} secp256k1_ecmult_gen_context;
+static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx);
-static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, const secp256k1_callback* cb);
-static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
- const secp256k1_ecmult_gen_context* src, const secp256k1_callback* cb);
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc);
+static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src);
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx);
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx);
#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H
+#include "util.h"
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif
+
+#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
+ static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec));
+#else
+ static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0;
+#endif
+
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
-static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
+static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
secp256k1_ge prec[1024];
secp256k1_gej gj;
secp256k1_gej nums_gej;
int i, j;
+ size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
+ void* const base = *prealloc;
#endif
if (ctx->prec != NULL) {
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
+ ctx->prec = (secp256k1_ge_storage (*)[64][16])manual_alloc(prealloc, prealloc_size, base, prealloc_size);
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
}
}
#else
- (void)cb;
+ (void)prealloc;
ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context;
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
return ctx->prec != NULL;
}
-static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
- const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) {
- if (src->prec == NULL) {
- dst->prec = NULL;
- } else {
+static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
- memcpy(dst->prec, src->prec, sizeof(*dst->prec));
+ if (src->prec != NULL) {
+ /* We cast to void* first to suppress a -Wcast-align warning. */
+ dst->prec = (secp256k1_ge_storage (*)[64][16])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src));
+ }
#else
- (void)cb;
- dst->prec = src->prec;
+ (void)dst, (void)src;
#endif
- dst->initial = src->initial;
- dst->blind = src->blind;
- }
}
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
-#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
- free(ctx->prec);
-#endif
secp256k1_scalar_clear(&ctx->blind);
secp256k1_gej_clear(&ctx->initial);
ctx->prec = NULL;
#include <string.h>
#include <stdint.h>
+#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
# endif
#else
/* optimal for 128-bit and 256-bit exponents. */
-#define WINDOW_A 5
-/** larger numbers may result in slightly better performance, at the cost of
- exponentially larger precomputed tables. */
-#ifdef USE_ENDOMORPHISM
-/** Two tables for window size 15: 1.375 MiB. */
-#define WINDOW_G 15
-#else
-/** One table for window size 16: 1.375 MiB. */
-#define WINDOW_G 16
+# define WINDOW_A 5
+/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
+ * performance at the cost of an exponentially larger precomputed
+ * table. The exact table size is
+ * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes,
+ * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
+ * be larger due to platform-specific padding and alignment.
+ * If the endomorphism optimization is enabled (USE_ENDOMORPHISM)
+ * two tables of this size are used instead of only one.
+ */
+# define WINDOW_G ECMULT_WINDOW_SIZE
#endif
+
+/* No one will ever need more than a window size of 24. The code might
+ * be correct for larger values of ECMULT_WINDOW_SIZE, but this is not
+ * tested.
+ *
+ * The following limitations are known, and there are probably more:
+ * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect
+ * because the size of the memory object that we allocate (in bytes)
+ * will not fit in a size_t.
+ * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect
+ * because certain expressions will overflow.
+ */
+#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24
+# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24].
+#endif
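/* Editor's note (illustrative arithmetic, not part of the diff): with the
 * configure default ECMULT_WINDOW_SIZE of 15 and sizeof(secp256k1_ge_storage)
 * equal to its typical value of 64, one table occupies
 * (1 << (15 - 2)) * 64 = 524288 bytes (512 KiB); with the endomorphism
 * optimization enabled, two such tables are built, roughly 1 MiB in total. */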
#ifdef USE_ENDOMORPHISM
} \
} while(0)
+static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE =
+ ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
+#ifdef USE_ENDOMORPHISM
+ + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G))
+#endif
+ ;
+
static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) {
ctx->pre_g = NULL;
#ifdef USE_ENDOMORPHISM
#endif
}
-static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) {
+static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) {
secp256k1_gej gj;
+ void* const base = *prealloc;
+ size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
if (ctx->pre_g != NULL) {
return;
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
- ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+ {
+ size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G));
+ /* check for overflow */
+ VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
+ ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
+ }
/* precompute the tables with odd multiples */
secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj);
secp256k1_gej g_128j;
int i;
- ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G));
+ size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G));
+ /* check for overflow */
+ VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)));
+ ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size);
/* calculate 2^128*generator */
g_128j = gj;
#endif
}
-static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst,
- const secp256k1_ecmult_context *src, const secp256k1_callback *cb) {
- if (src->pre_g == NULL) {
- dst->pre_g = NULL;
- } else {
- size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
- dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
- memcpy(dst->pre_g, src->pre_g, size);
+static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) {
+ if (src->pre_g != NULL) {
+ /* We cast to void* first to suppress a -Wcast-align warning. */
+ dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src));
}
#ifdef USE_ENDOMORPHISM
- if (src->pre_g_128 == NULL) {
- dst->pre_g_128 = NULL;
- } else {
- size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G);
- dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size);
- memcpy(dst->pre_g_128, src->pre_g_128, size);
+ if (src->pre_g_128 != NULL) {
+ dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src));
}
#endif
}
}
static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) {
- free(ctx->pre_g);
-#ifdef USE_ENDOMORPHISM
- free(ctx->pre_g_128);
-#endif
secp256k1_ecmult_context_init(ctx);
}
CHECK(carry == 0);
while (bit < 256) {
CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
- }
+ }
#endif
return last_set_bit + 1;
}
size_t entries = n_points + 1;
#endif
size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
- return ((1<<bucket_window) * sizeof(secp256k1_gej) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size);
+ return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}
static int secp256k1_ecmult_pippenger_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space));
state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps));
state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
- buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, (1<<bucket_window) * sizeof(*buckets));
+ buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, sizeof(*buckets) << bucket_window);
if (inp_g_sc != NULL) {
scalars[0] = *inp_g_sc;
#ifdef USE_ENDOMORPHISM
entry_size = 2*entry_size;
#endif
- space_overhead = ((1<<bucket_window) * sizeof(secp256k1_gej) + entry_size + sizeof(struct secp256k1_pippenger_state));
+ space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
if (space_overhead > max_alloc) {
break;
}
#include "basic-config.h"
#include "include/secp256k1.h"
+#include "util.h"
#include "field_impl.h"
#include "scalar_impl.h"
#include "group_impl.h"
int main(int argc, char **argv) {
secp256k1_ecmult_gen_context ctx;
+ void *prealloc, *base;
int inner;
int outer;
FILE* fp;
fprintf(stderr, "Could not open src/ecmult_static_context.h for writing!\n");
return -1;
}
-
+
fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n");
fprintf(fp, "#include \"src/group.h\"\n");
fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n");
fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n");
+ base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE);
+ prealloc = base;
secp256k1_ecmult_gen_context_init(&ctx);
- secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback);
+ secp256k1_ecmult_gen_context_build(&ctx, &prealloc);
for(outer = 0; outer != 64; outer++) {
fprintf(fp,"{\n");
for(inner = 0; inner != 16; inner++) {
}
fprintf(fp,"};\n");
secp256k1_ecmult_gen_context_clear(&ctx);
-
+ free(base);
+
fprintf(fp, "#undef SC\n");
fprintf(fp, "#endif\n");
fclose(fp);
-
+
return 0;
}
/* extract m6 */
"movq %%r8, %q6\n"
: "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
- : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
/* Reduce 385 bits into 258. */
/* extract p4 */
"movq %%r9, %q4\n"
: "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
- : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
/* Reduce 258 bits into 256. */
/* Extract c */
"movq %%r9, %q0\n"
: "=g"(c)
- : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
: "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
uint128_t c;
#ifndef _SECP256K1_SCRATCH_IMPL_H_
#define _SECP256K1_SCRATCH_IMPL_H_
+#include "util.h"
#include "scratch.h"
-/* Using 16 bytes alignment because common architectures never have alignment
- * requirements above 8 for any of the types we care about. In addition we
- * leave some room because currently we don't care about a few bytes.
- * TODO: Determine this at configure time. */
-#define ALIGNMENT 16
-
static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size) {
secp256k1_scratch* ret = (secp256k1_scratch*)checked_malloc(error_callback, sizeof(*ret));
if (ret != NULL) {
static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t size) {
void *ret;
size_t frame = scratch->frame - 1;
- size = ((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT;
+ size = ROUND_TO_ALIGN(size);
if (scratch->frame == 0 || size + scratch->offset[frame] > scratch->frame_size[frame]) {
return NULL;
**********************************************************************/
#include "include/secp256k1.h"
+#include "include/secp256k1_preallocated.h"
#include "util.h"
#include "num_impl.h"
};
const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_;
-secp256k1_context* secp256k1_context_create(unsigned int flags) {
- secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context));
+size_t secp256k1_context_preallocated_size(unsigned int flags) {
+ size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
+
+ if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
+ secp256k1_callback_call(&default_illegal_callback,
+ "Invalid flags");
+ return 0;
+ }
+
+ if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
+ ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
+ }
+ if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
+ ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
+ }
+ return ret;
+}
+
+size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) {
+ size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context));
+ VERIFY_CHECK(ctx != NULL);
+ if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) {
+ ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE;
+ }
+ if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) {
+ ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE;
+ }
+ return ret;
+}
+
+secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) {
+ void* const base = prealloc;
+ size_t prealloc_size;
+ secp256k1_context* ret;
+
+ VERIFY_CHECK(prealloc != NULL);
+ prealloc_size = secp256k1_context_preallocated_size(flags);
+ ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size);
ret->illegal_callback = default_illegal_callback;
ret->error_callback = default_error_callback;
if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
secp256k1_callback_call(&ret->illegal_callback,
"Invalid flags");
- free(ret);
return NULL;
}
secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
- secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback);
+ secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc);
}
if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
- secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback);
+ secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc);
+ }
+
+ return (secp256k1_context*) ret;
+}
+
+secp256k1_context* secp256k1_context_create(unsigned int flags) {
+ size_t const prealloc_size = secp256k1_context_preallocated_size(flags);
+ secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, prealloc_size);
+ if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) {
+ free(ctx);
+ return NULL;
}
+ return ctx;
+}
+
+secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) {
+ size_t prealloc_size;
+ secp256k1_context* ret;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(prealloc != NULL);
+
+ prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
+ ret = (secp256k1_context*)prealloc;
+ memcpy(ret, ctx, prealloc_size);
+ secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx);
+ secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx);
return ret;
}
secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
- secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context));
- ret->illegal_callback = ctx->illegal_callback;
- ret->error_callback = ctx->error_callback;
- secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback);
- secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback);
+ secp256k1_context* ret;
+ size_t prealloc_size;
+
+ VERIFY_CHECK(ctx != NULL);
+ prealloc_size = secp256k1_context_preallocated_clone_size(ctx);
+ ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size);
+ ret = secp256k1_context_preallocated_clone(ctx, ret);
return ret;
}
-void secp256k1_context_destroy(secp256k1_context* ctx) {
+void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) {
CHECK(ctx != secp256k1_context_no_precomp);
if (ctx != NULL) {
secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
+ }
+}
+void secp256k1_context_destroy(secp256k1_context* ctx) {
+ if (ctx != NULL) {
+ secp256k1_context_preallocated_destroy(ctx);
free(ctx);
}
}
secp256k1_scalar_negate(&sec, &sec);
secp256k1_scalar_get_b32(seckey, &sec);
+ secp256k1_scalar_clear(&sec);
return 1;
}
#include "secp256k1.c"
#include "include/secp256k1.h"
+#include "include/secp256k1_preallocated.h"
#include "testrand_impl.h"
#ifdef ENABLE_OPENSSL_TESTS
} while(1);
}
-void run_context_tests(void) {
+void run_context_tests(int use_prealloc) {
secp256k1_pubkey pubkey;
secp256k1_pubkey zero_pubkey;
secp256k1_ecdsa_signature sig;
unsigned char ctmp[32];
int32_t ecount;
int32_t ecount2;
- secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
- secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
- secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
- secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *none;
+ secp256k1_context *sign;
+ secp256k1_context *vrfy;
+ secp256k1_context *both;
+ void *none_prealloc = NULL;
+ void *sign_prealloc = NULL;
+ void *vrfy_prealloc = NULL;
+ void *both_prealloc = NULL;
secp256k1_gej pubj;
secp256k1_ge pub;
secp256k1_scalar msg, key, nonce;
secp256k1_scalar sigr, sigs;
+ if (use_prealloc) {
+ none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
+ sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN));
+ vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
+ both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));
+ CHECK(none_prealloc != NULL);
+ CHECK(sign_prealloc != NULL);
+ CHECK(vrfy_prealloc != NULL);
+ CHECK(both_prealloc != NULL);
+ none = secp256k1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE);
+ sign = secp256k1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN);
+ vrfy = secp256k1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY);
+ both = secp256k1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ } else {
+ none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ }
+
memset(&zero_pubkey, 0, sizeof(zero_pubkey));
ecount = 0;
secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL);
CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+ /* check if sizes for cloning are consistent */
+ CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE));
+ CHECK(secp256k1_context_preallocated_clone_size(sign) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN));
+ CHECK(secp256k1_context_preallocated_clone_size(vrfy) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY));
+ CHECK(secp256k1_context_preallocated_clone_size(both) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY));
+
/*** clone and destroy all of them to make sure cloning was complete ***/
{
secp256k1_context *ctx_tmp;
- ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp);
- ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp);
- ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp);
- ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp);
+ if (use_prealloc) {
+ /* clone into a non-preallocated context and then again into a new preallocated one. */
+ ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(none_prealloc); none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL);
+ ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, none_prealloc); secp256k1_context_destroy(ctx_tmp);
+
+ ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(sign_prealloc); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL);
+ ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, sign_prealloc); secp256k1_context_destroy(ctx_tmp);
+
+ ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(vrfy_prealloc); vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, vrfy_prealloc); secp256k1_context_destroy(ctx_tmp);
+
+ ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(both_prealloc); both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL);
+ ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, both_prealloc); secp256k1_context_destroy(ctx_tmp);
+ } else {
+ /* clone into a preallocated context and then again into a new non-preallocated one. */
+ void *prealloc_tmp;
+
+ prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL);
+ ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(prealloc_tmp);
+
+ prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL);
+ ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(prealloc_tmp);
+
+ prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(prealloc_tmp);
+
+ prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL);
+ ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, prealloc_tmp); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp);
+ free(prealloc_tmp);
+ }
}
/* Verify that the error callback makes it across the clone. */
secp256k1_context_set_illegal_callback(vrfy, NULL, NULL);
secp256k1_context_set_illegal_callback(sign, NULL, NULL);
- /* This shouldn't leak memory, due to already-set tests. */
- secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL);
- secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL);
-
/* obtain a working nonce */
do {
random_scalar_order_test(&nonce);
CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));
/* cleanup */
- secp256k1_context_destroy(none);
- secp256k1_context_destroy(sign);
- secp256k1_context_destroy(vrfy);
- secp256k1_context_destroy(both);
+ if (use_prealloc) {
+ secp256k1_context_preallocated_destroy(none);
+ secp256k1_context_preallocated_destroy(sign);
+ secp256k1_context_preallocated_destroy(vrfy);
+ secp256k1_context_preallocated_destroy(both);
+ free(none_prealloc);
+ free(sign_prealloc);
+ free(vrfy_prealloc);
+ free(both_prealloc);
+ } else {
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+ }
/* Defined as no-op. */
secp256k1_context_destroy(NULL);
+ secp256k1_context_preallocated_destroy(NULL);
+
}
void run_scratch_tests(void) {
}
bits = 128;
#endif
- skew = secp256k1_wnaf_const(wnaf, num, w, bits);
+ skew = secp256k1_wnaf_const(wnaf, &num, w, bits);
for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) {
secp256k1_scalar t;
printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
/* initialize */
- run_context_tests();
+ run_context_tests(0);
+ run_context_tests(1);
run_scratch_tests();
ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
if (secp256k1_rand_bits(1)) {
return ret;
}
+#if defined(__BIGGEST_ALIGNMENT__)
+#define ALIGNMENT __BIGGEST_ALIGNMENT__
+#else
+/* Using 16 bytes alignment because common architectures never have alignment
+ * requirements above 8 for any of the types we care about. In addition we
+ * leave some room because currently we don't care about a few bytes. */
+#define ALIGNMENT 16
+#endif
+
+#define ROUND_TO_ALIGN(size) (((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT)
+
+/* Assume there is a contiguous memory object with bounds [base, base + max_size)
+ * of which the memory range [base, *prealloc_ptr) is already allocated for usage,
+ * where *prealloc_ptr is an aligned pointer. In that setting, this function
+ * reserves the subobject [*prealloc_ptr, *prealloc_ptr + alloc_size) of
+ * alloc_size bytes by increasing *prealloc_ptr accordingly, taking into account
+ * alignment requirements.
+ *
+ * The function returns an aligned pointer to the newly allocated subobject.
+ *
+ * This is useful for manual memory management: if we're simply given a block
+ * [base, base + max_size), the caller can use this function to allocate memory
+ * in this block and keep track of the current allocation state with *prealloc_ptr.
+ *
+ * It is VERIFY_CHECKed that there is enough space left in the memory object and
+ * *prealloc_ptr is aligned relative to base.
+ */
+static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_size, void* base, size_t max_size) {
+ size_t aligned_alloc_size = ROUND_TO_ALIGN(alloc_size);
+ void* ret;
+ VERIFY_CHECK(prealloc_ptr != NULL);
+ VERIFY_CHECK(*prealloc_ptr != NULL);
+ VERIFY_CHECK(base != NULL);
+ VERIFY_CHECK((unsigned char*)*prealloc_ptr >= (unsigned char*)base);
+ VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0);
+ VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size);
+ ret = *prealloc_ptr;
+ *((unsigned char**)prealloc_ptr) += aligned_alloc_size;
+ return ret;
+}
+
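/* Illustrative sketch (editor's example, not part of the diff): carving two
 * sub-objects out of one backing block with manual_alloc. The object sizes and
 * the use of malloc for the block are assumptions made for the example only.
 *
 *     size_t max_size = ROUND_TO_ALIGN(100) + ROUND_TO_ALIGN(8 * sizeof(uint64_t));
 *     void *base = malloc(max_size);      (backing block [base, base + max_size))
 *     void *prealloc = base;              (allocation cursor, starts at base)
 *     unsigned char *buf = (unsigned char*)manual_alloc(&prealloc, 100, base, max_size);
 *     uint64_t *nums = (uint64_t*)manual_alloc(&prealloc, 8 * sizeof(uint64_t), base, max_size);
 *     ... use buf and nums; both live inside the single block ...
 *     free(base);                         (one free releases both sub-objects)
 */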
/* Macro for restrict, when available and not in a VERIFY build. */
#if defined(SECP256K1_BUILD) && defined(VERIFY)
# define SECP256K1_RESTRICT