--- /dev/null
+*.o
+*.a
+*.so
+*.d
+depinst/
+depsrc/
+README.html
+doxygen/
+src/gadgetlib2/examples/tutorial
+src/gadgetlib2/tests/gadgetlib2_test
+
+src/algebra/curves/tests/test_bilinearity
+src/algebra/curves/tests/test_groups
+src/algebra/fields/tests/test_fields
+src/common/routing_algorithms/profiling/profile_routing_algorithms
+src/common/routing_algorithms/tests/test_routing_algorithms
+src/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram
+src/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget
+src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget
+src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets
+src/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets
+src/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget
+src/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget
+src/reductions/ram_to_r1cs/examples/demo_arithmetization
+src/relations/arithmetic_programs/qap/tests/test_qap
+src/relations/arithmetic_programs/ssp/tests/test_ssp
+src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd
+src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd
+src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd
+src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd
+src/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark
+src/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark
+src/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark
+src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark
+src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark
+src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark
+src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark
+src/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark
+src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark
+src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark
+src/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark
+src/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark
+src/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark
+src/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark
--- /dev/null
+SCIPR Lab:
+ Eli Ben-Sasson
+ Alessandro Chiesa
+ Daniel Genkin
+ Shaul Kfir
+ Eran Tromer
+ Madars Virza
+
+External contributors:
+ Michael Backes
+ Manuel Barbosa
+ Dario Fiore
+ Jens Groth
+ Joshua A. Kroll
+ Shigeo MITSUNARI
+ Raphael Reischuk
+ Tadanori TERUYA
+ Sean Bowe
+ Daira Hopwood
--- /dev/null
+The libsnark library is developed by SCIPR Lab (http://scipr-lab.org)
+and contributors.
+
+Copyright (c) 2012-2014 SCIPR Lab and contributors (see AUTHORS file).
+
+All files, with the exceptions below, are released under the MIT License:
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
--- /dev/null
+#********************************************************************************
+# Makefile for the libsnark library.
+#********************************************************************************
+#* @author This file is part of libsnark, developed by SCIPR Lab
+#* and contributors (see AUTHORS).
+#* @copyright MIT license (see LICENSE file)
+#*******************************************************************************/
+
+# To override these, use "make OPTFLAGS=..." etc.
+CURVE = BN128
+OPTFLAGS = -O2 -march=native -mtune=native
+FEATUREFLAGS = -DUSE_ASM -DMONTGOMERY_OUTPUT
+
+# Initialize this using "CXXFLAGS=... make". The makefile appends to that.
+CXXFLAGS += -std=c++11 -Wall -Wextra -Wno-unused-parameter -Wno-comment -Wfatal-errors $(OPTFLAGS) $(FEATUREFLAGS) -DCURVE_$(CURVE)
+
+DEPSRC = depsrc
+DEPINST = depinst
+
+CXXFLAGS += -I$(DEPINST)/include -Isrc
+LDFLAGS += -L$(DEPINST)/lib -Wl,-rpath,$(DEPINST)/lib
+LDLIBS += -lgmpxx -lgmp -lboost_program_options
+# OpenSSL and its dependencies (needed explicitly for static builds):
+LDLIBS += -lcrypto -ldl -lz
+# List of .a files to include within libsnark.a and libsnark.so:
+AR_LIBS =
+# List of library files to install:
+INSTALL_LIBS = $(LIB_FILE)
+# Sentinel file to check existence of this directory (since directories don't work as a Make dependency):
+DEPINST_EXISTS = $(DEPINST)/.exists
+
+
+COMPILE_GTEST :=
+ifneq ($(NO_GTEST),1)
+ GTESTDIR=/usr/src/gtest
+# Compile GTest from sourcecode if we can (e.g., Ubuntu). Otherwise use precompiled one (e.g., Fedora).
+# See https://code.google.com/p/googletest/wiki/FAQ#Why_is_it_not_recommended_to_install_a_pre-compiled_copy_of_Goog .
+ COMPILE_GTEST :=$(shell test -d $(GTESTDIR) && echo -n 1)
+ GTEST_LDLIBS += -lgtest -lpthread
+endif
+
+ifneq ($(NO_SUPERCOP),1)
+ SUPERCOP_LDLIBS += -lsupercop
+ INSTALL_LIBS += depinst/lib/libsupercop.a
+ # Would have been nicer to roll supercop into libsnark.a ("AR_LIBS += $(DEPINST)/lib/libsupercop.a"), but it doesn't support position-independent code (libsnark issue #20).
+endif
+
+LIB_SRCS = \
+ src/algebra/curves/alt_bn128/alt_bn128_g1.cpp \
+ src/algebra/curves/alt_bn128/alt_bn128_g2.cpp \
+ src/algebra/curves/alt_bn128/alt_bn128_init.cpp \
+ src/algebra/curves/alt_bn128/alt_bn128_pairing.cpp \
+ src/algebra/curves/alt_bn128/alt_bn128_pp.cpp \
+ src/common/profiling.cpp \
+ src/common/utils.cpp \
+ src/gadgetlib1/constraint_profiling.cpp \
+
+ifeq ($(CURVE),BN128)
+ LIB_SRCS += \
+ src/algebra/curves/bn128/bn128_g1.cpp \
+ src/algebra/curves/bn128/bn128_g2.cpp \
+ src/algebra/curves/bn128/bn128_gt.cpp \
+ src/algebra/curves/bn128/bn128_init.cpp \
+ src/algebra/curves/bn128/bn128_pairing.cpp \
+ src/algebra/curves/bn128/bn128_pp.cpp
+
+ CXXFLAGS += -DBN_SUPPORT_SNARK
+ AR_LIBS += $(DEPINST)/lib/libzm.a
+endif
+
+# FIXME: most of these are broken due to removed code.
+DISABLED_EXECUTABLES = \
+ src/algebra/curves/tests/test_bilinearity \
+ src/algebra/curves/tests/test_groups \
+ src/algebra/fields/tests/test_fields \
+ src/common/routing_algorithms/profiling/profile_routing_algorithms \
+ src/common/routing_algorithms/tests/test_routing_algorithms \
+ src/gadgetlib1/gadgets/cpu_checkers/fooram/examples/test_fooram \
+ src/gadgetlib1/gadgets/hashes/knapsack/tests/test_knapsack_gadget \
+ src/gadgetlib1/gadgets/hashes/sha256/tests/test_sha256_gadget \
+ src/gadgetlib1/gadgets/merkle_tree/tests/test_merkle_tree_gadgets \
+ src/gadgetlib1/gadgets/routing/profiling/profile_routing_gadgets \
+ src/gadgetlib1/gadgets/set_commitment/tests/test_set_commitment_gadget \
+ src/gadgetlib1/gadgets/verifiers/tests/test_r1cs_ppzksnark_verifier_gadget \
+ src/reductions/ram_to_r1cs/examples/demo_arithmetization \
+ src/relations/arithmetic_programs/qap/tests/test_qap \
+ src/relations/arithmetic_programs/ssp/tests/test_ssp \
+ src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/profiling/profile_r1cs_mp_ppzkpcd \
+ src/zk_proof_systems/pcd/r1cs_pcd/r1cs_mp_ppzkpcd/tests/test_r1cs_mp_ppzkpcd \
+ src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/profiling/profile_r1cs_sp_ppzkpcd \
+ src/zk_proof_systems/pcd/r1cs_pcd/r1cs_sp_ppzkpcd/tests/test_r1cs_sp_ppzkpcd \
+ src/zk_proof_systems/ppzksnark/bacs_ppzksnark/profiling/profile_bacs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/bacs_ppzksnark/tests/test_bacs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/profiling/profile_r1cs_gg_ppzksnark \
+ src/zk_proof_systems/ppzksnark/r1cs_gg_ppzksnark/tests/test_r1cs_gg_ppzksnark \
+ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/tests/test_r1cs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_generator \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_prover \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/examples/demo_ram_ppzksnark_verifier \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/profiling/profile_ram_ppzksnark \
+ src/zk_proof_systems/ppzksnark/ram_ppzksnark/tests/test_ram_ppzksnark \
+ src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/profiling/profile_tbcs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/tbcs_ppzksnark/tests/test_tbcs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/uscs_ppzksnark/profiling/profile_uscs_ppzksnark \
+ src/zk_proof_systems/ppzksnark/uscs_ppzksnark/tests/test_uscs_ppzksnark \
+ src/zk_proof_systems/zksnark/ram_zksnark/profiling/profile_ram_zksnark \
+ src/zk_proof_systems/zksnark/ram_zksnark/tests/test_ram_zksnark
+
+EXECUTABLES = \
+ src/algebra/fields/tests/test_bigint
+
+EXECUTABLES_WITH_GTEST = \
+ src/gadgetlib2/examples/tutorial \
+ src/gadgetlib2/tests/gadgetlib2_test
+
+EXECUTABLES_WITH_SUPERCOP = \
+ src/zk_proof_systems/ppzkadsnark/r1cs_ppzkadsnark/examples/demo_r1cs_ppzkadsnark
+
+DOCS = README.html
+
+LIBSNARK_A = libsnark.a
+
+# For documentation of the following options, see README.md .
+
+ifeq ($(NO_PROCPS),1)
+ CXXFLAGS += -DNO_PROCPS
+else
+ LDLIBS += -lprocps
+endif
+
+ifeq ($(LOWMEM),1)
+ CXXFLAGS += -DLOWMEM
+endif
+
+ifeq ($(PROFILE_OP_COUNTS),1)
+ STATIC = 1
+ CXXFLAGS += -DPROFILE_OP_COUNTS
+endif
+
+ifeq ($(STATIC),1)
+ CXXFLAGS += -static -DSTATIC
+else
+ CXXFLAGS += -fPIC
+endif
+
+ifeq ($(MULTICORE),1)
+ CXXFLAGS += -DMULTICORE -fopenmp
+endif
+
+ifeq ($(CPPDEBUG),1)
+ CXXFLAGS += -D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC
+ DEBUG = 1
+endif
+
+ifeq ($(DEBUG),1)
+ CXXFLAGS += -DDEBUG -ggdb3
+endif
+
+ifeq ($(PERFORMANCE),1)
+ OPTFLAGS = -O3 -march=native -mtune=native
+ CXXFLAGS += -DNDEBUG
+ # Enable link-time optimization:
+ CXXFLAGS += -flto -fuse-linker-plugin
+ LDFLAGS += -flto
+endif
+
+LIB_OBJS =$(patsubst %.cpp,%.o,$(LIB_SRCS))
+EXEC_OBJS =$(patsubst %,%.o,$(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP))
+
+all: \
+ $(if $(NO_GTEST),,$(EXECUTABLES_WITH_GTEST)) \
+ $(if $(NO_SUPERCOP),,$(EXECUTABLES_WITH_SUPERCOP)) \
+ $(EXECUTABLES) \
+ $(if $(NO_DOCS),,doc)
+
+doc: $(DOCS)
+
+$(DEPINST_EXISTS):
+ # Create placeholder directories for installed dependencies. Some make settings (including the default) require actually running ./prepare-depends.sh to populate this directory.
+ mkdir -p $(DEPINST)/lib $(DEPINST)/include
+ touch $@
+
+# In order to detect changes to #include dependencies. -MMD below generates a .d file for each .o file. Include the .d file.
+-include $(patsubst %.o,%.d, $(LIB_OBJS) $(EXEC_OBJS) )
+
+$(LIB_OBJS) $(EXEC_OBJS): %.o: %.cpp
+ $(CXX) -o $@ $< -c -MMD $(CXXFLAGS)
+
+LIBGTEST_A = $(DEPINST)/lib/libgtest.a
+
+$(LIBGTEST_A): $(GTESTDIR)/src/gtest-all.cc $(DEPINST_EXISTS)
+ $(CXX) -o $(DEPINST)/lib/gtest-all.o -I $(GTESTDIR) -c -isystem $(GTESTDIR)/include $< $(CXXFLAGS)
+ $(AR) -rv $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o
+
+# libsnark.a will contain all of our relevant object files, and we also mash in the .a files of relevant dependencies built by ./prepare-depends.sh
+$(LIBSNARK_A): $(LIB_OBJS) $(AR_LIBS)
+ ( \
+ echo "create $(LIBSNARK_A)"; \
+ echo "addmod $(LIB_OBJS)"; \
+ if [ -n "$(AR_LIBS)" ]; then for AR_LIB in $(AR_LIBS); do echo addlib $$AR_LIB; done; fi; \
+ echo "save"; \
+ echo "end"; \
+ ) | $(AR) -M
+ $(AR) s $(LIBSNARK_A)
+
+libsnark.so: $(LIBSNARK_A) $(DEPINST_EXISTS)
+ $(CXX) -o $@ --shared -Wl,--whole-archive $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) -Wl,--no-whole-archive $(LDLIBS)
+
+src/gadgetlib2/tests/gadgetlib2_test: \
+ src/gadgetlib2/tests/adapters_UTEST.cpp \
+ src/gadgetlib2/tests/constraint_UTEST.cpp \
+ src/gadgetlib2/tests/gadget_UTEST.cpp \
+ src/gadgetlib2/tests/integration_UTEST.cpp \
+ src/gadgetlib2/tests/protoboard_UTEST.cpp \
+ src/gadgetlib2/tests/variable_UTEST.cpp
+
+$(EXECUTABLES): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS)
+	$(CXX) -o $@   $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(LDLIBS)
+
+$(EXECUTABLES_WITH_GTEST): %: %.o $(LIBSNARK_A) $(if $(COMPILE_GTEST),$(LIBGTEST_A)) $(DEPINST_EXISTS)
+	$(CXX) -o $@   $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(GTEST_LDLIBS) $(LDLIBS)
+
+$(EXECUTABLES_WITH_SUPERCOP): %: %.o $(LIBSNARK_A) $(DEPINST_EXISTS)
+	$(CXX) -o $@   $@.o $(LIBSNARK_A) $(CXXFLAGS) $(LDFLAGS) $(SUPERCOP_LDLIBS) $(LDLIBS)
+
+
+ifeq ($(STATIC),1)
+LIB_FILE = $(LIBSNARK_A)
+else
+LIB_FILE = libsnark.so
+endif
+
+lib: $(LIB_FILE)
+
+$(DOCS): %.html: %.md
+ markdown_py -f $@ $^ -x toc -x extra --noisy
+# TODO: Would be nice to enable "-x smartypants" but Ubuntu 12.04 doesn't support that.
+# TODO: switch to redcarpet, to produce same output as GitHub's processing of README.md. But what about TOC?
+
+ifeq ($(PREFIX),)
+install:
+ $(error Please provide PREFIX. E.g. make install PREFIX=/usr)
+else
+HEADERS_SRC=$(shell find src -name '*.hpp' -o -name '*.tcc')
+HEADERS_DEST=$(patsubst src/%,$(PREFIX)/include/libsnark/%,$(HEADERS_SRC))
+
+$(HEADERS_DEST): $(PREFIX)/include/libsnark/%: src/%
+ mkdir -p $(shell dirname $@)
+ cp $< $@
+
+install: $(INSTALL_LIBS) $(HEADERS_DEST) $(DEPINST_EXISTS)
+ mkdir -p $(PREFIX)/lib
+ cp -v $(INSTALL_LIBS) $(PREFIX)/lib/
+ cp -rv $(DEPINST)/include $(PREFIX)
+endif
+
+doxy:
+ doxygen doxygen.conf
+
+# Clean generated files, except locally-compiled dependencies
+clean:
+ $(RM) \
+ $(LIB_OBJS) $(EXEC_OBJS) \
+ $(EXECUTABLES) $(EXECUTABLES_WITH_GTEST) $(EXECUTABLES_WITH_SUPERCOP) \
+ $(DOCS) \
+ ${patsubst %.o,%.d,${LIB_OBJS} ${EXEC_OBJS}} \
+		libsnark.so $(LIBSNARK_A)
+	$(RM) -fr doxygen/
+ $(RM) $(LIBGTEST_A) $(DEPINST)/lib/gtest-all.o
+
+# Clean all, including locally-compiled dependencies
+clean-all: clean
+ $(RM) -fr $(DEPSRC) $(DEPINST)
+
+.PHONY: all clean clean-all doc doxy lib install
--- /dev/null
+libsnark: a C++ library for zkSNARK proofs
+================================================================================
+
+--------------------------------------------------------------------------------
+Authors
+--------------------------------------------------------------------------------
+
+The libsnark library is developed by the [SCIPR Lab] project and contributors
+and is released under the MIT License (see the [LICENSE] file).
+
+Copyright (c) 2012-2014 SCIPR Lab and contributors (see [AUTHORS] file).
+
+--------------------------------------------------------------------------------
+[TOC]
+
+<!---
+  NOTE: the file you are reading is in Markdown format, which is fairly readable
+ directly, but can be converted into an HTML file with much nicer formatting.
+ To do so, run "make doc" (this requires the python-markdown package) and view
+ the resulting file README.html. Alternatively, view the latest HTML version at
+ https://github.com/scipr-lab/libsnark .
+-->
+
+--------------------------------------------------------------------------------
+Overview
+--------------------------------------------------------------------------------
+
+This library implements __zkSNARK__ schemes, which are a cryptographic method
+for proving/verifying, in zero knowledge, the integrity of computations.
+
+A computation can be expressed as an NP statement, in forms such as the following:
+
+- "The C program _foo_, when executed, returns exit code 0 if given the input _bar_ and some additional input _qux_."
+- "The Boolean circuit _foo_ is satisfiable by some input _qux_."
+- "The arithmetic circuit _foo_ accepts the partial assignment _bar_, when extended into some full assignment _qux_."
+- "The set of constraints _foo_ is satisfiable by the partial assignment _bar_, when extended into some full assignment _qux_."
+
+A prover who knows the witness for the NP statement (i.e., a satisfying input/assignment) can produce a short proof attesting to the truth of the NP statement. This proof can be verified by anyone, and offers the following properties.
+
+- __Zero knowledge:__
+  the verifier learns nothing from the proof besides the truth of the statement (i.e., the value _qux_, in the above examples, remains secret).
+- __Succinctness:__
+ the proof is short and easy to verify.
+- __Non-interactivity:__
+ the proof is a string (i.e. it does not require back-and-forth interaction between the prover and the verifier).
+- __Soundness:__
+ the proof is computationally sound (i.e., it is infeasible to fake a proof of a false NP statement). Such a proof system is also called an _argument_.
+- __Proof of knowledge:__
+ the proof attests not just that the NP statement is true, but also that the
+ prover knows why (e.g., knows a valid _qux_).
+
+These properties are summarized by the _zkSNARK_ acronym, which stands for _Zero-Knowledge Succinct Non-interactive ARgument of Knowledge_ (though zkSNARKs are also known as
+_succinct non-interactive computationally-sound zero-knowledge proofs of knowledge_).
+For formal definitions and theoretical discussions about these, see
+\[BCCT12], \[BCIOP13], and the references therein.
+
+The libsnark library currently provides a C++ implementation of:
+
+1. General-purpose proof systems:
+ 1. A preprocessing zkSNARK for the NP-complete language "R1CS"
+ (_Rank-1 Constraint Systems_), which is a language that is similar to arithmetic
+ circuit satisfiability.
+ 2. A preprocessing SNARK for a language of arithmetic circuits, "BACS"
+ (_Bilinear Arithmetic Circuit Satisfiability_). This simplifies the writing
+ of NP statements when the additional flexibility of R1CS is not needed.
+ Internally, it reduces to R1CS.
+ 3. A preprocessing SNARK for the language "USCS"
+ (_Unitary-Square Constraint Systems_). This abstracts and implements the core
+     contribution of \[DFGK14].
+ 4. A preprocessing SNARK for a language of Boolean circuits, "TBCS"
+ (_Two-input Boolean Circuit Satisfiability_). Internally, it reduces to USCS.
+ This is much more efficient than going through R1CS.
+  5. ADSNARK, a preprocessing SNARK for proving statements on authenticated
+ data, as described in \[BBFR15].
+ 6. Proof-Carrying Data (PCD). This uses recursive composition of SNARKs, as
+ explained in \[BCCT13] and optimized in \[BCTV14b].
+2. Gadget libraries (gadgetlib1 and gadgetlib2) for constructing R1CS
+ instances out of modular "gadget" classes.
+3. Examples of applications that use the above proof systems to prove
+ statements about:
+ 1. Several toy examples.
+ 2. Execution of TinyRAM machine code, as explained in \[BCTV14a] and
+ \[BCGTV13]. (Such machine code can be obtained, e.g., by compiling from C.)
+ This is easily adapted to any other Random Access Machine that satisfies a
+ simple load-store interface.
+  3. Scalable verification of TinyRAM machine code, using Proof-Carrying Data, as explained in \[BCTV14b].
+  4. Zero-knowledge cluster MapReduce, as explained in \[CTV15].
+
+The zkSNARK construction implemented by libsnark follows, extends, and
+optimizes the approach described in \[BCTV14a], itself an extension of
+\[BCGTV13], following the approach of \[BCIOP13] and \[GGPR13]. An alternative
+implementation of the basic approach is the _Pinocchio_ system of \[PGHR13].
+See these references for discussions of efficiency aspects that arise in
+practical use of such constructions, as well as security and trust
+considerations.
+
+This scheme is a _preprocessing zkSNARK_ (_ppzkSNARK_): before proofs can be
+created and verified, one needs to first decide on a size/circuit/system
+representing the NP statements to be proved, and run a _generator_ algorithm to
+create corresponding public parameters (a long proving key and a short
+verification key).
+
+Using the library involves the following high-level steps:
+
+1. Express the statements to be proved as an R1CS (or any of the other
+ languages above, such as arithmetic circuits, Boolean circuits, or TinyRAM).
+ This is done by writing C++ code that constructs an R1CS, and linking this code
+   together with libsnark.
+2. Use libsnark's generator algorithm to create the public parameters for this
+ statement (once and for all).
+3. Use libsnark's prover algorithm to create proofs of true statements about
+ the satisfiability of the R1CS.
+4. Use libsnark's verifier algorithm to check proofs for alleged statements.
+
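+The following minimal sketch shows these four steps end to end for a toy
+statement ("I know x such that x*x = y"). It is only an illustration: it assumes
+the `alt_bn128` curve, the `src/`-relative include paths set up by the Makefile,
+and the `protoboard`/`r1cs_ppzksnark_*` interfaces as found in the library's
+headers; exact names and header locations may differ between versions.
+
+    // Include paths assume -Isrc, as set by the Makefile.
+    #include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+    #include "gadgetlib1/pb_variable.hpp"
+    #include "gadgetlib1/protoboard.hpp"
+    #include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"
+
+    using namespace libsnark;
+
+    typedef alt_bn128_pp ppT;  // the curve; see "Elliptic curve choices" below
+    typedef Fr<ppT> FieldT;    // the prime field over which the R1CS is defined
+
+    int main()
+    {
+        ppT::init_public_params();
+
+        // Step 1: express the statement as an R1CS, here via gadgetlib1's protoboard.
+        protoboard<FieldT> pb;
+        pb_variable<FieldT> y, x;
+        y.allocate(pb, "y");    // public (primary) input
+        x.allocate(pb, "x");    // secret (auxiliary) input
+        pb.set_input_sizes(1);  // the first allocated variable is the public input
+        pb.add_r1cs_constraint(r1cs_constraint<FieldT>(x, x, y), "x*x=y");
+        pb.val(y) = FieldT(9);  // instance
+        pb.val(x) = FieldT(3);  // witness
+
+        // Step 2: generator (run once per constraint system).
+        const r1cs_ppzksnark_keypair<ppT> keypair =
+            r1cs_ppzksnark_generator<ppT>(pb.get_constraint_system());
+
+        // Step 3: prover.
+        const r1cs_ppzksnark_proof<ppT> proof =
+            r1cs_ppzksnark_prover<ppT>(keypair.pk, pb.primary_input(), pb.auxiliary_input());
+
+        // Step 4: verifier.
+        const bool ok =
+            r1cs_ppzksnark_verifier_strong_IC<ppT>(keypair.vk, pb.primary_input(), proof);
+        return ok ? 0 : 1;
+    }
+
+A real application would replace the single constraint with a constraint system
+built from gadgets (see the gadget libraries below), and would serialize the keys
+and proof rather than keeping everything in one process.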
+
+--------------------------------------------------------------------------------
+The NP-complete language R1CS
+--------------------------------------------------------------------------------
+
+The ppzkSNARK supports proving/verifying membership in a specific NP-complete
+language: R1CS (*rank-1 constraint systems*). An instance of the language is
+specified by a set of equations over a prime field F, and each equation looks like:
+    < A, (1,X) > * < B, (1,X) > = < C, (1,X) >
+where A, B, C are vectors over F, and X is a vector of variables.
+
+In particular, arithmetic (as well as boolean) circuits are easily reducible to
+this language by converting each gate into a rank-1 constraint. See \[BCGTV13]
+Appendix E (and "System of Rank 1 Quadratic Equations") for more details about this.
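+
+For instance, the single constraint (3 + x) * (2*x) = y over the variable vector
+X = (x, y) has A = (3, 1, 0), B = (0, 2, 0), C = (0, 0, 1) with respect to
+(1, x, y). The following minimal sketch shows how such a constraint is written
+using the relations-layer types (the gate itself is hypothetical; header paths
+follow the directory layout described below and may differ between versions):
+
+    #include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+
+    using namespace libsnark;
+
+    template<typename FieldT>
+    r1cs_constraint<FieldT> example_gate()
+    {
+        // Variable index 0 is reserved for the constant term, so (1,X) = (1, x, y).
+        const variable<FieldT> one(0), x(1), y(2);
+
+        linear_combination<FieldT> A, B, C;
+        A.add_term(one, 3);  // 3 * 1
+        A.add_term(x, 1);    // + 1 * x   =>  < A, (1,X) > = 3 + x
+        B.add_term(x, 2);    //   2 * x   =>  < B, (1,X) > = 2*x
+        C.add_term(y, 1);    //   1 * y   =>  < C, (1,X) > = y
+
+        return r1cs_constraint<FieldT>(A, B, C);  // enforces (3 + x) * (2*x) = y
+    }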
+
+
+--------------------------------------------------------------------------------
+Elliptic curve choices
+--------------------------------------------------------------------------------
+
+The ppzkSNARK can be instantiated with different parameter choices, depending on
+which elliptic curve is used. The libsnark library currently provides three
+options:
+
+* "edwards":
+ an instantiation based on an Edwards curve, providing 80 bits of security.
+
+* "bn128":
+ an instantiation based on a Barreto-Naehrig curve, providing 128
+ bits of security. The underlying curve implementation is
+ \[ate-pairing], which has incorporated our patch that changes the
+ BN curve to one suitable for SNARK applications.
+
+ * This implementation uses dynamically-generated machine code for the curve
+ arithmetic. Some modern systems disallow execution of code on the heap, and
+ will thus block this implementation.
+
+ For example, on Fedora 20 at its default settings, you will get the error
+ `zmInit ERR:can't protect` when running this code. To solve this,
+ run `sudo setsebool -P allow_execheap 1` to allow execution,
+ or use `make CURVE=ALT_BN128` instead.
+
+* "alt_bn128":
+ an alternative to "bn128", somewhat slower but avoids dynamic code generation.
+
+Note that bn128 requires an x86-64 CPU while the other curve choices
+should be architecture-independent; see [portability](#portability).
+
+
+--------------------------------------------------------------------------------
+Gadget libraries
+--------------------------------------------------------------------------------
+
+The libsnark library currently provides two libraries for conveniently constructing
+R1CS instances out of reusable "gadgets". Both libraries provide a way to construct
+gadgets on top of other gadgets as well as additional explicit equations. In this way,
+complex R1CS instances can be built bottom up.
+
+### gadgetlib1
+
+This is a low-level library which exposes all features of the preprocessing
+zkSNARK for R1CS. Its design is based on templates (as is the ppzkSNARK code)
+to efficiently support working on multiple elliptic curves simultaneously. This
+library is used for most of the constraint-building in libsnark, both internally
+(reductions and Proof-Carrying Data) and in example applications.
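+
+The gadget pattern looks roughly as follows (a minimal sketch: the gadget itself
+is hypothetical, and the exact `gadget<FieldT>` base-class and annotation
+interfaces under `src/gadgetlib1/` may differ between versions):
+
+    #include "common/utils.hpp"       // FMT, used to build annotation strings
+    #include "gadgetlib1/gadget.hpp"
+    #include "gadgetlib1/pb_variable.hpp"
+    #include "gadgetlib1/protoboard.hpp"
+
+    using namespace libsnark;
+
+    // Enforces c = a * b. Larger gadgets are built the same way, by holding
+    // sub-gadgets as members and forwarding to their two generate_* methods.
+    template<typename FieldT>
+    class product_gadget : public gadget<FieldT> {
+    public:
+        pb_variable<FieldT> a, b, c;
+
+        product_gadget(protoboard<FieldT> &pb,
+                       const pb_variable<FieldT> &a,
+                       const pb_variable<FieldT> &b,
+                       const pb_variable<FieldT> &c,
+                       const std::string &annotation_prefix) :
+            gadget<FieldT>(pb, annotation_prefix), a(a), b(b), c(c) {}
+
+        void generate_r1cs_constraints()
+        {
+            this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a, b, c),
+                                         FMT(this->annotation_prefix, " a*b=c"));
+        }
+
+        void generate_r1cs_witness()
+        {
+            this->pb.val(c) = this->pb.val(a) * this->pb.val(b);
+        }
+    };
+
+A caller allocates the variables on a `protoboard`, constructs the gadget, calls
+`generate_r1cs_constraints()` once, and calls `generate_r1cs_witness()` after
+assigning the input values.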
+
+### gadgetlib2
+
+This is an alternative library for constructing systems of polynomial equations
+and, in particular, also R1CS instances. It is better documented and easier to
+use than gadgetlib1, and its interface does not use templates. However, fewer
+useful gadgets are provided.
+
+
+--------------------------------------------------------------------------------
+Security
+--------------------------------------------------------------------------------
+
+The theoretical security of the underlying mathematical constructions, and the
+requisite assumptions, are analyzed in detail in the aforementioned research
+papers.
+
+**This code is a research-quality proof of concept, and has not
+yet undergone extensive review or testing. It is thus not suitable,
+as is, for use in critical or production systems.**
+
+Known issues include the following:
+
+* The ppzkSNARK's generator and prover exhibit data-dependent running times
+ and memory usage. These form timing and cache-contention side channels,
+ which may be an issue in some applications.
+
+* Randomness is retrieved from /dev/urandom, but this should be
+ changed to a carefully considered (depending on system and threat
+ model) external, high-quality randomness source when creating
+ long-term proving/verification keys.
+
+
+--------------------------------------------------------------------------------
+Build instructions
+--------------------------------------------------------------------------------
+
+The libsnark library relies on the following:
+
+- C++ build environment
+- GMP for certain big-integer arithmetic
+- libprocps for reporting memory usage
+- GTest for some of the unit tests
+
+So far we have tested these only on Linux, though we have been able to make the library work,
+with some features disabled (such as memory profiling or GTest tests), on Windows via Cygwin
+and on Mac OS X. (If you succeed in achieving more complete ports of the library, please
+let us know!) See also the notes on [portability](#portability) below.
+
+For example, on a fresh install of Ubuntu 14.04, install the following packages:
+
+ $ sudo apt-get install build-essential git libgmp3-dev libprocps3-dev libgtest-dev python-markdown libboost-all-dev libssl-dev
+
+Or, on Fedora 20:
+
+ $ sudo yum install gcc-c++ make git gmp-devel procps-ng-devel gtest-devel python-markdown
+
+Run the following, to fetch dependencies from their GitHub repos and compile them.
+(Not required if you set `CURVE` to something other than the default `BN128` and also set `NO_SUPERCOP=1`.)
+
+ $ ./prepare-depends.sh
+
+Then, to compile the library, tests, profiling harness and documentation, run:
+
+ $ make
+
+To create just the HTML documentation, run
+
+ $ make doc
+
+and then view the resulting `README.html` (which contains the very text you are reading now).
+
+To create Doxygen documentation summarizing all files, classes and functions,
+with some (currently sparse) comments, install the `doxygen` and `graphviz` packages, then run
+
+ $ make doxy
+
+(this may take a few minutes). Then view the resulting [`doxygen/index.html`](doxygen/index.html).
+
+### Using libsnark as a library
+
+To develop an application that uses libsnark, you could add it within the libsnark directory tree and adjust the Makefile, but it is far better to build libsnark as a (shared or static) library. You can then write your code in a separate directory tree, and link it against libsnark.
+
+
+To build just the shared object library `libsnark.so`, run:
+
+ $ make lib
+
+To build just the static library `libsnark.a`, run:
+
+ $ make lib STATIC=1
+
+Note that static compilation requires static versions of all libraries it depends on.
+It may help to minimize these dependencies by appending
+`CURVE=ALT_BN128 NO_PROCPS=1 NO_GTEST=1 NO_SUPERCOP=1`. On Fedora 21, the requisite
+library RPM dependencies are then:
+`boost-static glibc-static gmp-static libstdc++-static openssl-static zlib-static
+ boost-devel glibc-devel gmp-devel libstdc++-devel openssl-devel`.
+
+To build *and install* the libsnark library:
+
+ $ make install PREFIX=/install/path
+
+This will install `libsnark.so` into `/install/path/lib`; so your application should be linked using `-L/install/path/lib -lsnark`. It also installs the requisite headers into `/install/path/include`; so your application should be compiled using `-I/install/path/include`.
+
+In addition, unless you use `NO_SUPERCOP=1`, `libsupercop.a` will be installed and should be linked in using `-lsupercop`.
+
+
+### Building on Windows using Cygwin
+Install Cygwin using the graphical installer, including the `g++`, `libgmp`
+and `git` packages. Then disable the dependencies not easily supported under Cygwin,
+using:
+
+ $ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1
+
+
+### Building on Mac OS X
+
+On Mac OS X, install GMP from MacPorts (`port install gmp`). Then disable the
+dependencies not easily supported under Mac OS X, using:
+
+ $ make NO_PROCPS=1 NO_GTEST=1 NO_DOCS=1
+
+MacPorts does not write its libraries into standard system folders, so you
+might need to explicitly provide the paths to the header files and libraries by
+appending `CXXFLAGS=-I/opt/local/include LDFLAGS=-L/opt/local/lib` to the line
+above. Similarly, to pass the paths to ate-pairing you would run
+`INC_DIR=-I/opt/local/include LIB_DIR=-L/opt/local/lib ./prepare-depends.sh`
+instead of `./prepare-depends.sh` above.
+
+--------------------------------------------------------------------------------
+Tutorials
+--------------------------------------------------------------------------------
+
+libsnark includes a tutorial, and some usage examples, for the high-level API.
+
+* `src/gadgetlib1/examples` contains a simple example for constructing a
+ constraint system using gadgetlib1.
+
+* `src/gadgetlib2/examples` contains a tutorial for using gadgetlib2 to express
+ NP statements as constraint systems. It introduces basic terminology, design
+ overview, and recommended programming style. It also shows how to invoke
+ ppzkSNARKs on such constraint systems. The main file, `tutorial.cpp`, builds
+ into a standalone executable.
+
+* `src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark.cpp`
+  constructs a simple constraint system and runs the ppzkSNARK. See below for how to
+ run it.
+
+
+--------------------------------------------------------------------------------
+Executing profiling example
+--------------------------------------------------------------------------------
+
+The command
+
+ $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 Fr
+
+exercises the ppzkSNARK (first generator, then prover, then verifier) on an
+R1CS instance with 1000 equations and an input consisting of 10 field elements.
+
+(If you get the error `zmInit ERR:can't protect`, see the discussion
+[above](#elliptic-curve-choices).)
+
+The command
+
+ $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 bytes
+
+does the same but now the input consists of 10 bytes.
+
+
+--------------------------------------------------------------------------------
+Build options
+--------------------------------------------------------------------------------
+
+The following flags change the behavior of the compiled code.
+
+* `make FEATUREFLAGS='-Dname1 -Dname2 ...'`
+
+ Override the active conditional #define names (you can see the default at the top of the Makefile).
+ The next bullets list the most important conditionally-#defined features.
+ For example, `make FEATUREFLAGS='-DBINARY_OUTPUT'` enables binary output and disables the default
+ assembly optimizations and Montgomery-representation output.
+
+* define `BINARY_OUTPUT`
+
+ In serialization, output raw binary data (instead of decimal, when not set).
+
+* `make CURVE=choice` / define `CURVE_choice` (where `choice` is one of:
+ ALT_BN128, BN128, EDWARDS, MNT4, MNT6)
+
+ Set the default curve to one of the above (see [elliptic curve choices](#elliptic-curve-choices)).
+
+* `make DEBUG=1` / define `DEBUG`
+
+ Print additional information for debugging purposes.
+
+* `make LOWMEM=1` / define `LOWMEM`
+
+ Limit the size of multi-exponentiation tables, for low-memory platforms.
+
+* `make NO_DOCS=1`
+
+ Do not generate HTML documentation, e.g. on platforms where Markdown is not easily available.
+
+* `make NO_PROCPS=1`
+
+ Do not link against libprocps. This disables memory profiling.
+
+* `make NO_GTEST=1`
+
+  Do not link against GTest. The gadgetlib2 tutorial and test suite won't be compiled.
+
+* `make NO_SUPERCOP=1`
+
+ Do not link against SUPERCOP for optimized crypto. The ADSNARK executables will not be built.
+
+* `make MULTICORE=1`
+
+ Enable parallelized execution of the ppzkSNARK generator and prover, using OpenMP.
+  This will utilize all cores on the CPU for heavyweight parallelizable operations such as
+ FFT and multiexponentiation. The default is single-core.
+
+ To override the maximum number of cores used, set the environment variable `OMP_NUM_THREADS`
+  at runtime (not compile time), e.g., `OMP_NUM_THREADS=8 test_r1cs_sp_ppzkpcd`. It defaults
+ to the autodetected number of cores, but on some devices, dynamic core management confused
+ OpenMP's autodetection, so setting `OMP_NUM_THREADS` is necessary for full utilization.
+
+* define `NO_PT_COMPRESSION`
+
+ Do not use point compression.
+ This gives much faster serialization times, at the expense of ~2x larger
+ sizes for serialized keys and proofs.
+
+* define `MONTGOMERY_OUTPUT` (on by default)
+
+ Serialize Fp elements as their Montgomery representations. If this
+ option is disabled then Fp elements are serialized as their
+ equivalence classes, which is slower but produces human-readable
+ output.
+
+* `make PROFILE_OP_COUNTS=1` / define `PROFILE_OP_COUNTS`
+
+ Collect counts for field and curve operations inside static variables
+ of the corresponding algebraic objects. This option works for all
+ curves except bn128.
+
+* define `USE_ASM` (on by default)
+
+ Use unrolled assembly routines for F[p] arithmetic and faster heap in
+ multi-exponentiation. (When not set, use GMP's `mpn_*` routines instead.)
+
+* define `USE_MIXED_ADDITION`
+
+ Convert each element of the proving key and verification key to
+ affine coordinates. This allows using mixed addition formulas in
+ multiexponentiation and results in slightly faster prover and
+ verifier runtime at expense of increased proving time.
+
+* `make PERFORMANCE=1`
+
+ Enables compiler optimizations such as link-time optimization, and disables debugging aids.
+ (On some distributions this causes a `plugin needed to handle lto object` link error and `undefined reference`s, which can be remedied by `AR=gcc-ar make ...`.)
+
+Not all combinations are tested together or supported by every part of the codebase.
+
+
+--------------------------------------------------------------------------------
+Portability
+--------------------------------------------------------------------------------
+
+libsnark is written in fairly standard C++11.
+
+However, having been developed on Linux on x86-64 CPUs, libsnark has some limitations
+with respect to portability. Specifically:
+
+1. libsnark's algebraic data structures assume little-endian byte order.
+
+2. Profiling routines use `clock_gettime` and `readproc` calls, which are Linux-specific.
+
+3. Random-number generation is done by reading from `/dev/urandom`, which is
+ specific to Unix-like systems.
+
+4. libsnark binary serialization routines (see `BINARY_OUTPUT` above) assume
+ a fixed machine word size (i.e. sizeof(mp_limb_t) for GMP's limb data type).
+ Objects serialized in binary on a 64-bit system cannot be de-serialized on
+ a 32-bit system, and vice versa.
+ (The decimal serialization routines have no such limitation.)
+
+5. libsnark requires a C++ compiler with good C++11 support. It has been
+ tested with g++ 4.7, g++ 4.8, and clang 3.4.
+
+6. On x86-64, we by default use highly optimized assembly implementations for some
+ operations (see `USE_ASM` above). On other architectures we fall back to a
+ portable C++ implementation, which is slower.
+
+Tested configurations include:
+
+* Debian jessie with g++ 4.7 on x86-64
+* Debian jessie with clang 3.4 on x86-64
+* Fedora 20/21 with g++ 4.8.2/4.9.2 on x86-64 and i686
+* Ubuntu 14.04 LTS with g++ 4.8 on x86-64
+* Ubuntu 14.04 LTS with g++ 4.8 on x86-32, for EDWARDS and ALT_BN128 curve choices
+* Debian wheezy with g++ 4.7 on ARM little endian (Debian armel port) inside QEMU, for EDWARDS and ALT_BN128 curve choices
+* Windows 7 with g++ 4.8.3 under Cygwin 1.7.30 on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1, for EDWARDS and ALT_BN128 curve choices
+* Mac OS X 10.9.4 (Mavericks) with Apple LLVM version 5.1 (based on LLVM 3.4svn) on x86-64 with NO_PROCPS=1, NO_GTEST=1 and NO_DOCS=1
+
+
+--------------------------------------------------------------------------------
+Directory structure
+--------------------------------------------------------------------------------
+
+The directory structure of the libsnark library is as follows:
+
+* src/ --- main C++ source code, containing the following modules:
+ * algebra/ --- fields and elliptic curve groups
+ * common/ --- miscellaneous utilities
+ * gadgetlib1/ --- gadgetlib1, a library to construct R1CS instances
+ * gadgets/ --- basic gadgets for gadgetlib1
+ * gadgetlib2/ --- gadgetlib2, a library to construct R1CS instances
+ * qap/ --- quadratic arithmetic program
+ * domains/ --- support for fast interpolation/evaluation, by providing
+ FFTs and Lagrange-coefficient computations for various domains
+  * relations/ --- interfaces for expressing statements (relations between instances and witnesses) as various NP-complete languages
+ * constraint_satisfaction_problems/ --- R1CS and USCS languages
+ * circuit_satisfaction_problems/ --- Boolean and arithmetic circuit satisfiability languages
+ * ram_computations/ --- RAM computation languages
+  * zk_proof_systems/ --- interfaces and implementations of the proof systems
+  * reductions/ --- reductions between languages (used internally, but contains many examples of building constraints)
+
+ Some of these module directories have the following subdirectories:
+
+ * ...
+ * examples/ --- example code and tutorials for this module
+ * tests/ --- unit tests for this module
+
+  In particular, the top-level API examples are at `src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/` and `src/gadgetlib2/examples/`.
+
+* depsrc/ --- created by `prepare-depends.sh` for retrieved sourcecode and local builds of external code
+ (currently: \[ate-pairing], and its dependency xbyak).
+
+* depinst/ --- created by `prepare-depends.sh` and `Makefile`
+ for local installation of locally-compiled dependencies.
+
+* doxygen/ --- created by `make doxy` and contains a Doxygen summary of all files, classes etc. in libsnark.
+
+
+--------------------------------------------------------------------------------
+Further considerations
+--------------------------------------------------------------------------------
+
+### Multiexponentiation window size
+
+The ppzkSNARK's generator has to solve a fixed-base multi-exponentiation
+problem. We use a window-based method in which the optimal window size depends
+on the size of the multiexponentiation instance *and* the platform.
+
+On our benchmarking platform (a 3.40 GHz Intel Core i7-4770 CPU), we have
+computed optimal windows for each curve, provided as
+`fixed_base_exp_window_table` initialization sequences; see
+`X_init.cpp` for X=edwards,bn128,alt_bn128.
+
+Performance on other platforms may not be optimal (but is probably not far off).
+Future releases of the libsnark library will include a tool that generates
+optimal window sizes.
+
+
+--------------------------------------------------------------------------------
+References
+--------------------------------------------------------------------------------
+
+\[BBFR15] [
+ _ADSNARK: nearly practical and privacy-preserving proofs on authenticated data_
+](https://eprint.iacr.org/2014/617),
+ Michael Backes, Manuel Barbosa, Dario Fiore, Raphael M. Reischuk,
+ IEEE Symposium on Security and Privacy (Oakland) 2015
+
+\[BCCT12] [
+  _From extractable collision resistance to succinct non-interactive arguments of knowledge, and back again_
+](http://eprint.iacr.org/2011/443),
+ Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer,
+ Innovations in Computer Science (ITCS) 2012
+
+\[BCCT13] [
+ _Recursive composition and bootstrapping for SNARKs and proof-carrying data_
+](http://eprint.iacr.org/2012/095),
+ Nir Bitansky, Ran Canetti, Alessandro Chiesa, Eran Tromer,
+ Symposium on Theory of Computing (STOC) 2013
+
+\[BCGTV13] [
+ _SNARKs for C: Verifying Program Executions Succinctly and in Zero Knowledge_
+](http://eprint.iacr.org/2013/507),
+ Eli Ben-Sasson, Alessandro Chiesa, Daniel Genkin, Eran Tromer, Madars Virza,
+ CRYPTO 2013
+
+\[BCIOP13] [
+ _Succinct Non-Interactive Arguments via Linear Interactive Proofs_
+](http://eprint.iacr.org/2012/718),
+ Nir Bitansky, Alessandro Chiesa, Yuval Ishai, Rafail Ostrovsky, Omer Paneth,
+ Theory of Cryptography Conference 2013
+
+\[BCTV14a] [
+ _Succinct Non-Interactive Zero Knowledge for a von Neumann Architecture_
+](http://eprint.iacr.org/2013/879),
+ Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza,
+ USENIX Security 2014
+
+\[BCTV14b] [
+ _Scalable succinct non-interactive arguments via cycles of elliptic curves_
+](https://eprint.iacr.org/2014/595),
+ Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza,
+ CRYPTO 2014
+
+\[CTV15] [
+ _Cluster computing in zero knowledge_
+](https://eprint.iacr.org/2015/377),
+ Alessandro Chiesa, Eran Tromer, Madars Virza,
+ EUROCRYPT 2015
+
+\[DFGK14] [
+  _Square span programs with applications to succinct NIZK arguments_
+](https://eprint.iacr.org/2014/718),
+ George Danezis, Cedric Fournet, Jens Groth, Markulf Kohlweiss,
+  ASIACRYPT 2014
+
+\[GGPR13] [
+ _Quadratic span programs and succinct NIZKs without PCPs_
+](http://eprint.iacr.org/2012/215),
+ Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova,
+ EUROCRYPT 2013
+
+\[ate-pairing] [
+ _High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves_
+](https://github.com/herumi/ate-pairing),
+ MITSUNARI Shigeo, TERUYA Tadanori
+
+\[PGHR13] [
+ _Pinocchio: Nearly Practical Verifiable Computation_
+](http://eprint.iacr.org/2013/279),
+ Bryan Parno, Craig Gentry, Jon Howell, Mariana Raykova,
+ IEEE Symposium on Security and Privacy (Oakland) 2013
+
+[SCIPR Lab]: http://www.scipr-lab.org/ (Succinct Computational Integrity and Privacy Research Lab)
+
+[LICENSE]: LICENSE (LICENSE file in top directory of libsnark distribution)
+
+[AUTHORS]: AUTHORS (AUTHORS file in top directory of libsnark distribution)
--- /dev/null
+# Doxyfile 1.8.2
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or sequence of words) that should
+# identify the project. Note that if you do not use Doxywizard you need
+# to put quotes around the project name if it contains spaces.
+
+PROJECT_NAME = libsnark
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer
+# a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip. Note that you specify absolute paths here, but also
+# relative paths, which will be relative from the directory where doxygen is
+# started.
+
+STRIP_FROM_PATH = src
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding
+# "class=itcl::class" will allow you to use the command class in the
+# itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension,
+# and language is one of the parsers supported by doxygen: IDL, Java,
+# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
+# C++. For instance to make doxygen treat .inc files as Fortran files (default
+# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
+# that for custom extensions you also need to set FILE_PATTERNS otherwise the
+# files are not read by doxygen.
+
+EXTENSION_MAPPING = tcc=C++
+
+# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
+# comments according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you
+# can mix doxygen, HTML, and XML commands with Markdown formatting.
+# Disable only in case of backward compatibilities issues.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented classes,
+# or namespaces to their corresponding documentation. Such a link can be
+# prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and
+# unions are shown inside the group in which they are included (e.g. using
+# @ingroup) instead of on a separate page (for HTML and Man pages) or
+# section (for LaTeX and RTF).
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and
+# unions with only public data fields will be shown inline in the documentation
+# of the scope in which they are defined (i.e. file, namespace, or group
+# documentation), provided this scope is documented. If set to NO (the default),
+# structs, classes, and unions are shown on a separate page (for HTML and Man
+# pages) or section (for LaTeX and RTF).
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT will appear in the documentation as a struct
+# with name TypeT. When disabled, the typedef will appear as a member of a file,
+# namespace, or class, and the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium-sized projects (<1000 input files) the default value is
+# probably good enough. For larger projects, too small a cache can cause
+# doxygen to be busy swapping symbols to and from disk most of the time,
+# causing a significant performance penalty.
+# If the system has enough physical memory, increasing the cache will improve
+# the performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols.
+
+SYMBOL_CACHE_SIZE = 0
+
+# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
+# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
+# their name and scope. Since this can be an expensive process and often the
+# same symbol appears multiple times in the code, doxygen keeps a cache of
+# pre-resolved symbols. If the cache is too small doxygen will become slower.
+# If the cache is too large, memory is wasted. The cache size is given by this
+# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols.
+
+LOOKUP_CACHE_SIZE = 0
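+# As a worked example of the formula above: a hypothetical SYMBOL_CACHE_SIZE
+# (or LOOKUP_CACHE_SIZE) of 3 would give a cache of 2^(16+3) = 2^19 = 524288
+# symbols, roughly eight times the memory of the default 2^16 = 65536 entries.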
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities are
+# documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to
+# do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even
+# if there is only one candidate or it is obvious which candidate to choose
+# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files. The
+# .bib extension is automatically appended if omitted. Using this command
+# requires the bibtex tool to be installed. See also
+# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style
+# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this
+# feature you need bibtex and perl available in the search path.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = src README.md
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.md *.c *.h *.cpp *.hpp *.tcc *.inc *.cc
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE = Debug \
+ Release
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER = "perl -pe 's/^(libsnark: .*)$/$1 {#mainpage}/ if $.==1; s!//+ *(TODO|FIXME|XXX)!/// \\todo!'"
+ # The 1st replacement marks README.md as the main page.
+ # The 2nd replacement converts TODO/FIXME/XXX comments into \todo items so
+ # they show up in the generated todo list.
+ # These should be done with FILTER_PATTERNS instead, but it looks like shell
+ # escaping is handled differently there.
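+ # As an illustration (hypothetical input shown): a first line of README.md of
+ # the form "libsnark: <one-line description>" becomes
+ # "libsnark: <one-line description> {#mainpage}", and a comment such as
+ # "// TODO handle overflow" becomes "/// \todo handle overflow".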
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = YES
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = doxygen
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 is purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8. The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
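+# Taken together, the three values above (hue 220, saturation 100, gamma 80)
+# appear to be doxygen's stock defaults: the standard blue color scheme with
+# slightly lightened output (gamma 0.8).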
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 0
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. See
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files need to be copied into the plugins directory of Eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying, Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree, you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index, you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client-side JavaScript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = YES
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax. However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses JavaScript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function, so this one should
+# typically be disabled. For large projects the JavaScript-based search engine
+# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better
+# solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using JavaScript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to set up
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES = amsfonts
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a PDF viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
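+# A hypothetical example of the syntax described above:
+#   PREDEFINED = NDEBUG DEBUG_LEVEL=2 NO_UNDEF_ME:=1
+# would define NDEBUG as 1 and DEBUG_LEVEL as 2, and the := would keep
+# NO_UNDEF_ME from being removed by an #undef or expanded recursively.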
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = NO
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number of
+# direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES, doxygen will not show the graph at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
+
+namespace libsnark {
+
+#ifdef PROFILE_OP_COUNTS
+long long alt_bn128_G1::add_cnt = 0;
+long long alt_bn128_G1::dbl_cnt = 0;
+#endif
+
+std::vector<size_t> alt_bn128_G1::wnaf_window_table;
+std::vector<size_t> alt_bn128_G1::fixed_base_exp_window_table;
+alt_bn128_G1 alt_bn128_G1::G1_zero;
+alt_bn128_G1 alt_bn128_G1::G1_one;
+
+alt_bn128_G1::alt_bn128_G1()
+{
+ this->X = G1_zero.X;
+ this->Y = G1_zero.Y;
+ this->Z = G1_zero.Z;
+}
+
+void alt_bn128_G1::print() const
+{
+ if (this->is_zero())
+ {
+ printf("O\n");
+ }
+ else
+ {
+ alt_bn128_G1 copy(*this);
+ copy.to_affine_coordinates();
+ gmp_printf("(%Nd , %Nd)\n",
+ copy.X.as_bigint().data, alt_bn128_Fq::num_limbs,
+ copy.Y.as_bigint().data, alt_bn128_Fq::num_limbs);
+ }
+}
+
+void alt_bn128_G1::print_coordinates() const
+{
+ if (this->is_zero())
+ {
+ printf("O\n");
+ }
+ else
+ {
+ gmp_printf("(%Nd : %Nd : %Nd)\n",
+ this->X.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Y.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Z.as_bigint().data, alt_bn128_Fq::num_limbs);
+ }
+}
+
+void alt_bn128_G1::to_affine_coordinates()
+{
+ if (this->is_zero())
+ {
+ this->X = alt_bn128_Fq::zero();
+ this->Y = alt_bn128_Fq::one();
+ this->Z = alt_bn128_Fq::zero();
+ }
+ else
+ {
+ alt_bn128_Fq Z_inv = Z.inverse();
+ alt_bn128_Fq Z2_inv = Z_inv.squared();
+ alt_bn128_Fq Z3_inv = Z2_inv * Z_inv;
+ this->X = this->X * Z2_inv;
+ this->Y = this->Y * Z3_inv;
+ this->Z = alt_bn128_Fq::one();
+ }
+}
+
+void alt_bn128_G1::to_special()
+{
+ this->to_affine_coordinates();
+}
+
+bool alt_bn128_G1::is_special() const
+{
+ return (this->is_zero() || this->Z == alt_bn128_Fq::one());
+}
+
+bool alt_bn128_G1::is_zero() const
+{
+ return (this->Z.is_zero());
+}
+
+bool alt_bn128_G1::operator==(const alt_bn128_G1 &other) const
+{
+ if (this->is_zero())
+ {
+ return other.is_zero();
+ }
+
+ if (other.is_zero())
+ {
+ return false;
+ }
+
+ /* now neither is O */
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ alt_bn128_Fq Z1_squared = (this->Z).squared();
+ alt_bn128_Fq Z2_squared = (other.Z).squared();
+
+ if ((this->X * Z2_squared) != (other.X * Z1_squared))
+ {
+ return false;
+ }
+
+ alt_bn128_Fq Z1_cubed = (this->Z) * Z1_squared;
+ alt_bn128_Fq Z2_cubed = (other.Z) * Z2_squared;
+
+ if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool alt_bn128_G1::operator!=(const alt_bn128_G1& other) const
+{
+ return !(operator==(other));
+}
+
+alt_bn128_G1 alt_bn128_G1::operator+(const alt_bn128_G1 &other) const
+{
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // check for doubling case
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ alt_bn128_Fq Z1Z1 = (this->Z).squared();
+ alt_bn128_Fq Z2Z2 = (other.Z).squared();
+
+ alt_bn128_Fq U1 = this->X * Z2Z2;
+ alt_bn128_Fq U2 = other.X * Z1Z1;
+
+ alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1;
+ alt_bn128_Fq Z2_cubed = (other.Z) * Z2Z2;
+
+ alt_bn128_Fq S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2
+ alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
+
+ if (U1 == U2 && S1 == S2)
+ {
+ // dbl case; nothing of above can be reused
+ return this->dbl();
+ }
+
+ // rest of add case
+ alt_bn128_Fq H = U2 - U1; // H = U2-U1
+ alt_bn128_Fq S2_minus_S1 = S2-S1;
+ alt_bn128_Fq I = (H+H).squared(); // I = (2 * H)^2
+ alt_bn128_Fq J = H * I; // J = H * I
+ alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
+ alt_bn128_Fq V = U1 * I; // V = U1 * I
+ alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
+ alt_bn128_Fq S1_J = S1 * J;
+ alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
+ alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
+
+ return alt_bn128_G1(X3, Y3, Z3);
+}
+
+alt_bn128_G1 alt_bn128_G1::operator-() const
+{
+ return alt_bn128_G1(this->X, -(this->Y), this->Z);
+}
+
+
+alt_bn128_G1 alt_bn128_G1::operator-(const alt_bn128_G1 &other) const
+{
+ return (*this) + (-other);
+}
+
+alt_bn128_G1 alt_bn128_G1::add(const alt_bn128_G1 &other) const
+{
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // handle double case
+ if (this->operator==(other))
+ {
+ return this->dbl();
+ }
+
+#ifdef PROFILE_OP_COUNTS
+ this->add_cnt++;
+#endif
+ // NOTE: does not handle O and pts of order 2,4
+ // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+
+ alt_bn128_Fq Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2
+ alt_bn128_Fq Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2
+ alt_bn128_Fq U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2
+ alt_bn128_Fq U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1
+ alt_bn128_Fq S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2
+ alt_bn128_Fq S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1
+ alt_bn128_Fq H = U2 - U1; // H = U2-U1
+ alt_bn128_Fq S2_minus_S1 = S2-S1;
+ alt_bn128_Fq I = (H+H).squared(); // I = (2 * H)^2
+ alt_bn128_Fq J = H * I; // J = H * I
+ alt_bn128_Fq r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
+ alt_bn128_Fq V = U1 * I; // V = U1 * I
+ alt_bn128_Fq X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
+ alt_bn128_Fq S1_J = S1 * J;
+ alt_bn128_Fq Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
+ alt_bn128_Fq Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
+
+ return alt_bn128_G1(X3, Y3, Z3);
+}
+
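+// Mixed addition: assumes the other operand is already in special (affine, Z = 1)
+// form, which removes the Z2^2 and Z2^3 computations of the general add above.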
+alt_bn128_G1 alt_bn128_G1::mixed_add(const alt_bn128_G1 &other) const
+{
+#ifdef DEBUG
+ assert(other.is_special());
+#endif
+
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // check for doubling case
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ // we know that Z2 = 1
+
+ const alt_bn128_Fq Z1Z1 = (this->Z).squared();
+
+ const alt_bn128_Fq &U1 = this->X;
+ const alt_bn128_Fq U2 = other.X * Z1Z1;
+
+ const alt_bn128_Fq Z1_cubed = (this->Z) * Z1Z1;
+
+ const alt_bn128_Fq &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2
+ const alt_bn128_Fq S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
+
+ if (U1 == U2 && S1 == S2)
+ {
+ // dbl case; nothing of above can be reused
+ return this->dbl();
+ }
+
+#ifdef PROFILE_OP_COUNTS
+ this->add_cnt++;
+#endif
+
+ // NOTE: does not handle O and pts of order 2,4
+ // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
+ alt_bn128_Fq H = U2-(this->X); // H = U2-X1
+    alt_bn128_Fq HH = H.squared();                     // HH = H^2
+ alt_bn128_Fq I = HH+HH; // I = 4*HH
+ I = I + I;
+ alt_bn128_Fq J = H*I; // J = H*I
+ alt_bn128_Fq r = S2-(this->Y); // r = 2*(S2-Y1)
+ r = r + r;
+ alt_bn128_Fq V = (this->X) * I ; // V = X1*I
+ alt_bn128_Fq X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V
+ alt_bn128_Fq Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J
+ Y3 = r*(V-X3) - Y3 - Y3;
+ alt_bn128_Fq Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH
+
+ return alt_bn128_G1(X3, Y3, Z3);
+}
+
+alt_bn128_G1 alt_bn128_G1::dbl() const
+{
+#ifdef PROFILE_OP_COUNTS
+ this->dbl_cnt++;
+#endif
+ // handle point at infinity
+ if (this->is_zero())
+ {
+ return (*this);
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // NOTE: does not handle O and pts of order 2,4
+ // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+
+ alt_bn128_Fq A = (this->X).squared(); // A = X1^2
+ alt_bn128_Fq B = (this->Y).squared(); // B = Y1^2
+ alt_bn128_Fq C = B.squared(); // C = B^2
+ alt_bn128_Fq D = (this->X + B).squared() - A - C;
+ D = D+D; // D = 2 * ((X1 + B)^2 - A - C)
+ alt_bn128_Fq E = A + A + A; // E = 3 * A
+ alt_bn128_Fq F = E.squared(); // F = E^2
+ alt_bn128_Fq X3 = F - (D+D); // X3 = F - 2 D
+ alt_bn128_Fq eightC = C+C;
+ eightC = eightC + eightC;
+ eightC = eightC + eightC;
+ alt_bn128_Fq Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C
+ alt_bn128_Fq Y1Z1 = (this->Y)*(this->Z);
+ alt_bn128_Fq Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1
+
+ return alt_bn128_G1(X3, Y3, Z3);
+}
+
+bool alt_bn128_G1::is_well_formed() const
+{
+ if (this->is_zero())
+ {
+ return true;
+ }
+ else
+ {
+ /*
+ y^2 = x^3 + b
+
+      We are using Jacobian coordinates, so the equation we need to check is actually
+
+ (y/z^3)^2 = (x/z^2)^3 + b
+ y^2 / z^6 = x^3 / z^6 + b
+ y^2 = x^3 + b z^6
+ */
+ alt_bn128_Fq X2 = this->X.squared();
+ alt_bn128_Fq Y2 = this->Y.squared();
+ alt_bn128_Fq Z2 = this->Z.squared();
+
+ alt_bn128_Fq X3 = this->X * X2;
+ alt_bn128_Fq Z3 = this->Z * Z2;
+ alt_bn128_Fq Z6 = Z3.squared();
+
+ return (Y2 == X3 + alt_bn128_coeff_b * Z6);
+ }
+}
+
+alt_bn128_G1 alt_bn128_G1::zero()
+{
+ return G1_zero;
+}
+
+alt_bn128_G1 alt_bn128_G1::one()
+{
+ return G1_one;
+}
+
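+// A random group element: a random scalar (in Fr) times the fixed generator.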
+alt_bn128_G1 alt_bn128_G1::random_element()
+{
+ return (scalar_field::random_element().as_bigint()) * G1_one;
+}
+
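+// Serialization format: an is_zero flag, the affine X coordinate, and (unless
+// NO_PT_COMPRESSION is defined) only the least significant bit of Y; the reader
+// below recovers Y as a square root of X^3 + b and fixes its sign from that bit.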
+std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g)
+{
+ alt_bn128_G1 copy(g);
+ copy.to_affine_coordinates();
+
+ out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR;
+#ifdef NO_PT_COMPRESSION
+ out << copy.X << OUTPUT_SEPARATOR << copy.Y;
+#else
+ /* storing LSB of Y */
+ out << copy.X << OUTPUT_SEPARATOR << (copy.Y.as_bigint().data[0] & 1);
+#endif
+
+ return out;
+}
+
+std::istream& operator>>(std::istream &in, alt_bn128_G1 &g)
+{
+ char is_zero;
+ alt_bn128_Fq tX, tY;
+
+#ifdef NO_PT_COMPRESSION
+ in >> is_zero >> tX >> tY;
+ is_zero -= '0';
+#else
+ in.read((char*)&is_zero, 1); // this reads is_zero;
+ is_zero -= '0';
+ consume_OUTPUT_SEPARATOR(in);
+
+ unsigned char Y_lsb;
+ in >> tX;
+ consume_OUTPUT_SEPARATOR(in);
+ in.read((char*)&Y_lsb, 1);
+ Y_lsb -= '0';
+
+ // y = +/- sqrt(x^3 + b)
+ if (!is_zero)
+ {
+ alt_bn128_Fq tX2 = tX.squared();
+ alt_bn128_Fq tY2 = tX2*tX + alt_bn128_coeff_b;
+ tY = tY2.sqrt();
+
+ if ((tY.as_bigint().data[0] & 1) != Y_lsb)
+ {
+ tY = -tY;
+ }
+ }
+#endif
+ // using Jacobian coordinates
+ if (!is_zero)
+ {
+ g.X = tX;
+ g.Y = tY;
+ g.Z = alt_bn128_Fq::one();
+ }
+ else
+ {
+ g = alt_bn128_G1::zero();
+ }
+
+ return in;
+}
+
+std::ostream& operator<<(std::ostream& out, const std::vector<alt_bn128_G1> &v)
+{
+ out << v.size() << "\n";
+ for (const alt_bn128_G1& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+std::istream& operator>>(std::istream& in, std::vector<alt_bn128_G1> &v)
+{
+ v.clear();
+
+ size_t s;
+ in >> s;
+ consume_newline(in);
+
+ v.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ alt_bn128_G1 g;
+ in >> g;
+ consume_OUTPUT_NEWLINE(in);
+ v.emplace_back(g);
+ }
+
+ return in;
+}
+
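+// Convert every (assumed non-zero) point to affine form (Z = 1) using a single
+// batched inversion of all Z coordinates, then scaling each X by 1/Z^2 and each
+// Y by 1/Z^3, amortizing the cost of the field inversions across the whole vector.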
+template<>
+void batch_to_special_all_non_zeros<alt_bn128_G1>(std::vector<alt_bn128_G1> &vec)
+{
+ std::vector<alt_bn128_Fq> Z_vec;
+ Z_vec.reserve(vec.size());
+
+ for (auto &el: vec)
+ {
+ Z_vec.emplace_back(el.Z);
+ }
+ batch_invert<alt_bn128_Fq>(Z_vec);
+
+ const alt_bn128_Fq one = alt_bn128_Fq::one();
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ alt_bn128_Fq Z2 = Z_vec[i].squared();
+ alt_bn128_Fq Z3 = Z_vec[i] * Z2;
+
+ vec[i].X = vec[i].X * Z2;
+ vec[i].Y = vec[i].Y * Z3;
+ vec[i].Z = one;
+ }
+}
+
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ALT_BN128_G1_HPP_
+#define ALT_BN128_G1_HPP_
+#include <vector>
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+#include "algebra/curves/curve_utils.hpp"
+
+namespace libsnark {
+
+class alt_bn128_G1;
+std::ostream& operator<<(std::ostream &, const alt_bn128_G1&);
+std::istream& operator>>(std::istream &, alt_bn128_G1&);
+
+class alt_bn128_G1 {
+public:
+#ifdef PROFILE_OP_COUNTS
+ static long long add_cnt;
+ static long long dbl_cnt;
+#endif
+ static std::vector<size_t> wnaf_window_table;
+ static std::vector<size_t> fixed_base_exp_window_table;
+ static alt_bn128_G1 G1_zero;
+ static alt_bn128_G1 G1_one;
+
+ typedef alt_bn128_Fq base_field;
+ typedef alt_bn128_Fr scalar_field;
+
+ alt_bn128_Fq X, Y, Z;
+
+ // using Jacobian coordinates
+ alt_bn128_G1();
+ alt_bn128_G1(const alt_bn128_Fq& X, const alt_bn128_Fq& Y, const alt_bn128_Fq& Z) : X(X), Y(Y), Z(Z) {};
+
+ void print() const;
+ void print_coordinates() const;
+
+ void to_affine_coordinates();
+ void to_special();
+ bool is_special() const;
+
+ bool is_zero() const;
+
+ bool operator==(const alt_bn128_G1 &other) const;
+ bool operator!=(const alt_bn128_G1 &other) const;
+
+ alt_bn128_G1 operator+(const alt_bn128_G1 &other) const;
+ alt_bn128_G1 operator-() const;
+ alt_bn128_G1 operator-(const alt_bn128_G1 &other) const;
+
+ alt_bn128_G1 add(const alt_bn128_G1 &other) const;
+ alt_bn128_G1 mixed_add(const alt_bn128_G1 &other) const;
+ alt_bn128_G1 dbl() const;
+
+ bool is_well_formed() const;
+
+ static alt_bn128_G1 zero();
+ static alt_bn128_G1 one();
+ static alt_bn128_G1 random_element();
+
+ static size_t size_in_bits() { return base_field::size_in_bits() + 1; }
+ static bigint<base_field::num_limbs> base_field_char() { return base_field::field_char(); }
+ static bigint<scalar_field::num_limbs> order() { return scalar_field::field_char(); }
+
+ friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G1 &g);
+ friend std::istream& operator>>(std::istream &in, alt_bn128_G1 &g);
+};
+
+template<mp_size_t m>
+alt_bn128_G1 operator*(const bigint<m> &lhs, const alt_bn128_G1 &rhs)
+{
+ return scalar_mul<alt_bn128_G1, m>(rhs, lhs);
+}
+
+template<mp_size_t m, const bigint<m>& modulus_p>
+alt_bn128_G1 operator*(const Fp_model<m,modulus_p> &lhs, const alt_bn128_G1 &rhs)
+{
+ return scalar_mul<alt_bn128_G1, m>(rhs, lhs.as_bigint());
+}
+
+std::ostream& operator<<(std::ostream& out, const std::vector<alt_bn128_G1> &v);
+std::istream& operator>>(std::istream& in, std::vector<alt_bn128_G1> &v);
+
+template<typename T>
+void batch_to_special_all_non_zeros(std::vector<T> &vec);
+template<>
+void batch_to_special_all_non_zeros<alt_bn128_G1>(std::vector<alt_bn128_G1> &vec);
+
+} // libsnark
+#endif // ALT_BN128_G1_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
+
+namespace libsnark {
+
+#ifdef PROFILE_OP_COUNTS
+long long alt_bn128_G2::add_cnt = 0;
+long long alt_bn128_G2::dbl_cnt = 0;
+#endif
+
+std::vector<size_t> alt_bn128_G2::wnaf_window_table;
+std::vector<size_t> alt_bn128_G2::fixed_base_exp_window_table;
+alt_bn128_G2 alt_bn128_G2::G2_zero;
+alt_bn128_G2 alt_bn128_G2::G2_one;
+
+alt_bn128_G2::alt_bn128_G2()
+{
+ this->X = G2_zero.X;
+ this->Y = G2_zero.Y;
+ this->Z = G2_zero.Z;
+}
+
+alt_bn128_Fq2 alt_bn128_G2::mul_by_b(const alt_bn128_Fq2 &elt)
+{
+ return alt_bn128_Fq2(alt_bn128_twist_mul_by_b_c0 * elt.c0, alt_bn128_twist_mul_by_b_c1 * elt.c1);
+}
+
+void alt_bn128_G2::print() const
+{
+ if (this->is_zero())
+ {
+ printf("O\n");
+ }
+ else
+ {
+ alt_bn128_G2 copy(*this);
+ copy.to_affine_coordinates();
+ gmp_printf("(%Nd*z + %Nd , %Nd*z + %Nd)\n",
+ copy.X.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
+ copy.X.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
+ copy.Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
+ copy.Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs);
+ }
+}
+
+void alt_bn128_G2::print_coordinates() const
+{
+ if (this->is_zero())
+ {
+ printf("O\n");
+ }
+ else
+ {
+ gmp_printf("(%Nd*z + %Nd : %Nd*z + %Nd : %Nd*z + %Nd)\n",
+ this->X.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->X.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Y.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Y.c0.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Z.c1.as_bigint().data, alt_bn128_Fq::num_limbs,
+ this->Z.c0.as_bigint().data, alt_bn128_Fq::num_limbs);
+ }
+}
+
+void alt_bn128_G2::to_affine_coordinates()
+{
+ if (this->is_zero())
+ {
+ this->X = alt_bn128_Fq2::zero();
+ this->Y = alt_bn128_Fq2::one();
+ this->Z = alt_bn128_Fq2::zero();
+ }
+ else
+ {
+ alt_bn128_Fq2 Z_inv = Z.inverse();
+ alt_bn128_Fq2 Z2_inv = Z_inv.squared();
+ alt_bn128_Fq2 Z3_inv = Z2_inv * Z_inv;
+ this->X = this->X * Z2_inv;
+ this->Y = this->Y * Z3_inv;
+ this->Z = alt_bn128_Fq2::one();
+ }
+}
+
+void alt_bn128_G2::to_special()
+{
+ this->to_affine_coordinates();
+}
+
+bool alt_bn128_G2::is_special() const
+{
+ return (this->is_zero() || this->Z == alt_bn128_Fq2::one());
+}
+
+bool alt_bn128_G2::is_zero() const
+{
+ return (this->Z.is_zero());
+}
+
+bool alt_bn128_G2::operator==(const alt_bn128_G2 &other) const
+{
+ if (this->is_zero())
+ {
+ return other.is_zero();
+ }
+
+ if (other.is_zero())
+ {
+ return false;
+ }
+
+ /* now neither is O */
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ alt_bn128_Fq2 Z1_squared = (this->Z).squared();
+ alt_bn128_Fq2 Z2_squared = (other.Z).squared();
+
+ if ((this->X * Z2_squared) != (other.X * Z1_squared))
+ {
+ return false;
+ }
+
+ alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1_squared;
+ alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2_squared;
+
+ if ((this->Y * Z2_cubed) != (other.Y * Z1_cubed))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+bool alt_bn128_G2::operator!=(const alt_bn128_G2& other) const
+{
+ return !(operator==(other));
+}
+
+alt_bn128_G2 alt_bn128_G2::operator+(const alt_bn128_G2 &other) const
+{
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // check for doubling case
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ alt_bn128_Fq2 Z1Z1 = (this->Z).squared();
+ alt_bn128_Fq2 Z2Z2 = (other.Z).squared();
+
+ alt_bn128_Fq2 U1 = this->X * Z2Z2;
+ alt_bn128_Fq2 U2 = other.X * Z1Z1;
+
+ alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1;
+ alt_bn128_Fq2 Z2_cubed = (other.Z) * Z2Z2;
+
+ alt_bn128_Fq2 S1 = (this->Y) * Z2_cubed; // S1 = Y1 * Z2 * Z2Z2
+ alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
+
+ if (U1 == U2 && S1 == S2)
+ {
+ // dbl case; nothing of above can be reused
+ return this->dbl();
+ }
+
+ // rest of add case
+ alt_bn128_Fq2 H = U2 - U1; // H = U2-U1
+ alt_bn128_Fq2 S2_minus_S1 = S2-S1;
+ alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2
+ alt_bn128_Fq2 J = H * I; // J = H * I
+ alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
+ alt_bn128_Fq2 V = U1 * I; // V = U1 * I
+ alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
+ alt_bn128_Fq2 S1_J = S1 * J;
+ alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
+ alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
+
+ return alt_bn128_G2(X3, Y3, Z3);
+}
+
+alt_bn128_G2 alt_bn128_G2::operator-() const
+{
+ return alt_bn128_G2(this->X, -(this->Y), this->Z);
+}
+
+
+alt_bn128_G2 alt_bn128_G2::operator-(const alt_bn128_G2 &other) const
+{
+ return (*this) + (-other);
+}
+
+alt_bn128_G2 alt_bn128_G2::add(const alt_bn128_G2 &other) const
+{
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // handle double case
+ if (this->operator==(other))
+ {
+ return this->dbl();
+ }
+
+#ifdef PROFILE_OP_COUNTS
+ this->add_cnt++;
+#endif
+ // NOTE: does not handle O and pts of order 2,4
+    // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+
+ alt_bn128_Fq2 Z1Z1 = (this->Z).squared(); // Z1Z1 = Z1^2
+ alt_bn128_Fq2 Z2Z2 = (other.Z).squared(); // Z2Z2 = Z2^2
+ alt_bn128_Fq2 U1 = (this->X) * Z2Z2; // U1 = X1 * Z2Z2
+ alt_bn128_Fq2 U2 = (other.X) * Z1Z1; // U2 = X2 * Z1Z1
+ alt_bn128_Fq2 S1 = (this->Y) * (other.Z) * Z2Z2; // S1 = Y1 * Z2 * Z2Z2
+ alt_bn128_Fq2 S2 = (other.Y) * (this->Z) * Z1Z1; // S2 = Y2 * Z1 * Z1Z1
+ alt_bn128_Fq2 H = U2 - U1; // H = U2-U1
+ alt_bn128_Fq2 S2_minus_S1 = S2-S1;
+ alt_bn128_Fq2 I = (H+H).squared(); // I = (2 * H)^2
+ alt_bn128_Fq2 J = H * I; // J = H * I
+ alt_bn128_Fq2 r = S2_minus_S1 + S2_minus_S1; // r = 2 * (S2-S1)
+ alt_bn128_Fq2 V = U1 * I; // V = U1 * I
+ alt_bn128_Fq2 X3 = r.squared() - J - (V+V); // X3 = r^2 - J - 2 * V
+ alt_bn128_Fq2 S1_J = S1 * J;
+ alt_bn128_Fq2 Y3 = r * (V-X3) - (S1_J+S1_J); // Y3 = r * (V-X3)-2 S1 J
+ alt_bn128_Fq2 Z3 = ((this->Z+other.Z).squared()-Z1Z1-Z2Z2) * H; // Z3 = ((Z1+Z2)^2-Z1Z1-Z2Z2) * H
+
+ return alt_bn128_G2(X3, Y3, Z3);
+}
+
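+// Mixed addition for G2: as in G1, the other operand must be in affine (Z = 1) form.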
+alt_bn128_G2 alt_bn128_G2::mixed_add(const alt_bn128_G2 &other) const
+{
+#ifdef DEBUG
+ assert(other.is_special());
+#endif
+
+ // handle special cases having to do with O
+ if (this->is_zero())
+ {
+ return other;
+ }
+
+ if (other.is_zero())
+ {
+ return *this;
+ }
+
+ // no need to handle points of order 2,4
+ // (they cannot exist in a prime-order subgroup)
+
+ // check for doubling case
+
+ // using Jacobian coordinates so:
+ // (X1:Y1:Z1) = (X2:Y2:Z2)
+ // iff
+ // X1/Z1^2 == X2/Z2^2 and Y1/Z1^3 == Y2/Z2^3
+ // iff
+ // X1 * Z2^2 == X2 * Z1^2 and Y1 * Z2^3 == Y2 * Z1^3
+
+ // we know that Z2 = 1
+
+ const alt_bn128_Fq2 Z1Z1 = (this->Z).squared();
+
+ const alt_bn128_Fq2 &U1 = this->X;
+ const alt_bn128_Fq2 U2 = other.X * Z1Z1;
+
+ const alt_bn128_Fq2 Z1_cubed = (this->Z) * Z1Z1;
+
+ const alt_bn128_Fq2 &S1 = (this->Y); // S1 = Y1 * Z2 * Z2Z2
+ const alt_bn128_Fq2 S2 = (other.Y) * Z1_cubed; // S2 = Y2 * Z1 * Z1Z1
+
+ if (U1 == U2 && S1 == S2)
+ {
+ // dbl case; nothing of above can be reused
+ return this->dbl();
+ }
+
+#ifdef PROFILE_OP_COUNTS
+ this->add_cnt++;
+#endif
+
+ // NOTE: does not handle O and pts of order 2,4
+ // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl
+ alt_bn128_Fq2 H = U2-(this->X); // H = U2-X1
+    alt_bn128_Fq2 HH = H.squared();                     // HH = H^2
+ alt_bn128_Fq2 I = HH+HH; // I = 4*HH
+ I = I + I;
+ alt_bn128_Fq2 J = H*I; // J = H*I
+ alt_bn128_Fq2 r = S2-(this->Y); // r = 2*(S2-Y1)
+ r = r + r;
+ alt_bn128_Fq2 V = (this->X) * I ; // V = X1*I
+ alt_bn128_Fq2 X3 = r.squared()-J-V-V; // X3 = r^2-J-2*V
+ alt_bn128_Fq2 Y3 = (this->Y)*J; // Y3 = r*(V-X3)-2*Y1*J
+ Y3 = r*(V-X3) - Y3 - Y3;
+ alt_bn128_Fq2 Z3 = ((this->Z)+H).squared() - Z1Z1 - HH; // Z3 = (Z1+H)^2-Z1Z1-HH
+
+ return alt_bn128_G2(X3, Y3, Z3);
+}
+
+alt_bn128_G2 alt_bn128_G2::dbl() const
+{
+#ifdef PROFILE_OP_COUNTS
+ this->dbl_cnt++;
+#endif
+ // handle point at infinity
+ if (this->is_zero())
+ {
+ return (*this);
+ }
+
+ // NOTE: does not handle O and pts of order 2,4
+    // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+
+ alt_bn128_Fq2 A = (this->X).squared(); // A = X1^2
+ alt_bn128_Fq2 B = (this->Y).squared(); // B = Y1^2
+ alt_bn128_Fq2 C = B.squared(); // C = B^2
+ alt_bn128_Fq2 D = (this->X + B).squared() - A - C;
+ D = D+D; // D = 2 * ((X1 + B)^2 - A - C)
+ alt_bn128_Fq2 E = A + A + A; // E = 3 * A
+ alt_bn128_Fq2 F = E.squared(); // F = E^2
+ alt_bn128_Fq2 X3 = F - (D+D); // X3 = F - 2 D
+ alt_bn128_Fq2 eightC = C+C;
+ eightC = eightC + eightC;
+ eightC = eightC + eightC;
+ alt_bn128_Fq2 Y3 = E * (D - X3) - eightC; // Y3 = E * (D - X3) - 8 * C
+ alt_bn128_Fq2 Y1Z1 = (this->Y)*(this->Z);
+ alt_bn128_Fq2 Z3 = Y1Z1 + Y1Z1; // Z3 = 2 * Y1 * Z1
+
+ return alt_bn128_G2(X3, Y3, Z3);
+}
+
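+// Applies the q-power Frobenius endomorphism to a point on the twist: a
+// coordinate-wise Frobenius map followed by multiplication of X and Y by the
+// precomputed twist constants; used in the ate pairing precomputation.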
+alt_bn128_G2 alt_bn128_G2::mul_by_q() const
+{
+ return alt_bn128_G2(alt_bn128_twist_mul_by_q_X * (this->X).Frobenius_map(1),
+ alt_bn128_twist_mul_by_q_Y * (this->Y).Frobenius_map(1),
+ (this->Z).Frobenius_map(1));
+}
+
+bool alt_bn128_G2::is_well_formed() const
+{
+ if (this->is_zero())
+ {
+ return true;
+ }
+ else
+ {
+ /*
+ y^2 = x^3 + b
+
+      We are using Jacobian coordinates, so the equation we need to check is actually
+
+ (y/z^3)^2 = (x/z^2)^3 + b
+ y^2 / z^6 = x^3 / z^6 + b
+ y^2 = x^3 + b z^6
+ */
+ alt_bn128_Fq2 X2 = this->X.squared();
+ alt_bn128_Fq2 Y2 = this->Y.squared();
+ alt_bn128_Fq2 Z2 = this->Z.squared();
+
+ alt_bn128_Fq2 X3 = this->X * X2;
+ alt_bn128_Fq2 Z3 = this->Z * Z2;
+ alt_bn128_Fq2 Z6 = Z3.squared();
+
+ return (Y2 == X3 + alt_bn128_twist_coeff_b * Z6);
+ }
+}
+
+alt_bn128_G2 alt_bn128_G2::zero()
+{
+ return G2_zero;
+}
+
+alt_bn128_G2 alt_bn128_G2::one()
+{
+ return G2_one;
+}
+
+alt_bn128_G2 alt_bn128_G2::random_element()
+{
+ return (alt_bn128_Fr::random_element().as_bigint()) * G2_one;
+}
+
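+// Same compressed encoding as G1, except that the sign bit is taken from the
+// c0 component of Y (an Fq2 element).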
+std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g)
+{
+ alt_bn128_G2 copy(g);
+ copy.to_affine_coordinates();
+ out << (copy.is_zero() ? 1 : 0) << OUTPUT_SEPARATOR;
+#ifdef NO_PT_COMPRESSION
+ out << copy.X << OUTPUT_SEPARATOR << copy.Y;
+#else
+ /* storing LSB of Y */
+ out << copy.X << OUTPUT_SEPARATOR << (copy.Y.c0.as_bigint().data[0] & 1);
+#endif
+
+ return out;
+}
+
+std::istream& operator>>(std::istream &in, alt_bn128_G2 &g)
+{
+ char is_zero;
+ alt_bn128_Fq2 tX, tY;
+
+#ifdef NO_PT_COMPRESSION
+ in >> is_zero >> tX >> tY;
+ is_zero -= '0';
+#else
+ in.read((char*)&is_zero, 1); // this reads is_zero;
+ is_zero -= '0';
+ consume_OUTPUT_SEPARATOR(in);
+
+ unsigned char Y_lsb;
+ in >> tX;
+ consume_OUTPUT_SEPARATOR(in);
+ in.read((char*)&Y_lsb, 1);
+ Y_lsb -= '0';
+
+ // y = +/- sqrt(x^3 + b)
+ if (!is_zero)
+ {
+ alt_bn128_Fq2 tX2 = tX.squared();
+ alt_bn128_Fq2 tY2 = tX2 * tX + alt_bn128_twist_coeff_b;
+ tY = tY2.sqrt();
+
+ if ((tY.c0.as_bigint().data[0] & 1) != Y_lsb)
+ {
+ tY = -tY;
+ }
+ }
+#endif
+    // using Jacobian coordinates
+ if (!is_zero)
+ {
+ g.X = tX;
+ g.Y = tY;
+ g.Z = alt_bn128_Fq2::one();
+ }
+ else
+ {
+ g = alt_bn128_G2::zero();
+ }
+
+ return in;
+}
+
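+// Same batched affine conversion as for G1: invert all Z coordinates at once,
+// then normalize each point to Z = 1.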
+template<>
+void batch_to_special_all_non_zeros<alt_bn128_G2>(std::vector<alt_bn128_G2> &vec)
+{
+ std::vector<alt_bn128_Fq2> Z_vec;
+ Z_vec.reserve(vec.size());
+
+ for (auto &el: vec)
+ {
+ Z_vec.emplace_back(el.Z);
+ }
+ batch_invert<alt_bn128_Fq2>(Z_vec);
+
+ const alt_bn128_Fq2 one = alt_bn128_Fq2::one();
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ alt_bn128_Fq2 Z2 = Z_vec[i].squared();
+ alt_bn128_Fq2 Z3 = Z_vec[i] * Z2;
+
+ vec[i].X = vec[i].X * Z2;
+ vec[i].Y = vec[i].Y * Z3;
+ vec[i].Z = one;
+ }
+}
+
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ALT_BN128_G2_HPP_
+#define ALT_BN128_G2_HPP_
+#include <vector>
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+#include "algebra/curves/curve_utils.hpp"
+
+namespace libsnark {
+
+class alt_bn128_G2;
+std::ostream& operator<<(std::ostream &, const alt_bn128_G2&);
+std::istream& operator>>(std::istream &, alt_bn128_G2&);
+
+class alt_bn128_G2 {
+public:
+#ifdef PROFILE_OP_COUNTS
+ static long long add_cnt;
+ static long long dbl_cnt;
+#endif
+ static std::vector<size_t> wnaf_window_table;
+ static std::vector<size_t> fixed_base_exp_window_table;
+ static alt_bn128_G2 G2_zero;
+ static alt_bn128_G2 G2_one;
+
+ typedef alt_bn128_Fq base_field;
+ typedef alt_bn128_Fq2 twist_field;
+ typedef alt_bn128_Fr scalar_field;
+
+ alt_bn128_Fq2 X, Y, Z;
+
+ // using Jacobian coordinates
+ alt_bn128_G2();
+ alt_bn128_G2(const alt_bn128_Fq2& X, const alt_bn128_Fq2& Y, const alt_bn128_Fq2& Z) : X(X), Y(Y), Z(Z) {};
+
+ static alt_bn128_Fq2 mul_by_b(const alt_bn128_Fq2 &elt);
+
+ void print() const;
+ void print_coordinates() const;
+
+ void to_affine_coordinates();
+ void to_special();
+ bool is_special() const;
+
+ bool is_zero() const;
+
+ bool operator==(const alt_bn128_G2 &other) const;
+ bool operator!=(const alt_bn128_G2 &other) const;
+
+ alt_bn128_G2 operator+(const alt_bn128_G2 &other) const;
+ alt_bn128_G2 operator-() const;
+ alt_bn128_G2 operator-(const alt_bn128_G2 &other) const;
+
+ alt_bn128_G2 add(const alt_bn128_G2 &other) const;
+ alt_bn128_G2 mixed_add(const alt_bn128_G2 &other) const;
+ alt_bn128_G2 dbl() const;
+ alt_bn128_G2 mul_by_q() const;
+
+ bool is_well_formed() const;
+
+ static alt_bn128_G2 zero();
+ static alt_bn128_G2 one();
+ static alt_bn128_G2 random_element();
+
+ static size_t size_in_bits() { return twist_field::size_in_bits() + 1; }
+ static bigint<base_field::num_limbs> base_field_char() { return base_field::field_char(); }
+ static bigint<scalar_field::num_limbs> order() { return scalar_field::field_char(); }
+
+ friend std::ostream& operator<<(std::ostream &out, const alt_bn128_G2 &g);
+ friend std::istream& operator>>(std::istream &in, alt_bn128_G2 &g);
+};
+
+template<mp_size_t m>
+alt_bn128_G2 operator*(const bigint<m> &lhs, const alt_bn128_G2 &rhs)
+{
+ return scalar_mul<alt_bn128_G2, m>(rhs, lhs);
+}
+
+template<mp_size_t m, const bigint<m>& modulus_p>
+alt_bn128_G2 operator*(const Fp_model<m,modulus_p> &lhs, const alt_bn128_G2 &rhs)
+{
+ return scalar_mul<alt_bn128_G2, m>(rhs, lhs.as_bigint());
+}
+
+template<typename T>
+void batch_to_special_all_non_zeros(std::vector<T> &vec);
+template<>
+void batch_to_special_all_non_zeros<alt_bn128_G2>(std::vector<alt_bn128_G2> &vec);
+
+} // libsnark
+#endif // ALT_BN128_G2_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
+
+namespace libsnark {
+
+bigint<alt_bn128_r_limbs> alt_bn128_modulus_r;
+bigint<alt_bn128_q_limbs> alt_bn128_modulus_q;
+
+alt_bn128_Fq alt_bn128_coeff_b;
+alt_bn128_Fq2 alt_bn128_twist;
+alt_bn128_Fq2 alt_bn128_twist_coeff_b;
+alt_bn128_Fq alt_bn128_twist_mul_by_b_c0;
+alt_bn128_Fq alt_bn128_twist_mul_by_b_c1;
+alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X;
+alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y;
+
+bigint<alt_bn128_q_limbs> alt_bn128_ate_loop_count;
+bool alt_bn128_ate_is_loop_count_neg;
+bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent;
+bigint<alt_bn128_q_limbs> alt_bn128_final_exponent_z;
+bool alt_bn128_final_exponent_is_z_neg;
+
+void init_alt_bn128_params()
+{
+ typedef bigint<alt_bn128_r_limbs> bigint_r;
+ typedef bigint<alt_bn128_q_limbs> bigint_q;
+
+ assert(sizeof(mp_limb_t) == 8 || sizeof(mp_limb_t) == 4); // Montgomery assumes this
+
+ /* parameters for scalar field Fr */
+
+ alt_bn128_modulus_r = bigint_r("21888242871839275222246405745257275088548364400416034343698204186575808495617");
+ assert(alt_bn128_Fr::modulus_is_valid());
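+    // Montgomery-form constants for Fp_model: Rsquared = R^2 mod r and
+    // Rcubed = R^3 mod r (with R determined by the limb width and count), and
+    // inv is the per-word constant used in Montgomery reduction, hence the
+    // word-size-dependent branches below.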
+ if (sizeof(mp_limb_t) == 8)
+ {
+ alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783");
+ alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512");
+ alt_bn128_Fr::inv = 0xc2e1f593efffffff;
+ }
+ if (sizeof(mp_limb_t) == 4)
+ {
+ alt_bn128_Fr::Rsquared = bigint_r("944936681149208446651664254269745548490766851729442924617792859073125903783");
+ alt_bn128_Fr::Rcubed = bigint_r("5866548545943845227489894872040244720403868105578784105281690076696998248512");
+ alt_bn128_Fr::inv = 0xefffffff;
+ }
+ alt_bn128_Fr::num_bits = 254;
+ alt_bn128_Fr::euler = bigint_r("10944121435919637611123202872628637544274182200208017171849102093287904247808");
+ alt_bn128_Fr::s = 28;
+ alt_bn128_Fr::t = bigint_r("81540058820840996586704275553141814055101440848469862132140264610111");
+ alt_bn128_Fr::t_minus_1_over_2 = bigint_r("40770029410420498293352137776570907027550720424234931066070132305055");
+ alt_bn128_Fr::multiplicative_generator = alt_bn128_Fr("5");
+ alt_bn128_Fr::root_of_unity = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904");
+ alt_bn128_Fr::nqr = alt_bn128_Fr("5");
+ alt_bn128_Fr::nqr_to_t = alt_bn128_Fr("19103219067921713944291392827692070036145651957329286315305642004821462161904");
+
+ /* parameters for base field Fq */
+
+ alt_bn128_modulus_q = bigint_q("21888242871839275222246405745257275088696311157297823662689037894645226208583");
+ assert(alt_bn128_Fq::modulus_is_valid());
+ if (sizeof(mp_limb_t) == 8)
+ {
+ alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209");
+ alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183");
+ alt_bn128_Fq::inv = 0x87d20782e4866389;
+ }
+ if (sizeof(mp_limb_t) == 4)
+ {
+ alt_bn128_Fq::Rsquared = bigint_q("3096616502983703923843567936837374451735540968419076528771170197431451843209");
+ alt_bn128_Fq::Rcubed = bigint_q("14921786541159648185948152738563080959093619838510245177710943249661917737183");
+ alt_bn128_Fq::inv = 0xe4866389;
+ }
+ alt_bn128_Fq::num_bits = 254;
+ alt_bn128_Fq::euler = bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291");
+ alt_bn128_Fq::s = 1;
+ alt_bn128_Fq::t = bigint_q("10944121435919637611123202872628637544348155578648911831344518947322613104291");
+ alt_bn128_Fq::t_minus_1_over_2 = bigint_q("5472060717959818805561601436314318772174077789324455915672259473661306552145");
+ alt_bn128_Fq::multiplicative_generator = alt_bn128_Fq("3");
+ alt_bn128_Fq::root_of_unity = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
+ alt_bn128_Fq::nqr = alt_bn128_Fq("3");
+ alt_bn128_Fq::nqr_to_t = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
+
+ /* parameters for twist field Fq2 */
+ alt_bn128_Fq2::euler = bigint<2*alt_bn128_q_limbs>("239547588008311421220994022608339370399626158265550411218223901127035046843189118723920525909718935985594116157406550130918127817069793474323196511433944");
+ alt_bn128_Fq2::s = 4;
+ alt_bn128_Fq2::t = bigint<2*alt_bn128_q_limbs>("29943448501038927652624252826042421299953269783193801402277987640879380855398639840490065738714866998199264519675818766364765977133724184290399563929243");
+ alt_bn128_Fq2::t_minus_1_over_2 = bigint<2*alt_bn128_q_limbs>("14971724250519463826312126413021210649976634891596900701138993820439690427699319920245032869357433499099632259837909383182382988566862092145199781964621");
+ alt_bn128_Fq2::non_residue = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
+ alt_bn128_Fq2::nqr = alt_bn128_Fq2(alt_bn128_Fq("2"),alt_bn128_Fq("1"));
+ alt_bn128_Fq2::nqr_to_t = alt_bn128_Fq2(alt_bn128_Fq("5033503716262624267312492558379982687175200734934877598599011485707452665730"),alt_bn128_Fq("314498342015008975724433667930697407966947188435857772134235984660852259084"));
+ alt_bn128_Fq2::Frobenius_coeffs_c1[0] = alt_bn128_Fq("1");
+ alt_bn128_Fq2::Frobenius_coeffs_c1[1] = alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582");
+
+ /* parameters for Fq6 */
+ alt_bn128_Fq6::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"),alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("3772000881919853776433695186713858239009073593817195771773381919316419345261"),alt_bn128_Fq("2236595495967245188281701248203181795121068902605861227855261137820944008926"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("18429021223477853657660792034369865839114504446431234726392080002137598044644"),alt_bn128_Fq("9344045779998320333812420223237981029506012124075525679208581902008406485703"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[1] = alt_bn128_Fq2(alt_bn128_Fq("2581911344467009335267311115468803099551665605076196740867805258568234346338"),alt_bn128_Fq("19937756971775647987995932169929341994314640652964949448313374472400716661030"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[2] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[3] = alt_bn128_Fq2(alt_bn128_Fq("5324479202449903542726783395506214481928257762400643279780343368557297135718"),alt_bn128_Fq("16208900380737693084919495127334387981393726419856888799917914180988844123039"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
+ alt_bn128_Fq6::Frobenius_coeffs_c2[5] = alt_bn128_Fq2(alt_bn128_Fq("13981852324922362344252311234282257507216387789820983642040889267519694726527"),alt_bn128_Fq("7629828391165209371577384193250820201684255241773809077146787135900891633097"));
+
+ /* parameters for Fq12 */
+
+ alt_bn128_Fq12::non_residue = alt_bn128_Fq2(alt_bn128_Fq("9"),alt_bn128_Fq("1"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[0] = alt_bn128_Fq2(alt_bn128_Fq("1"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[1] = alt_bn128_Fq2(alt_bn128_Fq("8376118865763821496583973867626364092589906065868298776909617916018768340080"),alt_bn128_Fq("16469823323077808223889137241176536799009286646108169935659301613961712198316"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[2] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556617"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[3] = alt_bn128_Fq2(alt_bn128_Fq("11697423496358154304825782922584725312912383441159505038794027105778954184319"),alt_bn128_Fq("303847389135065887422783454877609941456349188919719272345083954437860409601"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[4] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275220042445260109153167277707414472061641714758635765020556616"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[5] = alt_bn128_Fq2(alt_bn128_Fq("3321304630594332808241809054958361220322477375291206261884409189760185844239"),alt_bn128_Fq("5722266937896532885780051958958348231143373700109372999374820235121374419868"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[6] = alt_bn128_Fq2(alt_bn128_Fq("21888242871839275222246405745257275088696311157297823662689037894645226208582"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[7] = alt_bn128_Fq2(alt_bn128_Fq("13512124006075453725662431877630910996106405091429524885779419978626457868503"),alt_bn128_Fq("5418419548761466998357268504080738289687024511189653727029736280683514010267"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[8] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651966"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[9] = alt_bn128_Fq2(alt_bn128_Fq("10190819375481120917420622822672549775783927716138318623895010788866272024264"),alt_bn128_Fq("21584395482704209334823622290379665147239961968378104390343953940207365798982"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[10] = alt_bn128_Fq2(alt_bn128_Fq("2203960485148121921418603742825762020974279258880205651967"),alt_bn128_Fq("0"));
+ alt_bn128_Fq12::Frobenius_coeffs_c1[11] = alt_bn128_Fq2(alt_bn128_Fq("18566938241244942414004596690298913868373833782006617400804628704885040364344"),alt_bn128_Fq("16165975933942742336466353786298926857552937457188450663314217659523851788715"));
+
+ /* choice of short Weierstrass curve and its twist */
+
+ alt_bn128_coeff_b = alt_bn128_Fq("3");
+ alt_bn128_twist = alt_bn128_Fq2(alt_bn128_Fq("9"), alt_bn128_Fq("1"));
+ alt_bn128_twist_coeff_b = alt_bn128_coeff_b * alt_bn128_twist.inverse();
+ alt_bn128_twist_mul_by_b_c0 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue;
+ alt_bn128_twist_mul_by_b_c1 = alt_bn128_coeff_b * alt_bn128_Fq2::non_residue;
+ alt_bn128_twist_mul_by_q_X = alt_bn128_Fq2(alt_bn128_Fq("21575463638280843010398324269430826099269044274347216827212613867836435027261"),
+ alt_bn128_Fq("10307601595873709700152284273816112264069230130616436755625194854815875713954"));
+ alt_bn128_twist_mul_by_q_Y = alt_bn128_Fq2(alt_bn128_Fq("2821565182194536844548159561693502659359617185244120367078079554186484126554"),
+ alt_bn128_Fq("3505843767911556378687030309984248845540243509899259641013678093033130930403"));
+
+ /* choice of group G1 */
+ alt_bn128_G1::G1_zero = alt_bn128_G1(alt_bn128_Fq::zero(),
+ alt_bn128_Fq::one(),
+ alt_bn128_Fq::zero());
+ alt_bn128_G1::G1_one = alt_bn128_G1(alt_bn128_Fq("1"),
+ alt_bn128_Fq("2"),
+ alt_bn128_Fq::one());
+ alt_bn128_G1::wnaf_window_table.push_back(11);
+ alt_bn128_G1::wnaf_window_table.push_back(24);
+ alt_bn128_G1::wnaf_window_table.push_back(60);
+ alt_bn128_G1::wnaf_window_table.push_back(127);
+
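+    // Empirically tuned table: entry i is the threshold (matching the intervals
+    // in the comments below) above which window size i+1 becomes the best choice
+    // for fixed-base exponentiation; 0 marks window sizes that are never optimal.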
+ alt_bn128_G1::fixed_base_exp_window_table.resize(0);
+ // window 1 is unbeaten in [-inf, 4.99]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(1);
+ // window 2 is unbeaten in [4.99, 10.99]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(5);
+ // window 3 is unbeaten in [10.99, 32.29]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(11);
+ // window 4 is unbeaten in [32.29, 55.23]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(32);
+ // window 5 is unbeaten in [55.23, 162.03]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(55);
+ // window 6 is unbeaten in [162.03, 360.15]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(162);
+ // window 7 is unbeaten in [360.15, 815.44]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(360);
+ // window 8 is unbeaten in [815.44, 2373.07]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(815);
+ // window 9 is unbeaten in [2373.07, 6977.75]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(2373);
+ // window 10 is unbeaten in [6977.75, 7122.23]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(6978);
+ // window 11 is unbeaten in [7122.23, 57818.46]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(7122);
+ // window 12 is never the best
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
+ // window 13 is unbeaten in [57818.46, 169679.14]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(57818);
+ // window 14 is never the best
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
+ // window 15 is unbeaten in [169679.14, 439758.91]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(169679);
+ // window 16 is unbeaten in [439758.91, 936073.41]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(439759);
+ // window 17 is unbeaten in [936073.41, 4666554.74]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(936073);
+ // window 18 is never the best
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
+ // window 19 is unbeaten in [4666554.74, 7580404.42]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(4666555);
+ // window 20 is unbeaten in [7580404.42, 34552892.20]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(7580404);
+ // window 21 is never the best
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(0);
+ // window 22 is unbeaten in [34552892.20, inf]
+ alt_bn128_G1::fixed_base_exp_window_table.push_back(34552892);
+
+ /* choice of group G2 */
+
+ alt_bn128_G2::G2_zero = alt_bn128_G2(alt_bn128_Fq2::zero(),
+ alt_bn128_Fq2::one(),
+ alt_bn128_Fq2::zero());
+
+ alt_bn128_G2::G2_one = alt_bn128_G2(alt_bn128_Fq2(alt_bn128_Fq("10857046999023057135944570762232829481370756359578518086990519993285655852781"),
+ alt_bn128_Fq("11559732032986387107991004021392285783925812861821192530917403151452391805634")),
+ alt_bn128_Fq2(alt_bn128_Fq("8495653923123431417604973247489272438418190587263600148770280649306958101930"),
+ alt_bn128_Fq("4082367875863433681332203403145435568316851327593401208105741076214120093531")),
+ alt_bn128_Fq2::one());
+ alt_bn128_G2::wnaf_window_table.push_back(5);
+ alt_bn128_G2::wnaf_window_table.push_back(15);
+ alt_bn128_G2::wnaf_window_table.push_back(39);
+ alt_bn128_G2::wnaf_window_table.push_back(109);
+
+ alt_bn128_G2::fixed_base_exp_window_table.resize(0);
+ // window 1 is unbeaten in [-inf, 5.10]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(1);
+ // window 2 is unbeaten in [5.10, 10.43]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(5);
+ // window 3 is unbeaten in [10.43, 25.28]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(10);
+ // window 4 is unbeaten in [25.28, 59.00]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(25);
+ // window 5 is unbeaten in [59.00, 154.03]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(59);
+ // window 6 is unbeaten in [154.03, 334.25]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(154);
+ // window 7 is unbeaten in [334.25, 742.58]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(334);
+ // window 8 is unbeaten in [742.58, 2034.40]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(743);
+ // window 9 is unbeaten in [2034.40, 4987.56]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(2034);
+ // window 10 is unbeaten in [4987.56, 8888.27]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(4988);
+ // window 11 is unbeaten in [8888.27, 26271.13]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(8888);
+ // window 12 is unbeaten in [26271.13, 39768.20]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(26271);
+ // window 13 is unbeaten in [39768.20, 106275.75]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(39768);
+ // window 14 is unbeaten in [106275.75, 141703.40]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(106276);
+ // window 15 is unbeaten in [141703.40, 462422.97]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(141703);
+ // window 16 is unbeaten in [462422.97, 926871.84]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(462423);
+ // window 17 is unbeaten in [926871.84, 4873049.17]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(926872);
+ // window 18 is never the best
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(0);
+ // window 19 is unbeaten in [4873049.17, 5706707.88]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(4873049);
+ // window 20 is unbeaten in [5706707.88, 31673814.95]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(5706708);
+ // window 21 is never the best
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(0);
+ // window 22 is unbeaten in [31673814.95, inf]
+ alt_bn128_G2::fixed_base_exp_window_table.push_back(31673815);
+
+ /* pairing parameters */
+
+ alt_bn128_ate_loop_count = bigint_q("29793968203157093288");
+ alt_bn128_ate_is_loop_count_neg = false;
+ alt_bn128_final_exponent = bigint<12*alt_bn128_q_limbs>("552484233613224096312617126783173147097382103762957654188882734314196910839907541213974502761540629817009608548654680343627701153829446747810907373256841551006201639677726139946029199968412598804882391702273019083653272047566316584365559776493027495458238373902875937659943504873220554161550525926302303331747463515644711876653177129578303191095900909191624817826566688241804408081892785725967931714097716709526092261278071952560171111444072049229123565057483750161460024353346284167282452756217662335528813519139808291170539072125381230815729071544861602750936964829313608137325426383735122175229541155376346436093930287402089517426973178917569713384748081827255472576937471496195752727188261435633271238710131736096299798168852925540549342330775279877006784354801422249722573783561685179618816480037695005515426162362431072245638324744480");
+ alt_bn128_final_exponent_z = bigint_q("4965661367192848881");
+ alt_bn128_final_exponent_is_z_neg = false;
+
+}
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ALT_BN128_INIT_HPP_
+#define ALT_BN128_INIT_HPP_
+#include "algebra/curves/public_params.hpp"
+#include "algebra/fields/fp.hpp"
+#include "algebra/fields/fp2.hpp"
+#include "algebra/fields/fp6_3over2.hpp"
+#include "algebra/fields/fp12_2over3over2.hpp"
+
+namespace libsnark {
+
+const mp_size_t alt_bn128_r_bitcount = 254;
+const mp_size_t alt_bn128_q_bitcount = 254;
+
+const mp_size_t alt_bn128_r_limbs = (alt_bn128_r_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
+const mp_size_t alt_bn128_q_limbs = (alt_bn128_q_bitcount+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
+
+extern bigint<alt_bn128_r_limbs> alt_bn128_modulus_r;
+extern bigint<alt_bn128_q_limbs> alt_bn128_modulus_q;
+
+typedef Fp_model<alt_bn128_r_limbs, alt_bn128_modulus_r> alt_bn128_Fr;
+typedef Fp_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq;
+typedef Fp2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq2;
+typedef Fp6_3over2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq6;
+typedef Fp12_2over3over2_model<alt_bn128_q_limbs, alt_bn128_modulus_q> alt_bn128_Fq12;
+typedef alt_bn128_Fq12 alt_bn128_GT;
+
+// parameters for Barreto--Naehrig curve E/Fq : y^2 = x^3 + b
+extern alt_bn128_Fq alt_bn128_coeff_b;
+// parameters for twisted Barreto--Naehrig curve E'/Fq2 : y^2 = x^3 + b/xi
+extern alt_bn128_Fq2 alt_bn128_twist;
+extern alt_bn128_Fq2 alt_bn128_twist_coeff_b;
+extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c0;
+extern alt_bn128_Fq alt_bn128_twist_mul_by_b_c1;
+extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_X;
+extern alt_bn128_Fq2 alt_bn128_twist_mul_by_q_Y;
+
+// parameters for pairing
+extern bigint<alt_bn128_q_limbs> alt_bn128_ate_loop_count;
+extern bool alt_bn128_ate_is_loop_count_neg;
+extern bigint<12*alt_bn128_q_limbs> alt_bn128_final_exponent;
+extern bigint<alt_bn128_q_limbs> alt_bn128_final_exponent_z;
+extern bool alt_bn128_final_exponent_is_z_neg;
+
+void init_alt_bn128_params();
+
+class alt_bn128_G1;
+class alt_bn128_G2;
+
+} // libsnark
+#endif // ALT_BN128_INIT_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
+#include <cassert>
+#include "common/profiling.hpp"
+#include "common/assert_except.hpp"
+
+namespace libsnark {
+
+bool alt_bn128_ate_G1_precomp::operator==(const alt_bn128_ate_G1_precomp &other) const
+{
+ return (this->PX == other.PX &&
+ this->PY == other.PY);
+}
+
+std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P)
+{
+ out << prec_P.PX << OUTPUT_SEPARATOR << prec_P.PY;
+
+ return out;
+}
+
+std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P)
+{
+ in >> prec_P.PX;
+ consume_OUTPUT_SEPARATOR(in);
+ in >> prec_P.PY;
+
+ return in;
+}
+
+bool alt_bn128_ate_ell_coeffs::operator==(const alt_bn128_ate_ell_coeffs &other) const
+{
+ return (this->ell_0 == other.ell_0 &&
+ this->ell_VW == other.ell_VW &&
+ this->ell_VV == other.ell_VV);
+}
+
+std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &c)
+{
+ out << c.ell_0 << OUTPUT_SEPARATOR << c.ell_VW << OUTPUT_SEPARATOR << c.ell_VV;
+ return out;
+}
+
+std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &c)
+{
+ in >> c.ell_0;
+ consume_OUTPUT_SEPARATOR(in);
+ in >> c.ell_VW;
+ consume_OUTPUT_SEPARATOR(in);
+ in >> c.ell_VV;
+
+ return in;
+}
+
+bool alt_bn128_ate_G2_precomp::operator==(const alt_bn128_ate_G2_precomp &other) const
+{
+ return (this->QX == other.QX &&
+ this->QY == other.QY &&
+ this->coeffs == other.coeffs);
+}
+
+std::ostream& operator<<(std::ostream& out, const alt_bn128_ate_G2_precomp &prec_Q)
+{
+ out << prec_Q.QX << OUTPUT_SEPARATOR << prec_Q.QY << "\n";
+ out << prec_Q.coeffs.size() << "\n";
+ for (const alt_bn128_ate_ell_coeffs &c : prec_Q.coeffs)
+ {
+ out << c << OUTPUT_NEWLINE;
+ }
+ return out;
+}
+
+std::istream& operator>>(std::istream& in, alt_bn128_ate_G2_precomp &prec_Q)
+{
+ in >> prec_Q.QX;
+ consume_OUTPUT_SEPARATOR(in);
+ in >> prec_Q.QY;
+ consume_newline(in);
+
+ prec_Q.coeffs.clear();
+ size_t s;
+ in >> s;
+
+ consume_newline(in);
+
+ prec_Q.coeffs.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ alt_bn128_ate_ell_coeffs c;
+ in >> c;
+ consume_OUTPUT_NEWLINE(in);
+ prec_Q.coeffs.emplace_back(c);
+ }
+
+ return in;
+}
+
+/* final exponentiations */
+
+alt_bn128_Fq12 alt_bn128_final_exponentiation_first_chunk(const alt_bn128_Fq12 &elt)
+{
+ enter_block("Call to alt_bn128_final_exponentiation_first_chunk");
+
+ /*
+ Computes result = elt^((q^6-1)*(q^2+1)).
+ Follows, e.g., Beuchat et al page 9, by computing result as follows:
+ elt^((q^6-1)*(q^2+1)) = (conj(elt) * elt^(-1))^(q^2+1)
+ More precisely:
+ A = conj(elt)
+ B = elt.inverse()
+ C = A * B
+ D = C.Frobenius_map(2)
+ result = D * C
+ */
+
+ const alt_bn128_Fq12 A = alt_bn128_Fq12(elt.c0,-elt.c1);
+ const alt_bn128_Fq12 B = elt.inverse();
+ const alt_bn128_Fq12 C = A * B;
+ const alt_bn128_Fq12 D = C.Frobenius_map(2);
+ const alt_bn128_Fq12 result = D * C;
+
+ leave_block("Call to alt_bn128_final_exponentiation_first_chunk");
+
+ return result;
+}
+
+alt_bn128_Fq12 alt_bn128_exp_by_neg_z(const alt_bn128_Fq12 &elt)
+{
+ enter_block("Call to alt_bn128_exp_by_neg_z");
+
+ alt_bn128_Fq12 result = elt.cyclotomic_exp(alt_bn128_final_exponent_z);
+ if (!alt_bn128_final_exponent_is_z_neg)
+ {
+ result = result.unitary_inverse();
+ }
+
+ leave_block("Call to alt_bn128_exp_by_neg_z");
+
+ return result;
+}
+
+alt_bn128_Fq12 alt_bn128_final_exponentiation_last_chunk(const alt_bn128_Fq12 &elt)
+{
+ enter_block("Call to alt_bn128_final_exponentiation_last_chunk");
+
+ /*
+ Follows Laura Fuentes-Castaneda et al. "Faster hashing to G2"
+ by computing:
+
+ result = elt^(q^3 * (12*z^3 + 6z^2 + 4z - 1) +
+ q^2 * (12*z^3 + 6z^2 + 6z) +
+ q * (12*z^3 + 6z^2 + 4z) +
+ 1 * (12*z^3 + 12z^2 + 6z + 1))
+ which equals
+
+ result = elt^( 2z * ( 6z^2 + 3z + 1 ) * (q^4 - q^2 + 1)/r ).
+
+ Using the following addition chain:
+
+ A = exp_by_neg_z(elt) // = elt^(-z)
+ B = A^2 // = elt^(-2*z)
+ C = B^2 // = elt^(-4*z)
+ D = C * B // = elt^(-6*z)
+ E = exp_by_neg_z(D) // = elt^(6*z^2)
+ F = E^2 // = elt^(12*z^2)
+ G = exp_by_neg_z(F) // = elt^(-12*z^3)
+ H = conj(D) // = elt^(6*z)
+ I = conj(G) // = elt^(12*z^3)
+ J = I * E // = elt^(12*z^3 + 6*z^2)
+ K = J * H // = elt^(12*z^3 + 6*z^2 + 6*z)
+ L = K * B // = elt^(12*z^3 + 6*z^2 + 4*z)
+ M = K * E // = elt^(12*z^3 + 12*z^2 + 6*z)
+ N = M * elt // = elt^(12*z^3 + 12*z^2 + 6*z + 1)
+ O = L.Frobenius_map(1) // = elt^(q*(12*z^3 + 6*z^2 + 4*z))
+ P = O * N // = elt^(q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
+ Q = K.Frobenius_map(2) // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z))
+ R = Q * P // = elt^(q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
+ S = conj(elt) // = elt^(-1)
+ T = S * L // = elt^(12*z^3 + 6*z^2 + 4*z - 1)
+ U = T.Frobenius_map(3) // = elt^(q^3(12*z^3 + 6*z^2 + 4*z - 1))
+ V = U * R // = elt^(q^3(12*z^3 + 6*z^2 + 4*z - 1) + q^2 * (12*z^3 + 6*z^2 + 6*z) + q*(12*z^3 + 6*z^2 + 4*z) + (12*z^3 + 12*z^2 + 6*z + 1))
+ result = V
+
+ */
+
+ const alt_bn128_Fq12 A = alt_bn128_exp_by_neg_z(elt);
+ const alt_bn128_Fq12 B = A.cyclotomic_squared();
+ const alt_bn128_Fq12 C = B.cyclotomic_squared();
+ const alt_bn128_Fq12 D = C * B;
+ const alt_bn128_Fq12 E = alt_bn128_exp_by_neg_z(D);
+ const alt_bn128_Fq12 F = E.cyclotomic_squared();
+ const alt_bn128_Fq12 G = alt_bn128_exp_by_neg_z(F);
+ const alt_bn128_Fq12 H = D.unitary_inverse();
+ const alt_bn128_Fq12 I = G.unitary_inverse();
+ const alt_bn128_Fq12 J = I * E;
+ const alt_bn128_Fq12 K = J * H;
+ const alt_bn128_Fq12 L = K * B;
+ const alt_bn128_Fq12 M = K * E;
+ const alt_bn128_Fq12 N = M * elt;
+ const alt_bn128_Fq12 O = L.Frobenius_map(1);
+ const alt_bn128_Fq12 P = O * N;
+ const alt_bn128_Fq12 Q = K.Frobenius_map(2);
+ const alt_bn128_Fq12 R = Q * P;
+ const alt_bn128_Fq12 S = elt.unitary_inverse();
+ const alt_bn128_Fq12 T = S * L;
+ const alt_bn128_Fq12 U = T.Frobenius_map(3);
+ const alt_bn128_Fq12 V = U * R;
+
+ const alt_bn128_Fq12 result = V;
+
+ leave_block("Call to alt_bn128_final_exponentiation_last_chunk");
+
+ return result;
+}
+
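+/*
+ The final exponentiation raises the Miller loop output to the power
+ (q^12-1)/r. Since q^12-1 = (q^6-1) * (q^2+1) * (q^4-q^2+1) and r divides
+ q^4-q^2+1, the computation is split into an "easy" part (q^6-1)*(q^2+1),
+ handled by alt_bn128_final_exponentiation_first_chunk above, and a "hard"
+ part, handled by alt_bn128_final_exponentiation_last_chunk (which, per its
+ comment, computes a fixed multiple 2z*(6z^2+3z+1) of the hard exponent
+ (q^4-q^2+1)/r; this changes the pairing only by a fixed power and thus
+ preserves bilinearity and non-degeneracy).
+ */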
+alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt)
+{
+ enter_block("Call to alt_bn128_final_exponentiation");
+ /* OLD naive version:
+ alt_bn128_GT result = elt^alt_bn128_final_exponent;
+ */
+ alt_bn128_Fq12 A = alt_bn128_final_exponentiation_first_chunk(elt);
+ alt_bn128_GT result = alt_bn128_final_exponentiation_last_chunk(A);
+
+ leave_block("Call to alt_bn128_final_exponentiation");
+ return result;
+}
+
+/* ate pairing */
+
+void doubling_step_for_flipped_miller_loop(const alt_bn128_Fq two_inv,
+ alt_bn128_G2 &current,
+ alt_bn128_ate_ell_coeffs &c)
+{
+ const alt_bn128_Fq2 X = current.X, Y = current.Y, Z = current.Z;
+
+ const alt_bn128_Fq2 A = two_inv * (X * Y); // A = X1 * Y1 / 2
+ const alt_bn128_Fq2 B = Y.squared(); // B = Y1^2
+ const alt_bn128_Fq2 C = Z.squared(); // C = Z1^2
+ const alt_bn128_Fq2 D = C+C+C; // D = 3 * C
+ const alt_bn128_Fq2 E = alt_bn128_twist_coeff_b * D; // E = twist_b * D
+ const alt_bn128_Fq2 F = E+E+E; // F = 3 * E
+ const alt_bn128_Fq2 G = two_inv * (B+F); // G = (B+F)/2
+ const alt_bn128_Fq2 H = (Y+Z).squared() - (B+C); // H = (Y1+Z1)^2-(B+C)
+ const alt_bn128_Fq2 I = E-B; // I = E-B
+ const alt_bn128_Fq2 J = X.squared(); // J = X1^2
+ const alt_bn128_Fq2 E_squared = E.squared(); // E_squared = E^2
+
+ current.X = A * (B-F); // X3 = A * (B-F)
+ current.Y = G.squared() - (E_squared+E_squared+E_squared); // Y3 = G^2 - 3*E^2
+ current.Z = B * H; // Z3 = B * H
+ c.ell_0 = alt_bn128_twist * I; // ell_0 = xi * I
+ c.ell_VW = -H; // ell_VW = - H (later: * yP)
+ c.ell_VV = J+J+J; // ell_VV = 3*J (later: * xP)
+}
+
+void mixed_addition_step_for_flipped_miller_loop(const alt_bn128_G2 base,
+ alt_bn128_G2 &current,
+ alt_bn128_ate_ell_coeffs &c)
+{
+ const alt_bn128_Fq2 X1 = current.X, Y1 = current.Y, Z1 = current.Z;
+ const alt_bn128_Fq2 &x2 = base.X, &y2 = base.Y;
+
+ const alt_bn128_Fq2 D = X1 - x2 * Z1; // D = X1 - X2*Z1
+ const alt_bn128_Fq2 E = Y1 - y2 * Z1; // E = Y1 - Y2*Z1
+ const alt_bn128_Fq2 F = D.squared(); // F = D^2
+ const alt_bn128_Fq2 G = E.squared(); // G = E^2
+ const alt_bn128_Fq2 H = D*F; // H = D*F
+ const alt_bn128_Fq2 I = X1 * F; // I = X1 * F
+ const alt_bn128_Fq2 J = H + Z1*G - (I+I); // J = H + Z1*G - (I+I)
+
+ current.X = D * J; // X3 = D*J
+ current.Y = E * (I-J)-(H * Y1); // Y3 = E*(I-J)-(H*Y1)
+ current.Z = Z1 * H; // Z3 = Z1*H
+ c.ell_0 = alt_bn128_twist * (E * x2 - D * y2); // ell_0 = xi * (E * X2 - D * Y2)
+ c.ell_VV = - E; // ell_VV = - E (later: * xP)
+ c.ell_VW = D; // ell_VW = D (later: * yP )
+}
+
+alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P)
+{
+ enter_block("Call to alt_bn128_ate_precompute_G1");
+
+ alt_bn128_G1 Pcopy = P;
+ Pcopy.to_affine_coordinates();
+
+ alt_bn128_ate_G1_precomp result;
+ result.PX = Pcopy.X;
+ result.PY = Pcopy.Y;
+
+ leave_block("Call to alt_bn128_ate_precompute_G1");
+ return result;
+}
+
+alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q)
+{
+ enter_block("Call to alt_bn128_ate_precompute_G2");
+
+ alt_bn128_G2 Qcopy(Q);
+ Qcopy.to_affine_coordinates();
+
+ alt_bn128_Fq two_inv = (alt_bn128_Fq("2").inverse()); // could add to global params if needed
+
+ alt_bn128_ate_G2_precomp result;
+ result.QX = Qcopy.X;
+ result.QY = Qcopy.Y;
+
+ alt_bn128_G2 R;
+ R.X = Qcopy.X;
+ R.Y = Qcopy.Y;
+ R.Z = alt_bn128_Fq2::one();
+
+ const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
+ bool found_one = false;
+ alt_bn128_ate_ell_coeffs c;
+
+ for (long i = loop_count.max_bits(); i >= 0; --i)
+ {
+ const bool bit = loop_count.test_bit(i);
+ if (!found_one)
+ {
+ /* this skips the MSB itself */
+ found_one |= bit;
+ continue;
+ }
+
+ doubling_step_for_flipped_miller_loop(two_inv, R, c);
+ result.coeffs.push_back(c);
+
+ if (bit)
+ {
+ mixed_addition_step_for_flipped_miller_loop(Qcopy, R, c);
+ result.coeffs.push_back(c);
+ }
+ }
+
+ alt_bn128_G2 Q1 = Qcopy.mul_by_q();
+ assert_except(Q1.Z == alt_bn128_Fq2::one());
+ alt_bn128_G2 Q2 = Q1.mul_by_q();
+ assert_except(Q2.Z == alt_bn128_Fq2::one());
+
+ if (alt_bn128_ate_is_loop_count_neg)
+ {
+ R.Y = - R.Y;
+ }
+ Q2.Y = - Q2.Y;
+
+ mixed_addition_step_for_flipped_miller_loop(Q1, R, c);
+ result.coeffs.push_back(c);
+
+ mixed_addition_step_for_flipped_miller_loop(Q2, R, c);
+ result.coeffs.push_back(c);
+
+ leave_block("Call to alt_bn128_ate_precompute_G2");
+ return result;
+}
+
+alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P,
+ const alt_bn128_ate_G2_precomp &prec_Q)
+{
+ enter_block("Call to alt_bn128_ate_miller_loop");
+
+ alt_bn128_Fq12 f = alt_bn128_Fq12::one();
+
+ bool found_one = false;
+ size_t idx = 0;
+
+ const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
+ alt_bn128_ate_ell_coeffs c;
+
+ for (long i = loop_count.max_bits(); i >= 0; --i)
+ {
+ const bool bit = loop_count.test_bit(i);
+ if (!found_one)
+ {
+ /* this skips the MSB itself */
+ found_one |= bit;
+ continue;
+ }
+
+ /* code below gets executed for all bits (EXCEPT the MSB itself) of
+ alt_bn128_ate_loop_count (skipping leading zeros) in MSB to LSB
+ order */
+
+ c = prec_Q.coeffs[idx++];
+ f = f.squared();
+ f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV);
+
+ if (bit)
+ {
+ c = prec_Q.coeffs[idx++];
+ f = f.mul_by_024(c.ell_0, prec_P.PY * c.ell_VW, prec_P.PX * c.ell_VV);
+ }
+
+ }
+
+ if (alt_bn128_ate_is_loop_count_neg)
+ {
+ f = f.inverse();
+ }
+
+ c = prec_Q.coeffs[idx++];
+ f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV);
+
+ c = prec_Q.coeffs[idx++];
+ f = f.mul_by_024(c.ell_0,prec_P.PY * c.ell_VW,prec_P.PX * c.ell_VV);
+
+ leave_block("Call to alt_bn128_ate_miller_loop");
+ return f;
+}
+
+alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1,
+ const alt_bn128_ate_G2_precomp &prec_Q1,
+ const alt_bn128_ate_G1_precomp &prec_P2,
+ const alt_bn128_ate_G2_precomp &prec_Q2)
+{
+ enter_block("Call to alt_bn128_ate_double_miller_loop");
+
+ alt_bn128_Fq12 f = alt_bn128_Fq12::one();
+
+ bool found_one = false;
+ size_t idx = 0;
+
+ const bigint<alt_bn128_Fr::num_limbs> &loop_count = alt_bn128_ate_loop_count;
+ for (long i = loop_count.max_bits(); i >= 0; --i)
+ {
+ const bool bit = loop_count.test_bit(i);
+ if (!found_one)
+ {
+ /* this skips the MSB itself */
+ found_one |= bit;
+ continue;
+ }
+
+ /* code below gets executed for all bits (EXCEPT the MSB itself) of
+ alt_bn128_ate_loop_count (skipping leading zeros) in MSB to LSB
+ order */
+
+ alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
+ alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
+ ++idx;
+
+ f = f.squared();
+
+ f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
+ f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
+
+ if (bit)
+ {
+ alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
+ alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
+ ++idx;
+
+ f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
+ f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
+ }
+ }
+
+ if (alt_bn128_ate_is_loop_count_neg)
+ {
+ f = f.inverse();
+ }
+
+ alt_bn128_ate_ell_coeffs c1 = prec_Q1.coeffs[idx];
+ alt_bn128_ate_ell_coeffs c2 = prec_Q2.coeffs[idx];
+ ++idx;
+ f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
+ f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
+
+ c1 = prec_Q1.coeffs[idx];
+ c2 = prec_Q2.coeffs[idx];
+ ++idx;
+ f = f.mul_by_024(c1.ell_0, prec_P1.PY * c1.ell_VW, prec_P1.PX * c1.ell_VV);
+ f = f.mul_by_024(c2.ell_0, prec_P2.PY * c2.ell_VW, prec_P2.PX * c2.ell_VV);
+
+ leave_block("Call to alt_bn128_ate_double_miller_loop");
+
+ return f;
+}
+
+alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P, const alt_bn128_G2 &Q)
+{
+ enter_block("Call to alt_bn128_ate_pairing");
+ alt_bn128_ate_G1_precomp prec_P = alt_bn128_ate_precompute_G1(P);
+ alt_bn128_ate_G2_precomp prec_Q = alt_bn128_ate_precompute_G2(Q);
+ alt_bn128_Fq12 result = alt_bn128_ate_miller_loop(prec_P, prec_Q);
+ leave_block("Call to alt_bn128_ate_pairing");
+ return result;
+}
+
+alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P, const alt_bn128_G2 &Q)
+{
+ enter_block("Call to alt_bn128_ate_reduced_pairing");
+ const alt_bn128_Fq12 f = alt_bn128_ate_pairing(P, Q);
+ const alt_bn128_GT result = alt_bn128_final_exponentiation(f);
+ leave_block("Call to alt_bn128_ate_reduced_pairing");
+ return result;
+}
+
+/* choice of pairing */
+
+alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P)
+{
+ return alt_bn128_ate_precompute_G1(P);
+}
+
+alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q)
+{
+ return alt_bn128_ate_precompute_G2(Q);
+}
+
+alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P,
+ const alt_bn128_G2_precomp &prec_Q)
+{
+ return alt_bn128_ate_miller_loop(prec_P, prec_Q);
+}
+
+alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
+ const alt_bn128_G2_precomp &prec_Q1,
+ const alt_bn128_G1_precomp &prec_P2,
+ const alt_bn128_G2_precomp &prec_Q2)
+{
+ return alt_bn128_ate_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
+}
+
+alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P,
+ const alt_bn128_G2 &Q)
+{
+ return alt_bn128_ate_pairing(P, Q);
+}
+
+alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q)
+{
+ return alt_bn128_ate_reduced_pairing(P, Q);
+}
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ALT_BN128_PAIRING_HPP_
+#define ALT_BN128_PAIRING_HPP_
+#include <vector>
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+
+namespace libsnark {
+
+/* final exponentiation */
+
+alt_bn128_GT alt_bn128_final_exponentiation(const alt_bn128_Fq12 &elt);
+
+/* ate pairing */
+
+struct alt_bn128_ate_G1_precomp {
+ alt_bn128_Fq PX;
+ alt_bn128_Fq PY;
+
+ bool operator==(const alt_bn128_ate_G1_precomp &other) const;
+ friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G1_precomp &prec_P);
+ friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G1_precomp &prec_P);
+};
+
+struct alt_bn128_ate_ell_coeffs {
+ alt_bn128_Fq2 ell_0;
+ alt_bn128_Fq2 ell_VW;
+ alt_bn128_Fq2 ell_VV;
+
+ bool operator==(const alt_bn128_ate_ell_coeffs &other) const;
+ friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_ell_coeffs &dc);
+ friend std::istream& operator>>(std::istream &in, alt_bn128_ate_ell_coeffs &dc);
+};
+
+struct alt_bn128_ate_G2_precomp {
+ alt_bn128_Fq2 QX;
+ alt_bn128_Fq2 QY;
+ std::vector<alt_bn128_ate_ell_coeffs> coeffs;
+
+ bool operator==(const alt_bn128_ate_G2_precomp &other) const;
+ friend std::ostream& operator<<(std::ostream &out, const alt_bn128_ate_G2_precomp &prec_Q);
+ friend std::istream& operator>>(std::istream &in, alt_bn128_ate_G2_precomp &prec_Q);
+};
+
+alt_bn128_ate_G1_precomp alt_bn128_ate_precompute_G1(const alt_bn128_G1& P);
+alt_bn128_ate_G2_precomp alt_bn128_ate_precompute_G2(const alt_bn128_G2& Q);
+
+alt_bn128_Fq12 alt_bn128_ate_miller_loop(const alt_bn128_ate_G1_precomp &prec_P,
+ const alt_bn128_ate_G2_precomp &prec_Q);
+alt_bn128_Fq12 alt_bn128_ate_double_miller_loop(const alt_bn128_ate_G1_precomp &prec_P1,
+ const alt_bn128_ate_G2_precomp &prec_Q1,
+ const alt_bn128_ate_G1_precomp &prec_P2,
+ const alt_bn128_ate_G2_precomp &prec_Q2);
+
+alt_bn128_Fq12 alt_bn128_ate_pairing(const alt_bn128_G1& P,
+ const alt_bn128_G2 &Q);
+alt_bn128_GT alt_bn128_ate_reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q);
+
+/* choice of pairing */
+
+typedef alt_bn128_ate_G1_precomp alt_bn128_G1_precomp;
+typedef alt_bn128_ate_G2_precomp alt_bn128_G2_precomp;
+
+alt_bn128_G1_precomp alt_bn128_precompute_G1(const alt_bn128_G1& P);
+
+alt_bn128_G2_precomp alt_bn128_precompute_G2(const alt_bn128_G2& Q);
+
+alt_bn128_Fq12 alt_bn128_miller_loop(const alt_bn128_G1_precomp &prec_P,
+ const alt_bn128_G2_precomp &prec_Q);
+
+alt_bn128_Fq12 alt_bn128_double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
+ const alt_bn128_G2_precomp &prec_Q1,
+ const alt_bn128_G1_precomp &prec_P2,
+ const alt_bn128_G2_precomp &prec_Q2);
+
+alt_bn128_Fq12 alt_bn128_pairing(const alt_bn128_G1& P,
+ const alt_bn128_G2 &Q);
+
+alt_bn128_GT alt_bn128_reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q);
+
+alt_bn128_GT alt_bn128_affine_reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q);
+
+} // libsnark
+#endif // ALT_BN128_PAIRING_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+
+namespace libsnark {
+
+void alt_bn128_pp::init_public_params()
+{
+ init_alt_bn128_params();
+}
+
+alt_bn128_GT alt_bn128_pp::final_exponentiation(const alt_bn128_Fq12 &elt)
+{
+ return alt_bn128_final_exponentiation(elt);
+}
+
+alt_bn128_G1_precomp alt_bn128_pp::precompute_G1(const alt_bn128_G1 &P)
+{
+ return alt_bn128_precompute_G1(P);
+}
+
+alt_bn128_G2_precomp alt_bn128_pp::precompute_G2(const alt_bn128_G2 &Q)
+{
+ return alt_bn128_precompute_G2(Q);
+}
+
+alt_bn128_Fq12 alt_bn128_pp::miller_loop(const alt_bn128_G1_precomp &prec_P,
+ const alt_bn128_G2_precomp &prec_Q)
+{
+ return alt_bn128_miller_loop(prec_P, prec_Q);
+}
+
+alt_bn128_Fq12 alt_bn128_pp::double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
+ const alt_bn128_G2_precomp &prec_Q1,
+ const alt_bn128_G1_precomp &prec_P2,
+ const alt_bn128_G2_precomp &prec_Q2)
+{
+ return alt_bn128_double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
+}
+
+alt_bn128_Fq12 alt_bn128_pp::pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q)
+{
+ return alt_bn128_pairing(P, Q);
+}
+
+alt_bn128_Fq12 alt_bn128_pp::reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q)
+{
+ return alt_bn128_reduced_pairing(P, Q);
+}
+
+} // libsnark
--- /dev/null
+/** @file
+*****************************************************************************
+* @author This file is part of libsnark, developed by SCIPR Lab
+* and contributors (see AUTHORS).
+* @copyright MIT license (see LICENSE file)
+*****************************************************************************/
+
+#ifndef ALT_BN128_PP_HPP_
+#define ALT_BN128_PP_HPP_
+#include "algebra/curves/public_params.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_init.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g1.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_g2.hpp"
+#include "algebra/curves/alt_bn128/alt_bn128_pairing.hpp"
+
+namespace libsnark {
+
+class alt_bn128_pp {
+public:
+ typedef alt_bn128_Fr Fp_type;
+ typedef alt_bn128_G1 G1_type;
+ typedef alt_bn128_G2 G2_type;
+ typedef alt_bn128_G1_precomp G1_precomp_type;
+ typedef alt_bn128_G2_precomp G2_precomp_type;
+ typedef alt_bn128_Fq Fq_type;
+ typedef alt_bn128_Fq2 Fqe_type;
+ typedef alt_bn128_Fq12 Fqk_type;
+ typedef alt_bn128_GT GT_type;
+
+ static const bool has_affine_pairing = false;
+
+ static void init_public_params();
+ static alt_bn128_GT final_exponentiation(const alt_bn128_Fq12 &elt);
+ static alt_bn128_G1_precomp precompute_G1(const alt_bn128_G1 &P);
+ static alt_bn128_G2_precomp precompute_G2(const alt_bn128_G2 &Q);
+ static alt_bn128_Fq12 miller_loop(const alt_bn128_G1_precomp &prec_P,
+ const alt_bn128_G2_precomp &prec_Q);
+ static alt_bn128_Fq12 double_miller_loop(const alt_bn128_G1_precomp &prec_P1,
+ const alt_bn128_G2_precomp &prec_Q1,
+ const alt_bn128_G1_precomp &prec_P2,
+ const alt_bn128_G2_precomp &prec_Q2);
+ static alt_bn128_Fq12 pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q);
+ static alt_bn128_Fq12 reduced_pairing(const alt_bn128_G1 &P,
+ const alt_bn128_G2 &Q);
+};
+
+} // libsnark
+
+#endif // ALT_BN128_PP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef CURVE_UTILS_HPP_
+#define CURVE_UTILS_HPP_
+#include <cstdint>
+
+#include "algebra/fields/bigint.hpp"
+
+namespace libsnark {
+
+template<typename GroupT, mp_size_t m>
+GroupT scalar_mul(const GroupT &base, const bigint<m> &scalar);
+
+} // libsnark
+#include "algebra/curves/curve_utils.tcc"
+
+#endif // CURVE_UTILS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef CURVE_UTILS_TCC_
+#define CURVE_UTILS_TCC_
+
+namespace libsnark {
+
+template<typename GroupT, mp_size_t m>
+GroupT scalar_mul(const GroupT &base, const bigint<m> &scalar)
+{
+ GroupT result = GroupT::zero();
+
+ bool found_one = false;
+ for (long i = scalar.max_bits() - 1; i >= 0; --i)
+ {
+ if (found_one)
+ {
+ result = result.dbl();
+ }
+
+ if (scalar.test_bit(i))
+ {
+ found_one = true;
+ result = result + base;
+ }
+ }
+
+ return result;
+}
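+
+/*
+ Illustration (double-and-add): for scalar = 5 = 0b101, after the leading 1
+ sets result = base, the loop above performs
+ bit 0: result = result.dbl(); // = 2*base
+ bit 1: result = result.dbl() + base; // = 5*base
+ i.e., one doubling per remaining bit and one extra addition per set bit.
+ */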
+
+} // libsnark
+#endif // CURVE_UTILS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PUBLIC_PARAMS_HPP_
+#define PUBLIC_PARAMS_HPP_
+#include <vector>
+
+namespace libsnark {
+
+/*
+ for every curve, the user should define a corresponding
+ public_params class with the following typedefs:
+
+ Fp_type
+ G1_type
+ G2_type
+ G1_precomp_type
+ G2_precomp_type
+ affine_ate_G1_precomp_type
+ affine_ate_G2_precomp_type
+ Fq_type
+ Fqe_type
+ Fqk_type
+ GT_type
+
+ one should also define the following static methods:
+
+ void init_public_params();
+
+ GT<EC_ppT> final_exponentiation(const Fqk<EC_ppT> &elt);
+
+ G1_precomp<EC_ppT> precompute_G1(const G1<EC_ppT> &P);
+ G2_precomp<EC_ppT> precompute_G2(const G2<EC_ppT> &Q);
+
+ Fqk<EC_ppT> miller_loop(const G1_precomp<EC_ppT> &prec_P,
+ const G2_precomp<EC_ppT> &prec_Q);
+
+ affine_ate_G1_precomp<EC_ppT> affine_ate_precompute_G1(const G1<EC_ppT> &P);
+ affine_ate_G2_precomp<EC_ppT> affine_ate_precompute_G2(const G2<EC_ppT> &Q);
+
+
+ Fqk<EC_ppT> affine_ate_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q);
+ Fqk<EC_ppT> affine_ate_e_over_e_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P1,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q1,
+ const affine_ate_G1_precomp<EC_ppT> &prec_P2,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q2);
+ Fqk<EC_ppT> affine_ate_e_times_e_over_e_miller_loop(const affine_ate_G1_precomp<EC_ppT> &prec_P1,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q1,
+ const affine_ate_G1_precomp<EC_ppT> &prec_P2,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q2,
+ const affine_ate_G1_precomp<EC_ppT> &prec_P3,
+ const affine_ate_G2_precomp<EC_ppT> &prec_Q3);
+ Fqk<EC_ppT> double_miller_loop(const G1_precomp<EC_ppT> &prec_P1,
+ const G2_precomp<EC_ppT> &prec_Q1,
+ const G1_precomp<EC_ppT> &prec_P2,
+ const G2_precomp<EC_ppT> &prec_Q2);
+
+ Fqk<EC_ppT> pairing(const G1<EC_ppT> &P,
+ const G2<EC_ppT> &Q);
+ GT<EC_ppT> reduced_pairing(const G1<EC_ppT> &P,
+ const G2<EC_ppT> &Q);
+ GT<EC_ppT> affine_reduced_pairing(const G1<EC_ppT> &P,
+ const G2<EC_ppT> &Q);
+*/
+
+template<typename EC_ppT>
+using Fr = typename EC_ppT::Fp_type;
+template<typename EC_ppT>
+using G1 = typename EC_ppT::G1_type;
+template<typename EC_ppT>
+using G2 = typename EC_ppT::G2_type;
+template<typename EC_ppT>
+using G1_precomp = typename EC_ppT::G1_precomp_type;
+template<typename EC_ppT>
+using G2_precomp = typename EC_ppT::G2_precomp_type;
+template<typename EC_ppT>
+using affine_ate_G1_precomp = typename EC_ppT::affine_ate_G1_precomp_type;
+template<typename EC_ppT>
+using affine_ate_G2_precomp = typename EC_ppT::affine_ate_G2_precomp_type;
+template<typename EC_ppT>
+using Fq = typename EC_ppT::Fq_type;
+template<typename EC_ppT>
+using Fqe = typename EC_ppT::Fqe_type;
+template<typename EC_ppT>
+using Fqk = typename EC_ppT::Fqk_type;
+template<typename EC_ppT>
+using GT = typename EC_ppT::GT_type;
+
+template<typename EC_ppT>
+using Fr_vector = std::vector<Fr<EC_ppT> >;
+template<typename EC_ppT>
+using G1_vector = std::vector<G1<EC_ppT> >;
+template<typename EC_ppT>
+using G2_vector = std::vector<G2<EC_ppT> >;
+
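+/*
+ Usage sketch (for exposition only; not part of this header): generic code
+ parameterized by a public-params class EC_ppT can be written as, e.g.,
+
+ template<typename EC_ppT>
+ bool check_bilinearity()
+ {
+ EC_ppT::init_public_params();
+ const Fr<EC_ppT> a = Fr<EC_ppT>::random_element();
+ const G1<EC_ppT> P = a * G1<EC_ppT>::one();
+ const G2<EC_ppT> Q = G2<EC_ppT>::one();
+ return EC_ppT::reduced_pairing(P, Q) ==
+ (EC_ppT::reduced_pairing(G1<EC_ppT>::one(), Q)^a);
+ }
+
+ See the bilinearity tests under algebra/curves/tests for full examples.
+*/
+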
+} // libsnark
+
+#endif // PUBLIC_PARAMS_HPP_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include "common/profiling.hpp"
+#include "algebra/curves/edwards/edwards_pp.hpp"
+#ifdef CURVE_BN128
+#include "algebra/curves/bn128/bn128_pp.hpp"
+#endif
+#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp"
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+
+using namespace libsnark;
+
+template<typename ppT>
+void pairing_test()
+{
+ GT<ppT> GT_one = GT<ppT>::one();
+
+ printf("Running bilinearity tests:\n");
+ G1<ppT> P = (Fr<ppT>::random_element()) * G1<ppT>::one();
+ //G1<ppT> P = Fr<ppT>("2") * G1<ppT>::one();
+ G2<ppT> Q = (Fr<ppT>::random_element()) * G2<ppT>::one();
+ //G2<ppT> Q = Fr<ppT>("3") * G2<ppT>::one();
+
+ printf("P:\n");
+ P.print();
+ P.print_coordinates();
+ printf("Q:\n");
+ Q.print();
+ Q.print_coordinates();
+ printf("\n\n");
+
+ Fr<ppT> s = Fr<ppT>::random_element();
+ //Fr<ppT> s = Fr<ppT>("2");
+ G1<ppT> sP = s * P;
+ G2<ppT> sQ = s * Q;
+
+ printf("Pairing bilinearity tests (three must match):\n");
+ GT<ppT> ans1 = ppT::reduced_pairing(sP, Q);
+ GT<ppT> ans2 = ppT::reduced_pairing(P, sQ);
+ GT<ppT> ans3 = ppT::reduced_pairing(P, Q)^s;
+ ans1.print();
+ ans2.print();
+ ans3.print();
+ assert(ans1 == ans2);
+ assert(ans2 == ans3);
+
+ assert(ans1 != GT_one);
+ assert((ans1^Fr<ppT>::field_char()) == GT_one);
+ printf("\n\n");
+}
+
+template<typename ppT>
+void double_miller_loop_test()
+{
+ const G1<ppT> P1 = (Fr<ppT>::random_element()) * G1<ppT>::one();
+ const G1<ppT> P2 = (Fr<ppT>::random_element()) * G1<ppT>::one();
+ const G2<ppT> Q1 = (Fr<ppT>::random_element()) * G2<ppT>::one();
+ const G2<ppT> Q2 = (Fr<ppT>::random_element()) * G2<ppT>::one();
+
+ const G1_precomp<ppT> prec_P1 = ppT::precompute_G1(P1);
+ const G1_precomp<ppT> prec_P2 = ppT::precompute_G1(P2);
+ const G2_precomp<ppT> prec_Q1 = ppT::precompute_G2(Q1);
+ const G2_precomp<ppT> prec_Q2 = ppT::precompute_G2(Q2);
+
+ const Fqk<ppT> ans_1 = ppT::miller_loop(prec_P1, prec_Q1);
+ const Fqk<ppT> ans_2 = ppT::miller_loop(prec_P2, prec_Q2);
+ const Fqk<ppT> ans_12 = ppT::double_miller_loop(prec_P1, prec_Q1, prec_P2, prec_Q2);
+ assert(ans_1 * ans_2 == ans_12);
+}
+
+template<typename ppT>
+void affine_pairing_test()
+{
+ GT<ppT> GT_one = GT<ppT>::one();
+
+ printf("Running bilinearity tests:\n");
+ G1<ppT> P = (Fr<ppT>::random_element()) * G1<ppT>::one();
+ G2<ppT> Q = (Fr<ppT>::random_element()) * G2<ppT>::one();
+
+ printf("P:\n");
+ P.print();
+ printf("Q:\n");
+ Q.print();
+ printf("\n\n");
+
+ Fr<ppT> s = Fr<ppT>::random_element();
+ G1<ppT> sP = s * P;
+ G2<ppT> sQ = s * Q;
+
+ printf("Pairing bilinearity tests (three must match):\n");
+ GT<ppT> ans1 = ppT::affine_reduced_pairing(sP, Q);
+ GT<ppT> ans2 = ppT::affine_reduced_pairing(P, sQ);
+ GT<ppT> ans3 = ppT::affine_reduced_pairing(P, Q)^s;
+ ans1.print();
+ ans2.print();
+ ans3.print();
+ assert(ans1 == ans2);
+ assert(ans2 == ans3);
+
+ assert(ans1 != GT_one);
+ assert((ans1^Fr<ppT>::field_char()) == GT_one);
+ printf("\n\n");
+}
+
+int main(void)
+{
+ start_profiling();
+ edwards_pp::init_public_params();
+ pairing_test<edwards_pp>();
+ double_miller_loop_test<edwards_pp>();
+
+ mnt6_pp::init_public_params();
+ pairing_test<mnt6_pp>();
+ double_miller_loop_test<mnt6_pp>();
+ affine_pairing_test<mnt6_pp>();
+
+ mnt4_pp::init_public_params();
+ pairing_test<mnt4_pp>();
+ double_miller_loop_test<mnt4_pp>();
+ affine_pairing_test<mnt4_pp>();
+
+ alt_bn128_pp::init_public_params();
+ pairing_test<alt_bn128_pp>();
+ double_miller_loop_test<alt_bn128_pp>();
+
+#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
+ bn128_pp::init_public_params();
+ pairing_test<bn128_pp>();
+ double_miller_loop_test<bn128_pp>();
+#endif
+}
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include "common/profiling.hpp"
+#include "algebra/curves/edwards/edwards_pp.hpp"
+#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp"
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+#ifdef CURVE_BN128
+#include "algebra/curves/bn128/bn128_pp.hpp"
+#endif
+#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+#include <sstream>
+
+using namespace libsnark;
+
+template<typename GroupT>
+void test_mixed_add()
+{
+ GroupT base, el, result;
+
+ base = GroupT::zero();
+ el = GroupT::zero();
+ el.to_special();
+ result = base.mixed_add(el);
+ assert(result == base + el);
+
+ base = GroupT::zero();
+ el = GroupT::random_element();
+ el.to_special();
+ result = base.mixed_add(el);
+ assert(result == base + el);
+
+ base = GroupT::random_element();
+ el = GroupT::zero();
+ el.to_special();
+ result = base.mixed_add(el);
+ assert(result == base + el);
+
+ base = GroupT::random_element();
+ el = GroupT::random_element();
+ el.to_special();
+ result = base.mixed_add(el);
+ assert(result == base + el);
+
+ base = GroupT::random_element();
+ el = base;
+ el.to_special();
+ result = base.mixed_add(el);
+ assert(result == base.dbl());
+}
+
+template<typename GroupT>
+void test_group()
+{
+ bigint<1> rand1 = bigint<1>("76749407");
+ bigint<1> rand2 = bigint<1>("44410867");
+ bigint<1> randsum = bigint<1>("121160274");
+
+ GroupT zero = GroupT::zero();
+ assert(zero == zero);
+ GroupT one = GroupT::one();
+ assert(one == one);
+ GroupT two = bigint<1>(2l) * GroupT::one();
+ assert(two == two);
+ GroupT five = bigint<1>(5l) * GroupT::one();
+
+ GroupT three = bigint<1>(3l) * GroupT::one();
+ GroupT four = bigint<1>(4l) * GroupT::one();
+
+ assert(two+five == three+four);
+
+ GroupT a = GroupT::random_element();
+ GroupT b = GroupT::random_element();
+
+ assert(one != zero);
+ assert(a != zero);
+ assert(a != one);
+
+ assert(b != zero);
+ assert(b != one);
+
+ assert(a.dbl() == a + a);
+ assert(b.dbl() == b + b);
+ assert(one.add(two) == three);
+ assert(two.add(one) == three);
+ assert(a + b == b + a);
+ assert(a - a == zero);
+ assert(a - b == a + (-b));
+ assert(a - b == (-b) + a);
+
+ // handle special cases
+ assert(zero + (-a) == -a);
+ assert(zero - a == -a);
+ assert(a - zero == a);
+ assert(a + zero == a);
+ assert(zero + a == a);
+
+ assert((a + b).dbl() == (a + b) + (b + a));
+ assert(bigint<1>("2") * (a + b) == (a + b) + (b + a));
+
+ assert((rand1 * a) + (rand2 * a) == (randsum * a));
+
+ assert(GroupT::order() * a == zero);
+ assert(GroupT::order() * one == zero);
+ assert((GroupT::order() * a) - a != zero);
+ assert((GroupT::order() * one) - one != zero);
+
+ test_mixed_add<GroupT>();
+}
+
+template<typename GroupT>
+void test_mul_by_q()
+{
+ GroupT a = GroupT::random_element();
+ assert((GroupT::base_field_char()*a) == a.mul_by_q());
+}
+
+template<typename GroupT>
+void test_output()
+{
+ GroupT g = GroupT::zero();
+
+ for (size_t i = 0; i < 1000; ++i)
+ {
+ std::stringstream ss;
+ ss << g;
+ GroupT gg;
+ ss >> gg;
+ assert(g == gg);
+ /* use a random point in next iteration */
+ g = GroupT::random_element();
+ }
+}
+
+int main(void)
+{
+ edwards_pp::init_public_params();
+ test_group<G1<edwards_pp> >();
+ test_output<G1<edwards_pp> >();
+ test_group<G2<edwards_pp> >();
+ test_output<G2<edwards_pp> >();
+ test_mul_by_q<G2<edwards_pp> >();
+
+ mnt4_pp::init_public_params();
+ test_group<G1<mnt4_pp> >();
+ test_output<G1<mnt4_pp> >();
+ test_group<G2<mnt4_pp> >();
+ test_output<G2<mnt4_pp> >();
+ test_mul_by_q<G2<mnt4_pp> >();
+
+ mnt6_pp::init_public_params();
+ test_group<G1<mnt6_pp> >();
+ test_output<G1<mnt6_pp> >();
+ test_group<G2<mnt6_pp> >();
+ test_output<G2<mnt6_pp> >();
+ test_mul_by_q<G2<mnt6_pp> >();
+
+ alt_bn128_pp::init_public_params();
+ test_group<G1<alt_bn128_pp> >();
+ test_output<G1<alt_bn128_pp> >();
+ test_group<G2<alt_bn128_pp> >();
+ test_output<G2<alt_bn128_pp> >();
+ test_mul_by_q<G2<alt_bn128_pp> >();
+
+#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
+ bn128_pp::init_public_params();
+ test_group<G1<bn128_pp> >();
+ test_output<G1<bn128_pp> >();
+ test_group<G2<bn128_pp> >();
+ test_output<G2<bn128_pp> >();
+#endif
+}
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for the "basic radix-2" evaluation domain.
+
+ Roughly, the domain has size m = 2^k and consists of the m-th roots of unity.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_RADIX2_DOMAIN_HPP_
+#define BASIC_RADIX2_DOMAIN_HPP_
+
+#include "algebra/evaluation_domain/evaluation_domain.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class basic_radix2_domain : public evaluation_domain<FieldT> {
+public:
+
+ FieldT omega;
+
+ basic_radix2_domain(const size_t m);
+
+ void FFT(std::vector<FieldT> &a);
+ void iFFT(std::vector<FieldT> &a);
+ void cosetFFT(std::vector<FieldT> &a, const FieldT &g);
+ void icosetFFT(std::vector<FieldT> &a, const FieldT &g);
+ std::vector<FieldT> lagrange_coeffs(const FieldT &t);
+ FieldT get_element(const size_t idx);
+ FieldT compute_Z(const FieldT &t);
+ void add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H);
+ void divide_by_Z_on_coset(std::vector<FieldT> &P);
+
+};
+
+} // libsnark
+
+#include "algebra/evaluation_domain/domains/basic_radix2_domain.tcc"
+
+#endif // BASIC_RADIX2_DOMAIN_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for the "basic radix-2" evaluation domain.
+
+ See basic_radix2_domain.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_RADIX2_DOMAIN_TCC_
+#define BASIC_RADIX2_DOMAIN_TCC_
+
+#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+basic_radix2_domain<FieldT>::basic_radix2_domain(const size_t m) : evaluation_domain<FieldT>(m)
+{
+ assert(m > 1);
+ const size_t logm = log2(m);
+ assert(logm <= (FieldT::s));
+
+ omega = get_root_of_unity<FieldT>(m);
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::FFT(std::vector<FieldT> &a)
+{
+ enter_block("Execute FFT");
+ assert(a.size() == this->m);
+ _basic_radix2_FFT(a, omega);
+ leave_block("Execute FFT");
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::iFFT(std::vector<FieldT> &a)
+{
+ enter_block("Execute inverse FFT");
+ assert(a.size() == this->m);
+ _basic_radix2_FFT(a, omega.inverse());
+
+ const FieldT sconst = FieldT(a.size()).inverse();
+ for (size_t i = 0; i < a.size(); ++i)
+ {
+ a[i] *= sconst;
+ }
+ leave_block("Execute inverse FFT");
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::cosetFFT(std::vector<FieldT> &a, const FieldT &g)
+{
+ enter_block("Execute coset FFT");
+ _multiply_by_coset(a, g);
+ FFT(a);
+ leave_block("Execute coset FFT");
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::icosetFFT(std::vector<FieldT> &a, const FieldT &g)
+{
+ enter_block("Execute inverse coset IFFT");
+ iFFT(a);
+ _multiply_by_coset(a, g.inverse());
+ leave_block("Execute inverse coset IFFT");
+}
+
+template<typename FieldT>
+std::vector<FieldT> basic_radix2_domain<FieldT>::lagrange_coeffs(const FieldT &t)
+{
+ return _basic_radix2_lagrange_coeffs(this->m, t);
+}
+
+template<typename FieldT>
+FieldT basic_radix2_domain<FieldT>::get_element(const size_t idx)
+{
+ return omega^idx;
+}
+
+template<typename FieldT>
+FieldT basic_radix2_domain<FieldT>::compute_Z(const FieldT &t)
+{
+ return (t^this->m) - FieldT::one();
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H)
+{
+ assert(H.size() == this->m+1);
+ H[this->m] += coeff;
+ H[0] -= coeff;
+}
+
+template<typename FieldT>
+void basic_radix2_domain<FieldT>::divide_by_Z_on_coset(std::vector<FieldT> &P)
+{
+ const FieldT coset = FieldT::multiplicative_generator;
+ const FieldT Z_inverse_at_coset = this->compute_Z(coset).inverse();
+ for (size_t i = 0; i < this->m; ++i)
+ {
+ P[i] *= Z_inverse_at_coset;
+ }
+}
+
+} // libsnark
+
+#endif // BASIC_RADIX2_DOMAIN_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for auxiliary functions for the "basic radix-2" evaluation domain.
+
+ These functions compute the radix-2 FFT (in single- or multi-threaded mode)
+ and also compute Lagrange coefficients.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_RADIX2_DOMAIN_AUX_HPP_
+#define BASIC_RADIX2_DOMAIN_AUX_HPP_
+
+#include <vector>
+
+namespace libsnark {
+
+/**
+ * Compute the radix-2 FFT of the vector a over the set S={omega^{0},...,omega^{m-1}}.
+ */
+template<typename FieldT>
+void _basic_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega);
+
+/**
+ * A multi-threaded version of _basic_radix2_FFT.
+ */
+template<typename FieldT>
+void _basic_parallel_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega);
+
+/**
+ * Translate the vector a to a coset defined by g.
+ */
+template<typename FieldT>
+void _multiply_by_coset(std::vector<FieldT> &a, const FieldT &g);
+
+/**
+ * Compute the m Lagrange coefficients, relative to the set S={omega^{0},...,omega^{m-1}}, at the field element t.
+ */
+template<typename FieldT>
+std::vector<FieldT> _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t);
+
+} // libsnark
+
+#include "algebra/evaluation_domain/domains/basic_radix2_domain_aux.tcc"
+
+#endif // BASIC_RADIX2_DOMAIN_AUX_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for auxiliary functions for the "basic radix-2" evaluation domain.
+
+ See basic_radix2_domain_aux.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_RADIX2_DOMAIN_AUX_TCC_
+#define BASIC_RADIX2_DOMAIN_AUX_TCC_
+
+#include <cassert>
+#ifdef MULTICORE
+#include <omp.h>
+#endif
+#include "algebra/fields/field_utils.hpp"
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+#ifdef MULTICORE
+#define _basic_radix2_FFT _basic_parallel_radix2_FFT
+#else
+#define _basic_radix2_FFT _basic_serial_radix2_FFT
+#endif
+
+/*
+ Below we make use of pseudocode from [CLRS 2nd Ed, p. 864].
+ Also, note that it's the caller's responsibility to multiply by 1/N.
+ */
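+/*
+ Example (n = 4, omega a primitive 4th root of unity): the bit-reversal pass
+ below swaps a[1] and a[2]; stage s = 1 uses w_m = omega^2 = -1 and combines
+ the pairs (a[0],a[1]) and (a[2],a[3]); stage s = 2 uses w_m = omega and
+ combines (a[0],a[2]) and (a[1],a[3]), leaving a[j] = \sum_k x_k * omega^{j*k},
+ where x denotes the input vector.
+ */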
+template<typename FieldT>
+void _basic_serial_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega)
+{
+ const size_t n = a.size(), logn = log2(n);
+ assert(n == (1u << logn));
+
+ /* swapping in place (from Storer's book) */
+ for (size_t k = 0; k < n; ++k)
+ {
+ const size_t rk = bitreverse(k, logn);
+ if (k < rk)
+ std::swap(a[k], a[rk]);
+ }
+
+ size_t m = 1; // invariant: m = 2^{s-1}
+ for (size_t s = 1; s <= logn; ++s)
+ {
+ // w_m is 2^s-th root of unity now
+ const FieldT w_m = omega^(n/(2*m));
+
+ asm volatile ("/* pre-inner */");
+ for (size_t k = 0; k < n; k += 2*m)
+ {
+ FieldT w = FieldT::one();
+ for (size_t j = 0; j < m; ++j)
+ {
+ const FieldT t = w * a[k+j+m];
+ a[k+j+m] = a[k+j] - t;
+ a[k+j] += t;
+ w *= w_m;
+ }
+ }
+ asm volatile ("/* post-inner */");
+ m *= 2;
+ }
+}
+
+template<typename FieldT>
+void _basic_parallel_radix2_FFT_inner(std::vector<FieldT> &a, const FieldT &omega, const size_t log_cpus)
+{
+ const size_t num_cpus = 1ul<<log_cpus;
+
+ const size_t m = a.size();
+ const size_t log_m = log2(m);
+ assert(m == 1ul<<log_m);
+
+ if (log_m < log_cpus)
+ {
+ _basic_serial_radix2_FFT(a, omega);
+ return;
+ }
+
+ enter_block("Shuffle inputs");
+ std::vector<std::vector<FieldT> > tmp(num_cpus);
+ for (size_t j = 0; j < num_cpus; ++j)
+ {
+ tmp[j].resize(1ul<<(log_m-log_cpus), FieldT::zero());
+ }
+
+#ifdef MULTICORE
+ #pragma omp parallel for
+#endif
+ for (size_t j = 0; j < num_cpus; ++j)
+ {
+ const FieldT omega_j = omega^j;
+ const FieldT omega_step = omega^(j<<(log_m - log_cpus));
+
+ FieldT elt = FieldT::one();
+ for (size_t i = 0; i < 1ul<<(log_m - log_cpus); ++i)
+ {
+ for (size_t s = 0; s < num_cpus; ++s)
+ {
+ // invariant: elt is omega^(j*idx)
+ const size_t idx = (i + (s<<(log_m - log_cpus))) % (1u << log_m);
+ tmp[j][i] += a[idx] * elt;
+ elt *= omega_step;
+ }
+ elt *= omega_j;
+ }
+ }
+ leave_block("Shuffle inputs");
+
+ enter_block("Execute sub-FFTs");
+ const FieldT omega_num_cpus = omega^num_cpus;
+
+#ifdef MULTICORE
+ #pragma omp parallel for
+#endif
+ for (size_t j = 0; j < num_cpus; ++j)
+ {
+ _basic_serial_radix2_FFT(tmp[j], omega_num_cpus);
+ }
+ leave_block("Execute sub-FFTs");
+
+ enter_block("Re-shuffle outputs");
+
+#ifdef MULTICORE
+ #pragma omp parallel for
+#endif
+ for (size_t i = 0; i < num_cpus; ++i)
+ {
+ for (size_t j = 0; j < 1ul<<(log_m - log_cpus); ++j)
+ {
+ // now: i = idx >> (log_m - log_cpus) and j = idx % (1u << (log_m - log_cpus)), for idx = ((i<<(log_m-log_cpus))+j) % (1u << log_m)
+ a[(j<<log_cpus) + i] = tmp[i][j];
+ }
+ }
+ leave_block("Re-shuffle outputs");
+}
+
+template<typename FieldT>
+void _basic_parallel_radix2_FFT(std::vector<FieldT> &a, const FieldT &omega)
+{
+#ifdef MULTICORE
+ const size_t num_cpus = omp_get_max_threads();
+#else
+ const size_t num_cpus = 1;
+#endif
+ const size_t log_cpus = ((num_cpus & (num_cpus - 1)) == 0 ? log2(num_cpus) : log2(num_cpus) - 1);
+
+#ifdef DEBUG
+ print_indent(); printf("* Invoking parallel FFT on 2^%zu CPUs (omp_get_max_threads = %zu)\n", log_cpus, num_cpus);
+#endif
+
+ if (log_cpus == 0)
+ {
+ _basic_serial_radix2_FFT(a, omega);
+ }
+ else
+ {
+ _basic_parallel_radix2_FFT_inner(a, omega, log_cpus);
+ }
+}
+
+template<typename FieldT>
+void _multiply_by_coset(std::vector<FieldT> &a, const FieldT &g)
+{
+ //enter_block("Multiply by coset");
+ FieldT u = g;
+ for (size_t i = 1; i < a.size(); ++i)
+ {
+ a[i] *= u;
+ u *= g;
+ }
+ //leave_block("Multiply by coset");
+}
+
+template<typename FieldT>
+std::vector<FieldT> _basic_radix2_lagrange_coeffs(const size_t m, const FieldT &t)
+{
+ if (m == 1)
+ {
+ return std::vector<FieldT>(1, FieldT::one());
+ }
+
+ assert(m == (1u << log2(m)));
+
+ const FieldT omega = get_root_of_unity<FieldT>(m);
+
+ std::vector<FieldT> u(m, FieldT::zero());
+
+ /*
+ If t equals one of the roots of unity in S={omega^{0},...,omega^{m-1}}
+ then output 1 at the right place, and 0 elsewhere
+ */
+
+ if ((t^m) == (FieldT::one()))
+ {
+ FieldT omega_i = FieldT::one();
+ for (size_t i = 0; i < m; ++i)
+ {
+ if (omega_i == t) // i.e., t equals omega^i
+ {
+ u[i] = FieldT::one();
+ return u;
+ }
+
+ omega_i *= omega;
+ }
+ }
+
+ /*
+ Otherwise, if t does not equal any of the roots of unity in S,
+ then compute each L_{i,S}(t) as Z_{S}(t) * v_i / (t-\omega^i)
+ where:
+ - Z_{S}(t) = \prod_{j} (t-\omega^j) = (t^m-1), and
+ - v_{i} = 1 / \prod_{j \neq i} (\omega^i-\omega^j).
+ Below we use the fact that v_{0} = 1/m and v_{i+1} = \omega * v_{i}.
+ */
+
+ const FieldT Z = (t^m)-FieldT::one();
+ FieldT l = Z * FieldT(m).inverse();
+ FieldT r = FieldT::one();
+ for (size_t i = 0; i < m; ++i)
+ {
+ u[i] = l * (t - r).inverse();
+ l *= omega;
+ r *= omega;
+ }
+
+ return u;
+}
+
+} // libsnark
+
+#endif // BASIC_RADIX2_DOMAIN_AUX_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for evaluation domains.
+
+ Roughly, given a desired size m for the domain, the constructor selects
+ a domain S of size ~m, chosen so as to optimize
+ - computations of Lagrange polynomials, and
+ - FFT/iFFT computations.
+ An evaluation domain also provides other functions, e.g., accessing
+ individual elements in S or evaluating its vanishing polynomial.
+
+ The descriptions below make use of the definition of a *Lagrange polynomial*,
+ which we recall. Given a field F, a subset S=(a_i)_i of F, and an index idx
+ in {0,...,|S|-1}, the idx-th Lagrange polynomial (with respect to the subset S) is defined to be
+ \f[ L_{idx,S}(z) := \prod_{k \neq idx} (z - a_k) / \prod_{k \neq idx} (a_{idx} - a_k) \f]
+ Note that, by construction:
+ \f[ L_{idx,S}(a_{idx}) = 1 \text{ and } \forall j \neq idx: L_{idx,S}(a_j) = 0 \f]
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef EVALUATION_DOMAIN_HPP_
+#define EVALUATION_DOMAIN_HPP_
+
+#include <memory>
+#include <vector>
+
+namespace libsnark {
+
+/**
+ * An evaluation domain.
+ */
+template<typename FieldT>
+class evaluation_domain {
+public:
+
+ const size_t m;
+
+ /**
+ * Construct an evaluation domain S of size m, if possible.
+ *
+ * (See the function get_evaluation_domain below.)
+ */
+ evaluation_domain(const size_t m) : m(m) {};
+
+ /**
+ * Get the idx-th element in S.
+ */
+ virtual FieldT get_element(const size_t idx) = 0;
+
+ /**
+ * Compute the FFT, over the domain S, of the vector a.
+ */
+ virtual void FFT(std::vector<FieldT> &a) = 0;
+
+ /**
+ * Compute the inverse FFT, over the domain S, of the vector a.
+ */
+ virtual void iFFT(std::vector<FieldT> &a) = 0;
+
+ /**
+ * Compute the FFT, over the domain g*S, of the vector a.
+ */
+ virtual void cosetFFT(std::vector<FieldT> &a, const FieldT &g) = 0;
+
+ /**
+ * Compute the inverse FFT, over the domain g*S, of the vector a.
+ */
+ virtual void icosetFFT(std::vector<FieldT> &a, const FieldT &g) = 0;
+
+ /**
+ * Evaluate all Lagrange polynomials.
+ *
+ * The inputs are:
+ * - an integer m
+ * - an element t
+ * The output is a vector (b_{0},...,b_{m-1})
+ * where b_{i} is the evaluation of L_{i,S}(z) at z = t.
+ */
+ virtual std::vector<FieldT> lagrange_coeffs(const FieldT &t) = 0;
+
+ /**
+ * Evaluate the vanishing polynomial of S at the field element t.
+ */
+ virtual FieldT compute_Z(const FieldT &t) = 0;
+
+ /**
+ * Add the coefficients of the vanishing polynomial of S to the coefficients of the polynomial H.
+ */
+ virtual void add_poly_Z(const FieldT &coeff, std::vector<FieldT> &H) = 0;
+
+ /**
+ * Multiply by the evaluation, on a coset of S, of the inverse of the vanishing polynomial of S.
+ */
+ virtual void divide_by_Z_on_coset(std::vector<FieldT> &P) = 0;
+};
+
+/**
+ * Return an evaluation domain object in which the domain S has size |S| >= min_size.
+ * The function chooses from different supported domains, depending on min_size.
+ */
+template<typename FieldT>
+std::shared_ptr<evaluation_domain<FieldT> > get_evaluation_domain(const size_t min_size);
+
+/**
+ * Naive evaluation of a *single* Lagrange polynomial, used for testing purposes.
+ *
+ * The inputs are:
+ * - an integer m
+ * - a domain S = (a_{0},...,a_{m-1}) of size m
+ * - a field element t
+ * - an index idx in {0,...,m-1}
+ * The output is the polynomial L_{idx,S}(z) evaluated at z = t.
+ */
+template<typename FieldT>
+FieldT lagrange_eval(const size_t m, const std::vector<FieldT> &domain, const FieldT &t, const size_t idx);
+
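+/*
+ Usage sketch (for exposition only): to multiply two polynomials of degree < n,
+ given by coefficient vectors a and b (std::vector<FieldT>), one may write, e.g.,
+
+ std::shared_ptr<evaluation_domain<FieldT> > domain = get_evaluation_domain<FieldT>(2*n);
+ a.resize(domain->m, FieldT::zero());
+ b.resize(domain->m, FieldT::zero());
+ domain->FFT(a);
+ domain->FFT(b);
+ for (size_t i = 0; i < domain->m; ++i) { a[i] *= b[i]; }
+ domain->iFFT(a); // a now holds the coefficients of the product
+*/
+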
+} // libsnark
+
+#include "algebra/evaluation_domain/evaluation_domain.tcc"
+
+#endif // EVALUATION_DOMAIN_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for evaluation domains.
+
+ See evaluation_domain.hpp .
+
+ We currently implement, and select among, three types of domains:
+ - "basic radix-2": the domain has size m = 2^k and consists of the m-th roots of unity
+ - "extended radix-2": the domain has size m = 2^{k+1} and consists of "the m-th roots of unity" union "a coset"
+ - "step radix-2": the domain has size m = 2^k + 2^r and consists of "the 2^k-th roots of unity" union "a coset of 2^r-th roots of unity"
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef EVALUATION_DOMAIN_TCC_
+#define EVALUATION_DOMAIN_TCC_
+
+#include <cassert>
+#include "algebra/fields/field_utils.hpp"
+#include "algebra/evaluation_domain/domains/basic_radix2_domain.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+std::shared_ptr<evaluation_domain<FieldT> > get_evaluation_domain(const size_t min_size)
+{
+ assert(min_size > 1);
+ const size_t log_min_size = log2(min_size);
+ assert(log_min_size <= (FieldT::s+1));
+
+ std::shared_ptr<evaluation_domain<FieldT> > result;
+ if (min_size == (1u << log_min_size))
+ {
+ if (log_min_size == FieldT::s+1)
+ {
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Selected domain: extended_radix2\n");
+ }
+ assert(0);
+ }
+ else
+ {
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Selected domain: basic_radix2\n");
+ }
+ result.reset(new basic_radix2_domain<FieldT>(min_size));
+ }
+ }
+ else
+ {
+ const size_t big = 1ul<<(log2(min_size)-1);
+ const size_t small = min_size - big;
+ const size_t rounded_small = (1ul<<log2(small));
+ if (big == rounded_small)
+ {
+ if (log2(big + rounded_small) < FieldT::s+1)
+ {
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Selected domain: basic_radix2\n");
+ }
+ result.reset(new basic_radix2_domain<FieldT>(big + rounded_small));
+ }
+ else
+ {
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Selected domain: extended_radix2\n");
+ }
+ assert(0);
+ }
+ }
+ else
+ {
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Selected domain: step_radix2\n");
+ }
+ assert(0);
+ }
+ }
+
+ return result;
+}
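+
+/* Worked examples of the selection above, assuming (as in common/utils) that
+   log2(n) rounds up, i.e. 1ul<<log2(n) >= n. Note that in this file only the
+   basic radix-2 domain is actually constructed; the extended and step branches
+   currently abort via assert(0).
+   - min_size = 4: 4 is a power of 2, so a basic_radix2_domain of size 4 is chosen.
+   - min_size = 7: big = 4, small = 3, rounded_small = 4 == big, so a
+     basic_radix2_domain of size big + rounded_small = 8 is chosen.
+   - min_size = 6: big = 4, small = 2, rounded_small = 2 != big, which would
+     select the step radix-2 domain. */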
+
+template<typename FieldT>
+FieldT lagrange_eval(const size_t m, const std::vector<FieldT> &domain, const FieldT &t, const size_t idx)
+{
+ assert(m == domain.size());
+ assert(idx < m);
+
+ FieldT num = FieldT::one();
+ FieldT denom = FieldT::one();
+
+ for (size_t k = 0; k < m; ++k)
+ {
+ if (k == idx)
+ {
+ continue;
+ }
+
+ num *= t - domain[k];
+ denom *= domain[idx] - domain[k];
+ }
+
+ return num * denom.inverse();
+}
+
+} // libsnark
+
+#endif // EVALUATION_DOMAIN_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for (square-and-multiply) exponentiation.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef EXPONENTIATION_HPP_
+#define EXPONENTIATION_HPP_
+
+#include <cstdint>
+
+#include "algebra/fields/bigint.hpp"
+
+namespace libsnark {
+
+template<typename FieldT, mp_size_t m>
+FieldT power(const FieldT &base, const bigint<m> &exponent);
+
+template<typename FieldT>
+FieldT power(const FieldT &base, const unsigned long exponent);
+
+} // libsnark
+
+#include "algebra/exponentiation/exponentiation.tcc"
+
+#endif // EXPONENTIATION_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for (square-and-multiply) exponentiation.
+
+ See exponentiation.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef EXPONENTIATION_TCC_
+#define EXPONENTIATION_TCC_
+
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT, mp_size_t m>
+FieldT power(const FieldT &base, const bigint<m> &exponent)
+{
+ FieldT result = FieldT::one();
+
+ bool found_one = false;
+
+ for (long i = exponent.max_bits() - 1; i >= 0; --i)
+ {
+ if (found_one)
+ {
+ result = result * result;
+ }
+
+ if (exponent.test_bit(i))
+ {
+ found_one = true;
+ result = result * base;
+ }
+ }
+
+ return result;
+}
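+
+/* Illustrative trace of the square-and-multiply loop above for exponent 13
+   (binary 1101, scanned from the most significant set bit):
+     bit 1: result = base
+     bit 1: result = base^2 * base  = base^3
+     bit 0: result = base^6
+     bit 1: result = base^12 * base = base^13
+   Leading zero bits of the bigint are skipped via the found_one flag. */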
+
+template<typename FieldT>
+FieldT power(const FieldT &base, const unsigned long exponent)
+{
+ return power<FieldT>(base, bigint<1>(exponent));
+}
+
+} // libsnark
+
+#endif // EXPONENTIATION_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of bigint wrapper class around GMP's MPZ long integers.
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BIGINT_HPP_
+#define BIGINT_HPP_
+#include <cstddef>
+#include <iostream>
+#include <gmp.h>
+#include "common/serialization.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n> class bigint;
+template<mp_size_t n> std::ostream& operator<<(std::ostream &, const bigint<n>&);
+template<mp_size_t n> std::istream& operator>>(std::istream &, bigint<n>&);
+
+/**
+ * Wrapper class around GMP's MPZ long integers. It supports arithmetic operations,
+ * serialization and randomization. Serialization is fragile, see common/serialization.hpp.
+ */
+
+template<mp_size_t n>
+class bigint {
+public:
+ static const mp_size_t N = n;
+
+ mp_limb_t data[n] = {0};
+
+ bigint() = default;
+    bigint(const unsigned long x); /// Initialize from a small integer
+ bigint(const char* s); /// Initialize from a string containing an integer in decimal notation
+ bigint(const mpz_t r); /// Initialize from MPZ element
+
+ void print() const;
+ void print_hex() const;
+ bool operator==(const bigint<n>& other) const;
+ bool operator!=(const bigint<n>& other) const;
+ void clear();
+ bool is_zero() const;
+ size_t max_bits() const { return n * GMP_NUMB_BITS; }
+ size_t num_bits() const;
+
+    unsigned long as_ulong() const; /* return the least-significant limb of the integer */
+ void to_mpz(mpz_t r) const;
+ bool test_bit(const std::size_t bitno) const;
+
+ template<mp_size_t m> inline void operator+=(const bigint<m>& other);
+ template<mp_size_t m> inline bigint<n+m> operator*(const bigint<m>& other) const;
+ template<mp_size_t d> static inline void div_qr(bigint<n-d+1>& quotient, bigint<d>& remainder,
+ const bigint<n>& dividend, const bigint<d>& divisor);
+ template<mp_size_t m> inline bigint<m> shorten(const bigint<m>& q, const char *msg) const;
+
+ inline void limit(const bigint<n>& q, const char *msg) const;
+ bool operator>(const bigint<n>& other) const;
+
+ bigint& randomize();
+
+ friend std::ostream& operator<< <n>(std::ostream &out, const bigint<n> &b);
+ friend std::istream& operator>> <n>(std::istream &in, bigint<n> &b);
+};
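+
+/*
+ * Example (illustrative sketch; assumes 64-bit limbs, i.e. GMP_NUMB_BITS == 64):
+ *
+ *   bigint<2> a("18446744073709551616"); // 2^64, occupies two limbs
+ *   bigint<1> b(42ul);
+ *   bigint<3> c = a * b;                 // multiplication widens to n+m limbs
+ *   bigint<3> q;
+ *   bigint<1> r;
+ *   bigint<3>::div_qr(q, r, c, b);       // c == q*b + r, and here r is zero
+ *   assert(r.is_zero());
+ */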
+
+} // libsnark
+#include "algebra/fields/bigint.tcc"
+#endif
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of bigint wrapper class around GMP's MPZ long integers.
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BIGINT_TCC_
+#define BIGINT_TCC_
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include "sodium.h"
+
+namespace libsnark {
+
+template<mp_size_t n>
+bigint<n>::bigint(const unsigned long x) /// Initialize from a small integer
+{
+ static_assert(ULONG_MAX <= GMP_NUMB_MAX, "unsigned long does not fit in a GMP limb");
+ this->data[0] = x;
+}
+
+template<mp_size_t n>
+bigint<n>::bigint(const char* s) /// Initialize from a string containing an integer in decimal notation
+{
+ size_t l = strlen(s);
+ unsigned char* s_copy = new unsigned char[l];
+
+ for (size_t i = 0; i < l; ++i)
+ {
+ assert(s[i] >= '0' && s[i] <= '9');
+ s_copy[i] = s[i] - '0';
+ }
+
+ mp_size_t limbs_written = mpn_set_str(this->data, s_copy, l, 10);
+ assert(limbs_written <= n);
+
+ delete[] s_copy;
+}
+
+template<mp_size_t n>
+bigint<n>::bigint(const mpz_t r) /// Initialize from MPZ element
+{
+ mpz_t k;
+ mpz_init_set(k, r);
+
+ for (size_t i = 0; i < n; ++i)
+ {
+ data[i] = mpz_get_ui(k);
+ mpz_fdiv_q_2exp(k, k, GMP_NUMB_BITS);
+ }
+
+ assert(mpz_sgn(k) == 0);
+ mpz_clear(k);
+}
+
+template<mp_size_t n>
+void bigint<n>::print() const
+{
+ gmp_printf("%Nd\n", this->data, n);
+}
+
+template<mp_size_t n>
+void bigint<n>::print_hex() const
+{
+ gmp_printf("%Nx\n", this->data, n);
+}
+
+template<mp_size_t n>
+bool bigint<n>::operator==(const bigint<n>& other) const
+{
+ return (mpn_cmp(this->data, other.data, n) == 0);
+}
+
+template<mp_size_t n>
+bool bigint<n>::operator!=(const bigint<n>& other) const
+{
+ return !(operator==(other));
+}
+
+template<mp_size_t n>
+void bigint<n>::clear()
+{
+ mpn_zero(this->data, n);
+}
+
+template<mp_size_t n>
+bool bigint<n>::is_zero() const
+{
+ for (mp_size_t i = 0; i < n; ++i)
+ {
+ if (this->data[i])
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template<mp_size_t n>
+size_t bigint<n>::num_bits() const
+{
+/*
+ for (long i = max_bits(); i >= 0; --i)
+ {
+ if (this->test_bit(i))
+ {
+ return i+1;
+ }
+ }
+
+ return 0;
+*/
+ for (long i = n-1; i >= 0; --i)
+ {
+ mp_limb_t x = this->data[i];
+ if (x == 0)
+ {
+ continue;
+ }
+ else
+ {
+ return ((i+1) * GMP_NUMB_BITS) - __builtin_clzl(x);
+ }
+ }
+ return 0;
+}
+
+template<mp_size_t n>
+unsigned long bigint<n>::as_ulong() const
+{
+ return this->data[0];
+}
+
+template<mp_size_t n>
+void bigint<n>::to_mpz(mpz_t r) const
+{
+ mpz_set_ui(r, 0);
+
+ for (int i = n-1; i >= 0; --i)
+ {
+ mpz_mul_2exp(r, r, GMP_NUMB_BITS);
+ mpz_add_ui(r, r, this->data[i]);
+ }
+}
+
+template<mp_size_t n>
+bool bigint<n>::test_bit(const std::size_t bitno) const
+{
+ if (bitno >= n * GMP_NUMB_BITS)
+ {
+ return false;
+ }
+ else
+ {
+ const std::size_t part = bitno/GMP_NUMB_BITS;
+ const std::size_t bit = bitno - (GMP_NUMB_BITS*part);
+ const mp_limb_t one = 1;
+ return (this->data[part] & (one<<bit));
+ }
+}
+
+
+template<mp_size_t n> template<mp_size_t m>
+inline void bigint<n>::operator+=(const bigint<m>& other)
+{
+ static_assert(n >= m, "first arg must not be smaller than second arg for bigint in-place add");
+ mpn_add(data, data, n, other.data, m);
+}
+
+template<mp_size_t n> template<mp_size_t m>
+inline bigint<n+m> bigint<n>::operator*(const bigint<m>& other) const
+{
+ static_assert(n >= m, "first arg must not be smaller than second arg for bigint mul");
+ bigint<n+m> res;
+ mpn_mul(res.data, data, n, other.data, m);
+ return res;
+}
+
+template<mp_size_t n> template<mp_size_t d>
+inline void bigint<n>::div_qr(bigint<n-d+1>& quotient, bigint<d>& remainder,
+ const bigint<n>& dividend, const bigint<d>& divisor)
+{
+ static_assert(n >= d, "dividend must not be smaller than divisor for bigint::div_qr");
+ assert(divisor.data[d-1] != 0);
+ mpn_tdiv_qr(quotient.data, remainder.data, 0, dividend.data, n, divisor.data, d);
+}
+
+// Return a copy shortened to m limbs, provided the value is less than q; throws std::domain_error if not in range.
+template<mp_size_t n> template<mp_size_t m>
+inline bigint<m> bigint<n>::shorten(const bigint<m>& q, const char *msg) const
+{
+ static_assert(m <= n, "number of limbs must not increase for bigint::shorten");
+ for (mp_size_t i = m; i < n; i++) { // high-order limbs
+ if (data[i] != 0) {
+ throw std::domain_error(msg);
+ }
+ }
+ bigint<m> res;
+    mpn_copyi(res.data, data, m); // the limbs beyond m were checked to be zero above
+ res.limit(q, msg);
+ return res;
+}
+
+template<mp_size_t n>
+inline void bigint<n>::limit(const bigint<n>& q, const char *msg) const
+{
+ if (!(q > *this)) {
+ throw std::domain_error(msg);
+ }
+}
+
+template<mp_size_t n>
+inline bool bigint<n>::operator>(const bigint<n>& other) const
+{
+ return mpn_cmp(this->data, other.data, n) > 0;
+}
+
+template<mp_size_t n>
+bigint<n>& bigint<n>::randomize()
+{
+ assert(GMP_NUMB_BITS == sizeof(mp_limb_t) * 8);
+
+ randombytes_buf(this->data, sizeof(mp_limb_t) * n);
+
+ return (*this);
+}
+
+
+template<mp_size_t n>
+std::ostream& operator<<(std::ostream &out, const bigint<n> &b)
+{
+#ifdef BINARY_OUTPUT
+ out.write((char*)b.data, sizeof(b.data[0]) * n);
+#else
+ mpz_t t;
+ mpz_init(t);
+ b.to_mpz(t);
+
+ out << t;
+
+ mpz_clear(t);
+#endif
+ return out;
+}
+
+template<mp_size_t n>
+std::istream& operator>>(std::istream &in, bigint<n> &b)
+{
+#ifdef BINARY_OUTPUT
+ in.read((char*)b.data, sizeof(b.data[0]) * n);
+#else
+ std::string s;
+ in >> s;
+
+ size_t l = s.size();
+ unsigned char* s_copy = new unsigned char[l];
+
+ for (size_t i = 0; i < l; ++i)
+ {
+ assert(s[i] >= '0' && s[i] <= '9');
+ s_copy[i] = s[i] - '0';
+ }
+
+ mp_size_t limbs_written = mpn_set_str(b.data, s_copy, l, 10);
+ assert(limbs_written <= n);
+
+ delete[] s_copy;
+#endif
+ return in;
+}
+
+} // libsnark
+#endif // BIGINT_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FIELD_UTILS_HPP_
+#define FIELD_UTILS_HPP_
+#include <cstdint>
+
+#include "common/utils.hpp"
+#include "algebra/fields/bigint.hpp"
+
+namespace libsnark {
+
+// returns root of unity of order n (for n a power of 2), if one exists
+template<typename FieldT>
+FieldT get_root_of_unity(const size_t n);
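+
+/* Example (illustrative sketch; FieldT stands for a prime field, such as an
+   Fp_model instantiation, with 2-adicity FieldT::s >= 3 so that n = 8 works):
+
+     const size_t n = 8;
+     const FieldT omega = get_root_of_unity<FieldT>(n);
+     assert((omega ^ n) == FieldT::one());        // omega^n == 1
+     assert((omega ^ (n / 2)) != FieldT::one());  // and omega has order exactly n
+*/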
+
+template<typename FieldT>
+std::vector<FieldT> pack_int_vector_into_field_element_vector(const std::vector<size_t> &v, const size_t w);
+
+template<typename FieldT>
+std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits);
+
+template<typename FieldT>
+std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v);
+
+template<typename FieldT>
+std::vector<FieldT> convert_bit_vector_to_field_element_vector(const bit_vector &v);
+
+template<typename FieldT>
+bit_vector convert_field_element_vector_to_bit_vector(const std::vector<FieldT> &v);
+
+template<typename FieldT>
+bit_vector convert_field_element_to_bit_vector(const FieldT &el);
+
+template<typename FieldT>
+bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount);
+
+template<typename FieldT>
+FieldT convert_bit_vector_to_field_element(const bit_vector &v);
+
+template<typename FieldT>
+void batch_invert(std::vector<FieldT> &vec);
+
+} // libsnark
+#include "algebra/fields/field_utils.tcc"
+
+#endif // FIELD_UTILS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of misc. math and serialization utility functions
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FIELD_UTILS_TCC_
+#define FIELD_UTILS_TCC_
+
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+FieldT coset_shift()
+{
+ return FieldT::multiplicative_generator.squared();
+}
+
+template<typename FieldT>
+FieldT get_root_of_unity(const size_t n)
+{
+ const size_t logn = log2(n);
+ assert(n == (1u << logn));
+ assert(logn <= FieldT::s);
+
+ FieldT omega = FieldT::root_of_unity;
+ for (size_t i = FieldT::s; i > logn; --i)
+ {
+ omega *= omega;
+ }
+
+ return omega;
+}
+
+template<typename FieldT>
+std::vector<FieldT> pack_int_vector_into_field_element_vector(const std::vector<size_t> &v, const size_t w)
+{
+ const size_t chunk_bits = FieldT::capacity();
+ const size_t repacked_size = div_ceil(v.size() * w, chunk_bits);
+ std::vector<FieldT> result(repacked_size);
+
+ for (size_t i = 0; i < repacked_size; ++i)
+ {
+ bigint<FieldT::num_limbs> b;
+ for (size_t j = 0; j < chunk_bits; ++j)
+ {
+ const size_t word_index = (i * chunk_bits + j) / w;
+ const size_t pos_in_word = (i * chunk_bits + j) % w;
+ const size_t word_or_0 = (word_index < v.size() ? v[word_index] : 0);
+ const size_t bit = (word_or_0 >> pos_in_word) & 1;
+
+ b.data[j / GMP_NUMB_BITS] |= bit << (j % GMP_NUMB_BITS);
+ }
+ result[i] = FieldT(b);
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v, const size_t chunk_bits)
+{
+ assert(chunk_bits <= FieldT::capacity());
+
+ const size_t repacked_size = div_ceil(v.size(), chunk_bits);
+ std::vector<FieldT> result(repacked_size);
+
+ for (size_t i = 0; i < repacked_size; ++i)
+ {
+ bigint<FieldT::num_limbs> b;
+ for (size_t j = 0; j < chunk_bits; ++j)
+ {
+ b.data[j / GMP_NUMB_BITS] |= ((i * chunk_bits + j) < v.size() && v[i * chunk_bits + j] ? 1ll : 0ll) << (j % GMP_NUMB_BITS);
+ }
+ result[i] = FieldT(b);
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+std::vector<FieldT> pack_bit_vector_into_field_element_vector(const bit_vector &v)
+{
+ return pack_bit_vector_into_field_element_vector<FieldT>(v, FieldT::capacity());
+}
+
+template<typename FieldT>
+std::vector<FieldT> convert_bit_vector_to_field_element_vector(const bit_vector &v)
+{
+ std::vector<FieldT> result;
+ result.reserve(v.size());
+
+ for (const bool b : v)
+ {
+ result.emplace_back(b ? FieldT::one() : FieldT::zero());
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+bit_vector convert_field_element_vector_to_bit_vector(const std::vector<FieldT> &v)
+{
+ bit_vector result;
+
+ for (const FieldT &el : v)
+ {
+ const bit_vector el_bits = convert_field_element_to_bit_vector<FieldT>(el);
+ result.insert(result.end(), el_bits.begin(), el_bits.end());
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+bit_vector convert_field_element_to_bit_vector(const FieldT &el)
+{
+ bit_vector result;
+
+ bigint<FieldT::num_limbs> b = el.as_bigint();
+ for (size_t i = 0; i < FieldT::size_in_bits(); ++i)
+ {
+ result.push_back(b.test_bit(i));
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+bit_vector convert_field_element_to_bit_vector(const FieldT &el, const size_t bitcount)
+{
+ bit_vector result = convert_field_element_to_bit_vector(el);
+ result.resize(bitcount);
+
+ return result;
+}
+
+template<typename FieldT>
+FieldT convert_bit_vector_to_field_element(const bit_vector &v)
+{
+ assert(v.size() <= FieldT::size_in_bits());
+
+ FieldT res = FieldT::zero();
+ FieldT c = FieldT::one();
+ for (bool b : v)
+ {
+ res += b ? c : FieldT::zero();
+ c += c;
+ }
+ return res;
+}
+
+template<typename FieldT>
+void batch_invert(std::vector<FieldT> &vec)
+{
+ std::vector<FieldT> prod;
+ prod.reserve(vec.size());
+
+ FieldT acc = FieldT::one();
+
+ for (auto el : vec)
+ {
+ assert(!el.is_zero());
+ prod.emplace_back(acc);
+ acc = acc * el;
+ }
+
+ FieldT acc_inverse = acc.inverse();
+
+ for (long i = vec.size()-1; i >= 0; --i)
+ {
+ const FieldT old_el = vec[i];
+ vec[i] = acc_inverse * prod[i];
+ acc_inverse = acc_inverse * old_el;
+ }
+}
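+
+/* batch_invert is the standard batch-inversion trick (often attributed to
+   Montgomery): one field inversion plus O(|vec|) multiplications replace |vec|
+   separate inversions. Example (illustrative sketch):
+
+     std::vector<FieldT> v = { FieldT(2), FieldT(3), FieldT(5) };
+     batch_invert<FieldT>(v);
+     assert(v[1] == FieldT(3).inverse());
+*/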
+
+} // libsnark
+#endif // FIELD_UTILS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of arithmetic in the finite field F[p], for prime p of fixed length.
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP_HPP_
+#define FP_HPP_
+
+#include "algebra/fields/bigint.hpp"
+#include "algebra/exponentiation/exponentiation.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp_model;
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &, const Fp_model<n, modulus>&);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &, Fp_model<n, modulus> &);
+
+/**
+ * Arithmetic in the finite field F[p], for prime p of fixed length.
+ *
+ * This class implements Fp-arithmetic, for a large prime p, using a fixed number
+ * of words. It is optimized for tight memory consumption, so the modulus p is
+ * passed as a template parameter, to avoid per-element overheads.
+ *
+ * The implementation is mostly a wrapper around GMP's MPN (constant-size integers).
+ * But for the integer sizes of interest for libsnark (3 to 5 limbs of 64 bits each),
+ * we implement performance-critical routines, like addition and multiplication,
+ * using hand-optimized assembly code.
+*/
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp_model {
+public:
+ bigint<n> mont_repr;
+public:
+ static const mp_size_t num_limbs = n;
+ static const constexpr bigint<n>& mod = modulus;
+#ifdef PROFILE_OP_COUNTS
+ static long long add_cnt;
+ static long long sub_cnt;
+ static long long mul_cnt;
+ static long long sqr_cnt;
+ static long long inv_cnt;
+#endif
+ static size_t num_bits;
+ static bigint<n> euler; // (modulus-1)/2
+ static size_t s; // modulus = 2^s * t + 1
+ static bigint<n> t; // with t odd
+ static bigint<n> t_minus_1_over_2; // (t-1)/2
+ static Fp_model<n, modulus> nqr; // a quadratic nonresidue
+ static Fp_model<n, modulus> nqr_to_t; // nqr^t
+ static Fp_model<n, modulus> multiplicative_generator; // generator of Fp^*
+ static Fp_model<n, modulus> root_of_unity; // generator^((modulus-1)/2^s)
+ static mp_limb_t inv; // modulus^(-1) mod W, where W = 2^(word size)
+ static bigint<n> Rsquared; // R^2, where R = W^k, where k = ??
+ static bigint<n> Rcubed; // R^3
+
+ static bool modulus_is_valid() { return modulus.data[n-1] != 0; } // mpn inverse assumes that highest limb is non-zero
+
+ Fp_model() {};
+ Fp_model(const bigint<n> &b);
+ Fp_model(const long x, const bool is_unsigned=false);
+
+ void set_ulong(const unsigned long x);
+
+ void mul_reduce(const bigint<n> &other);
+
+ void clear();
+
+ /* Return the standard (not Montgomery) representation of the
+       field element's equivalence class. I.e. Fp(2).as_bigint()
+ would return bigint(2) */
+ bigint<n> as_bigint() const;
+    /* Return the least-significant limb of the standard representation of the
+ field element. E.g. on 64-bit architectures Fp(123).as_ulong()
+ and Fp(2^64+123).as_ulong() would both return 123. */
+ unsigned long as_ulong() const;
+
+ bool operator==(const Fp_model& other) const;
+ bool operator!=(const Fp_model& other) const;
+ bool is_zero() const;
+
+ void print() const;
+
+ Fp_model& operator+=(const Fp_model& other);
+ Fp_model& operator-=(const Fp_model& other);
+ Fp_model& operator*=(const Fp_model& other);
+ Fp_model& operator^=(const unsigned long pow);
+
+ template<mp_size_t m>
+ Fp_model& operator^=(const bigint<m> &pow);
+
+ Fp_model operator+(const Fp_model& other) const;
+ Fp_model operator-(const Fp_model& other) const;
+ Fp_model operator*(const Fp_model& other) const;
+ Fp_model operator-() const;
+ Fp_model squared() const;
+ Fp_model& invert();
+ Fp_model inverse() const;
+ Fp_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate)
+
+ Fp_model operator^(const unsigned long pow) const;
+ template<mp_size_t m>
+ Fp_model operator^(const bigint<m> &pow) const;
+
+ static size_t size_in_bits() { return num_bits; }
+ static size_t capacity() { return num_bits - 1; }
+ static bigint<n> field_char() { return modulus; }
+
+ static Fp_model<n, modulus> zero();
+ static Fp_model<n, modulus> one();
+ static Fp_model<n, modulus> random_element();
+
+ friend std::ostream& operator<< <n,modulus>(std::ostream &out, const Fp_model<n, modulus> &p);
+ friend std::istream& operator>> <n,modulus>(std::istream &in, Fp_model<n, modulus> &p);
+};
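+
+/*
+ * Example (illustrative sketch; "fields::p" is a hypothetical bigint<4> prime
+ * modulus declared elsewhere, and the field's static parameters (num_bits,
+ * Rsquared, inv, etc.) are assumed to have been initialized, as curve-specific
+ * init code normally does):
+ *
+ *   typedef Fp_model<4, fields::p> Fp;
+ *   Fp a = Fp::random_element();
+ *   assert(a.squared() == a * a);
+ *   if (!a.is_zero())
+ *   {
+ *       assert(a * a.inverse() == Fp::one());
+ *   }
+ */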
+
+#ifdef PROFILE_OP_COUNTS
+template<mp_size_t n, const bigint<n>& modulus>
+long long Fp_model<n, modulus>::add_cnt = 0;
+
+template<mp_size_t n, const bigint<n>& modulus>
+long long Fp_model<n, modulus>::sub_cnt = 0;
+
+template<mp_size_t n, const bigint<n>& modulus>
+long long Fp_model<n, modulus>::mul_cnt = 0;
+
+template<mp_size_t n, const bigint<n>& modulus>
+long long Fp_model<n, modulus>::sqr_cnt = 0;
+
+template<mp_size_t n, const bigint<n>& modulus>
+long long Fp_model<n, modulus>::inv_cnt = 0;
+#endif
+
+template<mp_size_t n, const bigint<n>& modulus>
+size_t Fp_model<n, modulus>::num_bits;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n, modulus>::euler;
+
+template<mp_size_t n, const bigint<n>& modulus>
+size_t Fp_model<n, modulus>::s;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n, modulus>::t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n, modulus>::t_minus_1_over_2;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp_model<n, modulus>::nqr;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp_model<n, modulus>::nqr_to_t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp_model<n, modulus>::multiplicative_generator;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp_model<n, modulus>::root_of_unity;
+
+template<mp_size_t n, const bigint<n>& modulus>
+mp_limb_t Fp_model<n, modulus>::inv;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n, modulus>::Rsquared;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n, modulus>::Rcubed;
+
+} // libsnark
+#include "algebra/fields/fp.tcc"
+
+#endif // FP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of arithmetic in the finite field F[p], for prime p of fixed length.
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP_TCC_
+#define FP_TCC_
+#include <cassert>
+#include <cstdlib>
+#include <cmath>
+
+#include "algebra/fields/fp_aux.tcc"
+#include "algebra/fields/field_utils.hpp"
+#include "common/assert_except.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+void Fp_model<n,modulus>::mul_reduce(const bigint<n> &other)
+{
+ /* stupid pre-processor tricks; beware */
+#if defined(__x86_64__) && defined(USE_ASM)
+ if (n == 3)
+ { // Use asm-optimized Comba multiplication and reduction
+ mp_limb_t res[2*n];
+ mp_limb_t c0, c1, c2;
+ COMBA_3_BY_3_MUL(c0, c1, c2, res, this->mont_repr.data, other.data);
+
+ mp_limb_t k;
+ mp_limb_t tmp1, tmp2, tmp3;
+ REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data);
+
+ /* subtract t > mod */
+ __asm__
+ ("/* check for overflow */ \n\t"
+ MONT_CMP(16)
+ MONT_CMP(8)
+ MONT_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ MONT_FIRSTSUB
+ MONT_NEXTSUB(8)
+ MONT_NEXTSUB(16)
+ "done%=: \n\t"
+ :
+ : [tmp] "r" (res+n), [M] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ mpn_copyi(this->mont_repr.data, res+n, n);
+ }
+ else if (n == 4)
+ { // use asm-optimized "CIOS method"
+
+ mp_limb_t tmp[n+1];
+ mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this
+
+ __asm__ (MONT_PRECOMPUTE
+ MONT_FIRSTITER(1)
+ MONT_FIRSTITER(2)
+ MONT_FIRSTITER(3)
+ MONT_FINALIZE(3)
+ MONT_ITERFIRST(1)
+ MONT_ITERITER(1, 1)
+ MONT_ITERITER(1, 2)
+ MONT_ITERITER(1, 3)
+ MONT_FINALIZE(3)
+ MONT_ITERFIRST(2)
+ MONT_ITERITER(2, 1)
+ MONT_ITERITER(2, 2)
+ MONT_ITERITER(2, 3)
+ MONT_FINALIZE(3)
+ MONT_ITERFIRST(3)
+ MONT_ITERITER(3, 1)
+ MONT_ITERITER(3, 2)
+ MONT_ITERITER(3, 3)
+ MONT_FINALIZE(3)
+ "/* check for overflow */ \n\t"
+ MONT_CMP(24)
+ MONT_CMP(16)
+ MONT_CMP(8)
+ MONT_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ MONT_FIRSTSUB
+ MONT_NEXTSUB(8)
+ MONT_NEXTSUB(16)
+ MONT_NEXTSUB(24)
+ "done%=: \n\t"
+ :
+ : [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data),
+ [T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u)
+ : "cc", "memory", "%rax", "%rdx"
+ );
+ mpn_copyi(this->mont_repr.data, tmp, n);
+ }
+ else if (n == 5)
+ { // use asm-optimized "CIOS method"
+
+ mp_limb_t tmp[n+1];
+ mp_limb_t T0=0, T1=1, cy=2, u=3; // TODO: fix this
+
+ __asm__ (MONT_PRECOMPUTE
+ MONT_FIRSTITER(1)
+ MONT_FIRSTITER(2)
+ MONT_FIRSTITER(3)
+ MONT_FIRSTITER(4)
+ MONT_FINALIZE(4)
+ MONT_ITERFIRST(1)
+ MONT_ITERITER(1, 1)
+ MONT_ITERITER(1, 2)
+ MONT_ITERITER(1, 3)
+ MONT_ITERITER(1, 4)
+ MONT_FINALIZE(4)
+ MONT_ITERFIRST(2)
+ MONT_ITERITER(2, 1)
+ MONT_ITERITER(2, 2)
+ MONT_ITERITER(2, 3)
+ MONT_ITERITER(2, 4)
+ MONT_FINALIZE(4)
+ MONT_ITERFIRST(3)
+ MONT_ITERITER(3, 1)
+ MONT_ITERITER(3, 2)
+ MONT_ITERITER(3, 3)
+ MONT_ITERITER(3, 4)
+ MONT_FINALIZE(4)
+ MONT_ITERFIRST(4)
+ MONT_ITERITER(4, 1)
+ MONT_ITERITER(4, 2)
+ MONT_ITERITER(4, 3)
+ MONT_ITERITER(4, 4)
+ MONT_FINALIZE(4)
+ "/* check for overflow */ \n\t"
+ MONT_CMP(32)
+ MONT_CMP(24)
+ MONT_CMP(16)
+ MONT_CMP(8)
+ MONT_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ MONT_FIRSTSUB
+ MONT_NEXTSUB(8)
+ MONT_NEXTSUB(16)
+ MONT_NEXTSUB(24)
+ MONT_NEXTSUB(32)
+ "done%=: \n\t"
+ :
+ : [tmp] "r" (tmp), [A] "r" (this->mont_repr.data), [B] "r" (other.data), [inv] "r" (inv), [M] "r" (modulus.data),
+ [T0] "r" (T0), [T1] "r" (T1), [cy] "r" (cy), [u] "r" (u)
+ : "cc", "memory", "%rax", "%rdx"
+ );
+ mpn_copyi(this->mont_repr.data, tmp, n);
+ }
+ else
+#endif
+ {
+ mp_limb_t res[2*n];
+ mpn_mul_n(res, this->mont_repr.data, other.data, n);
+
+ /*
+ The Montgomery reduction here is based on Algorithm 14.32 in
+ Handbook of Applied Cryptography
+ <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
+ */
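+        /* Each iteration chooses k so that adding k * modulus * b^i clears limb
+           res[i]; after n iterations the n low limbs of res are zero, so copying
+           from res+n below implements the division by R = b^n, and the final
+           conditional subtraction brings the result under the modulus. */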
+ for (size_t i = 0; i < n; ++i)
+ {
+ mp_limb_t k = inv * res[i];
+ /* calculate res = res + k * mod * b^i */
+ mp_limb_t carryout = mpn_addmul_1(res+i, modulus.data, n, k);
+ carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
+ assert(carryout == 0);
+ }
+
+ if (mpn_cmp(res+n, modulus.data, n) >= 0)
+ {
+ const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus.data, n);
+ assert(borrow == 0);
+ }
+
+ mpn_copyi(this->mont_repr.data, res+n, n);
+ }
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>::Fp_model(const bigint<n> &b)
+{
+ mpn_copyi(this->mont_repr.data, Rsquared.data, n);
+ mul_reduce(b);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>::Fp_model(const long x, const bool is_unsigned)
+{
+ if (is_unsigned || x >= 0)
+ {
+ this->mont_repr.data[0] = x;
+ }
+ else
+ {
+ const mp_limb_t borrow = mpn_sub_1(this->mont_repr.data, modulus.data, n, -x);
+ assert(borrow == 0);
+ }
+
+ mul_reduce(Rsquared);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+void Fp_model<n,modulus>::set_ulong(const unsigned long x)
+{
+ this->mont_repr.clear();
+ this->mont_repr.data[0] = x;
+ mul_reduce(Rsquared);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+void Fp_model<n,modulus>::clear()
+{
+ this->mont_repr.clear();
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<n> Fp_model<n,modulus>::as_bigint() const
+{
+ bigint<n> one;
+ one.clear();
+ one.data[0] = 1;
+
+ Fp_model<n, modulus> res(*this);
+ res.mul_reduce(one);
+
+ return (res.mont_repr);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+unsigned long Fp_model<n,modulus>::as_ulong() const
+{
+ return this->as_bigint().as_ulong();
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp_model<n,modulus>::operator==(const Fp_model& other) const
+{
+ return (this->mont_repr == other.mont_repr);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp_model<n,modulus>::operator!=(const Fp_model& other) const
+{
+ return (this->mont_repr != other.mont_repr);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp_model<n,modulus>::is_zero() const
+{
+ return (this->mont_repr.is_zero()); // zero maps to zero
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+void Fp_model<n,modulus>::print() const
+{
+ Fp_model<n,modulus> tmp;
+ tmp.mont_repr.data[0] = 1;
+ tmp.mul_reduce(this->mont_repr);
+
+ tmp.mont_repr.print();
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::zero()
+{
+ Fp_model<n,modulus> res;
+ res.mont_repr.clear();
+ return res;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::one()
+{
+ Fp_model<n,modulus> res;
+ res.mont_repr.data[0] = 1;
+ res.mul_reduce(Rsquared);
+ return res;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>& Fp_model<n,modulus>::operator+=(const Fp_model<n,modulus>& other)
+{
+#ifdef PROFILE_OP_COUNTS
+ this->add_cnt++;
+#endif
+#if defined(__x86_64__) && defined(USE_ASM)
+ if (n == 3)
+ {
+ __asm__
+ ("/* perform bignum addition */ \n\t"
+ ADD_FIRSTADD
+ ADD_NEXTADD(8)
+ ADD_NEXTADD(16)
+ "/* if overflow: subtract */ \n\t"
+ "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
+ "jc subtract%= \n\t"
+
+ "/* check for overflow */ \n\t"
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ ADD_FIRSTSUB
+ ADD_NEXTSUB(8)
+ ADD_NEXTSUB(16)
+ "done%=: \n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else if (n == 4)
+ {
+ __asm__
+ ("/* perform bignum addition */ \n\t"
+ ADD_FIRSTADD
+ ADD_NEXTADD(8)
+ ADD_NEXTADD(16)
+ ADD_NEXTADD(24)
+ "/* if overflow: subtract */ \n\t"
+ "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
+ "jc subtract%= \n\t"
+
+ "/* check for overflow */ \n\t"
+ ADD_CMP(24)
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ ADD_FIRSTSUB
+ ADD_NEXTSUB(8)
+ ADD_NEXTSUB(16)
+ ADD_NEXTSUB(24)
+ "done%=: \n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else if (n == 5)
+ {
+ __asm__
+ ("/* perform bignum addition */ \n\t"
+ ADD_FIRSTADD
+ ADD_NEXTADD(8)
+ ADD_NEXTADD(16)
+ ADD_NEXTADD(24)
+ ADD_NEXTADD(32)
+ "/* if overflow: subtract */ \n\t"
+ "/* (tricky point: if A and B are in the range we do not need to do anything special for the possible carry flag) */ \n\t"
+ "jc subtract%= \n\t"
+
+ "/* check for overflow */ \n\t"
+ ADD_CMP(32)
+ ADD_CMP(24)
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ ADD_FIRSTSUB
+ ADD_NEXTSUB(8)
+ ADD_NEXTSUB(16)
+ ADD_NEXTSUB(24)
+ ADD_NEXTSUB(32)
+ "done%=: \n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else
+#endif
+ {
+ mp_limb_t scratch[n+1];
+ const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, other.mont_repr.data, n);
+ scratch[n] = carry;
+
+ if (carry || mpn_cmp(scratch, modulus.data, n) >= 0)
+ {
+ const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, modulus.data, n);
+ assert(borrow == 0);
+ }
+
+ mpn_copyi(this->mont_repr.data, scratch, n);
+ }
+
+ return *this;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>& Fp_model<n,modulus>::operator-=(const Fp_model<n,modulus>& other)
+{
+#ifdef PROFILE_OP_COUNTS
+ this->sub_cnt++;
+#endif
+#if defined(__x86_64__) && defined(USE_ASM)
+ if (n == 3)
+ {
+ __asm__
+ (SUB_FIRSTSUB
+ SUB_NEXTSUB(8)
+ SUB_NEXTSUB(16)
+
+ "jnc done%=\n\t"
+
+ SUB_FIRSTADD
+ SUB_NEXTADD(8)
+ SUB_NEXTADD(16)
+
+ "done%=:\n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else if (n == 4)
+ {
+ __asm__
+ (SUB_FIRSTSUB
+ SUB_NEXTSUB(8)
+ SUB_NEXTSUB(16)
+ SUB_NEXTSUB(24)
+
+ "jnc done%=\n\t"
+
+ SUB_FIRSTADD
+ SUB_NEXTADD(8)
+ SUB_NEXTADD(16)
+ SUB_NEXTADD(24)
+
+ "done%=:\n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else if (n == 5)
+ {
+ __asm__
+ (SUB_FIRSTSUB
+ SUB_NEXTSUB(8)
+ SUB_NEXTSUB(16)
+ SUB_NEXTSUB(24)
+ SUB_NEXTSUB(32)
+
+ "jnc done%=\n\t"
+
+ SUB_FIRSTADD
+ SUB_NEXTADD(8)
+ SUB_NEXTADD(16)
+ SUB_NEXTADD(24)
+ SUB_NEXTADD(32)
+
+ "done%=:\n\t"
+ :
+ : [A] "r" (this->mont_repr.data), [B] "r" (other.mont_repr.data), [mod] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+ }
+ else
+#endif
+ {
+ mp_limb_t scratch[n+1];
+ if (mpn_cmp(this->mont_repr.data, other.mont_repr.data, n) < 0)
+ {
+ const mp_limb_t carry = mpn_add_n(scratch, this->mont_repr.data, modulus.data, n);
+ scratch[n] = carry;
+ }
+ else
+ {
+ mpn_copyi(scratch, this->mont_repr.data, n);
+ scratch[n] = 0;
+ }
+
+ const mp_limb_t borrow = mpn_sub(scratch, scratch, n+1, other.mont_repr.data, n);
+ assert(borrow == 0);
+
+ mpn_copyi(this->mont_repr.data, scratch, n);
+ }
+ return *this;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>& Fp_model<n,modulus>::operator*=(const Fp_model<n,modulus>& other)
+{
+#ifdef PROFILE_OP_COUNTS
+ this->mul_cnt++;
+#endif
+
+ mul_reduce(other.mont_repr);
+ return *this;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>& Fp_model<n,modulus>::operator^=(const unsigned long pow)
+{
+ (*this) = power<Fp_model<n, modulus> >(*this, pow);
+ return (*this);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp_model<n,modulus>& Fp_model<n,modulus>::operator^=(const bigint<m> &pow)
+{
+ (*this) = power<Fp_model<n, modulus>, m>(*this, pow);
+ return (*this);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator+(const Fp_model<n,modulus>& other) const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r += other);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator-(const Fp_model<n,modulus>& other) const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r -= other);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator*(const Fp_model<n,modulus>& other) const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r *= other);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator^(const unsigned long pow) const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r ^= pow);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator^(const bigint<m> &pow) const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r ^= pow);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::operator-() const
+{
+#ifdef PROFILE_OP_COUNTS
+ this->sub_cnt++;
+#endif
+
+ if (this->is_zero())
+ {
+ return (*this);
+ }
+ else
+ {
+ Fp_model<n, modulus> r;
+ mpn_sub_n(r.mont_repr.data, modulus.data, this->mont_repr.data, n);
+ return r;
+ }
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::squared() const
+{
+#ifdef PROFILE_OP_COUNTS
+ this->sqr_cnt++;
+ this->mul_cnt--; // zero out the upcoming mul
+#endif
+ /* stupid pre-processor tricks; beware */
+#if defined(__x86_64__) && defined(USE_ASM)
+ if (n == 3)
+ { // use asm-optimized Comba squaring
+ mp_limb_t res[2*n];
+ mp_limb_t c0, c1, c2;
+ COMBA_3_BY_3_SQR(c0, c1, c2, res, this->mont_repr.data);
+
+ mp_limb_t k;
+ mp_limb_t tmp1, tmp2, tmp3;
+ REDUCE_6_LIMB_PRODUCT(k, tmp1, tmp2, tmp3, inv, res, modulus.data);
+
+ /* subtract t > mod */
+ __asm__ volatile
+ ("/* check for overflow */ \n\t"
+ MONT_CMP(16)
+ MONT_CMP(8)
+ MONT_CMP(0)
+
+ "/* subtract mod if overflow */ \n\t"
+ "subtract%=: \n\t"
+ MONT_FIRSTSUB
+ MONT_NEXTSUB(8)
+ MONT_NEXTSUB(16)
+ "done%=: \n\t"
+ :
+ : [tmp] "r" (res+n), [M] "r" (modulus.data)
+ : "cc", "memory", "%rax");
+
+ Fp_model<n, modulus> r;
+ mpn_copyi(r.mont_repr.data, res+n, n);
+ return r;
+ }
+ else
+#endif
+ {
+ Fp_model<n, modulus> r(*this);
+ return (r *= r);
+ }
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus>& Fp_model<n,modulus>::invert()
+{
+#ifdef PROFILE_OP_COUNTS
+ this->inv_cnt++;
+#endif
+
+ assert(!this->is_zero());
+
+ bigint<n> g; /* gp should have room for vn = n limbs */
+
+ mp_limb_t s[n+1]; /* sp should have room for vn+1 limbs */
+ mp_size_t sn;
+
+ bigint<n> v = modulus; // both source operands are destroyed by mpn_gcdext
+
+ /* computes gcd(u, v) = g = u*s + v*t, so s*u will be 1 (mod v) */
+ const mp_size_t gn = mpn_gcdext(g.data, s, &sn, this->mont_repr.data, n, v.data, n);
+ assert(gn == 1 && g.data[0] == 1); /* inverse exists */
+
+ mp_limb_t q; /* division result fits into q, as sn <= n+1 */
+ /* sn < 0 indicates negative sn; will fix up later */
+
+ if (std::abs(sn) >= n)
+ {
+ /* if sn could require modulus reduction, do it here */
+ mpn_tdiv_qr(&q, this->mont_repr.data, 0, s, std::abs(sn), modulus.data, n);
+ }
+ else
+ {
+ /* otherwise just copy it over */
+ mpn_zero(this->mont_repr.data, n);
+ mpn_copyi(this->mont_repr.data, s, std::abs(sn));
+ }
+
+ /* fix up the negative sn */
+ if (sn < 0)
+ {
+ const mp_limb_t borrow = mpn_sub_n(this->mont_repr.data, modulus.data, this->mont_repr.data, n);
+ assert(borrow == 0);
+ }
+
+ mul_reduce(Rcubed);
+ return *this;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::inverse() const
+{
+ Fp_model<n, modulus> r(*this);
+ return (r.invert());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp_model<n,modulus>::random_element() /// returns random element of Fp_model
+{
+    /* note that, since the Montgomery representation is a bijection,
+ selecting a random element of {xR} is the same as selecting a
+ random element of {x} */
+ Fp_model<n, modulus> r;
+ do
+ {
+ r.mont_repr.randomize();
+
+ /* clear all bits higher than MSB of modulus */
+ size_t bitno = GMP_NUMB_BITS * n - 1;
+ while (modulus.test_bit(bitno) == false)
+ {
+ const std::size_t part = bitno/GMP_NUMB_BITS;
+ const std::size_t bit = bitno - (GMP_NUMB_BITS*part);
+
+ r.mont_repr.data[part] &= ~(1ul<<bit);
+
+ bitno--;
+ }
+ }
+ /* if r.data is still >= modulus -- repeat (rejection sampling) */
+ while (mpn_cmp(r.mont_repr.data, modulus.data, n) >= 0);
+
+ return r;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n,modulus> Fp_model<n,modulus>::sqrt() const
+{
+ if (is_zero()) {
+ return *this;
+ }
+
+ Fp_model<n,modulus> one = Fp_model<n,modulus>::one();
+
+ size_t v = Fp_model<n,modulus>::s;
+ Fp_model<n,modulus> z = Fp_model<n,modulus>::nqr_to_t;
+ Fp_model<n,modulus> w = (*this)^Fp_model<n,modulus>::t_minus_1_over_2;
+ Fp_model<n,modulus> x = (*this) * w;
+ Fp_model<n,modulus> b = x * w; // b = (*this)^t
+
+
+ // check if square with euler's criterion
+ Fp_model<n,modulus> check = b;
+ for (size_t i = 0; i < v-1; ++i)
+ {
+ check = check.squared();
+ }
+ if (check != one)
+ {
+ assert_except(0);
+ }
+
+
+ // compute square root with Tonelli--Shanks
+ // (does not terminate if not a square!)
+
+ while (b != one)
+ {
+ size_t m = 0;
+ Fp_model<n,modulus> b2m = b;
+ while (b2m != one)
+ {
+ /* invariant: b2m = b^(2^m) after entering this loop */
+ b2m = b2m.squared();
+ m += 1;
+ }
+
+ int j = v-m-1;
+ w = z;
+ while (j > 0)
+ {
+ w = w.squared();
+ --j;
+ } // w = z^2^(v-m-1)
+
+ z = w.squared();
+ b = b * z;
+ x = x * w;
+ v = m;
+ }
+
+ return x;
+}
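+
+/* Example (illustrative sketch; FieldT stands for any Fp_model instantiation
+   whose static parameters have been initialized):
+
+     const FieldT a = FieldT::random_element();
+     const FieldT b = a.squared();  // b is a square by construction
+     const FieldT s = b.sqrt();     // Tonelli--Shanks returns one of the two roots
+     assert(s == a || s == -a);
+*/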
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &out, const Fp_model<n, modulus> &p)
+{
+#ifndef MONTGOMERY_OUTPUT
+ Fp_model<n,modulus> tmp;
+ tmp.mont_repr.data[0] = 1;
+ tmp.mul_reduce(p.mont_repr);
+ out << tmp.mont_repr;
+#else
+ out << p.mont_repr;
+#endif
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &in, Fp_model<n, modulus> &p)
+{
+#ifndef MONTGOMERY_OUTPUT
+ in >> p.mont_repr;
+ p.mul_reduce(Fp_model<n, modulus>::Rsquared);
+#else
+ in >> p.mont_repr;
+#endif
+ return in;
+}
+
+} // libsnark
+#endif // FP_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of arithmetic in the finite field F[((p^2)^3)^2].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP12_2OVER3OVER2_HPP_
+#define FP12_2OVER3OVER2_HPP_
+#include "algebra/fields/fp.hpp"
+#include "algebra/fields/fp2.hpp"
+#include "algebra/fields/fp6_3over2.hpp"
+#include <vector>
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp12_2over3over2_model;
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &, const Fp12_2over3over2_model<n, modulus> &);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &, Fp12_2over3over2_model<n, modulus> &);
+
+/**
+ * Arithmetic in the finite field F[((p^2)^3)^2].
+ *
+ * Let p := modulus. This interface provides arithmetic for the extension field
+ * Fp12 = Fp6[W]/(W^2-V) where Fp6 = Fp2[V]/(V^3-non_residue) and non_residue is in Fp2
+ *
+ * ASSUMPTION: p = 1 (mod 6)
+ */
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp12_2over3over2_model {
+public:
+ typedef Fp_model<n, modulus> my_Fp;
+ typedef Fp2_model<n, modulus> my_Fp2;
+ typedef Fp6_3over2_model<n, modulus> my_Fp6;
+
+ static Fp2_model<n, modulus> non_residue;
+ static Fp2_model<n, modulus> Frobenius_coeffs_c1[12]; // non_residue^((modulus^i-1)/6) for i=0,...,11
+
+ my_Fp6 c0, c1;
+ Fp12_2over3over2_model() {};
+ Fp12_2over3over2_model(const my_Fp6& c0, const my_Fp6& c1) : c0(c0), c1(c1) {};
+
+ void clear() { c0.clear(); c1.clear(); }
+ void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); }
+
+ static Fp12_2over3over2_model<n, modulus> zero();
+ static Fp12_2over3over2_model<n, modulus> one();
+ static Fp12_2over3over2_model<n, modulus> random_element();
+
+ bool is_zero() const { return c0.is_zero() && c1.is_zero(); }
+ bool operator==(const Fp12_2over3over2_model &other) const;
+ bool operator!=(const Fp12_2over3over2_model &other) const;
+
+ Fp12_2over3over2_model operator+(const Fp12_2over3over2_model &other) const;
+ Fp12_2over3over2_model operator-(const Fp12_2over3over2_model &other) const;
+ Fp12_2over3over2_model operator*(const Fp12_2over3over2_model &other) const;
+ Fp12_2over3over2_model operator-() const;
+ Fp12_2over3over2_model squared() const; // default is squared_complex
+ Fp12_2over3over2_model squared_karatsuba() const;
+ Fp12_2over3over2_model squared_complex() const;
+ Fp12_2over3over2_model inverse() const;
+ Fp12_2over3over2_model Frobenius_map(unsigned long power) const;
+ Fp12_2over3over2_model unitary_inverse() const;
+ Fp12_2over3over2_model cyclotomic_squared() const;
+
+ Fp12_2over3over2_model mul_by_024(const my_Fp2 &ell_0, const my_Fp2 &ell_VW, const my_Fp2 &ell_VV) const;
+
+ static my_Fp6 mul_by_non_residue(const my_Fp6 &elt);
+
+ template<mp_size_t m>
+ Fp12_2over3over2_model cyclotomic_exp(const bigint<m> &exponent) const;
+
+ static bigint<n> base_field_char() { return modulus; }
+ static size_t extension_degree() { return 12; }
+
+ friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp12_2over3over2_model<n, modulus> &el);
+ friend std::istream& operator>> <n, modulus>(std::istream &in, Fp12_2over3over2_model<n, modulus> &el);
+};
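+
+/*
+ * Example (illustrative sketch; "Fq12" is a hypothetical alias for a concrete
+ * instantiation of this template, e.g. the degree-12 extension used by a
+ * pairing-friendly curve whose parameters have been initialized):
+ *
+ *   Fq12 x = Fq12::random_element();
+ *   assert(x.squared() == x.squared_karatsuba()); // both squaring variants agree
+ *   if (!x.is_zero())
+ *   {
+ *       assert(x * x.inverse() == Fq12::one());
+ *   }
+ */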
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp12_2over3over2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp12_2over3over2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp6_3over2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus, mp_size_t m>
+Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const bigint<m> &exponent);
+
+template<mp_size_t n, const bigint<n>& modulus, mp_size_t m, const bigint<m>& exp_modulus>
+Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const Fp_model<m, exp_modulus> &exponent);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp12_2over3over2_model<n, modulus>::non_residue;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp12_2over3over2_model<n, modulus>::Frobenius_coeffs_c1[12];
+
+} // libsnark
+#include "algebra/fields/fp12_2over3over2.tcc"
+#endif // FP12_2OVER3OVER2_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of arithmetic in the finite field F[((p^2)^3)^2].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP12_2OVER3OVER2_TCC_
+#define FP12_2OVER3OVER2_TCC_
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n, modulus> Fp12_2over3over2_model<n,modulus>::mul_by_non_residue(const Fp6_3over2_model<n, modulus> &elt)
+{
+ return Fp6_3over2_model<n, modulus>(non_residue * elt.c2, elt.c0, elt.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::zero()
+{
+ return Fp12_2over3over2_model<n, modulus>(my_Fp6::zero(), my_Fp6::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::one()
+{
+ return Fp12_2over3over2_model<n, modulus>(my_Fp6::one(), my_Fp6::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::random_element()
+{
+ Fp12_2over3over2_model<n, modulus> r;
+ r.c0 = my_Fp6::random_element();
+ r.c1 = my_Fp6::random_element();
+
+ return r;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp12_2over3over2_model<n,modulus>::operator==(const Fp12_2over3over2_model<n,modulus> &other) const
+{
+ return (this->c0 == other.c0 && this->c1 == other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp12_2over3over2_model<n,modulus>::operator!=(const Fp12_2over3over2_model<n,modulus> &other) const
+{
+ return !(operator==(other));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator+(const Fp12_2over3over2_model<n,modulus> &other) const
+{
+ return Fp12_2over3over2_model<n,modulus>(this->c0 + other.c0,
+ this->c1 + other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator-(const Fp12_2over3over2_model<n,modulus> &other) const
+{
+ return Fp12_2over3over2_model<n,modulus>(this->c0 - other.c0,
+ this->c1 - other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
+{
+ return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
+{
+ return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n, modulus> operator*(const Fp6_3over2_model<n, modulus> &lhs, const Fp12_2over3over2_model<n, modulus> &rhs)
+{
+ return Fp12_2over3over2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator*(const Fp12_2over3over2_model<n,modulus> &other) const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */
+
+ const my_Fp6 &A = other.c0, &B = other.c1,
+ &a = this->c0, &b = this->c1;
+ const my_Fp6 aA = a * A;
+ const my_Fp6 bB = b * B;
+
+ return Fp12_2over3over2_model<n,modulus>(aA + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(bB),
+ (a + b)*(A+B) - aA - bB);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::operator-() const
+{
+ return Fp12_2over3over2_model<n,modulus>(-this->c0,
+ -this->c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared() const
+{
+ return squared_complex();
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared_karatsuba() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */
+
+ const my_Fp6 &a = this->c0, &b = this->c1;
+ const my_Fp6 asq = a.squared();
+ const my_Fp6 bsq = b.squared();
+
+ return Fp12_2over3over2_model<n,modulus>(asq + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(bsq),
+ (a + b).squared() - asq - bsq);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::squared_complex() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */
+
+ const my_Fp6 &a = this->c0, &b = this->c1;
+ const my_Fp6 ab = a * b;
+
+ return Fp12_2over3over2_model<n,modulus>((a + b) * (a + Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(b)) - ab - Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(ab),
+ ab + ab);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::inverse() const
+{
+ /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */
+
+ const my_Fp6 &a = this->c0, &b = this->c1;
+ const my_Fp6 t0 = a.squared();
+ const my_Fp6 t1 = b.squared();
+ const my_Fp6 t2 = t0 - Fp12_2over3over2_model<n, modulus>::mul_by_non_residue(t1);
+ const my_Fp6 t3 = t2.inverse();
+ const my_Fp6 c0 = a * t3;
+ const my_Fp6 c1 = - (b * t3);
+
+ return Fp12_2over3over2_model<n,modulus>(c0, c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::Frobenius_map(unsigned long power) const
+{
+ return Fp12_2over3over2_model<n,modulus>(c0.Frobenius_map(power),
+ Frobenius_coeffs_c1[power % 12] * c1.Frobenius_map(power));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::unitary_inverse() const
+{
+ return Fp12_2over3over2_model<n,modulus>(this->c0,
+ -this->c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::cyclotomic_squared() const
+{
+ /* OLD: naive implementation
+ return (*this).squared();
+ */
+ my_Fp2 z0 = this->c0.c0;
+ my_Fp2 z4 = this->c0.c1;
+ my_Fp2 z3 = this->c0.c2;
+ my_Fp2 z2 = this->c1.c0;
+ my_Fp2 z1 = this->c1.c1;
+ my_Fp2 z5 = this->c1.c2;
+
+ my_Fp2 t0, t1, t2, t3, t4, t5, tmp;
+
+ // t0 + t1*y = (z0 + z1*y)^2 = a^2
+ tmp = z0 * z1;
+ t0 = (z0 + z1) * (z0 + my_Fp6::non_residue * z1) - tmp - my_Fp6::non_residue * tmp;
+ t1 = tmp + tmp;
+ // t2 + t3*y = (z2 + z3*y)^2 = b^2
+ tmp = z2 * z3;
+ t2 = (z2 + z3) * (z2 + my_Fp6::non_residue * z3) - tmp - my_Fp6::non_residue * tmp;
+ t3 = tmp + tmp;
+ // t4 + t5*y = (z4 + z5*y)^2 = c^2
+ tmp = z4 * z5;
+ t4 = (z4 + z5) * (z4 + my_Fp6::non_residue * z5) - tmp - my_Fp6::non_residue * tmp;
+ t5 = tmp + tmp;
+
+ // for A
+
+ // z0 = 3 * t0 - 2 * z0
+ z0 = t0 - z0;
+ z0 = z0 + z0;
+ z0 = z0 + t0;
+ // z1 = 3 * t1 + 2 * z1
+ z1 = t1 + z1;
+ z1 = z1 + z1;
+ z1 = z1 + t1;
+
+ // for B
+
+ // z2 = 3 * (xi * t5) + 2 * z2
+ tmp = my_Fp6::non_residue * t5;
+ z2 = tmp + z2;
+ z2 = z2 + z2;
+ z2 = z2 + tmp;
+
+ // z3 = 3 * t4 - 2 * z3
+ z3 = t4 - z3;
+ z3 = z3 + z3;
+ z3 = z3 + t4;
+
+ // for C
+
+ // z4 = 3 * t2 - 2 * z4
+ z4 = t2 - z4;
+ z4 = z4 + z4;
+ z4 = z4 + t2;
+
+ // z5 = 3 * t3 + 2 * z5
+ z5 = t3 + z5;
+ z5 = z5 + z5;
+ z5 = z5 + t3;
+
+ return Fp12_2over3over2_model<n,modulus>(my_Fp6(z0,z4,z3),my_Fp6(z2,z1,z5));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp12_2over3over2_model<n,modulus> Fp12_2over3over2_model<n,modulus>::mul_by_024(const Fp2_model<n, modulus> &ell_0,
+ const Fp2_model<n, modulus> &ell_VW,
+ const Fp2_model<n, modulus> &ell_VV) const
+{
+ /* OLD: naive implementation
+ Fp12_2over3over2_model<n,modulus> a(my_Fp6(ell_0, my_Fp2::zero(), ell_VV),
+ my_Fp6(my_Fp2::zero(), ell_VW, my_Fp2::zero()));
+
+ return (*this) * a;
+ */
+ my_Fp2 z0 = this->c0.c0;
+ my_Fp2 z1 = this->c0.c1;
+ my_Fp2 z2 = this->c0.c2;
+ my_Fp2 z3 = this->c1.c0;
+ my_Fp2 z4 = this->c1.c1;
+ my_Fp2 z5 = this->c1.c2;
+
+ my_Fp2 x0 = ell_0;
+ my_Fp2 x2 = ell_VV;
+ my_Fp2 x4 = ell_VW;
+
+ my_Fp2 t0, t1, t2, s0, T3, T4, D0, D2, D4, S1;
+
+ D0 = z0 * x0;
+ D2 = z2 * x2;
+ D4 = z4 * x4;
+ t2 = z0 + z4;
+ t1 = z0 + z2;
+ s0 = z1 + z3 + z5;
+
+ // For z.a_.a_ = z0.
+ S1 = z1 * x2;
+ T3 = S1 + D4;
+ T4 = my_Fp6::non_residue * T3 + D0;
+ z0 = T4;
+
+ // For z.a_.b_ = z1
+ T3 = z5 * x4;
+ S1 = S1 + T3;
+ T3 = T3 + D2;
+ T4 = my_Fp6::non_residue * T3;
+ T3 = z1 * x0;
+ S1 = S1 + T3;
+ T4 = T4 + T3;
+ z1 = T4;
+
+ // For z.a_.c_ = z2
+ t0 = x0 + x2;
+ T3 = t1 * t0 - D0 - D2;
+ T4 = z3 * x4;
+ S1 = S1 + T4;
+ T3 = T3 + T4;
+
+ // For z.b_.a_ = z3 (z3 needs z2)
+ t0 = z2 + z4;
+ z2 = T3;
+ t1 = x2 + x4;
+ T3 = t0 * t1 - D2 - D4;
+ T4 = my_Fp6::non_residue * T3;
+ T3 = z3 * x0;
+ S1 = S1 + T3;
+ T4 = T4 + T3;
+ z3 = T4;
+
+ // For z.b_.b_ = z4
+ T3 = z5 * x2;
+ S1 = S1 + T3;
+ T4 = my_Fp6::non_residue * T3;
+ t0 = x0 + x4;
+ T3 = t2 * t0 - D0 - D4;
+ T4 = T4 + T3;
+ z4 = T4;
+
+ // For z.b_.c_ = z5.
+ t0 = x0 + x2 + x4;
+ T3 = s0 * t0 - S1;
+ z5 = T3;
+
+ return Fp12_2over3over2_model<n,modulus>(my_Fp6(z0,z1,z2),my_Fp6(z3,z4,z5));
+
+}
+
+template<mp_size_t n, const bigint<n>& modulus, mp_size_t m>
+Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const bigint<m> &exponent)
+{
+ return power<Fp12_2over3over2_model<n, modulus> >(self, exponent);
+}
+
+template<mp_size_t n, const bigint<n>& modulus, mp_size_t m, const bigint<m>& exp_modulus>
+Fp12_2over3over2_model<n, modulus> operator^(const Fp12_2over3over2_model<n, modulus> &self, const Fp_model<m, exp_modulus> &exponent)
+{
+ return self^(exponent.as_bigint());
+}
+
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp12_2over3over2_model<n, modulus> Fp12_2over3over2_model<n,modulus>::cyclotomic_exp(const bigint<m> &exponent) const
+{
+ Fp12_2over3over2_model<n,modulus> res = Fp12_2over3over2_model<n,modulus>::one();
+
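+ /* Plain left-to-right square-and-multiply over the bits of the exponent,
+ using the cheaper cyclotomic squaring for the doublings. */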
+ bool found_one = false;
+ for (long i = m-1; i >= 0; --i)
+ {
+ for (long j = GMP_NUMB_BITS - 1; j >= 0; --j)
+ {
+ if (found_one)
+ {
+ res = res.cyclotomic_squared();
+ }
+
+ if (exponent.data[i] & (1ul<<j))
+ {
+ found_one = true;
+ res = res * (*this);
+ }
+ }
+ }
+
+ return res;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &out, const Fp12_2over3over2_model<n, modulus> &el)
+{
+ out << el.c0 << OUTPUT_SEPARATOR << el.c1;
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &in, Fp12_2over3over2_model<n, modulus> &el)
+{
+ in >> el.c0 >> el.c1;
+ return in;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp12_2over3over2_model<n, modulus> > &v)
+{
+ out << v.size() << "\n";
+ for (const Fp12_2over3over2_model<n, modulus>& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp12_2over3over2_model<n, modulus> > &v)
+{
+ v.clear();
+
+ size_t s;
+ in >> s;
+
+ char b;
+ in.read(&b, 1);
+
+ v.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ Fp12_2over3over2_model<n, modulus> el;
+ in >> el;
+ v.emplace_back(el);
+ }
+
+ return in;
+}
+
+} // libsnark
+#endif // FP12_2OVER3OVER2_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of arithmetic in the finite field F[p^2].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP2_HPP_
+#define FP2_HPP_
+#include "algebra/fields/fp.hpp"
+#include <vector>
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp2_model;
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &, const Fp2_model<n, modulus> &);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &, Fp2_model<n, modulus> &);
+
+/**
+ * Arithmetic in the field F[p^2].
+ *
+ * Let p := modulus. This interface provides arithmetic for the extension field
+ * Fp2 = Fp[U]/(U^2-non_residue), where non_residue is in Fp.
+ *
+ * ASSUMPTION: p = 1 (mod 6)
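+ *
+ * Usage sketch (hypothetical instantiation; concrete curve code defines its
+ * own typedef over a specific limb count and modulus):
+ *
+ *   typedef Fp2_model<n, modulus> Fq2;
+ *   Fq2 x = Fq2::random_element();
+ *   assert(x * x.inverse() == Fq2::one());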
+ */
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp2_model {
+public:
+ typedef Fp_model<n, modulus> my_Fp;
+
+ static bigint<2*n> euler; // (modulus^2-1)/2
+ static size_t s; // modulus^2 = 2^s * t + 1
+ static bigint<2*n> t; // with t odd
+ static bigint<2*n> t_minus_1_over_2; // (t-1)/2
+ static my_Fp non_residue; // X^4-non_residue irreducible over Fp; used for constructing Fp2 = Fp[X] / (X^2 - non_residue)
+ static Fp2_model<n, modulus> nqr; // a quadratic nonresidue in Fp2
+ static Fp2_model<n, modulus> nqr_to_t; // nqr^t
+ static my_Fp Frobenius_coeffs_c1[2]; // non_residue^((modulus^i-1)/2) for i=0,1
+
+ my_Fp c0, c1;
+ Fp2_model() {};
+ Fp2_model(const my_Fp& c0, const my_Fp& c1) : c0(c0), c1(c1) {};
+
+ void clear() { c0.clear(); c1.clear(); }
+ void print() const { printf("c0/c1:\n"); c0.print(); c1.print(); }
+
+ static Fp2_model<n, modulus> zero();
+ static Fp2_model<n, modulus> one();
+ static Fp2_model<n, modulus> random_element();
+
+ bool is_zero() const { return c0.is_zero() && c1.is_zero(); }
+ bool operator==(const Fp2_model &other) const;
+ bool operator!=(const Fp2_model &other) const;
+
+ Fp2_model operator+(const Fp2_model &other) const;
+ Fp2_model operator-(const Fp2_model &other) const;
+ Fp2_model operator*(const Fp2_model &other) const;
+ Fp2_model operator-() const;
+ Fp2_model squared() const; // default is squared_complex
+ Fp2_model inverse() const;
+ Fp2_model Frobenius_map(unsigned long power) const;
+ Fp2_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate)
+ Fp2_model squared_karatsuba() const;
+ Fp2_model squared_complex() const;
+
+ template<mp_size_t m>
+ Fp2_model operator^(const bigint<m> &other) const;
+
+ static size_t size_in_bits() { return 2*my_Fp::size_in_bits(); }
+ static bigint<n> base_field_char() { return modulus; }
+
+ friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp2_model<n, modulus> &el);
+ friend std::istream& operator>> <n, modulus>(std::istream &in, Fp2_model<n, modulus> &el);
+};
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<2*n> Fp2_model<n, modulus>::euler;
+
+template<mp_size_t n, const bigint<n>& modulus>
+size_t Fp2_model<n, modulus>::s;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<2*n> Fp2_model<n, modulus>::t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<2*n> Fp2_model<n, modulus>::t_minus_1_over_2;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp2_model<n, modulus>::non_residue;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp2_model<n, modulus>::nqr;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp2_model<n, modulus>::nqr_to_t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp2_model<n, modulus>::Frobenius_coeffs_c1[2];
+
+} // libsnark
+#include "algebra/fields/fp2.tcc"
+
+#endif // FP2_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of arithmetic in the finite field F[p^2].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP2_TCC_
+#define FP2_TCC_
+
+#include "algebra/fields/field_utils.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::zero()
+{
+ return Fp2_model<n, modulus>(my_Fp::zero(), my_Fp::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::one()
+{
+ return Fp2_model<n, modulus>(my_Fp::one(), my_Fp::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::random_element()
+{
+ Fp2_model<n, modulus> r;
+ r.c0 = my_Fp::random_element();
+ r.c1 = my_Fp::random_element();
+
+ return r;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp2_model<n,modulus>::operator==(const Fp2_model<n,modulus> &other) const
+{
+ return (this->c0 == other.c0 && this->c1 == other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp2_model<n,modulus>::operator!=(const Fp2_model<n,modulus> &other) const
+{
+ return !(operator==(other));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::operator+(const Fp2_model<n,modulus> &other) const
+{
+ return Fp2_model<n,modulus>(this->c0 + other.c0,
+ this->c1 + other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::operator-(const Fp2_model<n,modulus> &other) const
+{
+ return Fp2_model<n,modulus>(this->c0 - other.c0,
+ this->c1 - other.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp2_model<n, modulus> &rhs)
+{
+ return Fp2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::operator*(const Fp2_model<n,modulus> &other) const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba) */
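+ /* With U^2 = non_residue: (a + b*U)*(A + B*U) =
+ (a*A + non_residue*b*B) + ((a+b)*(A+B) - a*A - b*B)*U,
+ i.e. three base-field multiplications instead of four. */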
+ const my_Fp
+ &A = other.c0, &B = other.c1,
+ &a = this->c0, &b = this->c1;
+ const my_Fp aA = a * A;
+ const my_Fp bB = b * B;
+
+ return Fp2_model<n,modulus>(aA + non_residue * bB,
+ (a + b)*(A+B) - aA - bB);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::operator-() const
+{
+ return Fp2_model<n,modulus>(-this->c0,
+ -this->c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::squared() const
+{
+ return squared_complex();
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::squared_karatsuba() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Karatsuba squaring) */
+ const my_Fp &a = this->c0, &b = this->c1;
+ const my_Fp asq = a.squared();
+ const my_Fp bsq = b.squared();
+
+ return Fp2_model<n,modulus>(asq + non_residue * bsq,
+ (a + b).squared() - asq - bsq);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::squared_complex() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 3 (Complex squaring) */
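+ /* (a + b*U)^2 = (a^2 + non_residue*b^2) + 2ab*U; the first component is
+ computed as (a+b)*(a + non_residue*b) - ab - non_residue*ab, so the whole
+ squaring costs two base-field multiplications. */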
+ const my_Fp &a = this->c0, &b = this->c1;
+ const my_Fp ab = a * b;
+
+ return Fp2_model<n,modulus>((a + b) * (a + non_residue * b) - ab - non_residue * ab,
+ ab + ab);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::inverse() const
+{
+ const my_Fp &a = this->c0, &b = this->c1;
+
+ /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 8 */
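+ /* (a + b*U)^{-1} = (a - b*U) / (a^2 - non_residue*b^2), since
+ (a + b*U)*(a - b*U) = a^2 - non_residue*b^2 lies in Fp. */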
+ const my_Fp t0 = a.squared();
+ const my_Fp t1 = b.squared();
+ const my_Fp t2 = t0 - non_residue * t1;
+ const my_Fp t3 = t2.inverse();
+ const my_Fp c0 = a * t3;
+ const my_Fp c1 = - (b * t3);
+
+ return Fp2_model<n,modulus>(c0, c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::Frobenius_map(unsigned long power) const
+{
+ return Fp2_model<n,modulus>(c0,
+ Frobenius_coeffs_c1[power % 2] * c1);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::sqrt() const
+{
+ if (is_zero()) {
+ return *this;
+ }
+
+ Fp2_model<n,modulus> one = Fp2_model<n,modulus>::one();
+
+ size_t v = Fp2_model<n,modulus>::s;
+ Fp2_model<n,modulus> z = Fp2_model<n,modulus>::nqr_to_t;
+ Fp2_model<n,modulus> w = (*this)^Fp2_model<n,modulus>::t_minus_1_over_2;
+ Fp2_model<n,modulus> x = (*this) * w;
+ Fp2_model<n,modulus> b = x * w; // b = (*this)^t
+
+ // check if square with Euler's criterion
+ Fp2_model<n,modulus> check = b;
+ for (size_t i = 0; i < v-1; ++i)
+ {
+ check = check.squared();
+ }
+ if (check != one)
+ {
+ assert_except(0);
+ }
+
+
+ // compute square root with Tonelli--Shanks
+ // (does not terminate if not a square!)
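+ // Loop invariant: x^2 = (*this) * b. Initially b = (*this)^t and
+ // x = (*this)^((t+1)/2); each iteration preserves the invariant while
+ // strictly reducing the 2-adic order of b, so when b == one we have
+ // x^2 == (*this).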
+
+ while (b != one)
+ {
+ size_t m = 0;
+ Fp2_model<n,modulus> b2m = b;
+ while (b2m != one)
+ {
+ /* invariant: b2m = b^(2^m) after entering this loop */
+ b2m = b2m.squared();
+ m += 1;
+ }
+
+ int j = v-m-1;
+ w = z;
+ while (j > 0)
+ {
+ w = w.squared();
+ --j;
+ } // w = z^2^(v-m-1)
+
+ z = w.squared();
+ b = b * z;
+ x = x * w;
+ v = m;
+ }
+
+ return x;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp2_model<n,modulus> Fp2_model<n,modulus>::operator^(const bigint<m> &pow) const
+{
+ return power<Fp2_model<n, modulus>, m>(*this, pow);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &out, const Fp2_model<n, modulus> &el)
+{
+ out << el.c0 << OUTPUT_SEPARATOR << el.c1;
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &in, Fp2_model<n, modulus> &el)
+{
+ in >> el.c0 >> el.c1;
+ return in;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp2_model<n, modulus> > &v)
+{
+ out << v.size() << "\n";
+ for (const Fp2_model<n, modulus>& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp2_model<n, modulus> > &v)
+{
+ v.clear();
+
+ size_t s;
+ in >> s;
+
+ char b;
+ in.read(&b, 1);
+
+ v.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ Fp2_model<n, modulus> el;
+ in >> el;
+ v.emplace_back(el);
+ }
+
+ return in;
+}
+
+} // libsnark
+#endif // FP2_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of arithmetic in the finite field F[p^3].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP3_HPP_
+#define FP3_HPP_
+#include "algebra/fields/fp.hpp"
+#include <vector>
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp3_model;
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &, const Fp3_model<n, modulus> &);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &, Fp3_model<n, modulus> &);
+
+/**
+ * Arithmetic in the field F[p^3].
+ *
+ * Let p := modulus. This interface provides arithmetic for the extension field
+ * Fp3 = Fp[U]/(U^3-non_residue), where non_residue is in Fp.
+ *
+ * ASSUMPTION: p = 1 (mod 6)
+ */
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp3_model {
+public:
+ typedef Fp_model<n, modulus> my_Fp;
+
+ static bigint<3*n> euler; // (modulus^3-1)/2
+ static size_t s; // modulus^3 = 2^s * t + 1
+ static bigint<3*n> t; // with t odd
+ static bigint<3*n> t_minus_1_over_2; // (t-1)/2
+ static my_Fp non_residue; // X^6-non_residue irreducible over Fp; used for constructing Fp3 = Fp[X] / (X^3 - non_residue)
+ static Fp3_model<n, modulus> nqr; // a quadratic nonresidue in Fp3
+ static Fp3_model<n, modulus> nqr_to_t; // nqr^t
+ static my_Fp Frobenius_coeffs_c1[3]; // non_residue^((modulus^i-1)/3) for i=0,1,2
+ static my_Fp Frobenius_coeffs_c2[3]; // non_residue^((2*modulus^i-2)/3) for i=0,1,2
+
+ my_Fp c0, c1, c2;
+ Fp3_model() {};
+ Fp3_model(const my_Fp& c0, const my_Fp& c1, const my_Fp& c2) : c0(c0), c1(c1), c2(c2) {};
+
+ void clear() { c0.clear(); c1.clear(); c2.clear(); }
+ void print() const { printf("c0/c1/c2:\n"); c0.print(); c1.print(); c2.print(); }
+
+ static Fp3_model<n, modulus> zero();
+ static Fp3_model<n, modulus> one();
+ static Fp3_model<n, modulus> random_element();
+
+ bool is_zero() const { return c0.is_zero() && c1.is_zero() && c2.is_zero(); }
+ bool operator==(const Fp3_model &other) const;
+ bool operator!=(const Fp3_model &other) const;
+
+ Fp3_model operator+(const Fp3_model &other) const;
+ Fp3_model operator-(const Fp3_model &other) const;
+ Fp3_model operator*(const Fp3_model &other) const;
+ Fp3_model operator-() const;
+ Fp3_model squared() const;
+ Fp3_model inverse() const;
+ Fp3_model Frobenius_map(unsigned long power) const;
+ Fp3_model sqrt() const; // HAS TO BE A SQUARE (else does not terminate)
+
+ template<mp_size_t m>
+ Fp3_model operator^(const bigint<m> &other) const;
+
+ static size_t size_in_bits() { return 3*my_Fp::size_in_bits(); }
+ static bigint<n> base_field_char() { return modulus; }
+
+ friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp3_model<n, modulus> &el);
+ friend std::istream& operator>> <n, modulus>(std::istream &in, Fp3_model<n, modulus> &el);
+};
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp3_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp3_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp3_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<3*n> Fp3_model<n, modulus>::euler;
+
+template<mp_size_t n, const bigint<n>& modulus>
+size_t Fp3_model<n, modulus>::s;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<3*n> Fp3_model<n, modulus>::t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+bigint<3*n> Fp3_model<n, modulus>::t_minus_1_over_2;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp3_model<n, modulus>::non_residue;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n, modulus> Fp3_model<n, modulus>::nqr;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n, modulus> Fp3_model<n, modulus>::nqr_to_t;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp3_model<n, modulus>::Frobenius_coeffs_c1[3];
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp_model<n, modulus> Fp3_model<n, modulus>::Frobenius_coeffs_c2[3];
+
+} // libsnark
+#include "algebra/fields/fp3.tcc"
+
+#endif // FP3_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of arithmetic in the finite field F[p^3].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP3_TCC_
+#define FP3_TCC_
+
+#include "algebra/fields/field_utils.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::zero()
+{
+ return Fp3_model<n, modulus>(my_Fp::zero(), my_Fp::zero(), my_Fp::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::one()
+{
+ return Fp3_model<n, modulus>(my_Fp::one(), my_Fp::zero(), my_Fp::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::random_element()
+{
+ Fp3_model<n, modulus> r;
+ r.c0 = my_Fp::random_element();
+ r.c1 = my_Fp::random_element();
+ r.c2 = my_Fp::random_element();
+
+ return r;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp3_model<n,modulus>::operator==(const Fp3_model<n,modulus> &other) const
+{
+ return (this->c0 == other.c0 && this->c1 == other.c1 && this->c2 == other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp3_model<n,modulus>::operator!=(const Fp3_model<n,modulus> &other) const
+{
+ return !(operator==(other));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::operator+(const Fp3_model<n,modulus> &other) const
+{
+ return Fp3_model<n,modulus>(this->c0 + other.c0,
+ this->c1 + other.c1,
+ this->c2 + other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::operator-(const Fp3_model<n,modulus> &other) const
+{
+ return Fp3_model<n,modulus>(this->c0 - other.c0,
+ this->c1 - other.c1,
+ this->c2 - other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp3_model<n, modulus> &rhs)
+{
+ return Fp3_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1,
+ lhs*rhs.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::operator*(const Fp3_model<n,modulus> &other) const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (Karatsuba) */
+ const my_Fp
+ &A = other.c0, &B = other.c1, &C = other.c2,
+ &a = this->c0, &b = this->c1, &c = this->c2;
+ const my_Fp aA = a*A;
+ const my_Fp bB = b*B;
+ const my_Fp cC = c*C;
+
+ return Fp3_model<n,modulus>(aA + non_residue*((b+c)*(B+C)-bB-cC),
+ (a+b)*(A+B)-aA-bB+non_residue*cC,
+ (a+c)*(A+C)-aA+bB-cC);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::operator-() const
+{
+ return Fp3_model<n,modulus>(-this->c0,
+ -this->c1,
+ -this->c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::squared() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (CH-SQR2) */
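+ /* CH-SQR2: with V^3 = non_residue, (a + b*V + c*V^2)^2 has coefficients
+ (a^2 + 2bc*non_residue, 2ab + c^2*non_residue, b^2 + 2ac); the last one is
+ recovered as s1 + s2 + s3 - s0 - s4 using s2 = (a - b + c)^2. */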
+ const my_Fp
+ &a = this->c0, &b = this->c1, &c = this->c2;
+ const my_Fp s0 = a.squared();
+ const my_Fp ab = a*b;
+ const my_Fp s1 = ab + ab;
+ const my_Fp s2 = (a - b + c).squared();
+ const my_Fp bc = b*c;
+ const my_Fp s3 = bc + bc;
+ const my_Fp s4 = c.squared();
+
+ return Fp3_model<n,modulus>(s0 + non_residue * s3,
+ s1 + non_residue * s4,
+ s1 + s2 + s3 - s0 - s4);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::inverse() const
+{
+ const my_Fp
+ &a = this->c0, &b = this->c1, &c = this->c2;
+
+ /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 17 */
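+ /* With x = a + b*V + c*V^2 and V^3 = non_residue, the values c0, c1, c2
+ below satisfy x * (c0 + c1*V + c2*V^2) = a*c0 + non_residue*(c*c1 + b*c2),
+ which lies in Fp; t6 is the inverse of that quantity. */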
+ const my_Fp t0 = a.squared();
+ const my_Fp t1 = b.squared();
+ const my_Fp t2 = c.squared();
+ const my_Fp t3 = a*b;
+ const my_Fp t4 = a*c;
+ const my_Fp t5 = b*c;
+ const my_Fp c0 = t0 - non_residue * t5;
+ const my_Fp c1 = non_residue * t2 - t3;
+ const my_Fp c2 = t1 - t4; // the paper referenced above has a typo here: the operation should be "-" (as per Scott), not "*"
+ const my_Fp t6 = (a * c0 + non_residue * (c * c1 + b * c2)).inverse();
+ return Fp3_model<n,modulus>(t6 * c0, t6 * c1, t6 * c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::Frobenius_map(unsigned long power) const
+{
+ return Fp3_model<n,modulus>(c0,
+ Frobenius_coeffs_c1[power % 3] * c1,
+ Frobenius_coeffs_c2[power % 3] * c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::sqrt() const
+{
+ Fp3_model<n,modulus> one = Fp3_model<n,modulus>::one();
+
+ size_t v = Fp3_model<n,modulus>::s;
+ Fp3_model<n,modulus> z = Fp3_model<n,modulus>::nqr_to_t;
+ Fp3_model<n,modulus> w = (*this)^Fp3_model<n,modulus>::t_minus_1_over_2;
+ Fp3_model<n,modulus> x = (*this) * w;
+ Fp3_model<n,modulus> b = x * w; // b = (*this)^t
+
+#if DEBUG
+ // check if square with Euler's criterion
+ Fp3_model<n,modulus> check = b;
+ for (size_t i = 0; i < v-1; ++i)
+ {
+ check = check.squared();
+ }
+ if (check != one)
+ {
+ assert(0);
+ }
+#endif
+
+ // compute square root with Tonelli--Shanks
+ // (does not terminate if not a square!)
+
+ while (b != one)
+ {
+ size_t m = 0;
+ Fp3_model<n,modulus> b2m = b;
+ while (b2m != one)
+ {
+ /* invariant: b2m = b^(2^m) after entering this loop */
+ b2m = b2m.squared();
+ m += 1;
+ }
+
+ int j = v-m-1;
+ w = z;
+ while (j > 0)
+ {
+ w = w.squared();
+ --j;
+ } // w = z^2^(v-m-1)
+
+ z = w.squared();
+ b = b * z;
+ x = x * w;
+ v = m;
+ }
+
+ return x;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp3_model<n,modulus> Fp3_model<n,modulus>::operator^(const bigint<m> &pow) const
+{
+ return power<Fp3_model<n, modulus> >(*this, pow);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &out, const Fp3_model<n, modulus> &el)
+{
+ out << el.c0 << OUTPUT_SEPARATOR << el.c1 << OUTPUT_SEPARATOR << el.c2;
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &in, Fp3_model<n, modulus> &el)
+{
+ in >> el.c0 >> el.c1 >> el.c2;
+ return in;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp3_model<n, modulus> > &v)
+{
+ out << v.size() << "\n";
+ for (const Fp3_model<n, modulus>& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp3_model<n, modulus> > &v)
+{
+ v.clear();
+
+ size_t s;
+ in >> s;
+
+ char b;
+ in.read(&b, 1);
+
+ v.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ Fp3_model<n, modulus> el;
+ in >> el;
+ v.emplace_back(el);
+ }
+
+ return in;
+}
+
+} // libsnark
+#endif // FP3_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of arithmetic in the finite field F[(p^2)^3].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP6_3OVER2_HPP_
+#define FP6_3OVER2_HPP_
+#include "algebra/fields/fp.hpp"
+#include "algebra/fields/fp2.hpp"
+#include <vector>
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp6_3over2_model;
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &, const Fp6_3over2_model<n, modulus> &);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &, Fp6_3over2_model<n, modulus> &);
+
+/**
+ * Arithmetic in the finite field F[(p^2)^3].
+ *
+ * Let p := modulus. This interface provides arithmetic for the extension field
+ * Fp6 = Fp2[V]/(V^3-non_residue) where non_residue is in Fp.
+ *
+ * ASSUMPTION: p = 1 (mod 6)
+ */
+template<mp_size_t n, const bigint<n>& modulus>
+class Fp6_3over2_model {
+public:
+ typedef Fp_model<n, modulus> my_Fp;
+ typedef Fp2_model<n, modulus> my_Fp2;
+
+ static my_Fp2 non_residue;
+ static my_Fp2 Frobenius_coeffs_c1[6]; // non_residue^((modulus^i-1)/3) for i=0,1,2,3,4,5
+ static my_Fp2 Frobenius_coeffs_c2[6]; // non_residue^((2*modulus^i-2)/3) for i=0,1,2,3,4,5
+
+ my_Fp2 c0, c1, c2;
+ Fp6_3over2_model() {};
+ Fp6_3over2_model(const my_Fp2& c0, const my_Fp2& c1, const my_Fp2& c2) : c0(c0), c1(c1), c2(c2) {};
+
+ void clear() { c0.clear(); c1.clear(); c2.clear(); }
+ void print() const { printf("c0/c1/c2:\n"); c0.print(); c1.print(); c2.print(); }
+
+ static Fp6_3over2_model<n, modulus> zero();
+ static Fp6_3over2_model<n, modulus> one();
+ static Fp6_3over2_model<n, modulus> random_element();
+
+ bool is_zero() const { return c0.is_zero() && c1.is_zero() && c2.is_zero(); }
+ bool operator==(const Fp6_3over2_model &other) const;
+ bool operator!=(const Fp6_3over2_model &other) const;
+
+ Fp6_3over2_model operator+(const Fp6_3over2_model &other) const;
+ Fp6_3over2_model operator-(const Fp6_3over2_model &other) const;
+ Fp6_3over2_model operator*(const Fp6_3over2_model &other) const;
+ Fp6_3over2_model operator-() const;
+ Fp6_3over2_model squared() const;
+ Fp6_3over2_model inverse() const;
+ Fp6_3over2_model Frobenius_map(unsigned long power) const;
+
+ static my_Fp2 mul_by_non_residue(const my_Fp2 &elt);
+
+ template<mp_size_t m>
+ Fp6_3over2_model operator^(const bigint<m> &other) const;
+
+ static bigint<n> base_field_char() { return modulus; }
+ static size_t extension_degree() { return 6; }
+
+ friend std::ostream& operator<< <n, modulus>(std::ostream &out, const Fp6_3over2_model<n, modulus> &el);
+ friend std::istream& operator>> <n, modulus>(std::istream &in, Fp6_3over2_model<n, modulus> &el);
+};
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp6_3over2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp6_3over2_model<n, modulus> > &v);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs);
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::non_residue;
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::Frobenius_coeffs_c1[6];
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp6_3over2_model<n, modulus>::Frobenius_coeffs_c2[6];
+
+} // libsnark
+#include "algebra/fields/fp6_3over2.tcc"
+
+#endif // FP6_3OVER2_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of arithmetic in the finite field F[(p^2)^3].
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP6_3OVER2_TCC_
+#define FP6_3OVER2_TCC_
+#include "algebra/fields/field_utils.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp2_model<n, modulus> Fp6_3over2_model<n,modulus>::mul_by_non_residue(const Fp2_model<n, modulus> &elt)
+{
+ return Fp2_model<n, modulus>(non_residue * elt);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::zero()
+{
+ return Fp6_3over2_model<n, modulus>(my_Fp2::zero(), my_Fp2::zero(), my_Fp2::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::one()
+{
+ return Fp6_3over2_model<n, modulus>(my_Fp2::one(), my_Fp2::zero(), my_Fp2::zero());
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::random_element()
+{
+ Fp6_3over2_model<n, modulus> r;
+ r.c0 = my_Fp2::random_element();
+ r.c1 = my_Fp2::random_element();
+ r.c2 = my_Fp2::random_element();
+
+ return r;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp6_3over2_model<n,modulus>::operator==(const Fp6_3over2_model<n,modulus> &other) const
+{
+ return (this->c0 == other.c0 && this->c1 == other.c1 && this->c2 == other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+bool Fp6_3over2_model<n,modulus>::operator!=(const Fp6_3over2_model<n,modulus> &other) const
+{
+ return !(operator==(other));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::operator+(const Fp6_3over2_model<n,modulus> &other) const
+{
+ return Fp6_3over2_model<n,modulus>(this->c0 + other.c0,
+ this->c1 + other.c1,
+ this->c2 + other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::operator-(const Fp6_3over2_model<n,modulus> &other) const
+{
+ return Fp6_3over2_model<n,modulus>(this->c0 - other.c0,
+ this->c1 - other.c1,
+ this->c2 - other.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n, modulus> operator*(const Fp_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs)
+{
+ return Fp6_3over2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1,
+ lhs*rhs.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n, modulus> operator*(const Fp2_model<n, modulus> &lhs, const Fp6_3over2_model<n, modulus> &rhs)
+{
+ return Fp6_3over2_model<n,modulus>(lhs*rhs.c0,
+ lhs*rhs.c1,
+ lhs*rhs.c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::operator*(const Fp6_3over2_model<n,modulus> &other) const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (Karatsuba) */
+
+ const my_Fp2 &A = other.c0, &B = other.c1, &C = other.c2,
+ &a = this->c0, &b = this->c1, &c = this->c2;
+ const my_Fp2 aA = a*A;
+ const my_Fp2 bB = b*B;
+ const my_Fp2 cC = c*C;
+
+ return Fp6_3over2_model<n,modulus>(aA + Fp6_3over2_model<n,modulus>::mul_by_non_residue((b+c)*(B+C)-bB-cC),
+ (a+b)*(A+B)-aA-bB+Fp6_3over2_model<n,modulus>::mul_by_non_residue(cC),
+ (a+c)*(A+C)-aA+bB-cC);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::operator-() const
+{
+ return Fp6_3over2_model<n,modulus>(-this->c0,
+ -this->c1,
+ -this->c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::squared() const
+{
+ /* Devegili OhEig Scott Dahab --- Multiplication and Squaring on Pairing-Friendly Fields.pdf; Section 4 (CH-SQR2) */
+
+ const my_Fp2 &a = this->c0, &b = this->c1, &c = this->c2;
+ const my_Fp2 s0 = a.squared();
+ const my_Fp2 ab = a*b;
+ const my_Fp2 s1 = ab + ab;
+ const my_Fp2 s2 = (a - b + c).squared();
+ const my_Fp2 bc = b*c;
+ const my_Fp2 s3 = bc + bc;
+ const my_Fp2 s4 = c.squared();
+
+ return Fp6_3over2_model<n,modulus>(s0 + Fp6_3over2_model<n,modulus>::mul_by_non_residue(s3),
+ s1 + Fp6_3over2_model<n,modulus>::mul_by_non_residue(s4),
+ s1 + s2 + s3 - s0 - s4);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::inverse() const
+{
+ /* From "High-Speed Software Implementation of the Optimal Ate Pairing over Barreto-Naehrig Curves"; Algorithm 17 */
+
+ const my_Fp2 &a = this->c0, &b = this->c1, &c = this->c2;
+ const my_Fp2 t0 = a.squared();
+ const my_Fp2 t1 = b.squared();
+ const my_Fp2 t2 = c.squared();
+ const my_Fp2 t3 = a*b;
+ const my_Fp2 t4 = a*c;
+ const my_Fp2 t5 = b*c;
+ const my_Fp2 c0 = t0 - Fp6_3over2_model<n,modulus>::mul_by_non_residue(t5);
+ const my_Fp2 c1 = Fp6_3over2_model<n,modulus>::mul_by_non_residue(t2) - t3;
+ const my_Fp2 c2 = t1 - t4; // the paper referenced above has a typo here: the operation should be "-" (as per Scott), not "*"
+ const my_Fp2 t6 = (a * c0 + Fp6_3over2_model<n,modulus>::mul_by_non_residue((c * c1 + b * c2))).inverse();
+ return Fp6_3over2_model<n,modulus>(t6 * c0, t6 * c1, t6 * c2);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::Frobenius_map(unsigned long power) const
+{
+ return Fp6_3over2_model<n,modulus>(c0.Frobenius_map(power),
+ Frobenius_coeffs_c1[power % 6] * c1.Frobenius_map(power),
+ Frobenius_coeffs_c2[power % 6] * c2.Frobenius_map(power));
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+template<mp_size_t m>
+Fp6_3over2_model<n,modulus> Fp6_3over2_model<n,modulus>::operator^(const bigint<m> &pow) const
+{
+ return power<Fp6_3over2_model<n, modulus>, m>(*this, pow);
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream &out, const Fp6_3over2_model<n, modulus> &el)
+{
+ out << el.c0 << OUTPUT_SEPARATOR << el.c1 << OUTPUT_SEPARATOR << el.c2;
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream &in, Fp6_3over2_model<n, modulus> &el)
+{
+ in >> el.c0 >> el.c1 >> el.c2;
+ return in;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::ostream& operator<<(std::ostream& out, const std::vector<Fp6_3over2_model<n, modulus> > &v)
+{
+ out << v.size() << "\n";
+ for (const Fp6_3over2_model<n, modulus>& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<mp_size_t n, const bigint<n>& modulus>
+std::istream& operator>>(std::istream& in, std::vector<Fp6_3over2_model<n, modulus> > &v)
+{
+ v.clear();
+
+ size_t s;
+ in >> s;
+
+ char b;
+ in.read(&b, 1);
+
+ v.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ Fp6_3over2_model<n, modulus> el;
+ in >> el;
+ v.emplace_back(el);
+ }
+
+ return in;
+}
+
+} // libsnark
+#endif // FP6_3OVER2_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Assembly code snippets for F[p] finite field arithmetic, used by fp.tcc.
+ Specific to x86-64, and used only if USE_ASM is defined.
+ On other architectures or without USE_ASM, fp.tcc uses a portable
+ C++ implementation instead.
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef FP_AUX_TCC_
+#define FP_AUX_TCC_
+
+namespace libsnark {
+
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+/* addq is faster than adcq, even if preceded by clc */
+#define ADD_FIRSTADD \
+ "movq (%[B]), %%rax \n\t" \
+ "addq %%rax, (%[A]) \n\t"
+
+#define ADD_NEXTADD(ofs) \
+ "movq " STR(ofs) "(%[B]), %%rax \n\t" \
+ "adcq %%rax, " STR(ofs) "(%[A]) \n\t"
+
+#define ADD_CMP(ofs) \
+ "movq " STR(ofs) "(%[mod]), %%rax \n\t" \
+ "cmpq %%rax, " STR(ofs) "(%[A]) \n\t" \
+ "jb done%= \n\t" \
+ "ja subtract%= \n\t"
+
+#define ADD_FIRSTSUB \
+ "movq (%[mod]), %%rax \n\t" \
+ "subq %%rax, (%[A]) \n\t"
+
+#define ADD_NEXTSUB(ofs) \
+ "movq " STR(ofs) "(%[mod]), %%rax \n\t" \
+ "sbbq %%rax, " STR(ofs) "(%[A]) \n\t"
+
+#define SUB_FIRSTSUB \
+ "movq (%[B]), %%rax\n\t" \
+ "subq %%rax, (%[A])\n\t"
+
+#define SUB_NEXTSUB(ofs) \
+ "movq " STR(ofs) "(%[B]), %%rax\n\t" \
+ "sbbq %%rax, " STR(ofs) "(%[A])\n\t"
+
+#define SUB_FIRSTADD \
+ "movq (%[mod]), %%rax\n\t" \
+ "addq %%rax, (%[A])\n\t"
+
+#define SUB_NEXTADD(ofs) \
+ "movq " STR(ofs) "(%[mod]), %%rax\n\t" \
+ "adcq %%rax, " STR(ofs) "(%[A])\n\t"
+
+#define MONT_CMP(ofs) \
+ "movq " STR(ofs) "(%[M]), %%rax \n\t" \
+ "cmpq %%rax, " STR(ofs) "(%[tmp]) \n\t" \
+ "jb done%= \n\t" \
+ "ja subtract%= \n\t"
+
+#define MONT_FIRSTSUB \
+ "movq (%[M]), %%rax \n\t" \
+ "subq %%rax, (%[tmp]) \n\t"
+
+#define MONT_NEXTSUB(ofs) \
+ "movq " STR(ofs) "(%[M]), %%rax \n\t" \
+ "sbbq %%rax, " STR(ofs) "(%[tmp]) \n\t"
+
+/*
+ The x86-64 Montgomery multiplication here is similar
+ to Algorithm 2 (CIOS method) in http://eprint.iacr.org/2012/140.pdf
+ and the PowerPC pseudocode of gmp-ecm library (c) Paul Zimmermann and Alexander Kruppa
+ (see comments on top of powerpc64/mulredc.m4).
+*/
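+
+/*
+ In outline: for each limb A[i], the code adds A[i]*B into the running
+ accumulator, computes u = (lowest accumulator limb) * inv mod 2^64 using the
+ precomputed Montgomery constant inv, adds u*M so that the lowest limb becomes
+ zero, and shifts the accumulator down by one limb. After all limbs have been
+ processed the accumulator holds A*B*R^{-1} mod M, up to one conditional final
+ subtraction, where R = 2^(64 * #limbs).
+*/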
+
+#define MONT_PRECOMPUTE \
+ "xorq %[cy], %[cy] \n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq 0(%[B]) \n\t" \
+ "movq %%rax, %[T0] \n\t" \
+ "movq %%rdx, %[T1] # T1:T0 <- A[0] * B[0] \n\t" \
+ "mulq %[inv] \n\t" \
+ "movq %%rax, %[u] # u <- T0 * inv \n\t" \
+ "mulq 0(%[M]) \n\t" \
+ "addq %[T0], %%rax \n\t" \
+ "adcq %%rdx, %[T1] \n\t" \
+ "adcq $0, %[cy] # cy:T1 <- (M[0]*u + T1 * b + T0) / b\n\t"
+
+#define MONT_FIRSTITER(j) \
+ "xorq %[T0], %[T0] \n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq " STR((j*8)) "(%[B]) \n\t" \
+ "addq %[T1], %%rax \n\t" \
+ "movq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \
+ "adcq $0, %%rdx \n\t" \
+ "movq %%rdx, %[T1] # now T1:tmp[j-1] <-- X[0] * Y[j] + T1\n\t" \
+ "movq " STR((j*8)) "(%[M]), %%rax \n\t" \
+ "mulq %[u] \n\t" \
+ "addq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \
+ "adcq %[cy], %%rdx \n\t" \
+ "adcq $0, %[T0] \n\t" \
+ "xorq %[cy], %[cy] \n\t" \
+ "addq %%rdx, %[T1] \n\t" \
+ "adcq %[T0], %[cy] # cy:T1:tmp[j-1] <---- (X[0] * Y[j] + T1) + (M[j] * u + cy * b) \n\t"
+
+#define MONT_ITERFIRST(i) \
+ "xorq %[cy], %[cy] \n\t" \
+ "movq " STR((i*8)) "(%[A]), %%rax \n\t" \
+ "mulq 0(%[B]) \n\t" \
+ "addq 0(%[tmp]), %%rax \n\t" \
+ "adcq 8(%[tmp]), %%rdx \n\t" \
+ "adcq $0, %[cy] \n\t" \
+ "movq %%rax, %[T0] \n\t" \
+ "movq %%rdx, %[T1] # cy:T1:T0 <- A[i] * B[0] + tmp[1] * b + tmp[0]\n\t" \
+ "mulq %[inv] \n\t" \
+ "movq %%rax, %[u] # u <- T0 * inv\n\t" \
+ "mulq 0(%[M]) \n\t" \
+ "addq %[T0], %%rax \n\t" \
+ "adcq %%rdx, %[T1] \n\t" \
+ "adcq $0, %[cy] # cy:T1 <- (M[0]*u + cy * b * b + T1 * b + T0) / b\n\t"
+
+#define MONT_ITERITER(i, j) \
+ "xorq %[T0], %[T0] \n\t" \
+ "movq " STR((i*8)) "(%[A]), %%rax \n\t" \
+ "mulq " STR((j*8)) "(%[B]) \n\t" \
+ "addq %[T1], %%rax \n\t" \
+ "movq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \
+ "adcq $0, %%rdx \n\t" \
+ "movq %%rdx, %[T1] # now T1:tmp[j-1] <-- X[i] * Y[j] + T1 \n\t" \
+ "movq " STR((j*8)) "(%[M]), %%rax \n\t" \
+ "mulq %[u] \n\t" \
+ "addq %%rax, " STR(((j-1)*8)) "(%[tmp]) \n\t" \
+ "adcq %[cy], %%rdx \n\t" \
+ "adcq $0, %[T0] \n\t" \
+ "xorq %[cy], %[cy] \n\t" \
+ "addq %%rdx, %[T1] \n\t" \
+ "adcq %[T0], %[cy] # cy:T1:tmp[j-1] <-- (X[i] * Y[j] + T1) + M[j] * u + cy * b \n\t" \
+ "addq " STR(((j+1)*8)) "(%[tmp]), %[T1] \n\t" \
+ "adcq $0, %[cy] # cy:T1:tmp[j-1] <-- (X[i] * Y[j] + T1) + M[j] * u + (tmp[j+1] + cy) * b \n\t"
+
+#define MONT_FINALIZE(j) \
+ "movq %[T1], " STR((j*8)) "(%[tmp]) \n\t" \
+ "movq %[cy], " STR(((j+1)*8)) "(%[tmp]) \n\t"
+
+/*
+ Comba multiplication and squaring routines are based on the
+ public-domain tomsfastmath library by Tom St Denis
+ <http://www.libtom.org/>
+ <https://github.com/libtom/tomsfastmath/blob/master/src/sqr/fp_sqr_comba.c>
+ <https://github.com/libtom/tomsfastmath/blob/master/src/mul/fp_mul_comba.c>
+
+ Compared to the above, we save 5-20% of cycles by using careful register
+ renaming to implement Comba forward operation.
+ */
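+
+/*
+ Comba ("column-wise") multiplication: for each column k of the schoolbook
+ product, all partial products A[i]*B[j] with i+j == k are accumulated into a
+ three-limb carry chain (c0, c1, c2), and one result limb is written out per
+ column. The "register renaming" comments below mark where the roles of the
+ three accumulator registers rotate.
+*/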
+
+#define COMBA_3_BY_3_MUL(c0_, c1_, c2_, res_, A_, B_) \
+ asm volatile ( \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq 0(%[B]) \n\t" \
+ "movq %%rax, 0(%[res]) \n\t" \
+ "movq %%rdx, %[c0] \n\t" \
+ \
+ "xorq %[c1], %[c1] \n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq 8(%[B]) \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ \
+ "xorq %[c2], %[c2] \n\t" \
+ "movq 8(%[A]), %%rax \n\t" \
+ "mulq 0(%[B]) \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "movq %[c0], 8(%[res]) \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ "adcq $0, %[c2] \n\t" \
+ \
+ "// register renaming (c1, c2, c0)\n\t" \
+ "xorq %[c0], %[c0] \n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq 16(%[B]) \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ \
+ "movq 8(%[A]), %%rax \n\t" \
+ "mulq 8(%[B]) \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ \
+ "movq 16(%[A]), %%rax \n\t" \
+ "mulq 0(%[B]) \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "movq %[c1], 16(%[res]) \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ \
+ "// register renaming (c2, c0, c1)\n\t" \
+ "xorq %[c1], %[c1] \n\t" \
+ "movq 8(%[A]), %%rax \n\t" \
+ "mulq 16(%[B]) \n\t" \
+ "addq %%rax, %[c2] \n\t" \
+ "adcq %%rdx, %[c0] \n\t" \
+ "adcq $0, %[c1] \n\t" \
+ \
+ "movq 16(%[A]), %%rax \n\t" \
+ "mulq 8(%[B]) \n\t" \
+ "addq %%rax, %[c2] \n\t" \
+ "movq %[c2], 24(%[res]) \n\t" \
+ "adcq %%rdx, %[c0] \n\t" \
+ "adcq $0, %[c1] \n\t" \
+ \
+ "// register renaming (c0, c1, c2)\n\t" \
+ "xorq %[c2], %[c2] \n\t" \
+ "movq 16(%[A]), %%rax \n\t" \
+ "mulq 16(%[B]) \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "movq %[c0], 32(%[res]) \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ "movq %[c1], 40(%[res]) \n\t" \
+ : [c0] "=&r" (c0_), [c1] "=&r" (c1_), [c2] "=&r" (c2_) \
+ : [res] "r" (res_), [A] "r" (A_), [B] "r" (B_) \
+ : "%rax", "%rdx", "cc", "memory")
+
+#define COMBA_3_BY_3_SQR(c0_, c1_, c2_, res_, A_) \
+ asm volatile ( \
+ "xorq %[c1], %[c1] \n\t" \
+ "xorq %[c2], %[c2] \n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq %%rax \n\t" \
+ "movq %%rax, 0(%[res]) \n\t" \
+ "movq %%rdx, %[c0] \n\t" \
+ \
+ "movq 0(%[A]), %%rax \n\t" \
+ "mulq 8(%[A]) \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "movq %[c0], 8(%[res]) \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ "adcq $0, %[c2] \n\t" \
+ \
+ "// register renaming (c1, c2, c0)\n\t" \
+ "movq 0(%[A]), %%rax \n\t" \
+ "xorq %[c0], %[c0] \n\t" \
+ "mulq 16(%[A]) \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ \
+ "movq 8(%[A]), %%rax \n\t" \
+ "mulq %%rax \n\t" \
+ "addq %%rax, %[c1] \n\t" \
+ "movq %[c1], 16(%[res]) \n\t" \
+ "adcq %%rdx, %[c2] \n\t" \
+ "adcq $0, %[c0] \n\t" \
+ \
+ "// register renaming (c2, c0, c1)\n\t" \
+ "movq 8(%[A]), %%rax \n\t" \
+ "xorq %[c1], %[c1] \n\t" \
+ "mulq 16(%[A]) \n\t" \
+ "addq %%rax, %[c2] \n\t" \
+ "adcq %%rdx, %[c0] \n\t" \
+ "adcq $0, %[c1] \n\t" \
+ "addq %%rax, %[c2] \n\t" \
+ "movq %[c2], 24(%[res]) \n\t" \
+ "adcq %%rdx, %[c0] \n\t" \
+ "adcq $0, %[c1] \n\t" \
+ \
+ "// register renaming (c0, c1, c2)\n\t" \
+ "movq 16(%[A]), %%rax \n\t" \
+ "mulq %%rax \n\t" \
+ "addq %%rax, %[c0] \n\t" \
+ "movq %[c0], 32(%[res]) \n\t" \
+ "adcq %%rdx, %[c1] \n\t" \
+ "movq %[c1], 40(%[res]) \n\t" \
+ \
+ : [c0] "=&r" (c0_), [c1] "=&r" (c1_), [c2] "=&r" (c2_) \
+ : [res] "r" (res_), [A] "r" (A_) \
+ : "%rax", "%rdx", "cc", "memory")
+
+/*
+ The Montgomery reduction here is based on Algorithm 14.32 in
+ Handbook of Applied Cryptography
+ <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
+ */
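+
+/*
+ Three reduction passes, one per low limb of the 6-limb product: each pass
+ computes k = res[i] * modprime mod 2^64 and adds k*M shifted by i limbs,
+ which zeroes limb i. After the three passes the low three limbs are zero and
+ limbs 3..5 hold the (possibly not fully reduced) Montgomery result, i.e. the
+ product divided by 2^192; a conditional final subtraction (cf. the MONT_CMP /
+ MONT_*SUB macros above) brings it below M.
+*/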
+#define REDUCE_6_LIMB_PRODUCT(k_, tmp1_, tmp2_, tmp3_, inv_, res_, mod_) \
+ __asm__ volatile \
+ ("///////////////////////////////////\n\t" \
+ "movq 0(%[res]), %%rax \n\t" \
+ "mulq %[modprime] \n\t" \
+ "movq %%rax, %[k] \n\t" \
+ \
+ "movq (%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "movq %%rax, %[tmp1] \n\t" \
+ "movq %%rdx, %[tmp2] \n\t" \
+ \
+ "xorq %[tmp3], %[tmp3] \n\t" \
+ "movq 8(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp1], 0(%[res]) \n\t" \
+ "adcq %%rax, %[tmp2] \n\t" \
+ "adcq %%rdx, %[tmp3] \n\t" \
+ \
+ "xorq %[tmp1], %[tmp1] \n\t" \
+ "movq 16(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp2], 8(%[res]) \n\t" \
+ "adcq %%rax, %[tmp3] \n\t" \
+ "adcq %%rdx, %[tmp1] \n\t" \
+ \
+ "addq %[tmp3], 16(%[res]) \n\t" \
+ "adcq %[tmp1], 24(%[res]) \n\t" \
+ "adcq $0, 32(%[res]) \n\t" \
+ "adcq $0, 40(%[res]) \n\t" \
+ \
+ "///////////////////////////////////\n\t" \
+ "movq 8(%[res]), %%rax \n\t" \
+ "mulq %[modprime] \n\t" \
+ "movq %%rax, %[k] \n\t" \
+ \
+ "movq (%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "movq %%rax, %[tmp1] \n\t" \
+ "movq %%rdx, %[tmp2] \n\t" \
+ \
+ "xorq %[tmp3], %[tmp3] \n\t" \
+ "movq 8(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp1], 8(%[res]) \n\t" \
+ "adcq %%rax, %[tmp2] \n\t" \
+ "adcq %%rdx, %[tmp3] \n\t" \
+ \
+ "xorq %[tmp1], %[tmp1] \n\t" \
+ "movq 16(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp2], 16(%[res]) \n\t" \
+ "adcq %%rax, %[tmp3] \n\t" \
+ "adcq %%rdx, %[tmp1] \n\t" \
+ \
+ "addq %[tmp3], 24(%[res]) \n\t" \
+ "adcq %[tmp1], 32(%[res]) \n\t" \
+ "adcq $0, 40(%[res]) \n\t" \
+ \
+ "///////////////////////////////////\n\t" \
+ "movq 16(%[res]), %%rax \n\t" \
+ "mulq %[modprime] \n\t" \
+ "movq %%rax, %[k] \n\t" \
+ \
+ "movq (%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "movq %%rax, %[tmp1] \n\t" \
+ "movq %%rdx, %[tmp2] \n\t" \
+ \
+ "xorq %[tmp3], %[tmp3] \n\t" \
+ "movq 8(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp1], 16(%[res]) \n\t" \
+ "adcq %%rax, %[tmp2] \n\t" \
+ "adcq %%rdx, %[tmp3] \n\t" \
+ \
+ "xorq %[tmp1], %[tmp1] \n\t" \
+ "movq 16(%[mod]), %%rax \n\t" \
+ "mulq %[k] \n\t" \
+ "addq %[tmp2], 24(%[res]) \n\t" \
+ "adcq %%rax, %[tmp3] \n\t" \
+ "adcq %%rdx, %[tmp1] \n\t" \
+ \
+ "addq %[tmp3], 32(%[res]) \n\t" \
+ "adcq %[tmp1], 40(%[res]) \n\t" \
+ : [k] "=&r" (k_), [tmp1] "=&r" (tmp1_), [tmp2] "=&r" (tmp2_), [tmp3] "=&r" (tmp3_) \
+ : [modprime] "r" (inv_), [res] "r" (res_), [mod] "r" (mod_) \
+ : "%rax", "%rdx", "cc", "memory")
+
+} // libsnark
+#endif // FP_AUX_TCC_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "algebra/fields/bigint.hpp"
+
+using namespace libsnark;
+
+void test_bigint()
+{
+ static_assert(ULONG_MAX == 0xFFFFFFFFFFFFFFFFul, "unsigned long not 64-bit");
+ static_assert(GMP_NUMB_BITS == 64, "GMP limb not 64-bit");
+
+ const char *b1_decimal = "76749407";
+ const char *b2_decimal = "435020359732196472065729437602";
+ const char *b3_decimal = "33387554642372758038536799358397002014";
+ const char *b2_binary = "0000000000000000000000000000010101111101101000000110100001011010"
+ "1101101010001001000001101000101000100110011001110001111110100010";
+
+ bigint<1> b0 = bigint<1>(0ul);
+ bigint<1> b1 = bigint<1>(b1_decimal);
+ bigint<2> b2 = bigint<2>(b2_decimal);
+
+ assert(b0.as_ulong() == 0ul);
+ assert(b0.is_zero());
+ assert(b1.as_ulong() == 76749407ul);
+ assert(!(b1.is_zero()));
+ assert(b2.as_ulong() == 15747124762497195938ul);
+ assert(!(b2.is_zero()));
+ assert(b0 != b1);
+ assert(!(b0 == b1));
+
+ assert(b2.max_bits() == 128);
+ assert(b2.num_bits() == 99);
+ for (size_t i = 0; i < 128; i++) {
+ assert(b2.test_bit(i) == (b2_binary[127-i] == '1'));
+ }
+
+ bigint<3> b3 = b2 * b1;
+
+ assert(b3 == bigint<3>(b3_decimal));
+ assert(!(b3.is_zero()));
+
+ bigint<3> b3a { b3 };
+ assert(b3a == bigint<3>(b3_decimal));
+ assert(b3a == b3);
+ assert(!(b3a.is_zero()));
+
+ mpz_t m3;
+ mpz_init(m3);
+ b3.to_mpz(m3);
+ bigint<3> b3b { m3 };
+ assert(b3b == b3);
+
+ bigint<2> quotient;
+ bigint<2> remainder;
+ bigint<3>::div_qr(quotient, remainder, b3, b2);
+ assert(quotient.num_bits() < GMP_NUMB_BITS);
+ assert(quotient.as_ulong() == b1.as_ulong());
+ bigint<1> b1inc = bigint<1>("76749408");
+ bigint<1> b1a = quotient.shorten(b1inc, "test");
+ assert(b1a == b1);
+ assert(remainder.is_zero());
+ remainder.limit(b2, "test");
+
+ try {
+ (void)(quotient.shorten(b1, "test"));
+ assert(false);
+ } catch (const std::domain_error &) {}
+ try {
+ remainder.limit(remainder, "test");
+ assert(false);
+ } catch (const std::domain_error &) {}
+
+ bigint<1> br = bigint<1>("42");
+ b3 += br;
+ assert(b3 != b3a);
+ assert(b3 > b3a);
+ assert(!(b3a > b3));
+
+ bigint<3>::div_qr(quotient, remainder, b3, b2);
+ assert(quotient.num_bits() < GMP_NUMB_BITS);
+ assert(quotient.as_ulong() == b1.as_ulong());
+ assert(remainder.num_bits() < GMP_NUMB_BITS);
+ assert(remainder.as_ulong() == 42);
+
+ b3a.clear();
+ assert(b3a.is_zero());
+ assert(b3a.num_bits() == 0);
+ assert(!(b3.is_zero()));
+
+ bigint<4> bx = bigint<4>().randomize();
+ bigint<4> by = bigint<4>().randomize();
+ assert(!(bx == by));
+
+ // TODO: test serialization
+}
+
+int main(void)
+{
+ test_bigint();
+ return 0;
+}
+
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include "common/profiling.hpp"
+#include "algebra/curves/edwards/edwards_pp.hpp"
+#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp"
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+#ifdef CURVE_BN128
+#include "algebra/curves/bn128/bn128_pp.hpp"
+#endif
+#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+#include "algebra/fields/fp6_3over2.hpp"
+#include "algebra/fields/fp12_2over3over2.hpp"
+
+using namespace libsnark;
+
+template<typename FieldT>
+void test_field()
+{
+ bigint<1> rand1 = bigint<1>("76749407");
+ bigint<1> rand2 = bigint<1>("44410867");
+ bigint<1> randsum = bigint<1>("121160274");
+
+ FieldT zero = FieldT::zero();
+ FieldT one = FieldT::one();
+ FieldT a = FieldT::random_element();
+ FieldT a_ser;
+ a_ser = reserialize<FieldT>(a);
+ assert(a_ser == a);
+
+ FieldT b = FieldT::random_element();
+ FieldT c = FieldT::random_element();
+ FieldT d = FieldT::random_element();
+
+ assert(a != zero);
+ assert(a != one);
+
+ assert(a * a == a.squared());
+ assert((a + b).squared() == a.squared() + a*b + b*a + b.squared());
+ assert((a + b)*(c + d) == a*c + a*d + b*c + b*d);
+ assert(a - b == a + (-b));
+ assert(a - b == (-b) + a);
+
+ assert((a ^ rand1) * (a ^ rand2) == (a^randsum));
+
+ assert(a * a.inverse() == one);
+ assert((a + b) * c.inverse() == a * c.inverse() + (b.inverse() * c).inverse());
+
+}
+
+template<typename FieldT>
+void test_sqrt()
+{
+ for (size_t i = 0; i < 100; ++i)
+ {
+ FieldT a = FieldT::random_element();
+ FieldT asq = a.squared();
+ assert(asq.sqrt() == a || asq.sqrt() == -a);
+ }
+}
+
+template<typename FieldT>
+void test_two_squarings()
+{
+ FieldT a = FieldT::random_element();
+ assert(a.squared() == a * a);
+ assert(a.squared() == a.squared_complex());
+ assert(a.squared() == a.squared_karatsuba());
+}
+
+template<typename FieldT>
+void test_Frobenius()
+{
+ FieldT a = FieldT::random_element();
+ assert(a.Frobenius_map(0) == a);
+ FieldT a_q = a ^ FieldT::base_field_char();
+ for (size_t power = 1; power < 10; ++power)
+ {
+ const FieldT a_qi = a.Frobenius_map(power);
+ assert(a_qi == a_q);
+
+ a_q = a_q ^ FieldT::base_field_char();
+ }
+}
+
+template<typename FieldT>
+void test_unitary_inverse()
+{
+ assert(FieldT::extension_degree() % 2 == 0);
+ FieldT a = FieldT::random_element();
+ FieldT aqcubed_minus1 = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse();
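+ // a^(q^(k/2)-1) has norm 1 with respect to the degree-k/2 subfield, so its
+ // inverse equals its conjugate (c0, -c1), which is what unitary_inverse()
+ // returns.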
+ assert(aqcubed_minus1.inverse() == aqcubed_minus1.unitary_inverse());
+}
+
+template<typename FieldT>
+void test_cyclotomic_squaring();
+
+template<>
+void test_cyclotomic_squaring<Fqk<edwards_pp> >()
+{
+ typedef Fqk<edwards_pp> FieldT;
+ assert(FieldT::extension_degree() % 2 == 0);
+ FieldT a = FieldT::random_element();
+ FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse();
+ // beta = a^((q^(k/2)-1)*(q+1))
+ FieldT beta = a_unitary.Frobenius_map(1) * a_unitary;
+ assert(beta.cyclotomic_squared() == beta.squared());
+}
+
+template<>
+void test_cyclotomic_squaring<Fqk<mnt4_pp> >()
+{
+ typedef Fqk<mnt4_pp> FieldT;
+ assert(FieldT::extension_degree() % 2 == 0);
+ FieldT a = FieldT::random_element();
+ FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse();
+ // beta = a^(q^(k/2)-1)
+ FieldT beta = a_unitary;
+ assert(beta.cyclotomic_squared() == beta.squared());
+}
+
+template<>
+void test_cyclotomic_squaring<Fqk<mnt6_pp> >()
+{
+ typedef Fqk<mnt6_pp> FieldT;
+ assert(FieldT::extension_degree() % 2 == 0);
+ FieldT a = FieldT::random_element();
+ FieldT a_unitary = a.Frobenius_map(FieldT::extension_degree()/2) * a.inverse();
+ // beta = a^((q^(k/2)-1)*(q+1))
+ FieldT beta = a_unitary.Frobenius_map(1) * a_unitary;
+ assert(beta.cyclotomic_squared() == beta.squared());
+}
+
+template<typename ppT>
+void test_all_fields()
+{
+ test_field<Fr<ppT> >();
+ test_field<Fq<ppT> >();
+ test_field<Fqe<ppT> >();
+ test_field<Fqk<ppT> >();
+
+ test_sqrt<Fr<ppT> >();
+ test_sqrt<Fq<ppT> >();
+ test_sqrt<Fqe<ppT> >();
+
+ test_Frobenius<Fqe<ppT> >();
+ test_Frobenius<Fqk<ppT> >();
+
+ test_unitary_inverse<Fqk<ppT> >();
+}
+
+template<typename Fp4T>
+void test_Fp4_tom_cook()
+{
+ typedef typename Fp4T::my_Fp FieldT;
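+ // Interpret a and b as cubics f(Y) = a0 + a1*Y + a2*Y^2 + a3*Y^3 and
+ // g(Y) = b0 + b1*Y + b2*Y^2 + b3*Y^3; v0..v6 below are f(y)*g(y) evaluated
+ // at y = 0, 1, -1, 2, -2, 3 and "infinity" (product of leading coefficients).
+ // c0..c3 are then recovered by Toom--Cook interpolation followed by reduction
+ // using beta = Fp4T::non_residue (i.e. Y^4 = beta).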
+ for (size_t i = 0; i < 100; ++i)
+ {
+ const Fp4T a = Fp4T::random_element();
+ const Fp4T b = Fp4T::random_element();
+ const Fp4T correct_res = a * b;
+
+ Fp4T res;
+
+ const FieldT
+ &a0 = a.c0.c0,
+ &a1 = a.c1.c0,
+ &a2 = a.c0.c1,
+ &a3 = a.c1.c1;
+
+ const FieldT
+ &b0 = b.c0.c0,
+ &b1 = b.c1.c0,
+ &b2 = b.c0.c1,
+ &b3 = b.c1.c1;
+
+ FieldT
+ &c0 = res.c0.c0,
+ &c1 = res.c1.c0,
+ &c2 = res.c0.c1,
+ &c3 = res.c1.c1;
+
+ const FieldT v0 = a0 * b0;
+ const FieldT v1 = (a0 + a1 + a2 + a3) * (b0 + b1 + b2 + b3);
+ const FieldT v2 = (a0 - a1 + a2 - a3) * (b0 - b1 + b2 - b3);
+ const FieldT v3 = (a0 + FieldT(2)*a1 + FieldT(4)*a2 + FieldT(8)*a3) * (b0 + FieldT(2)*b1 + FieldT(4)*b2 + FieldT(8)*b3);
+ const FieldT v4 = (a0 - FieldT(2)*a1 + FieldT(4)*a2 - FieldT(8)*a3) * (b0 - FieldT(2)*b1 + FieldT(4)*b2 - FieldT(8)*b3);
+ const FieldT v5 = (a0 + FieldT(3)*a1 + FieldT(9)*a2 + FieldT(27)*a3) * (b0 + FieldT(3)*b1 + FieldT(9)*b2 + FieldT(27)*b3);
+ const FieldT v6 = a3 * b3;
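+
+ /* v0..v6 are evaluations of the product of the degree-3 polynomials
+ a(X) = a0 + a1*X + a2*X^2 + a3*X^3 and b(X) (defined analogously)
+ at the points X = 0, 1, -1, 2, -2, 3 and "at infinity" (v6 is the
+ product of the leading coefficients), in Toom-Cook fashion; the
+ formulas below interpolate the coefficients of a(X)*b(X), folding
+ in the non-residue beta. */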
+
+ const FieldT beta = Fp4T::non_residue;
+
+ c0 = v0 + beta*(FieldT(4).inverse()*v0 - FieldT(6).inverse()*(v1 + v2) + FieldT(24).inverse() * (v3 + v4) - FieldT(5) * v6);
+ c1 = - FieldT(3).inverse()*v0 + v1 - FieldT(2).inverse()*v2 - FieldT(4).inverse()*v3 + FieldT(20).inverse() * v4 + FieldT(30).inverse() * v5 - FieldT(12) * v6 + beta * ( - FieldT(12).inverse() * (v0 - v1) + FieldT(24).inverse()*(v2 - v3) - FieldT(120).inverse() * (v4 - v5) - FieldT(3) * v6);
+ c2 = - (FieldT(5)*(FieldT(4).inverse()))* v0 + (FieldT(2)*(FieldT(3).inverse()))*(v1 + v2) - FieldT(24).inverse()*(v3 + v4) + FieldT(4)*v6 + beta*v6;
+ c3 = FieldT(12).inverse() * (FieldT(5)*v0 - FieldT(7)*v1) - FieldT(24).inverse()*(v2 - FieldT(7)*v3 + v4 + v5) + FieldT(15)*v6;
+
+ assert(res == correct_res);
+
+ // consistency checks: re-derive {v0, v3, v4, v5} from the interpolated coefficients c0..c3
+ const FieldT u = (FieldT::one() - beta).inverse();
+ assert(v0 == u * c0 + beta * u * c2 - beta * u * FieldT(2).inverse() * v1 - beta * u * FieldT(2).inverse() * v2 + beta * v6);
+ assert(v3 == - FieldT(15) * u * c0 - FieldT(30) * u * c1 - FieldT(3) * (FieldT(4) + beta) * u * c2 - FieldT(6) * (FieldT(4) + beta) * u * c3 + (FieldT(24) - FieldT(3) * beta * FieldT(2).inverse()) * u * v1 + (-FieldT(8) + beta * FieldT(2).inverse()) * u * v2
+ - FieldT(3) * (-FieldT(16) + beta) * v6);
+ assert(v4 == - FieldT(15) * u * c0 + FieldT(30) * u * c1 - FieldT(3) * (FieldT(4) + beta) * u * c2 + FieldT(6) * (FieldT(4) + beta) * u * c3 + (FieldT(24) - FieldT(3) * beta * FieldT(2).inverse()) * u * v2 + (-FieldT(8) + beta * FieldT(2).inverse()) * u * v1
+ - FieldT(3) * (-FieldT(16) + beta) * v6);
+ assert(v5 == - FieldT(80) * u * c0 - FieldT(240) * u * c1 - FieldT(8) * (FieldT(9) + beta) * u * c2 - FieldT(24) * (FieldT(9) + beta) * u * c3 - FieldT(2) * (-FieldT(81) + beta) * u * v1 + (-FieldT(81) + beta) * u * v2
+ - FieldT(8) * (-FieldT(81) + beta) * v6);
+
+ // c0 + beta c2 - (beta v1)/2 - (beta v2)/ 2 - (-1 + beta) beta v6,
+ // -15 c0 - 30 c1 - 3 (4 + beta) c2 - 6 (4 + beta) c3 + (24 - (3 beta)/2) v1 + (-8 + beta/2) v2 + 3 (-16 + beta) (-1 + beta) v6,
+ // -15 c0 + 30 c1 - 3 (4 + beta) c2 + 6 (4 + beta) c3 + (-8 + beta/2) v1 + (24 - (3 beta)/2) v2 + 3 (-16 + beta) (-1 + beta) v6,
+ // -80 c0 - 240 c1 - 8 (9 + beta) c2 - 24 (9 + beta) c3 - 2 (-81 + beta) v1 + (-81 + beta) v2 + 8 (-81 + beta) (-1 + beta) v6
+ }
+}
+
+int main(void)
+{
+ edwards_pp::init_public_params();
+ test_all_fields<edwards_pp>();
+ test_cyclotomic_squaring<Fqk<edwards_pp> >();
+
+ mnt4_pp::init_public_params();
+ test_all_fields<mnt4_pp>();
+ test_Fp4_tom_cook<mnt4_Fq4>();
+ test_two_squarings<Fqe<mnt4_pp> >();
+ test_cyclotomic_squaring<Fqk<mnt4_pp> >();
+
+ mnt6_pp::init_public_params();
+ test_all_fields<mnt6_pp>();
+ test_cyclotomic_squaring<Fqk<mnt6_pp> >();
+
+ alt_bn128_pp::init_public_params();
+ test_field<alt_bn128_Fq6>();
+ test_Frobenius<alt_bn128_Fq6>();
+ test_all_fields<alt_bn128_pp>();
+
+#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
+ bn128_pp::init_public_params();
+ test_field<Fr<bn128_pp> >();
+ test_field<Fq<bn128_pp> >();
+#endif
+}
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for:
+ - a knowledge commitment, and
+ - a knowledge commitment vector.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef KNOWLEDGE_COMMITMENT_HPP_
+#define KNOWLEDGE_COMMITMENT_HPP_
+
+#include "algebra/fields/fp.hpp"
+#include "common/data_structures/sparse_vector.hpp"
+
+namespace libsnark {
+
+/********************** Knowledge commitment *********************************/
+
+/**
+ * A knowledge commitment is a pair (g,h) where g is in T1 and h in T2,
+ * and T1 and T2 are groups (written additively).
+ *
+ * Such pairs form a group by defining:
+ * - "zero" = (0,0)
+ * - "one" = (1,1)
+ * - a * (g,h) + b * (g',h') := ( a * g + b * g', a * h + b * h').
+ */
+template<typename T1, typename T2>
+struct knowledge_commitment {
+
+ T1 g;
+ T2 h;
+
+ knowledge_commitment<T1,T2>() = default;
+ knowledge_commitment<T1,T2>(const knowledge_commitment<T1,T2> &other) = default;
+ knowledge_commitment<T1,T2>(knowledge_commitment<T1,T2> &&other) = default;
+ knowledge_commitment<T1,T2>(const T1 &g, const T2 &h);
+
+ knowledge_commitment<T1,T2>& operator=(const knowledge_commitment<T1,T2> &other) = default;
+ knowledge_commitment<T1,T2>& operator=(knowledge_commitment<T1,T2> &&other) = default;
+ knowledge_commitment<T1,T2> operator+(const knowledge_commitment<T1, T2> &other) const;
+
+ bool is_zero() const;
+ bool operator==(const knowledge_commitment<T1,T2> &other) const;
+ bool operator!=(const knowledge_commitment<T1,T2> &other) const;
+
+ static knowledge_commitment<T1,T2> zero();
+ static knowledge_commitment<T1,T2> one();
+
+ void print() const;
+
+ static size_t size_in_bits();
+};
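+
+/*
+ Illustrative sketch (not part of the interface): for scalars a, b and
+ commitments kc1 = (g1, h1), kc2 = (g2, h2), the group law above gives
+
+     a * kc1 + b * kc2 == knowledge_commitment<T1,T2>(a * g1 + b * g2,
+                                                      a * h1 + b * h2)
+
+ i.e., the two components are combined independently, component-wise.
+*/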
+
+template<typename T1, typename T2, mp_size_t m>
+knowledge_commitment<T1,T2> operator*(const bigint<m> &lhs, const knowledge_commitment<T1,T2> &rhs);
+
+template<typename T1, typename T2, mp_size_t m, const bigint<m> &modulus_p>
+knowledge_commitment<T1,T2> operator*(const Fp_model<m, modulus_p> &lhs, const knowledge_commitment<T1,T2> &rhs);
+
+template<typename T1,typename T2>
+std::ostream& operator<<(std::ostream& out, const knowledge_commitment<T1,T2> &kc);
+
+template<typename T1,typename T2>
+std::istream& operator>>(std::istream& in, knowledge_commitment<T1,T2> &kc);
+
+/******************** Knowledge commitment vector ****************************/
+
+/**
+ * A knowledge commitment vector is a sparse vector of knowledge commitments.
+ */
+template<typename T1, typename T2>
+using knowledge_commitment_vector = sparse_vector<knowledge_commitment<T1, T2> >;
+
+} // libsnark
+
+#include "algebra/knowledge_commitment/knowledge_commitment.tcc"
+
+#endif // KNOWLEDGE_COMMITMENT_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for:
+ - a knowledge commitment, and
+ - a knowledge commitment vector.
+
+ See knowledge_commitment.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef KNOWLEDGE_COMMITMENT_TCC_
+#define KNOWLEDGE_COMMITMENT_TCC_
+
+namespace libsnark {
+
+template<typename T1, typename T2>
+knowledge_commitment<T1,T2>::knowledge_commitment(const T1 &g, const T2 &h) :
+ g(g), h(h)
+{
+}
+
+template<typename T1, typename T2>
+knowledge_commitment<T1,T2> knowledge_commitment<T1,T2>::zero()
+{
+ return knowledge_commitment<T1,T2>(T1::zero(), T2::zero());
+}
+
+template<typename T1, typename T2>
+knowledge_commitment<T1,T2> knowledge_commitment<T1,T2>::one()
+{
+ return knowledge_commitment<T1,T2>(T1::one(), T2::one());
+}
+
+template<typename T1, typename T2>
+knowledge_commitment<T1,T2> knowledge_commitment<T1,T2>::operator+(const knowledge_commitment<T1,T2> &other) const
+{
+ return knowledge_commitment<T1,T2>(this->g + other.g,
+ this->h + other.h);
+}
+
+template<typename T1, typename T2>
+bool knowledge_commitment<T1,T2>::is_zero() const
+{
+ return (g.is_zero() && h.is_zero());
+}
+
+template<typename T1, typename T2>
+bool knowledge_commitment<T1,T2>::operator==(const knowledge_commitment<T1,T2> &other) const
+{
+ return (this->g == other.g &&
+ this->h == other.h);
+}
+
+template<typename T1, typename T2>
+bool knowledge_commitment<T1,T2>::operator!=(const knowledge_commitment<T1,T2> &other) const
+{
+ return !((*this) == other);
+}
+
+template<typename T1, typename T2, mp_size_t m>
+knowledge_commitment<T1,T2> operator*(const bigint<m> &lhs, const knowledge_commitment<T1,T2> &rhs)
+{
+ return knowledge_commitment<T1,T2>(lhs * rhs.g,
+ lhs * rhs.h);
+}
+
+template<typename T1, typename T2, mp_size_t m, const bigint<m> &modulus_p>
+knowledge_commitment<T1,T2> operator*(const Fp_model<m, modulus_p> &lhs, const knowledge_commitment<T1,T2> &rhs)
+{
+ return (lhs.as_bigint()) * rhs;
+}
+
+template<typename T1, typename T2>
+void knowledge_commitment<T1,T2>::print() const
+{
+ printf("knowledge_commitment.g:\n");
+ g.print();
+ printf("knowledge_commitment.h:\n");
+ h.print();
+}
+
+template<typename T1, typename T2>
+size_t knowledge_commitment<T1,T2>::size_in_bits()
+{
+ return T1::size_in_bits() + T2::size_in_bits();
+}
+
+template<typename T1,typename T2>
+std::ostream& operator<<(std::ostream& out, const knowledge_commitment<T1,T2> &kc)
+{
+ out << kc.g << OUTPUT_SEPARATOR << kc.h;
+ return out;
+}
+
+template<typename T1,typename T2>
+std::istream& operator>>(std::istream& in, knowledge_commitment<T1,T2> &kc)
+{
+ in >> kc.g;
+ consume_OUTPUT_SEPARATOR(in);
+ in >> kc.h;
+ return in;
+}
+
+} // libsnark
+
+#endif // KNOWLEDGE_COMMITMENT_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef KC_MULTIEXP_HPP_
+#define KC_MULTIEXP_HPP_
+
+/*
+ Split out from multiexp to prevent cyclical
+ dependencies: previously, multiexp depended on
+ knowledge_commitment, which depended on sparse_vector, which
+ depended on multiexp (for accumulate).
+
+ Will probably go away in more general exp refactoring.
+*/
+
+#include "algebra/knowledge_commitment/knowledge_commitment.hpp"
+
+namespace libsnark {
+
+template<typename T1, typename T2, mp_size_t n>
+knowledge_commitment<T1,T2> opt_window_wnaf_exp(const knowledge_commitment<T1,T2> &base,
+ const bigint<n> &scalar, const size_t scalar_bits);
+
+template<typename T1, typename T2, typename FieldT>
+knowledge_commitment<T1, T2> kc_multi_exp_with_mixed_addition(const knowledge_commitment_vector<T1, T2> &vec,
+ const size_t min_idx,
+ const size_t max_idx,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp=false);
+
+template<typename T1, typename T2>
+void kc_batch_to_special(std::vector<knowledge_commitment<T1, T2> > &vec);
+
+template<typename T1, typename T2, typename FieldT>
+knowledge_commitment_vector<T1, T2> kc_batch_exp(const size_t scalar_size,
+ const size_t T1_window,
+ const size_t T2_window,
+ const window_table<T1> &T1_table,
+ const window_table<T2> &T2_table,
+ const FieldT &T1_coeff,
+ const FieldT &T2_coeff,
+ const std::vector<FieldT> &v,
+ const size_t suggested_num_chunks);
+
+} // libsnark
+
+#include "algebra/scalar_multiplication/kc_multiexp.tcc"
+
+#endif // KC_MULTIEXP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef KC_MULTIEXP_TCC_
+#define KC_MULTIEXP_TCC_
+
+namespace libsnark {
+
+template<typename T1, typename T2, mp_size_t n>
+knowledge_commitment<T1,T2> opt_window_wnaf_exp(const knowledge_commitment<T1,T2> &base,
+ const bigint<n> &scalar, const size_t scalar_bits)
+{
+ return knowledge_commitment<T1,T2>(opt_window_wnaf_exp(base.g, scalar, scalar_bits),
+ opt_window_wnaf_exp(base.h, scalar, scalar_bits));
+}
+
+template<typename T1, typename T2, typename FieldT>
+knowledge_commitment<T1, T2> kc_multi_exp_with_mixed_addition(const knowledge_commitment_vector<T1, T2> &vec,
+ const size_t min_idx,
+ const size_t max_idx,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp)
+{
+ enter_block("Process scalar vector");
+ auto index_it = std::lower_bound(vec.indices.begin(), vec.indices.end(), min_idx);
+ const size_t offset = index_it - vec.indices.begin();
+
+ auto value_it = vec.values.begin() + offset;
+
+ const FieldT zero = FieldT::zero();
+ const FieldT one = FieldT::one();
+
+ std::vector<FieldT> p;
+ std::vector<knowledge_commitment<T1, T2> > g;
+
+ knowledge_commitment<T1, T2> acc = knowledge_commitment<T1, T2>::zero();
+
+ size_t num_skip = 0;
+ size_t num_add = 0;
+ size_t num_other = 0;
+
+ const size_t scalar_length = std::distance(scalar_start, scalar_end);
+
+ while (index_it != vec.indices.end() && *index_it < max_idx)
+ {
+ const size_t scalar_position = (*index_it) - min_idx;
+ assert(scalar_position < scalar_length);
+
+ const FieldT scalar = *(scalar_start + scalar_position);
+
+ if (scalar == zero)
+ {
+ // do nothing
+ ++num_skip;
+ }
+ else if (scalar == one)
+ {
+#ifdef USE_MIXED_ADDITION
+ acc.g = acc.g.mixed_add(value_it->g);
+ acc.h = acc.h.mixed_add(value_it->h);
+#else
+ acc.g = acc.g + value_it->g;
+ acc.h = acc.h + value_it->h;
+#endif
+ ++num_add;
+ }
+ else
+ {
+ p.emplace_back(scalar);
+ g.emplace_back(*value_it);
+ ++num_other;
+ }
+
+ ++index_it;
+ ++value_it;
+ }
+
+ //print_indent(); printf("* Elements of w skipped: %zu (%0.2f%%)\n", num_skip, 100.*num_skip/(num_skip+num_add+num_other));
+ //print_indent(); printf("* Elements of w processed with special addition: %zu (%0.2f%%)\n", num_add, 100.*num_add/(num_skip+num_add+num_other));
+ //print_indent(); printf("* Elements of w remaining: %zu (%0.2f%%)\n", num_other, 100.*num_other/(num_skip+num_add+num_other));
+ leave_block("Process scalar vector");
+
+ return acc + multi_exp<knowledge_commitment<T1, T2>, FieldT>(g.begin(), g.end(), p.begin(), p.end(), chunks, use_multiexp);
+}
+
+template<typename T1, typename T2>
+void kc_batch_to_special(std::vector<knowledge_commitment<T1, T2> > &vec)
+{
+ enter_block("Batch-convert knowledge-commitments to special form");
+
+ std::vector<T1> g_vec;
+ g_vec.reserve(vec.size());
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].g.is_zero())
+ {
+ g_vec.emplace_back(vec[i].g);
+ }
+ }
+
+ batch_to_special_all_non_zeros<T1>(g_vec);
+ auto g_it = g_vec.begin();
+ T1 T1_zero_special = T1::zero();
+ T1_zero_special.to_special();
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].g.is_zero())
+ {
+ vec[i].g = *g_it;
+ ++g_it;
+ }
+ else
+ {
+ vec[i].g = T1_zero_special;
+ }
+ }
+
+ g_vec.clear();
+
+ std::vector<T2> h_vec;
+ h_vec.reserve(vec.size());
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].h.is_zero())
+ {
+ h_vec.emplace_back(vec[i].h);
+ }
+ }
+
+ batch_to_special_all_non_zeros<T2>(h_vec);
+ auto h_it = h_vec.begin();
+ T2 T2_zero_special = T2::zero();
+ T2_zero_special.to_special();
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].h.is_zero())
+ {
+ vec[i].h = *h_it;
+ ++h_it;
+ }
+ else
+ {
+ vec[i].h = T2_zero_special;
+ }
+ }
+
+ g_vec.clear();
+
+ leave_block("Batch-convert knowledge-commitments to special form");
+}
+
+template<typename T1, typename T2, typename FieldT>
+knowledge_commitment_vector<T1, T2> kc_batch_exp_internal(const size_t scalar_size,
+ const size_t T1_window,
+ const size_t T2_window,
+ const window_table<T1> &T1_table,
+ const window_table<T2> &T2_table,
+ const FieldT &T1_coeff,
+ const FieldT &T2_coeff,
+ const std::vector<FieldT> &v,
+ const size_t start_pos,
+ const size_t end_pos,
+ const size_t expected_size)
+{
+ knowledge_commitment_vector<T1, T2> res;
+
+ res.values.reserve(expected_size);
+ res.indices.reserve(expected_size);
+
+ for (size_t pos = start_pos; pos != end_pos; ++pos)
+ {
+ if (!v[pos].is_zero())
+ {
+ res.values.emplace_back(knowledge_commitment<T1, T2>(windowed_exp(scalar_size, T1_window, T1_table, T1_coeff * v[pos]),
+ windowed_exp(scalar_size, T2_window, T2_table, T2_coeff * v[pos])));
+ res.indices.emplace_back(pos);
+ }
+ }
+
+ return res;
+}
+
+template<typename T1, typename T2, typename FieldT>
+knowledge_commitment_vector<T1, T2> kc_batch_exp(const size_t scalar_size,
+ const size_t T1_window,
+ const size_t T2_window,
+ const window_table<T1> &T1_table,
+ const window_table<T2> &T2_table,
+ const FieldT &T1_coeff,
+ const FieldT &T2_coeff,
+ const std::vector<FieldT> &v,
+ const size_t suggested_num_chunks)
+{
+ knowledge_commitment_vector<T1, T2> res;
+ res.domain_size_ = v.size();
+
+ size_t nonzero = 0;
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ nonzero += (v[i].is_zero() ? 0 : 1);
+ }
+
+ const size_t num_chunks = std::max((size_t)1, std::min(nonzero, suggested_num_chunks));
+
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("Non-zero coordinate count: %zu/%zu (%0.2f%%)\n", nonzero, v.size(), 100.*nonzero/v.size());
+ }
+
+ std::vector<knowledge_commitment_vector<T1, T2> > tmp(num_chunks);
+ std::vector<size_t> chunk_pos(num_chunks+1);
+
+ const size_t chunk_size = nonzero / num_chunks;
+ const size_t last_chunk = nonzero - chunk_size * (num_chunks - 1);
+
+ chunk_pos[0] = 0;
+
+ size_t cnt = 0;
+ size_t chunkno = 1;
+
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ cnt += (v[i].is_zero() ? 0 : 1);
+ if (cnt == chunk_size && chunkno < num_chunks)
+ {
+ chunk_pos[chunkno] = i;
+ cnt = 0;
+ ++chunkno;
+ }
+ }
+
+ chunk_pos[num_chunks] = v.size();
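+
+ /* chunk_pos[i] .. chunk_pos[i+1] now delimit ranges of v chosen so that
+ each range contains roughly chunk_size non-zero entries (the last range
+ takes the remainder), letting the chunks be processed independently below. */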
+
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ tmp[i] = kc_batch_exp_internal<T1, T2, FieldT>(scalar_size, T1_window, T2_window, T1_table, T2_table, T1_coeff, T2_coeff, v,
+ chunk_pos[i], chunk_pos[i+1], i == num_chunks - 1 ? last_chunk : chunk_size);
+#ifdef USE_MIXED_ADDITION
+ kc_batch_to_special<T1, T2>(tmp[i].values);
+#endif
+ }
+
+ if (num_chunks == 1)
+ {
+ tmp[0].domain_size_ = v.size();
+ return tmp[0];
+ }
+ else
+ {
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ res.values.insert(res.values.end(), tmp[i].values.begin(), tmp[i].values.end());
+ res.indices.insert(res.indices.end(), tmp[i].indices.begin(), tmp[i].indices.end());
+ }
+ return res;
+ }
+}
+
+} // libsnark
+
+#endif // KC_MULTIEXP_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for multi-exponentiation routines.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MULTIEXP_HPP_
+#define MULTIEXP_HPP_
+
+namespace libsnark {
+
+/**
+ * Naive multi-exponentiation individually multiplies each base by the
+ * corresponding scalar and adds up the results.
+ */
+template<typename T, typename FieldT>
+T naive_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end);
+
+template<typename T, typename FieldT>
+T naive_plain_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end);
+
+/**
+ * Multi-exponentiation based on a variant of the Bos-Coster algorithm [1],
+ * with implementation suggestions from [2].
+ *
+ * [1] = Bos and Coster, "Addition chain heuristics", CRYPTO '89
+ * [2] = Bernstein, Duif, Lange, Schwabe, and Yang, "High-speed high-security signatures", CHES '11
+ */
+template<typename T, typename FieldT>
+T multi_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp=false);
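+
+/*
+ Illustrative usage sketch (the types G1<ppT> and Fr<ppT> are merely
+ examples of a suitable group/scalar pairing):
+
+     std::vector<G1<ppT> > bases = ...;   // group elements
+     std::vector<Fr<ppT> > scalars = ...; // matching scalars
+     const G1<ppT> r =
+         multi_exp<G1<ppT>, Fr<ppT> >(bases.begin(), bases.end(),
+                                      scalars.begin(), scalars.end(),
+                                      4, true);
+     // r == scalars[0]*bases[0] + scalars[1]*bases[1] + ...,
+     // computed in 4 chunks using the Bos-Coster variant (use_multiexp = true).
+*/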
+
+
+/**
+ * A variant of multi_exp that takes advantage of the method mixed_add (instead of the operator '+').
+ */
+template<typename T, typename FieldT>
+T multi_exp_with_mixed_addition(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp);
+
+/**
+ * A window table stores, for each window position, precomputed multiples of a fixed base; it is used for fixed-base multi-scalar multiplications.
+ */
+template<typename T>
+using window_table = std::vector<std::vector<T> >;
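+
+/*
+ Illustrative flow (a sketch, not a prescribed usage): pick a window size,
+ precompute a table of multiples of a fixed base g, then reuse the table
+ for many exponentiations with that base:
+
+     const size_t window = get_exp_window_size<T>(num_scalars);
+     const window_table<T> table = get_window_table<T>(scalar_size, window, g);
+     const T r = windowed_exp<T, FieldT>(scalar_size, window, table, x); // r == x * g
+*/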
+
+/**
+ * Compute window size for the given number of scalars.
+ */
+template<typename T>
+size_t get_exp_window_size(const size_t num_scalars);
+
+/**
+ * Compute table of window sizes.
+ */
+template<typename T>
+window_table<T> get_window_table(const size_t scalar_size,
+ const size_t window,
+ const T &g);
+
+template<typename T, typename FieldT>
+T windowed_exp(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &powers_of_g,
+ const FieldT &pow);
+
+template<typename T, typename FieldT>
+std::vector<T> batch_exp(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &table,
+ const std::vector<FieldT> &v);
+
+template<typename T, typename FieldT>
+std::vector<T> batch_exp_with_coeff(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &table,
+ const FieldT &coeff,
+ const std::vector<FieldT> &v);
+
+// defined in every curve
+template<typename T>
+void batch_to_special_all_non_zeros(std::vector<T> &vec);
+
+template<typename T>
+void batch_to_special(std::vector<T> &vec);
+
+} // libsnark
+
+#include "algebra/scalar_multiplication/multiexp.tcc"
+
+#endif // MULTIEXP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for multi-exponentiation routines.
+
+ See multiexp.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MULTIEXP_TCC_
+#define MULTIEXP_TCC_
+
+#include "algebra/fields/fp_aux.tcc"
+
+#include <algorithm>
+#include <cassert>
+#include <type_traits>
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "algebra/scalar_multiplication/wnaf.hpp"
+
+namespace libsnark {
+
+template<mp_size_t n>
+class ordered_exponent {
+// to use std::push_heap and friends later
+public:
+ size_t idx;
+ bigint<n> r;
+
+ ordered_exponent(const size_t idx, const bigint<n> &r) : idx(idx), r(r) {};
+
+ bool operator<(const ordered_exponent<n> &other) const
+ {
+#if defined(__x86_64__) && defined(USE_ASM)
+ if (n == 3)
+ {
+ long res;
+ __asm__
+ ("// check for overflow \n\t"
+ "mov $0, %[res] \n\t"
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+ "jmp done%= \n\t"
+ "subtract%=: \n\t"
+ "mov $1, %[res] \n\t"
+ "done%=: \n\t"
+ : [res] "=&r" (res)
+ : [A] "r" (other.r.data), [mod] "r" (this->r.data)
+ : "cc", "%rax");
+ return res;
+ }
+ else if (n == 4)
+ {
+ long res;
+ __asm__
+ ("// check for overflow \n\t"
+ "mov $0, %[res] \n\t"
+ ADD_CMP(24)
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+ "jmp done%= \n\t"
+ "subtract%=: \n\t"
+ "mov $1, %[res] \n\t"
+ "done%=: \n\t"
+ : [res] "=&r" (res)
+ : [A] "r" (other.r.data), [mod] "r" (this->r.data)
+ : "cc", "%rax");
+ return res;
+ }
+ else if (n == 5)
+ {
+ long res;
+ __asm__
+ ("// check for overflow \n\t"
+ "mov $0, %[res] \n\t"
+ ADD_CMP(32)
+ ADD_CMP(24)
+ ADD_CMP(16)
+ ADD_CMP(8)
+ ADD_CMP(0)
+ "jmp done%= \n\t"
+ "subtract%=: \n\t"
+ "mov $1, %[res] \n\t"
+ "done%=: \n\t"
+ : [res] "=&r" (res)
+ : [A] "r" (other.r.data), [mod] "r" (this->r.data)
+ : "cc", "%rax");
+ return res;
+ }
+ else
+#endif
+ {
+ return (mpn_cmp(this->r.data, other.r.data, n) < 0);
+ }
+ }
+};
+
+template<typename T, typename FieldT>
+T naive_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end)
+{
+ T result(T::zero());
+
+ typename std::vector<T>::const_iterator vec_it;
+ typename std::vector<FieldT>::const_iterator scalar_it;
+
+ for (vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it)
+ {
+ bigint<FieldT::num_limbs> scalar_bigint = scalar_it->as_bigint();
+ result = result + opt_window_wnaf_exp(*vec_it, scalar_bigint, scalar_bigint.num_bits());
+ }
+ assert(scalar_it == scalar_end);
+
+ return result;
+}
+
+template<typename T, typename FieldT>
+T naive_plain_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end)
+{
+ T result(T::zero());
+
+ typename std::vector<T>::const_iterator vec_it;
+ typename std::vector<FieldT>::const_iterator scalar_it;
+
+ for (vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it)
+ {
+ result = result + (*scalar_it) * (*vec_it);
+ }
+ assert(scalar_it == scalar_end);
+
+ return result;
+}
+
+/*
+ The multi-exponentiation algorithm below is a variant of the Bos-Coster algorithm
+ [Bos and Coster, "Addition chain heuristics", CRYPTO '89].
+ The implementation uses suggestions from
+ [Bernstein, Duif, Lange, Schwabe, and Yang, "High-speed high-security signatures", CHES '11].
+*/
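+/*
+ The key step rewrites x*A + y*B (with x >= y) as (x-y)*A + y*(A+B),
+ repeatedly reducing the largest remaining exponent. As an illustrative
+ example, 13*A + 5*B becomes 8*A + 5*(A+B), then 3*A + 5*(2*A+B), and so on,
+ until only one term is left, which is finished with a single wNAF
+ exponentiation.
+*/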
+template<typename T, typename FieldT>
+T multi_exp_inner(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end)
+{
+ const mp_size_t n = std::remove_reference<decltype(*scalar_start)>::type::num_limbs;
+
+ if (vec_start == vec_end)
+ {
+ return T::zero();
+ }
+
+ if (vec_start + 1 == vec_end)
+ {
+ return (*scalar_start)*(*vec_start);
+ }
+
+ std::vector<ordered_exponent<n> > opt_q;
+ const size_t vec_len = scalar_end - scalar_start;
+ const size_t odd_vec_len = (vec_len % 2 == 1 ? vec_len : vec_len + 1);
+ opt_q.reserve(odd_vec_len);
+ std::vector<T> g;
+ g.reserve(odd_vec_len);
+
+ typename std::vector<T>::const_iterator vec_it;
+ typename std::vector<FieldT>::const_iterator scalar_it;
+ size_t i;
+ for (i=0, vec_it = vec_start, scalar_it = scalar_start; vec_it != vec_end; ++vec_it, ++scalar_it, ++i)
+ {
+ g.emplace_back(*vec_it);
+
+ opt_q.emplace_back(ordered_exponent<n>(i, scalar_it->as_bigint()));
+ }
+ std::make_heap(opt_q.begin(),opt_q.end());
+ assert(scalar_it == scalar_end);
+
+ if (vec_len != odd_vec_len)
+ {
+ g.emplace_back(T::zero());
+ opt_q.emplace_back(ordered_exponent<n>(odd_vec_len - 1, bigint<n>(0ul)));
+ }
+ assert(g.size() % 2 == 1);
+ assert(opt_q.size() == g.size());
+
+ T opt_result = T::zero();
+
+ while (true)
+ {
+ ordered_exponent<n> &a = opt_q[0];
+ ordered_exponent<n> &b = (opt_q[1] < opt_q[2] ? opt_q[2] : opt_q[1]);
+
+ const size_t abits = a.r.num_bits();
+
+ if (b.r.is_zero())
+ {
+ // opt_result = opt_result + (a.r * g[a.idx]);
+ opt_result = opt_result + opt_window_wnaf_exp(g[a.idx], a.r, abits);
+ break;
+ }
+
+ const size_t bbits = b.r.num_bits();
+ const size_t limit = (abits-bbits >= 20 ? 20 : abits-bbits);
+
+ if (bbits < 1ul<<limit)
+ {
+ /*
+ In this case, exponentiating to the power of a is cheaper than
+ subtracting b from a multiple times, so let's do it directly
+ */
+ // opt_result = opt_result + (a.r * g[a.idx]);
+ opt_result = opt_result + opt_window_wnaf_exp(g[a.idx], a.r, abits);
+#ifdef DEBUG
+ printf("Skipping the following pair (%zu bit number vs %zu bit):\n", abits, bbits);
+ a.r.print();
+ b.r.print();
+#endif
+ a.r.clear();
+ }
+ else
+ {
+ // x A + y B => (x-y) A + y (B+A)
+ mpn_sub_n(a.r.data, a.r.data, b.r.data, n);
+ g[b.idx] = g[b.idx] + g[a.idx];
+ }
+
+ // regardless of whether a was cleared or had b subtracted from it, we push it down the heap and then sift it back up
+
+ /* heapify A down */
+ size_t a_pos = 0;
+ while (2*a_pos + 2 < odd_vec_len)
+ {
+ // this is a max-heap so to maintain a heap property we swap with the largest of the two
+ if (opt_q[2*a_pos+1] < opt_q[2*a_pos+2])
+ {
+ std::swap(opt_q[a_pos], opt_q[2*a_pos+2]);
+ a_pos = 2*a_pos+2;
+ }
+ else
+ {
+ std::swap(opt_q[a_pos], opt_q[2*a_pos+1]);
+ a_pos = 2*a_pos+1;
+ }
+ }
+
+ /* now heapify A up appropriate amount of times */
+ while (a_pos > 0 && opt_q[(a_pos-1)/2] < opt_q[a_pos])
+ {
+ std::swap(opt_q[a_pos], opt_q[(a_pos-1)/2]);
+ a_pos = (a_pos-1) / 2;
+ }
+ }
+
+ return opt_result;
+}
+
+template<typename T, typename FieldT>
+T multi_exp(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp)
+{
+ const size_t total = vec_end - vec_start;
+ if (total < chunks)
+ {
+ return naive_exp<T, FieldT>(vec_start, vec_end, scalar_start, scalar_end);
+ }
+
+ const size_t one = total/chunks;
+
+ std::vector<T> partial(chunks, T::zero());
+
+ if (use_multiexp)
+ {
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < chunks; ++i)
+ {
+ partial[i] = multi_exp_inner<T, FieldT>(vec_start + i*one,
+ (i == chunks-1 ? vec_end : vec_start + (i+1)*one),
+ scalar_start + i*one,
+ (i == chunks-1 ? scalar_end : scalar_start + (i+1)*one));
+ }
+ }
+ else
+ {
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < chunks; ++i)
+ {
+ partial[i] = naive_exp<T, FieldT>(vec_start + i*one,
+ (i == chunks-1 ? vec_end : vec_start + (i+1)*one),
+ scalar_start + i*one,
+ (i == chunks-1 ? scalar_end : scalar_start + (i+1)*one));
+ }
+ }
+
+ T final = T::zero();
+
+ for (size_t i = 0; i < chunks; ++i)
+ {
+ final = final + partial[i];
+ }
+
+ return final;
+}
+
+template<typename T, typename FieldT>
+T multi_exp_with_mixed_addition(typename std::vector<T>::const_iterator vec_start,
+ typename std::vector<T>::const_iterator vec_end,
+ typename std::vector<FieldT>::const_iterator scalar_start,
+ typename std::vector<FieldT>::const_iterator scalar_end,
+ const size_t chunks,
+ const bool use_multiexp)
+{
+ assert(std::distance(vec_start, vec_end) == std::distance(scalar_start, scalar_end));
+ enter_block("Process scalar vector");
+ auto value_it = vec_start;
+ auto scalar_it = scalar_start;
+
+ const FieldT zero = FieldT::zero();
+ const FieldT one = FieldT::one();
+ std::vector<FieldT> p;
+ std::vector<T> g;
+
+ T acc = T::zero();
+
+ size_t num_skip = 0;
+ size_t num_add = 0;
+ size_t num_other = 0;
+
+ for (; scalar_it != scalar_end; ++scalar_it, ++value_it)
+ {
+ if (*scalar_it == zero)
+ {
+ // do nothing
+ ++num_skip;
+ }
+ else if (*scalar_it == one)
+ {
+#ifdef USE_MIXED_ADDITION
+ acc = acc.mixed_add(*value_it);
+#else
+ acc = acc + (*value_it);
+#endif
+ ++num_add;
+ }
+ else
+ {
+ p.emplace_back(*scalar_it);
+ g.emplace_back(*value_it);
+ ++num_other;
+ }
+ }
+ //print_indent(); printf("* Elements of w skipped: %zu (%0.2f%%)\n", num_skip, 100.*num_skip/(num_skip+num_add+num_other));
+ //print_indent(); printf("* Elements of w processed with special addition: %zu (%0.2f%%)\n", num_add, 100.*num_add/(num_skip+num_add+num_other));
+ //print_indent(); printf("* Elements of w remaining: %zu (%0.2f%%)\n", num_other, 100.*num_other/(num_skip+num_add+num_other));
+
+ leave_block("Process scalar vector");
+
+ return acc + multi_exp<T, FieldT>(g.begin(), g.end(), p.begin(), p.end(), chunks, use_multiexp);
+}
+
+template<typename T>
+size_t get_exp_window_size(const size_t num_scalars)
+{
+ if (T::fixed_base_exp_window_table.empty())
+ {
+#ifdef LOWMEM
+ return 14;
+#else
+ return 17;
+#endif
+ }
+ size_t window = 1;
+ for (long i = T::fixed_base_exp_window_table.size()-1; i >= 0; --i)
+ {
+#ifdef DEBUG
+ if (!inhibit_profiling_info)
+ {
+ printf("%ld %zu %zu\n", i, num_scalars, T::fixed_base_exp_window_table[i]);
+ }
+#endif
+ if (T::fixed_base_exp_window_table[i] != 0 && num_scalars >= T::fixed_base_exp_window_table[i])
+ {
+ window = i+1;
+ break;
+ }
+ }
+
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("Choosing window size %zu for %zu elements\n", window, num_scalars);
+ }
+
+#ifdef LOWMEM
+ window = std::min((size_t)14, window);
+#endif
+ return window;
+}
+
+template<typename T>
+window_table<T> get_window_table(const size_t scalar_size,
+ const size_t window,
+ const T &g)
+{
+ const size_t in_window = 1ul<<window;
+ const size_t outerc = (scalar_size+window-1)/window;
+ const size_t last_in_window = 1ul<<(scalar_size - (outerc-1)*window);
+#ifdef DEBUG
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* scalar_size=%zu; window=%zu; in_window=%zu; outerc=%zu\n", scalar_size, window, in_window, outerc);
+ }
+#endif
+
+ window_table<T> powers_of_g(outerc, std::vector<T>(in_window, T::zero()));
+
+ T gouter = g;
+
+ for (size_t outer = 0; outer < outerc; ++outer)
+ {
+ T ginner = T::zero();
+ size_t cur_in_window = outer == outerc-1 ? last_in_window : in_window;
+ for (size_t inner = 0; inner < cur_in_window; ++inner)
+ {
+ powers_of_g[outer][inner] = ginner;
+ ginner = ginner + gouter;
+ }
+
+ for (size_t i = 0; i < window; ++i)
+ {
+ gouter = gouter + gouter;
+ }
+ }
+
+ return powers_of_g;
+}
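+
+/*
+ Invariant of the table returned above: powers_of_g[outer][inner] equals
+ inner * 2^(outer*window) * g (the last row is truncated to last_in_window
+ entries), so an exponentiation only needs to add one table entry per window.
+*/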
+
+template<typename T, typename FieldT>
+T windowed_exp(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &powers_of_g,
+ const FieldT &pow)
+{
+ const size_t outerc = (scalar_size+window-1)/window;
+ const bigint<FieldT::num_limbs> pow_val = pow.as_bigint();
+
+ /* exp */
+ T res = powers_of_g[0][0];
+
+ for (size_t outer = 0; outer < outerc; ++outer)
+ {
+ size_t inner = 0;
+ for (size_t i = 0; i < window; ++i)
+ {
+ if (pow_val.test_bit(outer*window + i))
+ {
+ inner |= 1u << i;
+ }
+ }
+
+ res = res + powers_of_g[outer][inner];
+ }
+
+ return res;
+}
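+
+/*
+ Illustrative example (assuming scalar_size = 6 and window = 2): for
+ pow = 13 = 0b001101 the three windows, least significant first, are
+ 1, 3 and 0, so the loop effectively returns
+     powers_of_g[0][1] + powers_of_g[1][3] + powers_of_g[2][0]
+ == (1*1 + 3*4 + 0*16) * g == 13 * g.
+*/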
+
+template<typename T, typename FieldT>
+std::vector<T> batch_exp(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &table,
+ const std::vector<FieldT> &v)
+{
+ if (!inhibit_profiling_info)
+ {
+ print_indent();
+ }
+ std::vector<T> res(v.size(), table[0][0]);
+
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ res[i] = windowed_exp(scalar_size, window, table, v[i]);
+
+ if (!inhibit_profiling_info && (i % 10000 == 0))
+ {
+ printf(".");
+ fflush(stdout);
+ }
+ }
+
+ if (!inhibit_profiling_info)
+ {
+ printf(" DONE!\n");
+ }
+
+ return res;
+}
+
+template<typename T, typename FieldT>
+std::vector<T> batch_exp_with_coeff(const size_t scalar_size,
+ const size_t window,
+ const window_table<T> &table,
+ const FieldT &coeff,
+ const std::vector<FieldT> &v)
+{
+ if (!inhibit_profiling_info)
+ {
+ print_indent();
+ }
+ std::vector<T> res(v.size(), table[0][0]);
+
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ res[i] = windowed_exp(scalar_size, window, table, coeff * v[i]);
+
+ if (!inhibit_profiling_info && (i % 10000 == 0))
+ {
+ printf(".");
+ fflush(stdout);
+ }
+ }
+
+ if (!inhibit_profiling_info)
+ {
+ printf(" DONE!\n");
+ }
+
+ return res;
+}
+
+template<typename T>
+void batch_to_special(std::vector<T> &vec)
+{
+ enter_block("Batch-convert elements to special form");
+
+ std::vector<T> non_zero_vec;
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].is_zero())
+ {
+ non_zero_vec.emplace_back(vec[i]);
+ }
+ }
+
+ batch_to_special_all_non_zeros<T>(non_zero_vec);
+ auto it = non_zero_vec.begin();
+ T zero_special = T::zero();
+ zero_special.to_special();
+
+ for (size_t i = 0; i < vec.size(); ++i)
+ {
+ if (!vec[i].is_zero())
+ {
+ vec[i] = *it;
+ ++it;
+ }
+ else
+ {
+ vec[i] = zero_special;
+ }
+ }
+ leave_block("Batch-convert elements to special form");
+}
+
+} // libsnark
+
+#endif // MULTIEXP_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for wNAF ("width-w Non-Adjacent Form") exponentiation routines.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef WNAF_HPP_
+#define WNAF_HPP_
+
+namespace libsnark {
+
+/**
+ * Find the wNAF representation of the given scalar relative to the given window size.
+ */
+template<mp_size_t n>
+std::vector<long> find_wnaf(const size_t window_size, const bigint<n> &scalar);
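+
+/*
+ Illustrative example: for the scalar 7 with window_size = 2, the digit
+ vector is [-1, 0, 0, 1] (least significant digit first), since
+ 7 == -1 + 0*2 + 0*4 + 1*8; every non-zero digit is odd and has absolute
+ value less than 2^window_size.
+*/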
+
+/**
+ * In additive notation, use wNAF exponentiation (with the given window size) to compute scalar * base.
+ */
+template<typename T, mp_size_t n>
+T fixed_window_wnaf_exp(const size_t window_size, const T &base, const bigint<n> &scalar);
+
+/**
+ * In additive notation, use wNAF exponentiation (with the window size determined by T) to compute scalar * base.
+ */
+template<typename T, mp_size_t n>
+T opt_window_wnaf_exp(const T &base, const bigint<n> &scalar, const size_t scalar_bits);
+
+} // libsnark
+
+#include "algebra/scalar_multiplication/wnaf.tcc"
+
+#endif // WNAF_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for wNAF ("width-w Non-Adjacent Form") exponentiation routines.
+
+ See wnaf.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef WNAF_TCC_
+#define WNAF_TCC_
+
+namespace libsnark {
+
+template<mp_size_t n>
+std::vector<long> find_wnaf(const size_t window_size, const bigint<n> &scalar)
+{
+ const size_t length = scalar.max_bits(); // upper bound
+ std::vector<long> res(length+1);
+ bigint<n> c = scalar;
+ long j = 0;
+ while (!c.is_zero())
+ {
+ long u;
+ if ((c.data[0] & 1) == 1)
+ {
+ u = c.data[0] % (1u << (window_size+1));
+ if (u > (1 << window_size))
+ {
+ u = u - (1 << (window_size+1));
+ }
+
+ if (u > 0)
+ {
+ mpn_sub_1(c.data, c.data, n, u);
+ }
+ else
+ {
+ mpn_add_1(c.data, c.data, n, -u);
+ }
+ }
+ else
+ {
+ u = 0;
+ }
+ res[j] = u;
+ ++j;
+
+ mpn_rshift(c.data, c.data, n, 1); // c = c/2
+ }
+
+ return res;
+}
+
+template<typename T, mp_size_t n>
+T fixed_window_wnaf_exp(const size_t window_size, const T &base, const bigint<n> &scalar)
+{
+ std::vector<long> naf = find_wnaf(window_size, scalar);
+ std::vector<T> table(1ul<<(window_size-1));
+ T tmp = base;
+ T dbl = base.dbl();
+ for (size_t i = 0; i < 1ul<<(window_size-1); ++i)
+ {
+ table[i] = tmp;
+ tmp = tmp + dbl;
+ }
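+ /* at this point table[i] == (2*i + 1) * base, i.e., the odd multiples
+ base, 3*base, ..., (2^window_size - 1)*base; a signed odd wNAF digit d
+ below is handled via table[|d|/2]. */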
+
+ T res = T::zero();
+ bool found_nonzero = false;
+ for (long i = naf.size()-1; i >= 0; --i)
+ {
+ if (found_nonzero)
+ {
+ res = res.dbl();
+ }
+
+ if (naf[i] != 0)
+ {
+ found_nonzero = true;
+ if (naf[i] > 0)
+ {
+ res = res + table[naf[i]/2];
+ }
+ else
+ {
+ res = res - table[(-naf[i])/2];
+ }
+ }
+ }
+
+ return res;
+}
+
+template<typename T, mp_size_t n>
+T opt_window_wnaf_exp(const T &base, const bigint<n> &scalar, const size_t scalar_bits)
+{
+ size_t best = 0;
+ for (long i = T::wnaf_window_table.size() - 1; i >= 0; --i)
+ {
+ if (scalar_bits >= T::wnaf_window_table[i])
+ {
+ best = i+1;
+ break;
+ }
+ }
+
+ if (best > 0)
+ {
+ return fixed_window_wnaf_exp(best, base, scalar);
+ }
+ else
+ {
+ return scalar * base;
+ }
+}
+
+} // libsnark
+
+#endif // WNAF_TCC_
--- /dev/null
+#ifndef ASSERT_except_H
+#define ASSERT_except_H
+
+#include <stdexcept>
+
+inline void assert_except(bool condition) {
+ if (!condition) {
+ throw std::runtime_error("Assertion failed.");
+ }
+}
+
+#endif
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for an accumulation vector.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ACCUMULATION_VECTOR_HPP_
+#define ACCUMULATION_VECTOR_HPP_
+
+#include "common/data_structures/sparse_vector.hpp"
+
+namespace libsnark {
+
+template<typename T>
+class accumulation_vector;
+
+template<typename T>
+std::ostream& operator<<(std::ostream &out, const accumulation_vector<T> &v);
+
+template<typename T>
+std::istream& operator>>(std::istream &in, accumulation_vector<T> &v);
+
+/**
+ * An accumulation vector comprises an accumulation value and a sparse vector.
+ * The method "accumulate_chunk" allows one to accumulate portions of the sparse
+ * vector into the accumulation value.
+ */
+template<typename T>
+class accumulation_vector {
+public:
+ T first;
+ sparse_vector<T> rest;
+
+ accumulation_vector() = default;
+ accumulation_vector(const accumulation_vector<T> &other) = default;
+ accumulation_vector(accumulation_vector<T> &&other) = default;
+ accumulation_vector(T &&first, sparse_vector<T> &&rest) : first(std::move(first)), rest(std::move(rest)) {};
+ accumulation_vector(T &&first, std::vector<T> &&v) : first(std::move(first)), rest(std::move(v)) {}
+ accumulation_vector(std::vector<T> &&v) : first(T::zero()), rest(std::move(v)) {};
+
+ accumulation_vector<T>& operator=(const accumulation_vector<T> &other) = default;
+ accumulation_vector<T>& operator=(accumulation_vector<T> &&other) = default;
+
+ bool operator==(const accumulation_vector<T> &other) const;
+
+ bool is_fully_accumulated() const;
+
+ size_t domain_size() const;
+ size_t size() const;
+ size_t size_in_bits() const;
+
+ template<typename FieldT>
+ accumulation_vector<T> accumulate_chunk(const typename std::vector<FieldT>::const_iterator &it_begin,
+ const typename std::vector<FieldT>::const_iterator &it_end,
+ const size_t offset) const;
+
+};
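+
+/*
+ Illustrative sketch of the semantics (coeffs and offset are hypothetical
+ caller-side names): if acc has accumulation value F, then
+
+     acc.accumulate_chunk<FieldT>(coeffs.begin(), coeffs.end(), offset)
+
+ returns an accumulation vector whose first value is
+ F + sum_i coeffs[i] * rest[offset + i] (missing sparse entries count as
+ zero), and whose rest holds the entries of acc.rest outside the chunk.
+*/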
+
+template<typename T>
+std::ostream& operator<<(std::ostream &out, const accumulation_vector<T> &v);
+
+template<typename T>
+std::istream& operator>>(std::istream &in, accumulation_vector<T> &v);
+
+} // libsnark
+
+#include "common/data_structures/accumulation_vector.tcc"
+
+#endif // ACCUMULATION_VECTOR_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for an accumulation vector.
+
+ See accumulation_vector.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef ACCUMULATION_VECTOR_TCC_
+#define ACCUMULATION_VECTOR_TCC_
+
+namespace libsnark {
+
+template<typename T>
+bool accumulation_vector<T>::operator==(const accumulation_vector<T> &other) const
+{
+ return (this->first == other.first && this->rest == other.rest);
+}
+
+template<typename T>
+bool accumulation_vector<T>::is_fully_accumulated() const
+{
+ return rest.empty();
+}
+
+template<typename T>
+size_t accumulation_vector<T>::domain_size() const
+{
+ return rest.domain_size();
+}
+
+template<typename T>
+size_t accumulation_vector<T>::size() const
+{
+ return rest.domain_size();
+}
+
+template<typename T>
+size_t accumulation_vector<T>::size_in_bits() const
+{
+ const size_t first_size_in_bits = T::size_in_bits();
+ const size_t rest_size_in_bits = rest.size_in_bits();
+ return first_size_in_bits + rest_size_in_bits;
+}
+
+template<typename T>
+template<typename FieldT>
+accumulation_vector<T> accumulation_vector<T>::accumulate_chunk(const typename std::vector<FieldT>::const_iterator &it_begin,
+ const typename std::vector<FieldT>::const_iterator &it_end,
+ const size_t offset) const
+{
+ std::pair<T, sparse_vector<T> > acc_result = rest.template accumulate<FieldT>(it_begin, it_end, offset);
+ T new_first = first + acc_result.first;
+ return accumulation_vector<T>(std::move(new_first), std::move(acc_result.second));
+}
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const accumulation_vector<T> &v)
+{
+ out << v.first << OUTPUT_NEWLINE;
+ out << v.rest << OUTPUT_NEWLINE;
+
+ return out;
+}
+
+template<typename T>
+std::istream& operator>>(std::istream& in, accumulation_vector<T> &v)
+{
+ in >> v.first;
+ consume_OUTPUT_NEWLINE(in);
+ in >> v.rest;
+ consume_OUTPUT_NEWLINE(in);
+
+ return in;
+}
+
+} // libsnark
+
+#endif // ACCUMULATION_VECTOR_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a Merkle tree.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_HPP_
+#define MERKLE_TREE_HPP_
+
+#include <map>
+#include <vector>
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+/**
+ * A Merkle tree is maintained as two maps:
+ * - a map from addresses to values, and
+ * - a map from addresses to hashes.
+ *
+ * The second map maintains the intermediate hashes of a Merkle tree
+ * built atop the values currently stored in the tree (the
+ * implementation efficiently supports sparse trees). Besides
+ * offering methods to load and store values, the class offers
+ * methods to retrieve the root of the Merkle tree and to obtain the
+ * authentication path for (the value at) a given address.
+ */
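+
+/*
+ The implementation (see merkle_tree.tcc) lays the tree of depth d out as a
+ flat array of node indices: node 0 is the root, node idx has children
+ 2*idx+1 and 2*idx+2 and parent (idx-1)/2, and the leaf for address a has
+ index a + 2^d - 1. For example, with d = 2 the leaves for addresses 0..3
+ occupy indices 3..6.
+*/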
+
+typedef bit_vector merkle_authentication_node;
+typedef std::vector<merkle_authentication_node> merkle_authentication_path;
+
+template<typename HashT>
+class merkle_tree {
+private:
+
+ typedef typename HashT::hash_value_type hash_value_type;
+ typedef typename HashT::merkle_authentication_path_type merkle_authentication_path_type;
+
+public:
+
+ std::vector<hash_value_type> hash_defaults;
+ std::map<size_t, bit_vector> values;
+ std::map<size_t, hash_value_type> hashes;
+
+ size_t depth;
+ size_t value_size;
+ size_t digest_size;
+
+ merkle_tree(const size_t depth, const size_t value_size);
+ merkle_tree(const size_t depth, const size_t value_size, const std::vector<bit_vector> &contents_as_vector);
+ merkle_tree(const size_t depth, const size_t value_size, const std::map<size_t, bit_vector> &contents);
+
+ bit_vector get_value(const size_t address) const;
+ void set_value(const size_t address, const bit_vector &value);
+
+ hash_value_type get_root() const;
+ merkle_authentication_path_type get_path(const size_t address) const;
+
+ void dump() const;
+};
+
+} // libsnark
+
+#include "common/data_structures/merkle_tree.tcc"
+
+#endif // MERKLE_TREE_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for Merkle tree.
+
+ See merkle_tree.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_TCC
+#define MERKLE_TREE_TCC
+
+#include <algorithm>
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename HashT>
+typename HashT::hash_value_type two_to_one_CRH(const typename HashT::hash_value_type &l,
+ const typename HashT::hash_value_type &r)
+{
+ typename HashT::hash_value_type new_input;
+ new_input.insert(new_input.end(), l.begin(), l.end());
+ new_input.insert(new_input.end(), r.begin(), r.end());
+
+ const size_t digest_size = HashT::get_digest_len();
+ assert(l.size() == digest_size);
+ assert(r.size() == digest_size);
+
+ return HashT::get_hash(new_input);
+}
+
+template<typename HashT>
+merkle_tree<HashT>::merkle_tree(const size_t depth, const size_t value_size) :
+ depth(depth), value_size(value_size)
+{
+ assert(depth < sizeof(size_t) * 8);
+
+ digest_size = HashT::get_digest_len();
+ assert(value_size <= digest_size);
+
+ hash_value_type last(digest_size);
+ hash_defaults.reserve(depth+1);
+ hash_defaults.emplace_back(last);
+ for (size_t i = 0; i < depth; ++i)
+ {
+ last = two_to_one_CRH<HashT>(last, last);
+ hash_defaults.emplace_back(last);
+ }
+
+ std::reverse(hash_defaults.begin(), hash_defaults.end());
+}
+
+template<typename HashT>
+merkle_tree<HashT>::merkle_tree(const size_t depth,
+ const size_t value_size,
+ const std::vector<bit_vector> &contents_as_vector) :
+ merkle_tree<HashT>(depth, value_size)
+{
+ assert(log2(contents_as_vector.size()) <= depth);
+ for (size_t address = 0; address < contents_as_vector.size(); ++address)
+ {
+ const size_t idx = address + (1ul<<depth) - 1;
+ values[idx] = contents_as_vector[address];
+ hashes[idx] = contents_as_vector[address];
+ hashes[idx].resize(digest_size);
+ }
+
+ size_t idx_begin = (1ul<<depth) - 1;
+ size_t idx_end = contents_as_vector.size() + ((1ul<<depth) - 1);
+
+ for (int layer = depth; layer > 0; --layer)
+ {
+ for (size_t idx = idx_begin; idx < idx_end; idx += 2)
+ {
+ hash_value_type l = hashes[idx]; // this is sound, because idx_begin is always a left child
+ hash_value_type r = (idx + 1 < idx_end ? hashes[idx+1] : hash_defaults[layer]);
+
+ hash_value_type h = two_to_one_CRH<HashT>(l, r);
+ hashes[(idx-1)/2] = h;
+ }
+
+ idx_begin = (idx_begin-1)/2;
+ idx_end = (idx_end-1)/2;
+ }
+}
+
+template<typename HashT>
+merkle_tree<HashT>::merkle_tree(const size_t depth,
+ const size_t value_size,
+ const std::map<size_t, bit_vector> &contents) :
+ merkle_tree<HashT>(depth, value_size)
+{
+
+ if (!contents.empty())
+ {
+ assert(contents.rbegin()->first < 1ul<<depth);
+
+ for (auto it = contents.begin(); it != contents.end(); ++it)
+ {
+ const size_t address = it->first;
+ const bit_vector value = it->second;
+ const size_t idx = address + (1ul<<depth) - 1;
+
+ values[address] = value;
+ hashes[idx] = value;
+ hashes[idx].resize(digest_size);
+ }
+
+ auto last_it = hashes.end();
+
+ for (int layer = depth; layer > 0; --layer)
+ {
+ auto next_last_it = hashes.begin();
+
+ for (auto it = hashes.begin(); it != last_it; ++it)
+ {
+ const size_t idx = it->first;
+ const hash_value_type hash = it->second;
+
+ if (idx % 2 == 0)
+ {
+ // this is the right child of its parent and by invariant we are missing the left child
+ hashes[(idx-1)/2] = two_to_one_CRH<HashT>(hash_defaults[layer], hash);
+ }
+ else
+ {
+ if (std::next(it) == last_it || std::next(it)->first != idx + 1)
+ {
+ // this is the left child of its parent and is missing its right child
+ hashes[(idx-1)/2] = two_to_one_CRH<HashT>(hash, hash_defaults[layer]);
+ }
+ else
+ {
+ // typical case: this is the left child of its parent and adjacent to it there is a right child
+ hashes[(idx-1)/2] = two_to_one_CRH<HashT>(hash, std::next(it)->second);
+ ++it;
+ }
+ }
+ }
+
+ last_it = next_last_it;
+ }
+ }
+}
+
+template<typename HashT>
+bit_vector merkle_tree<HashT>::get_value(const size_t address) const
+{
+ assert(log2(address) <= depth);
+
+ auto it = values.find(address);
+ bit_vector padded_result = (it == values.end() ? bit_vector(digest_size) : it->second);
+ padded_result.resize(value_size);
+
+ return padded_result;
+}
+
+template<typename HashT>
+void merkle_tree<HashT>::set_value(const size_t address,
+ const bit_vector &value)
+{
+ assert(log2(address) <= depth);
+ size_t idx = address + (1ul<<depth) - 1;
+
+ assert(value.size() == value_size);
+ values[address] = value;
+ hashes[idx] = value;
+ hashes[idx].resize(digest_size);
+
+ for (int layer = depth-1; layer >=0; --layer)
+ {
+ idx = (idx-1)/2;
+
+ auto it = hashes.find(2*idx+1);
+ hash_value_type l = (it == hashes.end() ? hash_defaults[layer+1] : it->second);
+
+ it = hashes.find(2*idx+2);
+ hash_value_type r = (it == hashes.end() ? hash_defaults[layer+1] : it->second);
+
+ hash_value_type h = two_to_one_CRH<HashT>(l, r);
+ hashes[idx] = h;
+ }
+}
+
+template<typename HashT>
+typename HashT::hash_value_type merkle_tree<HashT>::get_root() const
+{
+ auto it = hashes.find(0);
+ return (it == hashes.end() ? hash_defaults[0] : it->second);
+}
+
+template<typename HashT>
+typename HashT::merkle_authentication_path_type merkle_tree<HashT>::get_path(const size_t address) const
+{
+ typename HashT::merkle_authentication_path_type result(depth);
+ assert(log2(address) <= depth);
+ size_t idx = address + (1ul<<depth) - 1;
+
+ for (size_t layer = depth; layer > 0; --layer)
+ {
+ size_t sibling_idx = ((idx + 1) ^ 1) - 1;
+ auto it = hashes.find(sibling_idx);
+ if (layer == depth)
+ {
+ auto it2 = values.find(sibling_idx - ((1ul<<depth) - 1));
+ result[layer-1] = (it2 == values.end() ? bit_vector(value_size, false) : it2->second);
+ result[layer-1].resize(digest_size);
+ }
+ else
+ {
+ result[layer-1] = (it == hashes.end() ? hash_defaults[layer] : it->second);
+ }
+
+ idx = (idx-1)/2;
+ }
+
+ return result;
+}
+
+template<typename HashT>
+void merkle_tree<HashT>::dump() const
+{
+ for (size_t i = 0; i < 1ul<<depth; ++i)
+ {
+ auto it = values.find(i);
+ printf("[%zu] -> ", i);
+ const bit_vector value = (it == values.end() ? bit_vector(value_size) : it->second);
+ for (bool b : value)
+ {
+ printf("%d", b ? 1 : 0);
+ }
+ printf("\n");
+ }
+ printf("\n");
+}
+
+} // libsnark
+
+#endif // MERKLE_TREE_TCC
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a sparse vector.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SPARSE_VECTOR_HPP_
+#define SPARSE_VECTOR_HPP_
+
+#include <vector>
+
+namespace libsnark {
+
+template<typename T>
+struct sparse_vector;
+
+template<typename T>
+std::ostream& operator<<(std::ostream &out, const sparse_vector<T> &v);
+
+template<typename T>
+std::istream& operator>>(std::istream &in, sparse_vector<T> &v);
+
+/**
+ * A sparse vector is a list of indices along with corresponding values.
+ * The indices are selected from the set {0,1,...,domain_size-1}.
+ */
+template<typename T>
+struct sparse_vector {
+
+ std::vector<size_t> indices;
+ std::vector<T> values;
+ size_t domain_size_ = 0;
+
+ sparse_vector() = default;
+ sparse_vector(const sparse_vector<T> &other) = default;
+ sparse_vector(sparse_vector<T> &&other) = default;
+ sparse_vector(std::vector<T> &&v); /* constructor from std::vector */
+
+ sparse_vector<T>& operator=(const sparse_vector<T> &other) = default;
+ sparse_vector<T>& operator=(sparse_vector<T> &&other) = default;
+
+ T operator[](const size_t idx) const;
+
+ bool operator==(const sparse_vector<T> &other) const;
+ bool operator==(const std::vector<T> &other) const;
+
+ bool is_valid() const;
+ bool empty() const;
+
+ size_t domain_size() const; // return domain_size_
+ size_t size() const; // return the number of indices (representing the number of non-zero entries)
+ size_t size_in_bits() const; // return the number of bits needed to store the sparse vector
+
+ /* return a pair consisting of the accumulated value and the sparse vector of non-accumulated values */
+ template<typename FieldT>
+ std::pair<T, sparse_vector<T> > accumulate(const typename std::vector<FieldT>::const_iterator &it_begin,
+ const typename std::vector<FieldT>::const_iterator &it_end,
+ const size_t offset) const;
+
+ friend std::ostream& operator<< <T>(std::ostream &out, const sparse_vector<T> &v);
+ friend std::istream& operator>> <T>(std::istream &in, sparse_vector<T> &v);
+};
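+
+/* Usage sketch (an illustration, not part of the interface): constructing from a dense
+ std::vector yields a sparse vector in which every index is present, while operator[]
+ returns a default-constructed T() for absent indices. G1<default_ec_pp> is just an
+ example choice of T:
+
+ std::vector<G1<default_ec_pp> > dense = ...; // some dense data
+ sparse_vector<G1<default_ec_pp> > sv(std::move(dense)); // indices 0..n-1, domain_size() == n
+ const G1<default_ec_pp> x = sv[3]; // values[3] here; T() if index 3 were absent
+*/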
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const sparse_vector<T> &v);
+
+template<typename T>
+std::istream& operator>>(std::istream& in, sparse_vector<T> &v);
+
+} // libsnark
+
+#include "common/data_structures/sparse_vector.tcc"
+
+#endif // SPARSE_VECTOR_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for a sparse vector.
+
+ See sparse_vector.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SPARSE_VECTOR_TCC_
+#define SPARSE_VECTOR_TCC_
+
+#include "algebra/scalar_multiplication/multiexp.hpp"
+
+#include <numeric>
+
+namespace libsnark {
+
+template<typename T>
+sparse_vector<T>::sparse_vector(std::vector<T> &&v) :
+ values(std::move(v)), domain_size_(values.size())
+{
+ indices.resize(domain_size_);
+ std::iota(indices.begin(), indices.end(), 0);
+}
+
+template<typename T>
+T sparse_vector<T>::operator[](const size_t idx) const
+{
+ auto it = std::lower_bound(indices.begin(), indices.end(), idx);
+ return (it != indices.end() && *it == idx) ? values[it - indices.begin()] : T();
+}
+
+template<typename T>
+bool sparse_vector<T>::operator==(const sparse_vector<T> &other) const
+{
+ if (this->domain_size_ != other.domain_size_)
+ {
+ return false;
+ }
+
+ size_t this_pos = 0, other_pos = 0;
+ while (this_pos < this->indices.size() && other_pos < other.indices.size())
+ {
+ if (this->indices[this_pos] == other.indices[other_pos])
+ {
+ if (this->values[this_pos] != other.values[other_pos])
+ {
+ return false;
+ }
+ ++this_pos;
+ ++other_pos;
+ }
+ else if (this->indices[this_pos] < other.indices[other_pos])
+ {
+ if (!this->values[this_pos].is_zero())
+ {
+ return false;
+ }
+ ++this_pos;
+ }
+ else
+ {
+ if (!other.values[other_pos].is_zero())
+ {
+ return false;
+ }
+ ++other_pos;
+ }
+ }
+
+ /* at least one of the vectors has been exhausted; any remaining entries of the other must be zero */
+ while (this_pos < this->indices.size())
+ {
+ if (!this->values[this_pos].is_zero())
+ {
+ return false;
+ }
+ ++this_pos;
+ }
+
+ while (other_pos < other.indices.size())
+ {
+ if (!other.values[other_pos].is_zero())
+ {
+ return false;
+ }
+ ++other_pos;
+ }
+
+ return true;
+}
+
+template<typename T>
+bool sparse_vector<T>::operator==(const std::vector<T> &other) const
+{
+ if (this->domain_size_ < other.size())
+ {
+ return false;
+ }
+
+ size_t j = 0;
+ for (size_t i = 0; i < other.size(); ++i)
+ {
+ if (j < this->indices.size() && this->indices[j] == i)
+ {
+ if (this->values[j] != other[i])
+ {
+ return false;
+ }
+ ++j;
+ }
+ else
+ {
+ if (!other[i].is_zero())
+ {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+template<typename T>
+bool sparse_vector<T>::is_valid() const
+{
+ if (values.size() != indices.size() || values.size() > domain_size_)
+ {
+ return false;
+ }
+
+ for (size_t i = 0; i + 1 < indices.size(); ++i)
+ {
+ if (indices[i] >= indices[i+1])
+ {
+ return false;
+ }
+ }
+
+ if (!indices.empty() && indices[indices.size()-1] >= domain_size_)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+template<typename T>
+bool sparse_vector<T>::empty() const
+{
+ return indices.empty();
+}
+
+template<typename T>
+size_t sparse_vector<T>::domain_size() const
+{
+ return domain_size_;
+}
+
+template<typename T>
+size_t sparse_vector<T>::size() const
+{
+ return indices.size();
+}
+
+template<typename T>
+size_t sparse_vector<T>::size_in_bits() const
+{
+ return indices.size() * (sizeof(size_t) * 8 + T::size_in_bits());
+}
+
+template<typename T>
+template<typename FieldT>
+std::pair<T, sparse_vector<T> > sparse_vector<T>::accumulate(const typename std::vector<FieldT>::const_iterator &it_begin,
+ const typename std::vector<FieldT>::const_iterator &it_end,
+ const size_t offset) const
+{
+ // TODO: does not really belong here.
+ const size_t chunks = 1;
+ const bool use_multiexp = true;
+
+ T accumulated_value = T::zero();
+ sparse_vector<T> resulting_vector;
+ resulting_vector.domain_size_ = domain_size_;
+
+ const size_t range_len = it_end - it_begin;
+ bool in_block = false;
+ size_t first_pos = -1, last_pos = -1; // g++ -flto emits an uninitialized warning, even though in_block guards against such cases.
+
+ for (size_t i = 0; i < indices.size(); ++i)
+ {
+ const bool matching_pos = (offset <= indices[i] && indices[i] < offset + range_len);
+ // printf("i = %zu, pos[i] = %zu, offset = %zu, w_size = %zu\n", i, indices[i], offset, w_size);
+ bool copy_over;
+
+ if (in_block)
+ {
+ if (matching_pos && last_pos == i-1)
+ {
+ // block can be extended, do it
+ last_pos = i;
+ copy_over = false;
+ }
+ else
+ {
+ // block has ended here
+ in_block = false;
+ copy_over = true;
+
+#ifdef DEBUG
+ print_indent(); printf("doing multiexp for w_%zu ... w_%zu\n", indices[first_pos], indices[last_pos]);
+#endif
+ accumulated_value = accumulated_value + multi_exp<T, FieldT>(values.begin() + first_pos,
+ values.begin() + last_pos + 1,
+ it_begin + (indices[first_pos] - offset),
+ it_begin + (indices[last_pos] - offset) + 1,
+ chunks, use_multiexp);
+ }
+ }
+ else
+ {
+ if (matching_pos)
+ {
+ // block can be started
+ first_pos = i;
+ last_pos = i;
+ in_block = true;
+ copy_over = false;
+ }
+ else
+ {
+ copy_over = true;
+ }
+ }
+
+ if (copy_over)
+ {
+ resulting_vector.indices.emplace_back(indices[i]);
+ resulting_vector.values.emplace_back(values[i]);
+ }
+ }
+
+ if (in_block)
+ {
+#ifdef DEBUG
+ print_indent(); printf("doing multiexp for w_%zu ... w_%zu\n", indices[first_pos], indices[last_pos]);
+#endif
+ accumulated_value = accumulated_value + multi_exp<T, FieldT>(values.begin() + first_pos,
+ values.begin() + last_pos + 1,
+ it_begin + (indices[first_pos] - offset),
+ it_begin + (indices[last_pos] - offset) + 1,
+ chunks, use_multiexp);
+ }
+
+ return std::make_pair(accumulated_value, resulting_vector);
+}
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const sparse_vector<T> &v)
+{
+ out << v.domain_size_ << "\n";
+ out << v.indices.size() << "\n";
+ for (const size_t& i : v.indices)
+ {
+ out << i << "\n";
+ }
+
+ out << v.values.size() << "\n";
+ for (const T& t : v.values)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<typename T>
+std::istream& operator>>(std::istream& in, sparse_vector<T> &v)
+{
+ in >> v.domain_size_;
+ consume_newline(in);
+
+ size_t s;
+ in >> s;
+ consume_newline(in);
+ v.indices.resize(s);
+ for (size_t i = 0; i < s; ++i)
+ {
+ in >> v.indices[i];
+ consume_newline(in);
+ }
+
+ v.values.clear();
+ in >> s;
+ consume_newline(in);
+ v.values.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ T t;
+ in >> t;
+ consume_OUTPUT_NEWLINE(in);
+ v.values.emplace_back(t);
+ }
+
+ return in;
+}
+
+} // libsnark
+
+#endif // SPARSE_VECTOR_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ This file defines default_ec_pp based on the CURVE=... make flag, which selects
+ which elliptic curve is used to implement group arithmetic and pairings.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef EC_PP_HPP_
+#define EC_PP_HPP_
+
+/************************ Pick the elliptic curve ****************************/
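+
+/* Each CURVE_* macro below is expected to be supplied by the build system; e.g. building with
+ "make CURVE=ALT_BN128" is assumed to pass -DCURVE_ALT_BN128 to the compiler, thereby selecting
+ alt_bn128_pp. Exactly one curve macro should be defined at a time. */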
+
+#ifdef CURVE_ALT_BN128
+#include "algebra/curves/alt_bn128/alt_bn128_pp.hpp"
+namespace libsnark {
+typedef alt_bn128_pp default_ec_pp;
+} // libsnark
+#endif
+
+#ifdef CURVE_BN128
+#include "algebra/curves/bn128/bn128_pp.hpp"
+namespace libsnark {
+typedef bn128_pp default_ec_pp;
+} // libsnark
+#endif
+
+#ifdef CURVE_EDWARDS
+#include "algebra/curves/edwards/edwards_pp.hpp"
+namespace libsnark {
+typedef edwards_pp default_ec_pp;
+} // libsnark
+#endif
+
+#ifdef CURVE_MNT4
+#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp"
+namespace libsnark {
+typedef mnt4_pp default_ec_pp;
+} // libsnark
+#endif
+
+#ifdef CURVE_MNT6
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+namespace libsnark {
+typedef mnt6_pp default_ec_pp;
+} // libsnark
+#endif
+
+#endif // EC_PP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ This file defines default_r1cs_ppzksnark_pp based on the elliptic curve
+ choice selected in ec_pp.hpp.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_PPZKSNARK_PP_HPP_
+#define R1CS_PPZKSNARK_PP_HPP_
+
+#include "common/default_types/ec_pp.hpp"
+
+namespace libsnark {
+typedef default_ec_pp default_r1cs_ppzksnark_pp;
+} // libsnark
+
+#endif // R1CS_PPZKSNARK_PP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of functions for profiling code blocks.
+
+ See profiling.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "common/profiling.hpp"
+#include <cassert>
+#include <stdexcept>
+#include <chrono>
+#include <cstdio>
+#include <list>
+#include <vector>
+#include <ctime>
+#include "common/default_types/ec_pp.hpp"
+#include "common/utils.hpp"
+
+#ifndef NO_PROCPS
+#include <proc/readproc.h>
+#endif
+
+namespace libsnark {
+
+long long get_nsec_time()
+{
+ auto timepoint = std::chrono::high_resolution_clock::now();
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(timepoint.time_since_epoch()).count();
+}
+
+/* Return total CPU time consumed by all threads of the process, in nanoseconds. */
+long long get_nsec_cpu_time()
+{
+ ::timespec ts;
+ if ( ::clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) )
+ throw ::std::runtime_error("clock_gettime(CLOCK_PROCESS_CPUTIME_ID) failed");
+ // If we expect this to work, we should not silently ignore failures, because that would hide the problem and incur unnecessary system-call overhead. So if we ever observe this exception, we should probably add a suitable #ifdef .
+ //TODO: clock_gettime(CLOCK_PROCESS_CPUTIME_ID) is not supported by native Windows. What about Cygwin? Should we #ifdef on CLOCK_PROCESS_CPUTIME_ID or on __linux__?
+ return ts.tv_sec * 1000000000ll + ts.tv_nsec;
+}
+
+long long start_time, last_time;
+long long start_cpu_time, last_cpu_time;
+
+void start_profiling()
+{
+ printf("Reset time counters for profiling\n");
+
+ last_time = start_time = get_nsec_time();
+ last_cpu_time = start_cpu_time = get_nsec_cpu_time();
+}
+
+std::map<std::string, size_t> invocation_counts;
+std::map<std::string, long long> enter_times;
+std::map<std::string, long long> last_times;
+std::map<std::string, long long> cumulative_times;
+//TODO: Instead of analogous maps for time and cpu_time, use a single struct-valued map
+std::map<std::string, long long> enter_cpu_times;
+std::map<std::string, long long> last_cpu_times;
+std::map<std::pair<std::string, std::string>, long long> op_counts;
+std::map<std::pair<std::string, std::string>, long long> cumulative_op_counts; // ((msg, data_point), value)
+ // TODO: Convert op_counts and cumulative_op_counts from pair to structs
+size_t indentation = 0;
+
+std::vector<std::string> block_names;
+
+std::list<std::pair<std::string, long long*> > op_data_points = {
+#ifdef PROFILE_OP_COUNTS
+ std::make_pair("Fradd", &Fr<default_ec_pp>::add_cnt),
+ std::make_pair("Frsub", &Fr<default_ec_pp>::sub_cnt),
+ std::make_pair("Frmul", &Fr<default_ec_pp>::mul_cnt),
+ std::make_pair("Frinv", &Fr<default_ec_pp>::inv_cnt),
+ std::make_pair("Fqadd", &Fq<default_ec_pp>::add_cnt),
+ std::make_pair("Fqsub", &Fq<default_ec_pp>::sub_cnt),
+ std::make_pair("Fqmul", &Fq<default_ec_pp>::mul_cnt),
+ std::make_pair("Fqinv", &Fq<default_ec_pp>::inv_cnt),
+ std::make_pair("G1add", &G1<default_ec_pp>::add_cnt),
+ std::make_pair("G1dbl", &G1<default_ec_pp>::dbl_cnt),
+ std::make_pair("G2add", &G2<default_ec_pp>::add_cnt),
+ std::make_pair("G2dbl", &G2<default_ec_pp>::dbl_cnt)
+#endif
+};
+
+bool inhibit_profiling_info = false;
+bool inhibit_profiling_counters = false;
+
+void clear_profiling_counters()
+{
+ invocation_counts.clear();
+ last_times.clear();
+ last_cpu_times.clear();
+ cumulative_times.clear();
+}
+
+void print_cumulative_time_entry(const std::string &key, const long long factor)
+{
+ const double total_ms = (cumulative_times.at(key) * 1e-6);
+ const size_t cnt = invocation_counts.at(key);
+ const double avg_ms = total_ms / cnt;
+ printf(" %-45s: %12.5fms = %lld * %0.5fms (%zu invocations, %0.5fms = %lld * %0.5fms per invocation)\n", key.c_str(), total_ms, factor, total_ms/factor, cnt, avg_ms, factor, avg_ms/factor);
+}
+
+void print_cumulative_times(const long long factor)
+{
+ printf("Dumping times:\n");
+ for (auto& kv : cumulative_times)
+ {
+ print_cumulative_time_entry(kv.first, factor);
+ }
+}
+
+void print_cumulative_op_counts(const bool only_fq)
+{
+#ifdef PROFILE_OP_COUNTS
+ printf("Dumping operation counts:\n");
+ for (auto& msg : invocation_counts)
+ {
+ printf(" %-45s: ", msg.first.c_str());
+ bool first = true;
+ for (auto& data_point : op_data_points)
+ {
+ if (only_fq && data_point.first.compare(0, 2, "Fq") != 0)
+ {
+ continue;
+ }
+
+ if (!first)
+ {
+ printf(", ");
+ }
+ printf("%-5s = %7.0f (%3zu)",
+ data_point.first.c_str(),
+ 1. * cumulative_op_counts[std::make_pair(msg.first, data_point.first)] / msg.second,
+ msg.second);
+ first = false;
+ }
+ printf("\n");
+ }
+#else
+ UNUSED(only_fq);
+#endif
+}
+
+void print_op_profiling(const std::string &msg)
+{
+#ifdef PROFILE_OP_COUNTS
+ printf("\n");
+ print_indent();
+
+ printf("(opcounts) = (");
+ bool first = true;
+ for (std::pair<std::string, long long*> p : op_data_points)
+ {
+ if (!first)
+ {
+ printf(", ");
+ }
+
+ printf("%s=%lld", p.first.c_str(), *(p.second)-op_counts[std::make_pair(msg, p.first)]);
+ first = false;
+ }
+ printf(")");
+#else
+ UNUSED(msg);
+#endif
+}
+
+static void print_times_from_last_and_start(long long now, long long last,
+ long long cpu_now, long long cpu_last)
+{
+ long long time_from_start = now - start_time;
+ long long time_from_last = now - last;
+
+ long long cpu_time_from_start = cpu_now - start_cpu_time;
+ long long cpu_time_from_last = cpu_now - cpu_last;
+
+ if (time_from_last != 0) {
+ double parallelism_from_last = 1.0 * cpu_time_from_last / time_from_last;
+ printf("[%0.4fs x%0.2f]", time_from_last * 1e-9, parallelism_from_last);
+ } else {
+ printf("[ ]");
+ }
+ if (time_from_start != 0) {
+ double parallelism_from_start = 1.0 * cpu_time_from_start / time_from_start;
+ printf("\t(%0.4fs x%0.2f from start)", time_from_start * 1e-9, parallelism_from_start);
+ }
+}
+
+void print_time(const char* msg)
+{
+ if (inhibit_profiling_info)
+ {
+ return;
+ }
+
+ long long now = get_nsec_time();
+ long long cpu_now = get_nsec_cpu_time();
+
+ printf("%-35s\t", msg);
+ print_times_from_last_and_start(now, last_time, cpu_now, last_cpu_time);
+#ifdef PROFILE_OP_COUNTS
+ print_op_profiling(msg);
+#endif
+ printf("\n");
+
+ fflush(stdout);
+ last_time = now;
+ last_cpu_time = cpu_now;
+}
+
+void print_header(const char *msg)
+{
+ printf("\n================================================================================\n");
+ printf("%s\n", msg);
+ printf("================================================================================\n\n");
+}
+
+void print_indent()
+{
+ for (size_t i = 0; i < indentation; ++i)
+ {
+ printf(" ");
+ }
+}
+
+void op_profiling_enter(const std::string &msg)
+{
+ for (std::pair<std::string, long long*> p : op_data_points)
+ {
+ op_counts[std::make_pair(msg, p.first)] = *(p.second);
+ }
+}
+
+void enter_block(const std::string &msg, const bool indent)
+{
+ if (inhibit_profiling_counters)
+ {
+ return;
+ }
+
+ block_names.emplace_back(msg);
+ long long t = get_nsec_time();
+ enter_times[msg] = t;
+ long long cpu_t = get_nsec_cpu_time();
+ enter_cpu_times[msg] = cpu_t;
+
+ if (inhibit_profiling_info)
+ {
+ return;
+ }
+
+#ifdef MULTICORE
+#pragma omp critical
+#endif
+ {
+ op_profiling_enter(msg);
+
+ print_indent();
+ printf("(enter) %-35s\t", msg.c_str());
+ print_times_from_last_and_start(t, t, cpu_t, cpu_t);
+ printf("\n");
+ fflush(stdout);
+
+ if (indent)
+ {
+ ++indentation;
+ }
+ }
+}
+
+void leave_block(const std::string &msg, const bool indent)
+{
+ if (inhibit_profiling_counters)
+ {
+ return;
+ }
+
+#ifndef MULTICORE
+ assert(*(--block_names.end()) == msg);
+#endif
+ block_names.pop_back();
+
+ ++invocation_counts[msg];
+
+ long long t = get_nsec_time();
+ last_times[msg] = (t - enter_times[msg]);
+ cumulative_times[msg] += (t - enter_times[msg]);
+
+ long long cpu_t = get_nsec_cpu_time();
+ last_cpu_times[msg] = (cpu_t - enter_cpu_times[msg]);
+
+#ifdef PROFILE_OP_COUNTS
+ for (std::pair<std::string, long long*> p : op_data_points)
+ {
+ cumulative_op_counts[std::make_pair(msg, p.first)] += *(p.second)-op_counts[std::make_pair(msg, p.first)];
+ }
+#endif
+
+ if (inhibit_profiling_info)
+ {
+ return;
+ }
+
+#ifdef MULTICORE
+#pragma omp critical
+#endif
+ {
+ if (indent)
+ {
+ --indentation;
+ }
+
+ print_indent();
+ printf("(leave) %-35s\t", msg.c_str());
+ print_times_from_last_and_start(t, enter_times[msg], cpu_t, enter_cpu_times[msg]);
+ print_op_profiling(msg);
+ printf("\n");
+ fflush(stdout);
+ }
+}
+
+void print_mem(const std::string &s)
+{
+#ifndef NO_PROCPS
+ struct proc_t usage;
+ look_up_our_self(&usage);
+ if (s.empty())
+ {
+ printf("* Peak vsize (physical memory+swap) in mebibytes: %lu\n", usage.vsize >> 20);
+ }
+ else
+ {
+ printf("* Peak vsize (physical memory+swap) in mebibytes (%s): %lu\n", s.c_str(), usage.vsize >> 20);
+ }
+#else
+ printf("* Memory profiling not supported in NO_PROCPS mode\n");
+#endif
+}
+
+void print_compilation_info()
+{
+#ifdef __GNUC__
+ printf("g++ version: %s\n", __VERSION__);
+ //printf("Compiled on %s %s\n", __DATE__, __TIME__);
+#endif
+#ifdef STATIC
+ printf("STATIC: yes\n");
+#else
+ printf("STATIC: no\n");
+#endif
+#ifdef MULTICORE
+ printf("MULTICORE: yes\n");
+#else
+ printf("MULTICORE: no\n");
+#endif
+#ifdef DEBUG
+ printf("DEBUG: yes\n");
+#else
+ printf("DEBUG: no\n");
+#endif
+#ifdef PROFILE_OP_COUNTS
+ printf("PROFILE_OP_COUNTS: yes\n");
+#else
+ printf("PROFILE_OP_COUNTS: no\n");
+#endif
+#ifdef _GLIBCXX_DEBUG
+ printf("_GLIBCXX_DEBUG: yes\n");
+#else
+ printf("_GLIBCXX_DEBUG: no\n");
+#endif
+}
+
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of functions for profiling code blocks.
+
+ Reports time, operation counts, memory usage, and others.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PROFILING_HPP_
+#define PROFILING_HPP_
+
+#include <cstddef>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace libsnark {
+
+void start_profiling();
+long long get_nsec_time();
+void print_time(const char* msg);
+void print_header(const char* msg);
+
+void print_indent();
+
+extern bool inhibit_profiling_info;
+extern bool inhibit_profiling_counters;
+extern std::map<std::string, size_t> invocation_counts;
+extern std::map<std::string, long long> last_times;
+extern std::map<std::string, long long> cumulative_times;
+
+void clear_profiling_counters();
+
+void print_cumulative_time_entry(const std::string &key, const long long factor=1);
+void print_cumulative_times(const long long factor=1);
+void print_cumulative_op_counts(const bool only_fq=false);
+
+void enter_block(const std::string &msg, const bool indent=true);
+void leave_block(const std::string &msg, const bool indent=true);
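+
+/* Usage sketch (the block name is a free-form string; "Call to prover" is only an example):
+
+ enter_block("Call to prover");
+ ... timed work, possibly containing nested enter_block/leave_block pairs ...
+ leave_block("Call to prover");
+
+ leave_block must be given the same string as the matching enter_block (checked by an assert
+ in non-MULTICORE builds). */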
+
+void print_mem(const std::string &s = "");
+void print_compilation_info();
+
+} // libsnark
+
+#endif // PROFILING_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of serialization routines and constants.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SERIALIZATION_HPP_
+#define SERIALIZATION_HPP_
+
+#include <istream>
+#include <map>
+#include <ostream>
+#include <set>
+#include <vector>
+
+namespace libsnark {
+
+/*
+ * @todo
+ * The serialization is fragile. Should be rewritten using a standard, portable-format
+ * library such as Boost.Serialization.
+ *
+ * However, for now the following conventions are used within the code.
+ *
+ * All algebraic objects support either binary or decimal output using
+ * the standard C++ stream operators (operator<<, operator>>).
+ *
+ * The binary mode is activated by defining a BINARY_OUTPUT
+ * preprocessor macro (e.g. g++ -DBINARY_OUTPUT ...).
+ *
+ * Binary output assumes that the stream will be read in binary mode from its
+ * current position, so any whitespace should be consumed beforehand.
+ *
+ * Consecutive algebraic objects are separated by OUTPUT_NEWLINE and
+ * within themselves (e.g. X and Y coordinates for field elements) with
+ * OUTPUT_SEPARATOR (as defined below).
+ *
+ * Therefore to dump two integers, two Fp elements and another integer
+ * one would:
+ *
+ * out << 3 << "\n";
+ * out << 4 << "\n";
+ * out << FieldT(56) << OUTPUT_NEWLINE;
+ * out << FieldT(78) << OUTPUT_NEWLINE;
+ * out << 9 << "\n";
+ *
+ * Then, when reading back, it is the reader's responsibility (!) to consume "\n"
+ * after 4, but Fp::operator>> will correctly consume OUTPUT_NEWLINE.
+ *
+ * The reader should also consume "\n" after 9, so that another field
+ * element can be properly chained. This is especially important for
+ * binary output.
+ *
+ * The binary serialization of algebraic objects is currently *not*
+ * portable between machines of different word sizes.
+ */
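+
+/*
+ * Read-side sketch for the plain integers in the example above (an illustration; the consume_*
+ * helpers are declared below):
+ *
+ * size_t a, b;
+ * in >> a; consume_newline(in);
+ * in >> b; consume_newline(in);
+ *
+ * consume_OUTPUT_NEWLINE and consume_OUTPUT_SEPARATOR play the same role for the separators
+ * between algebraic objects wherever those are not already consumed by the object's own
+ * extraction operator (in BINARY_OUTPUT mode the OUTPUT_* variants consume nothing).
+ */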
+
+#ifdef BINARY_OUTPUT
+#define OUTPUT_NEWLINE ""
+#define OUTPUT_SEPARATOR ""
+#else
+#define OUTPUT_NEWLINE "\n"
+#define OUTPUT_SEPARATOR " "
+#endif
+
+inline void consume_newline(std::istream &in);
+inline void consume_OUTPUT_NEWLINE(std::istream &in);
+inline void consume_OUTPUT_SEPARATOR(std::istream &in);
+
+inline void output_bool(std::ostream &out, const bool b);
+
+inline void output_bool_vector(std::ostream &out, const std::vector<bool> &v);
+
+template<typename T>
+T reserialize(const T &obj);
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::vector<T> &v);
+
+template<typename T>
+std::istream& operator>>(std::istream& in, std::vector<T> &v);
+
+template<typename T1, typename T2>
+std::ostream& operator<<(std::ostream& out, const std::map<T1, T2> &m);
+
+template<typename T1, typename T2>
+std::istream& operator>>(std::istream& in, std::map<T1, T2> &m);
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::set<T> &s);
+
+template<typename T>
+std::istream& operator>>(std::istream& in, std::set<T> &s);
+
+} // libsnark
+
+#include "common/serialization.tcc"
+
+#endif // SERIALIZATION_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of serialization routines.
+
+ See serialization.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SERIALIZATION_TCC_
+#define SERIALIZATION_TCC_
+
+#include <cassert>
+#include <sstream>
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+inline void consume_newline(std::istream &in)
+{
+ char c;
+ in.read(&c, 1);
+}
+
+inline void consume_OUTPUT_NEWLINE(std::istream &in)
+{
+#ifdef BINARY_OUTPUT
+ // nothing to consume
+ UNUSED(in);
+#else
+ char c;
+ in.read(&c, 1);
+#endif
+}
+
+inline void consume_OUTPUT_SEPARATOR(std::istream &in)
+{
+#ifdef BINARY_OUTPUT
+ // nothing to consume
+ UNUSED(in);
+#else
+ char c;
+ in.read(&c, 1);
+#endif
+}
+
+inline void output_bool(std::ostream &out, const bool b)
+{
+ out << (b ? 1 : 0) << "\n";
+}
+
+inline void output_bool_vector(std::ostream &out, const std::vector<bool> &v)
+{
+ out << v.size() << "\n";
+ for (const bool b : v)
+ {
+ output_bool(out, b);
+ }
+}
+
+template<typename T>
+T reserialize(const T &obj)
+{
+ std::stringstream ss;
+ ss << obj;
+ T tmp;
+ ss >> tmp;
+ assert(obj == tmp);
+ return tmp;
+}
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::vector<T> &v)
+{
+ static_assert(!std::is_same<T, bool>::value, "this does not work for std::vector<bool>");
+ out << v.size() << "\n";
+ for (const T& t : v)
+ {
+ out << t << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<typename T>
+std::istream& operator>>(std::istream& in, std::vector<T> &v)
+{
+ static_assert(!std::is_same<T, bool>::value, "this does not work for std::vector<bool>");
+ size_t size;
+ in >> size;
+ consume_newline(in);
+
+ v.resize(0);
+ for (size_t i = 0; i < size; ++i)
+ {
+ T elt;
+ in >> elt;
+ consume_OUTPUT_NEWLINE(in);
+ v.push_back(elt);
+ }
+
+ return in;
+}
+
+template<typename T1, typename T2>
+std::ostream& operator<<(std::ostream& out, const std::map<T1, T2> &m)
+{
+ out << m.size() << "\n";
+
+ for (auto &it : m)
+ {
+ out << it.first << "\n";
+ out << it.second << "\n";
+ }
+
+ return out;
+}
+
+template<typename T1, typename T2>
+std::istream& operator>>(std::istream& in, std::map<T1, T2> &m)
+{
+ m.clear();
+ size_t size;
+ in >> size;
+ consume_newline(in);
+
+ for (size_t i = 0; i < size; ++i)
+ {
+ T1 k;
+ T2 v;
+ in >> k;
+ consume_newline(in);
+ in >> v;
+ consume_newline(in);
+ m[k] = v;
+ }
+
+ return in;
+}
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::set<T> &s)
+{
+ out << s.size() << "\n";
+
+ for (auto &el : s)
+ {
+ out << el << "\n";
+ }
+
+ return out;
+}
+
+
+template<typename T>
+std::istream& operator>>(std::istream& in, std::set<T> &s)
+{
+ s.clear();
+ size_t size;
+ in >> size;
+ consume_newline(in);
+
+ for (size_t i = 0; i < size; ++i)
+ {
+ T el;
+ in >> el;
+ consume_newline(in);
+ s.insert(el);
+ }
+
+ return in;
+}
+
+} // libsnark
+
+#endif // SERIALIZATION_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of functions for supporting the use of templates.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef TEMPLATE_UTILS_HPP_
+#define TEMPLATE_UTILS_HPP_
+
+namespace libsnark {
+
+/* A commonly used SFINAE helper type */
+template<typename T>
+struct void_type
+{
+ typedef void type;
+};
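+
+/* Example use (a sketch; requires <type_traits>, which this header does not include):
+ detecting whether a type T has a nested type `value_type`:
+
+ template<typename T, typename Enable = void>
+ struct has_value_type : std::false_type {};
+
+ template<typename T>
+ struct has_value_type<T, typename void_type<typename T::value_type>::type> : std::true_type {};
+*/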
+
+} // libsnark
+
+#endif // TEMPLATE_UTILS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of misc math and serialization utility functions
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <cstdarg>
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+size_t log2(size_t n)
+/* returns ceil(log2(n)), so 1ul<<log2(n) is the smallest power of 2
+ that is not less than n. */
+{
+ size_t r = ((n & (n-1)) == 0 ? 0 : 1); // add 1 if n is not a power of 2
+
+ while (n > 1)
+ {
+ n >>= 1;
+ r++;
+ }
+
+ return r;
+}
+
+size_t bitreverse(size_t n, const size_t l)
+{
+ size_t r = 0;
+ for (size_t k = 0; k < l; ++k)
+ {
+ r = (r << 1) | (n & 1);
+ n >>= 1;
+ }
+ return r;
+}
+
+bit_vector int_list_to_bits(const std::initializer_list<unsigned long> &l, const size_t wordsize)
+{
+ bit_vector res(wordsize*l.size());
+ for (size_t i = 0; i < l.size(); ++i)
+ {
+ for (size_t j = 0; j < wordsize; ++j)
+ {
+ res[i*wordsize + j] = (*(l.begin()+i) & (1ul<<(wordsize-1-j)));
+ }
+ }
+ return res;
+}
+
+long long div_ceil(long long x, long long y)
+{
+ return (x + (y-1)) / y;
+}
+
+bool is_little_endian()
+{
+ uint64_t a = 0x12345678;
+ unsigned char *c = (unsigned char*)(&a);
+ return (*c == 0x78);
+}
+
+std::string FORMAT(const std::string &prefix, const char* format, ...)
+{
+ const static size_t MAX_FMT = 256;
+ char buf[MAX_FMT];
+ va_list args;
+ va_start(args, format);
+ vsnprintf(buf, MAX_FMT, format, args);
+ va_end(args);
+
+ return prefix + std::string(buf);
+}
+
+void serialize_bit_vector(std::ostream &out, const bit_vector &v)
+{
+ out << v.size() << "\n";
+ for (size_t i = 0; i < v.size(); ++i)
+ {
+ out << v[i] << "\n";
+ }
+}
+
+void deserialize_bit_vector(std::istream &in, bit_vector &v)
+{
+ size_t size;
+ in >> size;
+ v.resize(size);
+ for (size_t i = 0; i < size; ++i)
+ {
+ bool b;
+ in >> b;
+ v[i] = b;
+ }
+}
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+ Declaration of misc math and serialization utility functions
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef UTILS_HPP_
+#define UTILS_HPP_
+
+#include <cassert>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace libsnark {
+
+typedef std::vector<bool> bit_vector;
+
+/// returns ceil(log2(n)), so 1ul<<log2(n) is the smallest power of 2 that is not less than n
+size_t log2(size_t n);
+
+inline size_t exp2(size_t k) { return 1ul << k; }
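+// For instance: log2(1) == 0, log2(5) == 3 (since 1ul<<3 == 8 >= 5), and log2(8) == 3.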
+
+size_t bitreverse(size_t n, const size_t l);
+bit_vector int_list_to_bits(const std::initializer_list<unsigned long> &l, const size_t wordsize);
+long long div_ceil(long long x, long long y);
+
+bool is_little_endian();
+
+std::string FORMAT(const std::string &prefix, const char* format, ...);
+
+/* A variadic template to suppress unused argument warnings */
+template<typename ... Types>
+void UNUSED(Types&&...) {}
+
+#ifdef DEBUG
+#define FMT FORMAT
+#else
+#define FMT(...) (UNUSED(__VA_ARGS__), "")
+#endif
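+
+// For instance, FMT(this->annotation_prefix, " bit_%zu", i) yields "<prefix> bit_7" (for i == 7)
+// in DEBUG builds and the empty string otherwise, so no string formatting happens in non-DEBUG
+// builds (the arguments are still evaluated).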
+
+void serialize_bit_vector(std::ostream &out, const bit_vector &v);
+void deserialize_bit_vector(std::istream &in, bit_vector &v);
+
+template<typename T>
+size_t size_in_bits(const std::vector<T> &v);
+
+#define ARRAY_SIZE(arr) (sizeof(arr)/sizeof(arr[0]))
+
+} // libsnark
+
+#include "common/utils.tcc" /* note that utils has a templatized part (utils.tcc) and non-templatized part (utils.cpp) */
+#endif // UTILS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Implementation of templatized utility functions
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef UTILS_TCC_
+#define UTILS_TCC_
+
+namespace libsnark {
+
+template<typename T>
+size_t size_in_bits(const std::vector<T> &v)
+{
+ return v.size() * T::size_in_bits();
+}
+
+} // libsnark
+
+#endif // UTILS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for profiling constraints.
+
+ See constraint_profiling.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "gadgetlib1/constraint_profiling.hpp"
+#include "common/profiling.hpp"
+
+namespace libsnark {
+
+size_t constraint_profiling_indent = 0;
+std::vector<constraint_profiling_entry> constraint_profiling_table;
+
+size_t PRINT_CONSTRAINT_PROFILING()
+{
+ size_t accounted = 0;
+ print_indent();
+ printf("Constraint profiling:\n");
+ for (constraint_profiling_entry &ent : constraint_profiling_table)
+ {
+ if (ent.indent == 0)
+ {
+ accounted += ent.count;
+ }
+
+ print_indent();
+ for (size_t i = 0; i < ent.indent; ++i)
+ {
+ printf(" ");
+ }
+ printf("* Number of constraints in [%s]: %zu\n", ent.annotation.c_str(), ent.count);
+ }
+
+ constraint_profiling_table.clear();
+ constraint_profiling_indent = 0;
+
+ return accounted;
+}
+
+} // libsnark
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for profiling constraints.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef CONSTRAINT_PROFILING_HPP_
+#define CONSTRAINT_PROFILING_HPP_
+
+#include <cstddef>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace libsnark {
+
+extern size_t constraint_profiling_indent;
+
+struct constraint_profiling_entry {
+ size_t indent;
+ std::string annotation;
+ size_t count;
+};
+
+extern std::vector<constraint_profiling_entry> constraint_profiling_table;
+
+#define PROFILE_CONSTRAINTS(pb, annotation) \
+ for (size_t _num_constraints_before = pb.num_constraints(), _iter = (++constraint_profiling_indent, 0), _cp_pos = constraint_profiling_table.size(); \
+ _iter == 0; \
+ constraint_profiling_table.insert(constraint_profiling_table.begin() + _cp_pos, constraint_profiling_entry{--constraint_profiling_indent, annotation, pb.num_constraints() - _num_constraints_before}), \
+ _iter = 1)
+
+size_t PRINT_CONSTRAINT_PROFILING(); // returns # of top level constraints
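+
+/* Usage sketch (some_gadget is a placeholder for any code that adds constraints to pb):
+
+ PROFILE_CONSTRAINTS(pb, "booleanity checks")
+ {
+ some_gadget.generate_r1cs_constraints();
+ }
+ PRINT_CONSTRAINT_PROFILING();
+
+ The macro records, under the given annotation, how many constraints the wrapped block added to pb. */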
+
+} // libsnark
+
+#endif // CONSTRAINT_PROFILING_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SIMPLE_EXAMPLE_HPP_
+#define SIMPLE_EXAMPLE_HPP_
+
+#include "examples/r1cs_examples.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+r1cs_example<FieldT> gen_r1cs_example_from_protoboard(const size_t num_constraints,
+ const size_t num_inputs);
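+
+/* Usage sketch (FieldT must match the proof system's field, e.g. Fr<default_ec_pp>):
+
+ const r1cs_example<FieldT> example = gen_r1cs_example_from_protoboard<FieldT>(1000, 10);
+
+ This produces a satisfiable R1CS instance with roughly 1000 constraints whose first 10
+ variables form the primary (public) input. */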
+
+} // libsnark
+
+#include "gadgetlib1/examples/simple_example.tcc"
+
+#endif // SIMPLE_EXAMPLE_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SIMPLE_EXAMPLE_TCC_
+#define SIMPLE_EXAMPLE_TCC_
+
+#include <cassert>
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+
+namespace libsnark {
+
+/* NOTE: all examples here actually generate one fewer constraint, to account for the soundness constraint in QAP */
+
+template<typename FieldT>
+r1cs_example<FieldT> gen_r1cs_example_from_protoboard(const size_t num_constraints,
+ const size_t num_inputs)
+{
+ const size_t new_num_constraints = num_constraints - 1;
+
+ /* construct dummy example: inner products of two vectors */
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> A;
+ pb_variable_array<FieldT> B;
+ pb_variable<FieldT> res;
+
+ // the variables on the protoboard are (ONE (constant 1 term), res, A[0], ..., A[num_constraints-1], B[0], ..., B[num_constraints-1])
+ res.allocate(pb, "res");
+ A.allocate(pb, new_num_constraints, "A");
+ B.allocate(pb, new_num_constraints, "B");
+
+ inner_product_gadget<FieldT> compute_inner_product(pb, A, B, res, "compute_inner_product");
+ compute_inner_product.generate_r1cs_constraints();
+
+ /* fill in random example */
+ for (size_t i = 0; i < new_num_constraints; ++i)
+ {
+ pb.val(A[i]) = FieldT::random_element();
+ pb.val(B[i]) = FieldT::random_element();
+ }
+
+ compute_inner_product.generate_r1cs_witness();
+
+ pb.constraint_system.num_inputs = num_inputs;
+ const r1cs_variable_assignment<FieldT> va = pb.values;
+ const r1cs_variable_assignment<FieldT> input(va.begin(), va.begin() + num_inputs);
+ return r1cs_example<FieldT>(pb.constraint_system, input, va, num_inputs);
+}
+
+} // libsnark
+#endif // SIMPLE_EXAMPLE_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef GADGET_HPP_
+#define GADGET_HPP_
+
+#include "gadgetlib1/protoboard.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class gadget {
+protected:
+ protoboard<FieldT> &pb;
+ const std::string annotation_prefix;
+public:
+ gadget(protoboard<FieldT> &pb, const std::string &annotation_prefix="");
+};
+
+} // libsnark
+#include "gadgetlib1/gadget.tcc"
+
+#endif // GADGET_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef GADGET_TCC_
+#define GADGET_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+gadget<FieldT>::gadget(protoboard<FieldT> &pb, const std::string &annotation_prefix) :
+ pb(pb), annotation_prefix(annotation_prefix)
+{
+#ifdef DEBUG
+ assert(annotation_prefix != "");
+#endif
+}
+
+} // libsnark
+#endif // GADGET_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_GADGETS_HPP_
+#define BASIC_GADGETS_HPP_
+
+#include <cassert>
+#include <memory>
+
+#include "gadgetlib1/gadget.hpp"
+
+namespace libsnark {
+
+/* forces lc to take value 0 or 1 by adding constraint lc * (1-lc) = 0 */
+template<typename FieldT>
+void generate_boolean_r1cs_constraint(protoboard<FieldT> &pb, const pb_linear_combination<FieldT> &lc, const std::string &annotation_prefix="");
+
+template<typename FieldT>
+void generate_r1cs_equals_const_constraint(protoboard<FieldT> &pb, const pb_linear_combination<FieldT> &lc, const FieldT& c, const std::string &annotation_prefix="");
+
+template<typename FieldT>
+class packing_gadget : public gadget<FieldT> {
+private:
+ /* no internal variables */
+public:
+ const pb_linear_combination_array<FieldT> bits;
+ const pb_linear_combination<FieldT> packed;
+
+ packing_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &bits,
+ const pb_linear_combination<FieldT> &packed,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), bits(bits), packed(packed) {}
+
+ void generate_r1cs_constraints(const bool enforce_bitness);
+ /* adds the constraint packed = \sum bits[i] * 2^i */
+
+ void generate_r1cs_witness_from_packed();
+ void generate_r1cs_witness_from_bits();
+};
+
+template<typename FieldT>
+class multipacking_gadget : public gadget<FieldT> {
+private:
+ std::vector<packing_gadget<FieldT> > packers;
+public:
+ const pb_linear_combination_array<FieldT> bits;
+ const pb_linear_combination_array<FieldT> packed_vars;
+
+ const size_t chunk_size;
+ const size_t num_chunks;
+ // const size_t last_chunk_size;
+
+ multipacking_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &bits,
+ const pb_linear_combination_array<FieldT> &packed_vars,
+ const size_t chunk_size,
+ const std::string &annotation_prefix="");
+ void generate_r1cs_constraints(const bool enforce_bitness);
+ void generate_r1cs_witness_from_packed();
+ void generate_r1cs_witness_from_bits();
+};
+
+template<typename FieldT>
+class field_vector_copy_gadget : public gadget<FieldT> {
+public:
+ const pb_variable_array<FieldT> source;
+ const pb_variable_array<FieldT> target;
+ const pb_linear_combination<FieldT> do_copy;
+
+ field_vector_copy_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &source,
+ const pb_variable_array<FieldT> &target,
+ const pb_linear_combination<FieldT> &do_copy,
+ const std::string &annotation_prefix="");
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+class bit_vector_copy_gadget : public gadget<FieldT> {
+public:
+ const pb_variable_array<FieldT> source_bits;
+ const pb_variable_array<FieldT> target_bits;
+ const pb_linear_combination<FieldT> do_copy;
+
+ pb_variable_array<FieldT> packed_source;
+ pb_variable_array<FieldT> packed_target;
+
+ std::shared_ptr<multipacking_gadget<FieldT> > pack_source;
+ std::shared_ptr<multipacking_gadget<FieldT> > pack_target;
+ std::shared_ptr<field_vector_copy_gadget<FieldT> > copier;
+
+ const size_t chunk_size;
+ const size_t num_chunks;
+
+ bit_vector_copy_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &source_bits,
+ const pb_variable_array<FieldT> &target_bits,
+ const pb_linear_combination<FieldT> &do_copy,
+ const size_t chunk_size,
+ const std::string &annotation_prefix="");
+ void generate_r1cs_constraints(const bool enforce_source_bitness, const bool enforce_target_bitness);
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+class dual_variable_gadget : public gadget<FieldT> {
+private:
+ std::shared_ptr<packing_gadget<FieldT> > consistency_check;
+public:
+ pb_variable<FieldT> packed;
+ pb_variable_array<FieldT> bits;
+
+ dual_variable_gadget(protoboard<FieldT> &pb,
+ const size_t width,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix)
+ {
+ packed.allocate(pb, FMT(this->annotation_prefix, " packed"));
+ bits.allocate(pb, width, FMT(this->annotation_prefix, " bits"));
+ consistency_check.reset(new packing_gadget<FieldT>(pb,
+ bits,
+ packed,
+ FMT(this->annotation_prefix, " consistency_check")));
+ }
+
+ dual_variable_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &bits,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), bits(bits)
+ {
+ packed.allocate(pb, FMT(this->annotation_prefix, " packed"));
+ consistency_check.reset(new packing_gadget<FieldT>(pb,
+ bits,
+ packed,
+ FMT(this->annotation_prefix, " consistency_check")));
+ }
+
+ dual_variable_gadget(protoboard<FieldT> &pb,
+ const pb_variable<FieldT> &packed,
+ const size_t width,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), packed(packed)
+ {
+ bits.allocate(pb, width, FMT(this->annotation_prefix, " bits"));
+ consistency_check.reset(new packing_gadget<FieldT>(pb,
+ bits,
+ packed,
+ FMT(this->annotation_prefix, " consistency_check")));
+ }
+
+ void generate_r1cs_constraints(const bool enforce_bitness);
+ void generate_r1cs_witness_from_packed();
+ void generate_r1cs_witness_from_bits();
+};
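+
+/* Usage sketch: expose a value both as a packed field element and as its bits
+ (the names and the width 8 are illustrative only):
+
+ dual_variable_gadget<FieldT> dv(pb, 8, "dv");
+ dv.generate_r1cs_constraints(true); // also enforce booleanity of dv.bits
+ pb.val(dv.packed) = FieldT(200);
+ dv.generate_r1cs_witness_from_packed(); // fills dv.bits with the binary expansion of 200
+*/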
+
+/*
+ the gadgets below are Fp specific:
+ I * X = R
+ (1-R) * X = 0
+
+ if X = 0 then R = 0
+ if X != 0 then R = 1 and I = X^{-1}
+*/
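+
+/* A concrete instance for disjunction_gadget below, where X is the sum of the inputs: for inputs
+ (0,1,0) we have X = 1, so the constraints force R = output = 1 and I = inv = 1; for inputs
+ (0,0,0) we have X = 0, which forces R = 0 (generate_r1cs_witness then sets I = 0). */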
+
+template<typename FieldT>
+class disjunction_gadget : public gadget<FieldT> {
+private:
+ pb_variable<FieldT> inv;
+public:
+ const pb_variable_array<FieldT> inputs;
+ const pb_variable<FieldT> output;
+
+ disjunction_gadget(protoboard<FieldT>& pb,
+ const pb_variable_array<FieldT> &inputs,
+ const pb_variable<FieldT> &output,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), inputs(inputs), output(output)
+ {
+ assert(inputs.size() >= 1);
+ inv.allocate(pb, FMT(this->annotation_prefix, " inv"));
+ }
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+void test_disjunction_gadget(const size_t n);
+
+template<typename FieldT>
+class conjunction_gadget : public gadget<FieldT> {
+private:
+ pb_variable<FieldT> inv;
+public:
+ const pb_variable_array<FieldT> inputs;
+ const pb_variable<FieldT> output;
+
+ conjunction_gadget(protoboard<FieldT>& pb,
+ const pb_variable_array<FieldT> &inputs,
+ const pb_variable<FieldT> &output,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), inputs(inputs), output(output)
+ {
+ assert(inputs.size() >= 1);
+ inv.allocate(pb, FMT(this->annotation_prefix, " inv"));
+ }
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+void test_conjunction_gadget(const size_t n);
+
+template<typename FieldT>
+class comparison_gadget : public gadget<FieldT> {
+private:
+ pb_variable_array<FieldT> alpha;
+ pb_variable<FieldT> alpha_packed;
+ std::shared_ptr<packing_gadget<FieldT> > pack_alpha;
+
+ std::shared_ptr<disjunction_gadget<FieldT> > all_zeros_test;
+ pb_variable<FieldT> not_all_zeros;
+public:
+ const size_t n;
+ const pb_linear_combination<FieldT> A;
+ const pb_linear_combination<FieldT> B;
+ const pb_variable<FieldT> less;
+ const pb_variable<FieldT> less_or_eq;
+
+ comparison_gadget(protoboard<FieldT>& pb,
+ const size_t n,
+ const pb_linear_combination<FieldT> &A,
+ const pb_linear_combination<FieldT> &B,
+ const pb_variable<FieldT> &less,
+ const pb_variable<FieldT> &less_or_eq,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), n(n), A(A), B(B), less(less), less_or_eq(less_or_eq)
+ {
+ alpha.allocate(pb, n, FMT(this->annotation_prefix, " alpha"));
+ alpha.emplace_back(less_or_eq); // alpha[n] is less_or_eq
+
+ alpha_packed.allocate(pb, FMT(this->annotation_prefix, " alpha_packed"));
+ not_all_zeros.allocate(pb, FMT(this->annotation_prefix, " not_all_zeros"));
+
+ pack_alpha.reset(new packing_gadget<FieldT>(pb, alpha, alpha_packed,
+ FMT(this->annotation_prefix, " pack_alpha")));
+
+ all_zeros_test.reset(new disjunction_gadget<FieldT>(pb,
+ pb_variable_array<FieldT>(alpha.begin(), alpha.begin() + n),
+ not_all_zeros,
+ FMT(this->annotation_prefix, " all_zeros_test")));
+ };
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+void test_comparison_gadget(const size_t n);
+
+template<typename FieldT>
+class inner_product_gadget : public gadget<FieldT> {
+private:
+ /* S_i = \sum_{k=0}^{i} A[k] * B[k] */
+ pb_variable_array<FieldT> S;
+public:
+ const pb_linear_combination_array<FieldT> A;
+ const pb_linear_combination_array<FieldT> B;
+ const pb_variable<FieldT> result;
+
+ inner_product_gadget(protoboard<FieldT>& pb,
+ const pb_linear_combination_array<FieldT> &A,
+ const pb_linear_combination_array<FieldT> &B,
+ const pb_variable<FieldT> &result,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), A(A), B(B), result(result)
+ {
+ assert(A.size() >= 1);
+ assert(A.size() == B.size());
+
+ S.allocate(pb, A.size()-1, FMT(this->annotation_prefix, " S"));
+ }
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+void test_inner_product_gadget(const size_t n);
+
+template<typename FieldT>
+class loose_multiplexing_gadget : public gadget<FieldT> {
+/*
+ this implements loose multiplexer:
+ index not in bounds -> success_flag = 0
+ index in bounds && success_flag = 1 -> result is correct
+ however if index is in bounds we can also set success_flag to 0 (and then result will be forced to be 0)
+*/
+public:
+ pb_variable_array<FieldT> alpha;
+private:
+ std::shared_ptr<inner_product_gadget<FieldT> > compute_result;
+public:
+ const pb_linear_combination_array<FieldT> arr;
+ const pb_variable<FieldT> index;
+ const pb_variable<FieldT> result;
+ const pb_variable<FieldT> success_flag;
+
+ loose_multiplexing_gadget(protoboard<FieldT>& pb,
+ const pb_linear_combination_array<FieldT> &arr,
+ const pb_variable<FieldT> &index,
+ const pb_variable<FieldT> &result,
+ const pb_variable<FieldT> &success_flag,
+ const std::string &annotation_prefix="") :
+ gadget<FieldT>(pb, annotation_prefix), arr(arr), index(index), result(result), success_flag(success_flag)
+ {
+ alpha.allocate(pb, arr.size(), FMT(this->annotation_prefix, " alpha"));
+ compute_result.reset(new inner_product_gadget<FieldT>(pb, alpha, arr, result, FMT(this->annotation_prefix, " compute_result")));
+ };
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
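+
+/* Behavioral sketch, following the comment above: with arr = (a_0, a_1, a_2), a witness with
+ index = 1 and success_flag = 1 must have result = a_1; if index lies outside {0,1,2}, only
+ assignments with success_flag = 0 (and hence result = 0) satisfy the constraints. */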
+
+template<typename FieldT>
+void test_loose_multiplexing_gadget(const size_t n);
+
+template<typename FieldT, typename VarT>
+void create_linear_combination_constraints(protoboard<FieldT> &pb,
+ const std::vector<FieldT> &base,
+ const std::vector<std::pair<VarT, FieldT> > &v,
+ const VarT &target,
+ const std::string &annotation_prefix);
+
+template<typename FieldT, typename VarT>
+void create_linear_combination_witness(protoboard<FieldT> &pb,
+ const std::vector<FieldT> &base,
+ const std::vector<std::pair<VarT, FieldT> > &v,
+ const VarT &target);
+
+} // libsnark
+#include "gadgetlib1/gadgets/basic_gadgets.tcc"
+
+#endif // BASIC_GADGETS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef BASIC_GADGETS_TCC_
+#define BASIC_GADGETS_TCC_
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+void generate_boolean_r1cs_constraint(protoboard<FieldT> &pb, const pb_linear_combination<FieldT> &lc, const std::string &annotation_prefix)
+/* forces lc to take value 0 or 1 by adding constraint lc * (1-lc) = 0 */
+{
+ pb.add_r1cs_constraint(r1cs_constraint<FieldT>(lc, 1-lc, 0),
+ FMT(annotation_prefix, " boolean_r1cs_constraint"));
+}
+
+template<typename FieldT>
+void generate_r1cs_equals_const_constraint(protoboard<FieldT> &pb, const pb_linear_combination<FieldT> &lc, const FieldT& c, const std::string &annotation_prefix)
+{
+ pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1, lc, c),
+ FMT(annotation_prefix, " constness_constraint"));
+}
+
+template<typename FieldT>
+void packing_gadget<FieldT>::generate_r1cs_constraints(const bool enforce_bitness)
+/* adds the constraint packed = \sum bits[i] * 2^i */
+{
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1, pb_packing_sum<FieldT>(bits), packed), FMT(this->annotation_prefix, " packing_constraint"));
+
+ if (enforce_bitness)
+ {
+ for (size_t i = 0; i < bits.size(); ++i)
+ {
+ generate_boolean_r1cs_constraint<FieldT>(this->pb, bits[i], FMT(this->annotation_prefix, " bitness_%zu", i));
+ }
+ }
+}
+
+template<typename FieldT>
+void packing_gadget<FieldT>::generate_r1cs_witness_from_packed()
+{
+ packed.evaluate(this->pb);
+ assert(this->pb.lc_val(packed).as_bigint().num_bits() <= bits.size());
+ bits.fill_with_bits_of_field_element(this->pb, this->pb.lc_val(packed));
+}
+
+template<typename FieldT>
+void packing_gadget<FieldT>::generate_r1cs_witness_from_bits()
+{
+ bits.evaluate(this->pb);
+ this->pb.lc_val(packed) = bits.get_field_element_from_bits(this->pb);
+}
+
+template<typename FieldT>
+multipacking_gadget<FieldT>::multipacking_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &bits,
+ const pb_linear_combination_array<FieldT> &packed_vars,
+ const size_t chunk_size,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix), bits(bits), packed_vars(packed_vars),
+ chunk_size(chunk_size),
+ num_chunks(div_ceil(bits.size(), chunk_size))
+ // last_chunk_size(bits.size() - (num_chunks-1) * chunk_size)
+{
+ assert(packed_vars.size() == num_chunks);
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ packers.emplace_back(packing_gadget<FieldT>(this->pb, pb_linear_combination_array<FieldT>(bits.begin() + i * chunk_size,
+ bits.begin() + std::min((i+1) * chunk_size, bits.size())),
+ packed_vars[i], FMT(this->annotation_prefix, " packers_%zu", i)));
+ }
+}
+
+template<typename FieldT>
+void multipacking_gadget<FieldT>::generate_r1cs_constraints(const bool enforce_bitness)
+{
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ packers[i].generate_r1cs_constraints(enforce_bitness);
+ }
+}
+
+template<typename FieldT>
+void multipacking_gadget<FieldT>::generate_r1cs_witness_from_packed()
+{
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ packers[i].generate_r1cs_witness_from_packed();
+ }
+}
+
+template<typename FieldT>
+void multipacking_gadget<FieldT>::generate_r1cs_witness_from_bits()
+{
+ for (size_t i = 0; i < num_chunks; ++i)
+ {
+ packers[i].generate_r1cs_witness_from_bits();
+ }
+}
+
+template<typename FieldT>
+size_t multipacking_num_chunks(const size_t num_bits)
+{
+ return div_ceil(num_bits, FieldT::capacity());
+}
+
+template<typename FieldT>
+field_vector_copy_gadget<FieldT>::field_vector_copy_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &source,
+ const pb_variable_array<FieldT> &target,
+ const pb_linear_combination<FieldT> &do_copy,
+ const std::string &annotation_prefix) :
+gadget<FieldT>(pb, annotation_prefix), source(source), target(target), do_copy(do_copy)
+{
+ assert(source.size() == target.size());
+}
+
+template<typename FieldT>
+void field_vector_copy_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < source.size(); ++i)
+ {
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(do_copy, source[i] - target[i], 0),
+ FMT(this->annotation_prefix, " copying_check_%zu", i));
+ }
+}
+
+template<typename FieldT>
+void field_vector_copy_gadget<FieldT>::generate_r1cs_witness()
+{
+ do_copy.evaluate(this->pb);
+ assert(this->pb.lc_val(do_copy) == FieldT::one() || this->pb.lc_val(do_copy) == FieldT::zero());
+ if (this->pb.lc_val(do_copy) != FieldT::zero())
+ {
+ for (size_t i = 0; i < source.size(); ++i)
+ {
+ this->pb.val(target[i]) = this->pb.val(source[i]);
+ }
+ }
+}
+
+template<typename FieldT>
+bit_vector_copy_gadget<FieldT>::bit_vector_copy_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &source_bits,
+ const pb_variable_array<FieldT> &target_bits,
+ const pb_linear_combination<FieldT> &do_copy,
+ const size_t chunk_size,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix), source_bits(source_bits), target_bits(target_bits), do_copy(do_copy),
+ chunk_size(chunk_size), num_chunks(div_ceil(source_bits.size(), chunk_size))
+{
+ assert(source_bits.size() == target_bits.size());
+
+ packed_source.allocate(pb, num_chunks, FMT(annotation_prefix, " packed_source"));
+ pack_source.reset(new multipacking_gadget<FieldT>(pb, source_bits, packed_source, chunk_size, FMT(annotation_prefix, " pack_source")));
+
+ packed_target.allocate(pb, num_chunks, FMT(annotation_prefix, " packed_target"));
+ pack_target.reset(new multipacking_gadget<FieldT>(pb, target_bits, packed_target, chunk_size, FMT(annotation_prefix, " pack_target")));
+
+ copier.reset(new field_vector_copy_gadget<FieldT>(pb, packed_source, packed_target, do_copy, FMT(annotation_prefix, " copier")));
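+
+ /* note: the conditional copy is enforced on the packed chunk values, so it
+ costs num_chunks copy constraints (plus the packing constraints) rather
+ than one constraint per bit. */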
+}
+
+template<typename FieldT>
+void bit_vector_copy_gadget<FieldT>::generate_r1cs_constraints(const bool enforce_source_bitness, const bool enforce_target_bitness)
+{
+ pack_source->generate_r1cs_constraints(enforce_source_bitness);
+ pack_target->generate_r1cs_constraints(enforce_target_bitness);
+
+ copier->generate_r1cs_constraints();
+}
+
+template<typename FieldT>
+void bit_vector_copy_gadget<FieldT>::generate_r1cs_witness()
+{
+ do_copy.evaluate(this->pb);
+ assert(this->pb.lc_val(do_copy) == FieldT::zero() || this->pb.lc_val(do_copy) == FieldT::one());
+ if (this->pb.lc_val(do_copy) == FieldT::one())
+ {
+ for (size_t i = 0; i < source_bits.size(); ++i)
+ {
+ this->pb.val(target_bits[i]) = this->pb.val(source_bits[i]);
+ }
+ }
+
+ pack_source->generate_r1cs_witness_from_bits();
+ pack_target->generate_r1cs_witness_from_bits();
+}
+
+template<typename FieldT>
+void dual_variable_gadget<FieldT>::generate_r1cs_constraints(const bool enforce_bitness)
+{
+ consistency_check->generate_r1cs_constraints(enforce_bitness);
+}
+
+template<typename FieldT>
+void dual_variable_gadget<FieldT>::generate_r1cs_witness_from_packed()
+{
+ consistency_check->generate_r1cs_witness_from_packed();
+}
+
+template<typename FieldT>
+void dual_variable_gadget<FieldT>::generate_r1cs_witness_from_bits()
+{
+ consistency_check->generate_r1cs_witness_from_bits();
+}
+
+template<typename FieldT>
+void disjunction_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /* inv * sum = output */
+ linear_combination<FieldT> a1, b1, c1;
+ a1.add_term(inv);
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ b1.add_term(inputs[i]);
+ }
+ c1.add_term(output);
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a1, b1, c1), FMT(this->annotation_prefix, " inv*sum=output"));
+
+ /* (1-output) * sum = 0 */
+ linear_combination<FieldT> a2, b2, c2;
+ a2.add_term(ONE);
+ a2.add_term(output, -1);
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ b2.add_term(inputs[i]);
+ }
+ c2.add_term(ONE, 0);
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a2, b2, c2), FMT(this->annotation_prefix, " (1-output)*sum=0"));
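+
+ /* worked example: for inputs (0,1,1), sum = 2, so the second constraint
+ forces output = 1 and the witness sets inv = 2^{-1}; for the all-zero
+ input, the first constraint inv*0 = output forces output = 0. */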
+}
+
+template<typename FieldT>
+void disjunction_gadget<FieldT>::generate_r1cs_witness()
+{
+ FieldT sum = FieldT::zero();
+
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ sum += this->pb.val(inputs[i]);
+ }
+
+ if (sum.is_zero())
+ {
+ this->pb.val(inv) = FieldT::zero();
+ this->pb.val(output) = FieldT::zero();
+ }
+ else
+ {
+ this->pb.val(inv) = sum.inverse();
+ this->pb.val(output) = FieldT::one();
+ }
+}
+
+template<typename FieldT>
+void test_disjunction_gadget(const size_t n)
+{
+ printf("testing disjunction_gadget on all %zu bit strings\n", n);
+
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> inputs;
+ inputs.allocate(pb, n, "inputs");
+
+ pb_variable<FieldT> output;
+ output.allocate(pb, "output");
+
+ disjunction_gadget<FieldT> d(pb, inputs, output, "d");
+ d.generate_r1cs_constraints();
+
+ for (size_t w = 0; w < 1ul<<n; ++w)
+ {
+ for (size_t j = 0; j < n; ++j)
+ {
+ pb.val(inputs[j]) = FieldT((w & (1ul<<j)) ? 1 : 0);
+ }
+
+ d.generate_r1cs_witness();
+
+#ifdef DEBUG
+ printf("positive test for %zu\n", w);
+#endif
+ assert(pb.val(output) == (w ? FieldT::one() : FieldT::zero()));
+ assert(pb.is_satisfied());
+
+#ifdef DEBUG
+ printf("negative test for %zu\n", w);
+#endif
+ pb.val(output) = (w ? FieldT::zero() : FieldT::one());
+ assert(!pb.is_satisfied());
+ }
+
+ print_time("disjunction tests successful");
+}
+
+template<typename FieldT>
+void conjunction_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /* inv * (n-sum) = 1-output */
+ linear_combination<FieldT> a1, b1, c1;
+ a1.add_term(inv);
+ b1.add_term(ONE, inputs.size());
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ b1.add_term(inputs[i], -1);
+ }
+ c1.add_term(ONE);
+ c1.add_term(output, -1);
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a1, b1, c1), FMT(this->annotation_prefix, " inv*(n-sum)=(1-output)"));
+
+ /* output * (n-sum) = 0 */
+ linear_combination<FieldT> a2, b2, c2;
+ a2.add_term(output);
+ b2.add_term(ONE, inputs.size());
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ b2.add_term(inputs[i], -1);
+ }
+ c2.add_term(ONE, 0);
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a2, b2, c2), FMT(this->annotation_prefix, " output*(n-sum)=0"));
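+
+ /* worked example (n = 3): for inputs (1,1,0), n - sum = 1, so the second
+ constraint forces output = 0 and the witness sets inv = 1; for the all-one
+ input, n - sum = 0 and the first constraint inv*0 = 1 - output forces
+ output = 1. */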
+}
+
+template<typename FieldT>
+void conjunction_gadget<FieldT>::generate_r1cs_witness()
+{
+ FieldT sum = FieldT(inputs.size());
+
+ for (size_t i = 0; i < inputs.size(); ++i)
+ {
+ sum -= this->pb.val(inputs[i]);
+ }
+
+ if (sum.is_zero())
+ {
+ this->pb.val(inv) = FieldT::zero();
+ this->pb.val(output) = FieldT::one();
+ }
+ else
+ {
+ this->pb.val(inv) = sum.inverse();
+ this->pb.val(output) = FieldT::zero();
+ }
+}
+
+template<typename FieldT>
+void test_conjunction_gadget(const size_t n)
+{
+ printf("testing conjunction_gadget on all %zu bit strings\n", n);
+
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> inputs;
+ inputs.allocate(pb, n, "inputs");
+
+ pb_variable<FieldT> output;
+ output.allocate(pb, "output");
+
+ conjunction_gadget<FieldT> c(pb, inputs, output, "c");
+ c.generate_r1cs_constraints();
+
+ for (size_t w = 0; w < 1ul<<n; ++w)
+ {
+ for (size_t j = 0; j < n; ++j)
+ {
+ pb.val(inputs[j]) = (w & (1ul<<j)) ? FieldT::one() : FieldT::zero();
+ }
+
+ c.generate_r1cs_witness();
+
+#ifdef DEBUG
+ printf("positive test for %zu\n", w);
+#endif
+ assert(pb.val(output) == (w == (1ul<<n) - 1 ? FieldT::one() : FieldT::zero()));
+ assert(pb.is_satisfied());
+
+#ifdef DEBUG
+ printf("negative test for %zu\n", w);
+#endif
+ pb.val(output) = (w == (1ul<<n) - 1 ? FieldT::zero() : FieldT::one());
+ assert(!pb.is_satisfied());
+ }
+
+ print_time("conjunction tests successful");
+}
+
+template<typename FieldT>
+void comparison_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /*
+ packed(alpha) = 2^n + B - A
+
+ not_all_zeros = \bigvee_{i=0}^{n-1} alpha_i
+
+ if B - A > 0, then 2^n + B - A > 2^n,
+ so alpha_n = 1 and not_all_zeros = 1
+ if B - A = 0, then 2^n + B - A = 2^n,
+ so alpha_n = 1 and not_all_zeros = 0
+ if B - A < 0, then 2^n + B - A \in {0, 1, \ldots, 2^n-1},
+ so alpha_n = 0
+
+ therefore alpha_n = less_or_eq and alpha_n * not_all_zeros = less
+ */
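+
+ /* worked example (n = 3): for A = 5, B = 2 we get 2^3 + B - A = 5 = 0b0101,
+ so alpha_3 = less_or_eq = 0 and less = 0; for A = 2, B = 5 we get 11 = 0b1011,
+ so less_or_eq = 1, not_all_zeros = 1 and less = 1; for A = B = 4 we get
+ 8 = 0b1000, so less_or_eq = 1, not_all_zeros = 0 and less = 0. */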
+
+ /* constrain not_all_zeros to be Boolean; the alpha_i are already Boolean via the packing gadget */
+ generate_boolean_r1cs_constraint<FieldT>(this->pb, not_all_zeros,
+ FMT(this->annotation_prefix, " not_all_zeros"));
+
+ /* constraints for packed(alpha) = 2^n + B - A */
+ pack_alpha->generate_r1cs_constraints(true);
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1, (FieldT(2)^n) + B - A, alpha_packed), FMT(this->annotation_prefix, " main_constraint"));
+
+ /* compute result */
+ all_zeros_test->generate_r1cs_constraints();
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(less_or_eq, not_all_zeros, less),
+ FMT(this->annotation_prefix, " less"));
+}
+
+template<typename FieldT>
+void comparison_gadget<FieldT>::generate_r1cs_witness()
+{
+ A.evaluate(this->pb);
+ B.evaluate(this->pb);
+
+ /* unpack 2^n + B - A into alpha_packed */
+ this->pb.val(alpha_packed) = (FieldT(2)^n) + this->pb.lc_val(B) - this->pb.lc_val(A);
+ pack_alpha->generate_r1cs_witness_from_packed();
+
+ /* compute result */
+ all_zeros_test->generate_r1cs_witness();
+ this->pb.val(less) = this->pb.val(less_or_eq) * this->pb.val(not_all_zeros);
+}
+
+template<typename FieldT>
+void test_comparison_gadget(const size_t n)
+{
+ printf("testing comparison_gadget on all %zu bit inputs\n", n);
+
+ protoboard<FieldT> pb;
+
+ pb_variable<FieldT> A, B, less, less_or_eq;
+ A.allocate(pb, "A");
+ B.allocate(pb, "B");
+ less.allocate(pb, "less");
+ less_or_eq.allocate(pb, "less_or_eq");
+
+ comparison_gadget<FieldT> cmp(pb, n, A, B, less, less_or_eq, "cmp");
+ cmp.generate_r1cs_constraints();
+
+ for (size_t a = 0; a < 1ul<<n; ++a)
+ {
+ for (size_t b = 0; b < 1ul<<n; ++b)
+ {
+ pb.val(A) = FieldT(a);
+ pb.val(B) = FieldT(b);
+
+ cmp.generate_r1cs_witness();
+
+#ifdef DEBUG
+ printf("positive test for %zu < %zu\n", a, b);
+#endif
+ assert(pb.val(less) == (a < b ? FieldT::one() : FieldT::zero()));
+ assert(pb.val(less_or_eq) == (a <= b ? FieldT::one() : FieldT::zero()));
+ assert(pb.is_satisfied());
+ }
+ }
+
+ print_time("comparison tests successful");
+}
+
+template<typename FieldT>
+void inner_product_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /*
+ S_i = \sum_{k=0}^{i} A[k] * B[k]
+ S[0] = A[0] * B[0]
+ S[i] - S[i-1] = A[i] * B[i]
+ */
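+ /* e.g. for A = (1,0,1), B = (1,1,1): S[0] = 1, then 0 = S[1] - S[0] gives
+ S[1] = 1, and 1 = result - S[1] gives result = 2, the inner product. */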
+ for (size_t i = 0; i < A.size(); ++i)
+ {
+ this->pb.add_r1cs_constraint(
+ r1cs_constraint<FieldT>(A[i], B[i],
+ (i == A.size()-1 ? result : S[i]) + (i == 0 ? 0 * ONE : -S[i-1])),
+ FMT(this->annotation_prefix, " S_%zu", i));
+ }
+}
+
+template<typename FieldT>
+void inner_product_gadget<FieldT>::generate_r1cs_witness()
+{
+ FieldT total = FieldT::zero();
+ for (size_t i = 0; i < A.size(); ++i)
+ {
+ A[i].evaluate(this->pb);
+ B[i].evaluate(this->pb);
+
+ total += this->pb.lc_val(A[i]) * this->pb.lc_val(B[i]);
+ this->pb.val(i == A.size()-1 ? result : S[i]) = total;
+ }
+}
+
+template<typename FieldT>
+void test_inner_product_gadget(const size_t n)
+{
+ printf("testing inner_product_gadget on all %zu bit strings\n", n);
+
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> A;
+ A.allocate(pb, n, "A");
+ pb_variable_array<FieldT> B;
+ B.allocate(pb, n, "B");
+
+ pb_variable<FieldT> result;
+ result.allocate(pb, "result");
+
+ inner_product_gadget<FieldT> g(pb, A, B, result, "g");
+ g.generate_r1cs_constraints();
+
+ for (size_t i = 0; i < 1ul<<n; ++i)
+ {
+ for (size_t j = 0; j < 1ul<<n; ++j)
+ {
+ size_t correct = 0;
+ for (size_t k = 0; k < n; ++k)
+ {
+ pb.val(A[k]) = (i & (1ul<<k) ? FieldT::one() : FieldT::zero());
+ pb.val(B[k]) = (j & (1ul<<k) ? FieldT::one() : FieldT::zero());
+ correct += ((i & (1ul<<k)) && (j & (1ul<<k)) ? 1 : 0);
+ }
+
+ g.generate_r1cs_witness();
+#ifdef DEBUG
+ printf("positive test for (%zu, %zu)\n", i, j);
+#endif
+ assert(pb.val(result) == FieldT(correct));
+ assert(pb.is_satisfied());
+
+#ifdef DEBUG
+ printf("negative test for (%zu, %zu)\n", i, j);
+#endif
+ pb.val(result) = FieldT(100*n+19);
+ assert(!pb.is_satisfied());
+ }
+ }
+
+ print_time("inner_product_gadget tests successful");
+}
+
+template<typename FieldT>
+void loose_multiplexing_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /* \alpha_i (index - i) = 0 */
+ for (size_t i = 0; i < arr.size(); ++i)
+ {
+ this->pb.add_r1cs_constraint(
+ r1cs_constraint<FieldT>(alpha[i], index - i, 0),
+ FMT(this->annotation_prefix, " alpha_%zu", i));
+ }
+
+ /* 1 * (\sum \alpha_i) = success_flag */
+ linear_combination<FieldT> a, b, c;
+ a.add_term(ONE);
+ for (size_t i = 0; i < arr.size(); ++i)
+ {
+ b.add_term(alpha[i]);
+ }
+ c.add_term(success_flag);
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a, b, c), FMT(this->annotation_prefix, " main_constraint"));
+
+ /* success_flag equals \sum \alpha_i, and the constraints above allow at most
+ one \alpha_i (the one with i = index, if any) to be nonzero. constraining
+ success_flag to be Boolean therefore also forces that \alpha_i to be 0 or 1. */
+ generate_boolean_r1cs_constraint<FieldT>(this->pb, success_flag, FMT(this->annotation_prefix, " success_flag"));
+
+ /* compute result */
+ compute_result->generate_r1cs_constraints();
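+
+ /* worked example (arr.size() = 4): for index = 2 the first constraints force
+ alpha_0 = alpha_1 = alpha_3 = 0, and the honest witness sets alpha_2 = 1,
+ success_flag = 1 and result = arr[2]; for index = 7 all alpha_i are forced
+ to 0, so success_flag = 0 and result = 0. note that even for an in-range
+ index the all-zero assignment (success_flag = 0, result = 0) also satisfies
+ the constraints, so callers must check success_flag. */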
+}
+
+template<typename FieldT>
+void loose_multiplexing_gadget<FieldT>::generate_r1cs_witness()
+{
+ /* assumes that idx fits in an unsigned long; true for our purposes for now */
+ const bigint<FieldT::num_limbs> valint = this->pb.val(index).as_bigint();
+ unsigned long idx = valint.as_ulong();
+ const bigint<FieldT::num_limbs> arrsize(arr.size());
+
+ if (idx >= arr.size() || mpn_cmp(valint.data, arrsize.data, FieldT::num_limbs) >= 0)
+ {
+ for (size_t i = 0; i < arr.size(); ++i)
+ {
+ this->pb.val(alpha[i]) = FieldT::zero();
+ }
+
+ this->pb.val(success_flag) = FieldT::zero();
+ }
+ else
+ {
+ for (size_t i = 0; i < arr.size(); ++i)
+ {
+ this->pb.val(alpha[i]) = (i == idx ? FieldT::one() : FieldT::zero());
+ }
+
+ this->pb.val(success_flag) = FieldT::one();
+ }
+
+ compute_result->generate_r1cs_witness();
+}
+
+template<typename FieldT>
+void test_loose_multiplexing_gadget(const size_t n)
+{
+ printf("testing loose_multiplexing_gadget on 2**%zu pb_variable<FieldT> array inputs\n", n);
+ protoboard<FieldT> pb;
+
+ pb_variable_array<FieldT> arr;
+ arr.allocate(pb, 1ul<<n, "arr");
+ pb_variable<FieldT> index, result, success_flag;
+ index.allocate(pb, "index");
+ result.allocate(pb, "result");
+ success_flag.allocate(pb, "success_flag");
+
+ loose_multiplexing_gadget<FieldT> g(pb, arr, index, result, success_flag, "g");
+ g.generate_r1cs_constraints();
+
+ for (size_t i = 0; i < 1ul<<n; ++i)
+ {
+ pb.val(arr[i]) = FieldT((19*i) % (1ul<<n));
+ }
+
+ for (int idx = -1; idx <= (int)(1ul<<n); ++idx)
+ {
+ pb.val(index) = FieldT(idx);
+ g.generate_r1cs_witness();
+
+ if (0 <= idx && idx <= (int)(1ul<<n) - 1)
+ {
+ printf("demuxing element %d (in bounds)\n", idx);
+ assert(pb.val(result) == FieldT((19*idx) % (1ul<<n)));
+ assert(pb.val(success_flag) == FieldT::one());
+ assert(pb.is_satisfied());
+ pb.val(result) -= FieldT::one();
+ assert(!pb.is_satisfied());
+ }
+ else
+ {
+ printf("demuxing element %d (out of bounds)\n", idx);
+ assert(pb.val(success_flag) == FieldT::zero());
+ assert(pb.is_satisfied());
+ pb.val(success_flag) = FieldT::one();
+ assert(!pb.is_satisfied());
+ }
+ }
+ printf("loose_multiplexing_gadget tests successful\n");
+}
+
+template<typename FieldT, typename VarT>
+void create_linear_combination_constraints(protoboard<FieldT> &pb,
+ const std::vector<FieldT> &base,
+ const std::vector<std::pair<VarT, FieldT> > &v,
+ const VarT &target,
+ const std::string &annotation_prefix)
+{
+ for (size_t i = 0; i < base.size(); ++i)
+ {
+ linear_combination<FieldT> a, b, c;
+
+ a.add_term(ONE);
+ b.add_term(ONE, base[i]);
+
+ for (auto &p : v)
+ {
+ b.add_term(p.first.all_vars[i], p.second);
+ }
+
+ c.add_term(target.all_vars[i]);
+
+ pb.add_r1cs_constraint(r1cs_constraint<FieldT>(a, b, c), FMT(annotation_prefix, " linear_combination_%zu", i));
+ }
+}
+
+template<typename FieldT, typename VarT>
+void create_linear_combination_witness(protoboard<FieldT> &pb,
+ const std::vector<FieldT> &base,
+ const std::vector<std::pair<VarT, FieldT> > &v,
+ const VarT &target)
+{
+ for (size_t i = 0; i < base.size(); ++i)
+ {
+ pb.val(target.all_vars[i]) = base[i];
+
+ for (auto &p : v)
+ {
+ pb.val(target.all_vars[i]) += p.second * pb.val(p.first.all_vars[i]);
+ }
+ }
+}
+
+} // libsnark
+#endif // BASIC_GADGETS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a gadget that can be created from an R1CS constraint system.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef GADGET_FROM_R1CS_HPP_
+#define GADGET_FROM_R1CS_HPP_
+
+#include <map>
+
+#include "gadgetlib1/gadget.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class gadget_from_r1cs : public gadget<FieldT> {
+
+private:
+ const std::vector<pb_variable_array<FieldT> > vars;
+ const r1cs_constraint_system<FieldT> cs;
+ std::map<size_t, size_t> cs_to_vars;
+
+public:
+
+ gadget_from_r1cs(protoboard<FieldT> &pb,
+ const std::vector<pb_variable_array<FieldT> > &vars,
+ const r1cs_constraint_system<FieldT> &cs,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness(const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input);
+};
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/gadget_from_r1cs.tcc"
+
+#endif // GADGET_FROM_R1CS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for a gadget that can be created from an R1CS constraint system.
+
+ See gadget_from_r1cs.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef GADGET_FROM_R1CS_TCC_
+#define GADGET_FROM_R1CS_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+gadget_from_r1cs<FieldT>::gadget_from_r1cs(protoboard<FieldT> &pb,
+ const std::vector<pb_variable_array<FieldT> > &vars,
+ const r1cs_constraint_system<FieldT> &cs,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ vars(vars),
+ cs(cs)
+{
+ cs_to_vars[0] = 0; /* constant term maps to constant term */
+
+ size_t cs_var_idx = 1;
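+ /* illustrative example: if vars consists of two blocks of sizes 3 and 2,
+ then cs variables 1..3 are mapped to the protoboard indices of the first
+ block and cs variables 4..5 to those of the second block. */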
+ for (auto va : vars)
+ {
+#ifdef DEBUG
+ printf("gadget_from_r1cs: translating a block of variables with length %zu\n", va.size());
+#endif
+ for (auto v : va)
+ {
+ cs_to_vars[cs_var_idx] = v.index;
+
+#ifdef DEBUG
+ if (v.index != 0)
+ {
+ // handle annotations, except for re-annotating constant term
+ const std::map<size_t, std::string>::const_iterator it = cs.variable_annotations.find(cs_var_idx);
+
+ std::string annotation = FMT(annotation_prefix, " variable_%zu", cs_var_idx);
+ if (it != cs.variable_annotations.end())
+ {
+ annotation = annotation_prefix + " " + it->second;
+ }
+
+ pb.augment_variable_annotation(v, annotation);
+ }
+#endif
+ ++cs_var_idx;
+ }
+ }
+
+#ifdef DEBUG
+ printf("gadget_from_r1cs: sum of all block lengths: %zu\n", cs_var_idx-1);
+ printf("gadget_from_r1cs: cs.num_variables(): %zu\n", cs.num_variables());
+#endif
+
+ assert(cs_var_idx - 1 == cs.num_variables());
+}
+
+template<typename FieldT>
+void gadget_from_r1cs<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < cs.num_constraints(); ++i)
+ {
+ const r1cs_constraint<FieldT> &constr = cs.constraints[i];
+ r1cs_constraint<FieldT> translated_constr;
+
+ for (const linear_term<FieldT> &t: constr.a.terms)
+ {
+ translated_constr.a.terms.emplace_back(linear_term<FieldT>(pb_variable<FieldT>(cs_to_vars[t.index]), t.coeff));
+ }
+
+ for (const linear_term<FieldT> &t: constr.b.terms)
+ {
+ translated_constr.b.terms.emplace_back(linear_term<FieldT>(pb_variable<FieldT>(cs_to_vars[t.index]), t.coeff));
+ }
+
+ for (const linear_term<FieldT> &t: constr.c.terms)
+ {
+ translated_constr.c.terms.emplace_back(linear_term<FieldT>(pb_variable<FieldT>(cs_to_vars[t.index]), t.coeff));
+ }
+
+ std::string annotation = FMT(this->annotation_prefix, " constraint_%zu", i);
+
+#ifdef DEBUG
+ auto it = cs.constraint_annotations.find(i);
+ if (it != cs.constraint_annotations.end())
+ {
+ annotation = this->annotation_prefix + " " + it->second;
+ }
+#endif
+ this->pb.add_r1cs_constraint(translated_constr, annotation);
+ }
+}
+
+template<typename FieldT>
+void gadget_from_r1cs<FieldT>::generate_r1cs_witness(const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input)
+{
+ assert(cs.num_inputs() == primary_input.size());
+ assert(cs.num_variables() == primary_input.size() + auxiliary_input.size());
+
+ for (size_t i = 0; i < primary_input.size(); ++i)
+ {
+ this->pb.val(pb_variable<FieldT>(cs_to_vars[i+1])) = primary_input[i];
+ }
+
+ for (size_t i = 0; i < auxiliary_input.size(); ++i)
+ {
+ this->pb.val(pb_variable<FieldT>(cs_to_vars[primary_input.size()+i+1])) = auxiliary_input[i];
+ }
+}
+
+} // libsnark
+
+#endif // GADGET_FROM_R1CS_TCC_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#ifndef DIGEST_SELECTOR_GADGET_HPP_
+#define DIGEST_SELECTOR_GADGET_HPP_
+
+#include <vector>
+
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class digest_selector_gadget : public gadget<FieldT> {
+public:
+ size_t digest_size;
+ digest_variable<FieldT> input;
+ pb_linear_combination<FieldT> is_right;
+ digest_variable<FieldT> left;
+ digest_variable<FieldT> right;
+
+ digest_selector_gadget(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const digest_variable<FieldT> &input,
+ const pb_linear_combination<FieldT> &is_right,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.tcc"
+
+#endif // DIGEST_SELECTOR_GADGET_HPP_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#ifndef DIGEST_SELECTOR_GADGET_TCC_
+#define DIGEST_SELECTOR_GADGET_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+digest_selector_gadget<FieldT>::digest_selector_gadget(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const digest_variable<FieldT> &input,
+ const pb_linear_combination<FieldT> &is_right,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const std::string &annotation_prefix) :
+gadget<FieldT>(pb, annotation_prefix), digest_size(digest_size), input(input), is_right(is_right), left(left), right(right)
+{
+}
+
+template<typename FieldT>
+void digest_selector_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < digest_size; ++i)
+ {
+ /*
+ input = is_right * right + (1-is_right) * left
+ input - left = is_right * (right - left)
+ */
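+ /* e.g. is_right = 1 forces input.bits[i] = right.bits[i], while
+ is_right = 0 forces input.bits[i] = left.bits[i]. */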
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(is_right, right.bits[i] - left.bits[i], input.bits[i] - left.bits[i]),
+ FMT(this->annotation_prefix, " propagate_%zu", i));
+ }
+}
+
+template<typename FieldT>
+void digest_selector_gadget<FieldT>::generate_r1cs_witness()
+{
+ is_right.evaluate(this->pb);
+
+ assert(this->pb.lc_val(is_right) == FieldT::one() || this->pb.lc_val(is_right) == FieldT::zero());
+ if (this->pb.lc_val(is_right) == FieldT::one())
+ {
+ for (size_t i = 0; i < digest_size; ++i)
+ {
+ this->pb.val(right.bits[i]) = this->pb.val(input.bits[i]);
+ }
+ }
+ else
+ {
+ for (size_t i = 0; i < digest_size; ++i)
+ {
+ this->pb.val(left.bits[i]) = this->pb.val(input.bits[i]);
+ }
+ }
+}
+
+} // libsnark
+
+#endif // DIGEST_SELECTOR_GADGET_TCC_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#ifndef HASH_IO_HPP_
+#define HASH_IO_HPP_
+#include <cstddef>
+#include <vector>
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class digest_variable : public gadget<FieldT> {
+public:
+ size_t digest_size;
+ pb_variable_array<FieldT> bits;
+
+ digest_variable(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const std::string &annotation_prefix);
+
+ digest_variable(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const pb_variable_array<FieldT> &partial_bits,
+ const pb_variable<FieldT> &padding,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness(const bit_vector& contents);
+ bit_vector get_digest() const;
+};
+
+template<typename FieldT>
+class block_variable : public gadget<FieldT> {
+public:
+ size_t block_size;
+ pb_variable_array<FieldT> bits;
+
+ block_variable(protoboard<FieldT> &pb,
+ const size_t block_size,
+ const std::string &annotation_prefix);
+
+ block_variable(protoboard<FieldT> &pb,
+ const std::vector<pb_variable_array<FieldT> > &parts,
+ const std::string &annotation_prefix);
+
+ block_variable(protoboard<FieldT> &pb,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness(const bit_vector& contents);
+ bit_vector get_block() const;
+};
+
+} // libsnark
+#include "gadgetlib1/gadgets/hashes/hash_io.tcc"
+
+#endif // HASH_IO_HPP_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#ifndef HASH_IO_TCC_
+#define HASH_IO_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+digest_variable<FieldT>::digest_variable(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix), digest_size(digest_size)
+{
+ bits.allocate(pb, digest_size, FMT(this->annotation_prefix, " bits"));
+}
+
+template<typename FieldT>
+digest_variable<FieldT>::digest_variable(protoboard<FieldT> &pb,
+ const size_t digest_size,
+ const pb_variable_array<FieldT> &partial_bits,
+ const pb_variable<FieldT> &padding,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix), digest_size(digest_size)
+{
+ assert(partial_bits.size() <= digest_size);
+ bits = partial_bits;
+ while (bits.size() != digest_size)
+ {
+ bits.emplace_back(padding);
+ }
+}
+
+template<typename FieldT>
+void digest_variable<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < digest_size; ++i)
+ {
+ generate_boolean_r1cs_constraint<FieldT>(this->pb, bits[i], FMT(this->annotation_prefix, " bits_%zu", i));
+ }
+}
+
+template<typename FieldT>
+void digest_variable<FieldT>::generate_r1cs_witness(const bit_vector& contents)
+{
+ bits.fill_with_bits(this->pb, contents);
+}
+
+template<typename FieldT>
+bit_vector digest_variable<FieldT>::get_digest() const
+{
+ return bits.get_bits(this->pb);
+}
+
+template<typename FieldT>
+block_variable<FieldT>::block_variable(protoboard<FieldT> &pb,
+ const size_t block_size,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix), block_size(block_size)
+{
+ bits.allocate(pb, block_size, FMT(this->annotation_prefix, " bits"));
+}
+
+template<typename FieldT>
+block_variable<FieldT>::block_variable(protoboard<FieldT> &pb,
+ const std::vector<pb_variable_array<FieldT> > &parts,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix)
+{
+ for (auto &part : parts)
+ {
+ bits.insert(bits.end(), part.begin(), part.end());
+ }
+ block_size = bits.size(); /* total number of bits across all parts */
+}
+
+template<typename FieldT>
+block_variable<FieldT>::block_variable(protoboard<FieldT> &pb,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix)
+{
+ assert(left.bits.size() == right.bits.size());
+ block_size = 2 * left.bits.size();
+ bits.insert(bits.end(), left.bits.begin(), left.bits.end());
+ bits.insert(bits.end(), right.bits.begin(), right.bits.end());
+}
+
+template<typename FieldT>
+void block_variable<FieldT>::generate_r1cs_witness(const bit_vector& contents)
+{
+ bits.fill_with_bits(this->pb, contents);
+}
+
+template<typename FieldT>
+bit_vector block_variable<FieldT>::get_block() const
+{
+ return bits.get_bits(this->pb);
+}
+
+} // libsnark
+#endif // HASH_IO_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for auxiliary gadgets for the SHA256 gadget.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_AUX_HPP_
+#define SHA256_AUX_HPP_
+
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class lastbits_gadget : public gadget<FieldT> {
+public:
+ pb_variable<FieldT> X;
+ size_t X_bits;
+ pb_variable<FieldT> result;
+ pb_linear_combination_array<FieldT> result_bits;
+
+ pb_linear_combination_array<FieldT> full_bits;
+ std::shared_ptr<packing_gadget<FieldT> > unpack_bits;
+ std::shared_ptr<packing_gadget<FieldT> > pack_result;
+
+ lastbits_gadget(protoboard<FieldT> &pb,
+ const pb_variable<FieldT> &X,
+ const size_t X_bits,
+ const pb_variable<FieldT> &result,
+ const pb_linear_combination_array<FieldT> &result_bits,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+class XOR3_gadget : public gadget<FieldT> {
+private:
+ pb_variable<FieldT> tmp;
+public:
+ pb_linear_combination<FieldT> A;
+ pb_linear_combination<FieldT> B;
+ pb_linear_combination<FieldT> C;
+ bool assume_C_is_zero;
+ pb_linear_combination<FieldT> out;
+
+ XOR3_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination<FieldT> &A,
+ const pb_linear_combination<FieldT> &B,
+ const pb_linear_combination<FieldT> &C,
+ const bool assume_C_is_zero,
+ const pb_linear_combination<FieldT> &out,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+class small_sigma_gadget : public gadget<FieldT> {
+private:
+ pb_variable_array<FieldT> W;
+ pb_variable<FieldT> result;
+public:
+ pb_variable_array<FieldT> result_bits;
+ std::vector<std::shared_ptr<XOR3_gadget<FieldT> > > compute_bits;
+ std::shared_ptr<packing_gadget<FieldT> > pack_result;
+
+ small_sigma_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &W,
+ const pb_variable<FieldT> &result,
+ const size_t rot1,
+ const size_t rot2,
+ const size_t shift,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+class big_sigma_gadget : public gadget<FieldT> {
+private:
+ pb_linear_combination_array<FieldT> W;
+ pb_variable<FieldT> result;
+public:
+ pb_variable_array<FieldT> result_bits;
+ std::vector<std::shared_ptr<XOR3_gadget<FieldT> > > compute_bits;
+ std::shared_ptr<packing_gadget<FieldT> > pack_result;
+
+ big_sigma_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &W,
+ const pb_variable<FieldT> &result,
+ const size_t rot1,
+ const size_t rot2,
+ const size_t rot3,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+class choice_gadget : public gadget<FieldT> {
+private:
+ pb_variable_array<FieldT> result_bits;
+public:
+ pb_linear_combination_array<FieldT> X;
+ pb_linear_combination_array<FieldT> Y;
+ pb_linear_combination_array<FieldT> Z;
+ pb_variable<FieldT> result;
+ std::shared_ptr<packing_gadget<FieldT> > pack_result;
+
+ choice_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &X,
+ const pb_linear_combination_array<FieldT> &Y,
+ const pb_linear_combination_array<FieldT> &Z,
+ const pb_variable<FieldT> &result, const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+class majority_gadget : public gadget<FieldT> {
+private:
+ pb_variable_array<FieldT> result_bits;
+ std::shared_ptr<packing_gadget<FieldT> > pack_result;
+public:
+ pb_linear_combination_array<FieldT> X;
+ pb_linear_combination_array<FieldT> Y;
+ pb_linear_combination_array<FieldT> Z;
+ pb_variable<FieldT> result;
+
+ majority_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &X,
+ const pb_linear_combination_array<FieldT> &Y,
+ const pb_linear_combination_array<FieldT> &Z,
+ const pb_variable<FieldT> &result,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_aux.tcc"
+
+#endif // SHA256_AUX_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for auxiliary gadgets for the SHA256 gadget.
+
+ See sha256_aux.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_AUX_TCC_
+#define SHA256_AUX_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+lastbits_gadget<FieldT>::lastbits_gadget(protoboard<FieldT> &pb,
+ const pb_variable<FieldT> &X,
+ const size_t X_bits,
+ const pb_variable<FieldT> &result,
+ const pb_linear_combination_array<FieldT> &result_bits,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ X(X),
+ X_bits(X_bits),
+ result(result),
+ result_bits(result_bits)
+{
+ full_bits = result_bits;
+ for (size_t i = result_bits.size(); i < X_bits; ++i)
+ {
+ pb_variable<FieldT> full_bits_overflow;
+ full_bits_overflow.allocate(pb, FMT(this->annotation_prefix, " full_bits_%zu", i));
+ full_bits.emplace_back(full_bits_overflow);
+ }
+
+ unpack_bits.reset(new packing_gadget<FieldT>(pb, full_bits, X, FMT(this->annotation_prefix, " unpack_bits")));
+ pack_result.reset(new packing_gadget<FieldT>(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result")));
+}
+
+template<typename FieldT>
+void lastbits_gadget<FieldT>::generate_r1cs_constraints()
+{
+ unpack_bits->generate_r1cs_constraints(true);
+ pack_result->generate_r1cs_constraints(false);
+}
+
+template<typename FieldT>
+void lastbits_gadget<FieldT>::generate_r1cs_witness()
+{
+ unpack_bits->generate_r1cs_witness_from_packed();
+ pack_result->generate_r1cs_witness_from_bits();
+}
+
+template<typename FieldT>
+XOR3_gadget<FieldT>::XOR3_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination<FieldT> &A,
+ const pb_linear_combination<FieldT> &B,
+ const pb_linear_combination<FieldT> &C,
+ const bool assume_C_is_zero,
+ const pb_linear_combination<FieldT> &out,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ A(A),
+ B(B),
+ C(C),
+ assume_C_is_zero(assume_C_is_zero),
+ out(out)
+{
+ if (!assume_C_is_zero)
+ {
+ tmp.allocate(pb, FMT(this->annotation_prefix, " tmp"));
+ }
+}
+
+template<typename FieldT>
+void XOR3_gadget<FieldT>::generate_r1cs_constraints()
+{
+ /*
+ tmp = A + B - 2*A*B, i.e., tmp = A xor B
+ out = tmp + C - 2*tmp*C, i.e., out = tmp xor C
+ */
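+ /* e.g. A = B = 1 gives tmp = 1 + 1 - 2 = 0, and with C = 1 then
+ out = 0 + 1 - 0 = 1, matching 1 xor 1 xor 1 = 1. */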
+ if (assume_C_is_zero)
+ {
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(2*A, B, A + B - out), FMT(this->annotation_prefix, " implicit_tmp_equals_out"));
+ }
+ else
+ {
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(2*A, B, A + B - tmp), FMT(this->annotation_prefix, " tmp"));
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(2 * tmp, C, tmp + C - out), FMT(this->annotation_prefix, " out"));
+ }
+}
+
+template<typename FieldT>
+void XOR3_gadget<FieldT>::generate_r1cs_witness()
+{
+ if (assume_C_is_zero)
+ {
+ this->pb.lc_val(out) = this->pb.lc_val(A) + this->pb.lc_val(B) - FieldT(2) * this->pb.lc_val(A) * this->pb.lc_val(B);
+ }
+ else
+ {
+ this->pb.val(tmp) = this->pb.lc_val(A) + this->pb.lc_val(B) - FieldT(2) * this->pb.lc_val(A) * this->pb.lc_val(B);
+ this->pb.lc_val(out) = this->pb.val(tmp) + this->pb.lc_val(C) - FieldT(2) * this->pb.val(tmp) * this->pb.lc_val(C);
+ }
+}
+
+#define SHA256_GADGET_ROTR(A, i, k) A[((i)+(k)) % 32]
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+small_sigma_gadget<FieldT>::small_sigma_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &W,
+ const pb_variable<FieldT> &result,
+ const size_t rot1,
+ const size_t rot2,
+ const size_t shift,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ W(W),
+ result(result)
+{
+ result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits"));
+ compute_bits.resize(32);
+ for (size_t i = 0; i < 32; ++i)
+ {
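+ /* sigma_0/sigma_1 XOR two rotations with a right shift (see the FIPS 180-4
+ reference above); output bits with i + shift >= 32 have no shifted-in
+ input bit, so the third XOR operand is treated as zero via
+ assume_C_is_zero. */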
+ compute_bits[i].reset(new XOR3_gadget<FieldT>(pb, SHA256_GADGET_ROTR(W, i, rot1), SHA256_GADGET_ROTR(W, i, rot2),
+ (i + shift < 32 ? W[i+shift] : ONE),
+ (i + shift >= 32), result_bits[i],
+ FMT(this->annotation_prefix, " compute_bits_%zu", i)));
+ }
+ pack_result.reset(new packing_gadget<FieldT>(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result")));
+}
+
+template<typename FieldT>
+void small_sigma_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ compute_bits[i]->generate_r1cs_constraints();
+ }
+
+ pack_result->generate_r1cs_constraints(false);
+}
+
+template<typename FieldT>
+void small_sigma_gadget<FieldT>::generate_r1cs_witness()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ compute_bits[i]->generate_r1cs_witness();
+ }
+
+ pack_result->generate_r1cs_witness_from_bits();
+}
+
+template<typename FieldT>
+big_sigma_gadget<FieldT>::big_sigma_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &W,
+ const pb_variable<FieldT> &result,
+ const size_t rot1,
+ const size_t rot2,
+ const size_t rot3,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ W(W),
+ result(result)
+{
+ result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits"));
+ compute_bits.resize(32);
+ for (size_t i = 0; i < 32; ++i)
+ {
+ compute_bits[i].reset(new XOR3_gadget<FieldT>(pb, SHA256_GADGET_ROTR(W, i, rot1), SHA256_GADGET_ROTR(W, i, rot2), SHA256_GADGET_ROTR(W, i, rot3), false, result_bits[i],
+ FMT(this->annotation_prefix, " compute_bits_%zu", i)));
+ }
+
+ pack_result.reset(new packing_gadget<FieldT>(pb, result_bits, result, FMT(this->annotation_prefix, " pack_result")));
+}
+
+template<typename FieldT>
+void big_sigma_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ compute_bits[i]->generate_r1cs_constraints();
+ }
+
+ pack_result->generate_r1cs_constraints(false);
+}
+
+template<typename FieldT>
+void big_sigma_gadget<FieldT>::generate_r1cs_witness()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ compute_bits[i]->generate_r1cs_witness();
+ }
+
+ pack_result->generate_r1cs_witness_from_bits();
+}
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+choice_gadget<FieldT>::choice_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &X,
+ const pb_linear_combination_array<FieldT> &Y,
+ const pb_linear_combination_array<FieldT> &Z,
+ const pb_variable<FieldT> &result, const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ X(X),
+ Y(Y),
+ Z(Z),
+ result(result)
+{
+ result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits"));
+ pack_result.reset(new packing_gadget<FieldT>(pb, result_bits, result, FMT(this->annotation_prefix, " result")));
+}
+
+template<typename FieldT>
+void choice_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ /*
+ result = x * y + (1-x) * z
+ result - z = x * (y - z)
+ */
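+ /* e.g. x = 1 reduces the constraint to y - z = result - z, i.e. result = y,
+ while x = 0 forces result = z. */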
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(X[i], Y[i] - Z[i], result_bits[i] - Z[i]), FMT(this->annotation_prefix, " result_bits_%zu", i));
+ }
+ pack_result->generate_r1cs_constraints(false);
+}
+
+template<typename FieldT>
+void choice_gadget<FieldT>::generate_r1cs_witness()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ this->pb.val(result_bits[i]) = this->pb.lc_val(X[i]) * this->pb.lc_val(Y[i]) + (FieldT::one() - this->pb.lc_val(X[i])) * this->pb.lc_val(Z[i]);
+ }
+ pack_result->generate_r1cs_witness_from_bits();
+}
+
+/* Page 10 of http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf */
+template<typename FieldT>
+majority_gadget<FieldT>::majority_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &X,
+ const pb_linear_combination_array<FieldT> &Y,
+ const pb_linear_combination_array<FieldT> &Z,
+ const pb_variable<FieldT> &result,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ X(X),
+ Y(Y),
+ Z(Z),
+ result(result)
+{
+ result_bits.allocate(pb, 32, FMT(this->annotation_prefix, " result_bits"));
+ pack_result.reset(new packing_gadget<FieldT>(pb, result_bits, result, FMT(this->annotation_prefix, " result")));
+}
+
+template<typename FieldT>
+void majority_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ /*
+ 2*result + aux = x + y + z
+ x, y, z, aux -- bits
+ aux = x + y + z - 2*result
+ */
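+ /* worked example: x = 1, y = 1, z = 0 gives majority bit result = 1 with
+ aux = 0; setting result = 0 instead would give aux = 2, violating the
+ constraint aux * (1 - aux) = 0, so result must be the majority bit. */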
+ generate_boolean_r1cs_constraint<FieldT>(this->pb, result_bits[i], FMT(this->annotation_prefix, " result_%zu", i));
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(X[i] + Y[i] + Z[i] - 2 * result_bits[i],
+ 1 - (X[i] + Y[i] + Z[i] - 2 * result_bits[i]),
+ 0),
+ FMT(this->annotation_prefix, " result_bits_%zu", i));
+ }
+ pack_result->generate_r1cs_constraints(false);
+}
+
+template<typename FieldT>
+void majority_gadget<FieldT>::generate_r1cs_witness()
+{
+ for (size_t i = 0; i < 32; ++i)
+ {
+ const long v = (this->pb.lc_val(X[i]) + this->pb.lc_val(Y[i]) + this->pb.lc_val(Z[i])).as_ulong();
+ this->pb.val(result_bits[i]) = FieldT(v / 2);
+ }
+
+ pack_result->generate_r1cs_witness_from_bits();
+}
+
+} // libsnark
+
+#endif // SHA256_AUX_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for gadgets for the SHA256 message schedule and round function.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_COMPONENTS_HPP_
+#define SHA256_COMPONENTS_HPP_
+
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_aux.hpp"
+
+namespace libsnark {
+
+const size_t SHA256_digest_size = 256;
+const size_t SHA256_block_size = 512;
+
+template<typename FieldT>
+pb_linear_combination_array<FieldT> SHA256_default_IV(protoboard<FieldT> &pb);
+
+template<typename FieldT>
+class sha256_message_schedule_gadget : public gadget<FieldT> {
+public:
+ std::vector<pb_variable_array<FieldT> > W_bits;
+ std::vector<std::shared_ptr<packing_gadget<FieldT> > > pack_W;
+
+ std::vector<pb_variable<FieldT> > sigma0;
+ std::vector<pb_variable<FieldT> > sigma1;
+ std::vector<std::shared_ptr<small_sigma_gadget<FieldT> > > compute_sigma0;
+ std::vector<std::shared_ptr<small_sigma_gadget<FieldT> > > compute_sigma1;
+ std::vector<pb_variable<FieldT> > unreduced_W;
+ std::vector<std::shared_ptr<lastbits_gadget<FieldT> > > mod_reduce_W;
+public:
+ pb_variable_array<FieldT> M;
+ pb_variable_array<FieldT> packed_W;
+ sha256_message_schedule_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &M,
+ const pb_variable_array<FieldT> &packed_W,
+ const std::string &annotation_prefix);
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+template<typename FieldT>
+class sha256_round_function_gadget : public gadget<FieldT> {
+public:
+ pb_variable<FieldT> sigma0;
+ pb_variable<FieldT> sigma1;
+ std::shared_ptr<big_sigma_gadget<FieldT> > compute_sigma0;
+ std::shared_ptr<big_sigma_gadget<FieldT> > compute_sigma1;
+ pb_variable<FieldT> choice;
+ pb_variable<FieldT> majority;
+ std::shared_ptr<choice_gadget<FieldT> > compute_choice;
+ std::shared_ptr<majority_gadget<FieldT> > compute_majority;
+ pb_variable<FieldT> packed_d;
+ std::shared_ptr<packing_gadget<FieldT> > pack_d;
+ pb_variable<FieldT> packed_h;
+ std::shared_ptr<packing_gadget<FieldT> > pack_h;
+ pb_variable<FieldT> unreduced_new_a;
+ pb_variable<FieldT> unreduced_new_e;
+ std::shared_ptr<lastbits_gadget<FieldT> > mod_reduce_new_a;
+ std::shared_ptr<lastbits_gadget<FieldT> > mod_reduce_new_e;
+ pb_variable<FieldT> packed_new_a;
+ pb_variable<FieldT> packed_new_e;
+public:
+ pb_linear_combination_array<FieldT> a;
+ pb_linear_combination_array<FieldT> b;
+ pb_linear_combination_array<FieldT> c;
+ pb_linear_combination_array<FieldT> d;
+ pb_linear_combination_array<FieldT> e;
+ pb_linear_combination_array<FieldT> f;
+ pb_linear_combination_array<FieldT> g;
+ pb_linear_combination_array<FieldT> h;
+ pb_variable<FieldT> W;
+ long K;
+ pb_linear_combination_array<FieldT> new_a;
+ pb_linear_combination_array<FieldT> new_e;
+
+ sha256_round_function_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &a,
+ const pb_linear_combination_array<FieldT> &b,
+ const pb_linear_combination_array<FieldT> &c,
+ const pb_linear_combination_array<FieldT> &d,
+ const pb_linear_combination_array<FieldT> &e,
+ const pb_linear_combination_array<FieldT> &f,
+ const pb_linear_combination_array<FieldT> &g,
+ const pb_linear_combination_array<FieldT> &h,
+ const pb_variable<FieldT> &W,
+ const long &K,
+ const pb_linear_combination_array<FieldT> &new_a,
+ const pb_linear_combination_array<FieldT> &new_e,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_components.tcc"
+
+#endif // SHA256_COMPONENTS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for gadgets for the SHA256 message schedule and round function.
+
+ See sha256_components.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_COMPONENTS_TCC_
+#define SHA256_COMPONENTS_TCC_
+
+namespace libsnark {
+
+const unsigned long SHA256_K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+const unsigned long SHA256_H[8] = {
+ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
+};
+
+template<typename FieldT>
+pb_linear_combination_array<FieldT> SHA256_default_IV(protoboard<FieldT> &pb)
+{
+ pb_linear_combination_array<FieldT> result;
+ result.reserve(SHA256_digest_size);
+
+ for (size_t i = 0; i < SHA256_digest_size; ++i)
+ {
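+ /* extract the bits of each 32-bit word of SHA256_H most-significant-bit first */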
+ int iv_val = (SHA256_H[i / 32] >> (31-(i % 32))) & 1;
+
+ pb_linear_combination<FieldT> iv_element;
+ iv_element.assign(pb, iv_val * ONE);
+ iv_element.evaluate(pb);
+
+ result.emplace_back(iv_element);
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+sha256_message_schedule_gadget<FieldT>::sha256_message_schedule_gadget(protoboard<FieldT> &pb,
+ const pb_variable_array<FieldT> &M,
+ const pb_variable_array<FieldT> &packed_W,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ M(M),
+ packed_W(packed_W)
+{
+ W_bits.resize(64);
+
+ pack_W.resize(16);
+ for (size_t i = 0; i < 16; ++i)
+ {
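+ /* the reversed iterators take the i-th 32-bit word of M and flip its bit
+ order, so that index 0 of W_bits[i] holds the word's least significant
+ bit (the convention assumed by SHA256_GADGET_ROTR and the shift handling
+ in small_sigma_gadget). */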
+ W_bits[i] = pb_variable_array<FieldT>(M.rbegin() + (15-i) * 32, M.rbegin() + (16-i) * 32);
+ pack_W[i].reset(new packing_gadget<FieldT>(pb, W_bits[i], packed_W[i], FMT(this->annotation_prefix, " pack_W_%zu", i)));
+ }
+
+ /* NB: entries 0..15 of these vectors are left unallocated; only indices 16..63 are used below */
+ sigma0.resize(64);
+ sigma1.resize(64);
+ compute_sigma0.resize(64);
+ compute_sigma1.resize(64);
+ unreduced_W.resize(64);
+ mod_reduce_W.resize(64);
+
+ for (size_t i = 16; i < 64; ++i)
+ {
+ /* allocate result variables for sigma0/sigma1 invocations */
+ sigma0[i].allocate(pb, FMT(this->annotation_prefix, " sigma0_%zu", i));
+ sigma1[i].allocate(pb, FMT(this->annotation_prefix, " sigma1_%zu", i));
+
+ /* compute sigma0/sigma1 */
+ compute_sigma0[i].reset(new small_sigma_gadget<FieldT>(pb, W_bits[i-15], sigma0[i], 7, 18, 3, FMT(this->annotation_prefix, " compute_sigma0_%zu", i)));
+ compute_sigma1[i].reset(new small_sigma_gadget<FieldT>(pb, W_bits[i-2], sigma1[i], 17, 19, 10, FMT(this->annotation_prefix, " compute_sigma1_%zu", i)));
+
+ /* unreduced_W = sigma0(W_{i-15}) + sigma1(W_{i-2}) + W_{i-7} + W_{i-16} before modulo 2^32 */
+ unreduced_W[i].allocate(pb, FMT(this->annotation_prefix, " unreduced_W_%zu", i));
+
+ /* allocate the bit representation of packed_W[i] */
+ W_bits[i].allocate(pb, 32, FMT(this->annotation_prefix, " W_bits_%zu", i));
+
+ /* and finally reduce this into packed and bit representations */
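+ /* (32+2 bits suffice: the sum of four 32-bit words is below 4 * 2^32 = 2^34) */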
+ mod_reduce_W[i].reset(new lastbits_gadget<FieldT>(pb, unreduced_W[i], 32+2, packed_W[i], W_bits[i], FMT(this->annotation_prefix, " mod_reduce_W_%zu", i)));
+ }
+}
+
+template<typename FieldT>
+void sha256_message_schedule_gadget<FieldT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < 16; ++i)
+ {
+ pack_W[i]->generate_r1cs_constraints(false); // do not enforce bitness here; the caller is responsible for it.
+ }
+
+ for (size_t i = 16; i < 64; ++i)
+ {
+ compute_sigma0[i]->generate_r1cs_constraints();
+ compute_sigma1[i]->generate_r1cs_constraints();
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1,
+ sigma0[i] + sigma1[i] + packed_W[i-16] + packed_W[i-7],
+ unreduced_W[i]),
+ FMT(this->annotation_prefix, " unreduced_W_%zu", i));
+
+ mod_reduce_W[i]->generate_r1cs_constraints();
+ }
+}
+
+template<typename FieldT>
+void sha256_message_schedule_gadget<FieldT>::generate_r1cs_witness()
+{
+ for (size_t i = 0; i < 16; ++i)
+ {
+ pack_W[i]->generate_r1cs_witness_from_bits();
+ }
+
+ for (size_t i = 16; i < 64; ++i)
+ {
+ compute_sigma0[i]->generate_r1cs_witness();
+ compute_sigma1[i]->generate_r1cs_witness();
+
+ this->pb.val(unreduced_W[i]) = this->pb.val(sigma0[i]) + this->pb.val(sigma1[i]) + this->pb.val(packed_W[i-16]) + this->pb.val(packed_W[i-7]);
+ mod_reduce_W[i]->generate_r1cs_witness();
+ }
+}
+
+template<typename FieldT>
+sha256_round_function_gadget<FieldT>::sha256_round_function_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &a,
+ const pb_linear_combination_array<FieldT> &b,
+ const pb_linear_combination_array<FieldT> &c,
+ const pb_linear_combination_array<FieldT> &d,
+ const pb_linear_combination_array<FieldT> &e,
+ const pb_linear_combination_array<FieldT> &f,
+ const pb_linear_combination_array<FieldT> &g,
+ const pb_linear_combination_array<FieldT> &h,
+ const pb_variable<FieldT> &W,
+ const long &K,
+ const pb_linear_combination_array<FieldT> &new_a,
+ const pb_linear_combination_array<FieldT> &new_e,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ a(a),
+ b(b),
+ c(c),
+ d(d),
+ e(e),
+ f(f),
+ g(g),
+ h(h),
+ W(W),
+ K(K),
+ new_a(new_a),
+ new_e(new_e)
+{
+ /* compute sigma0 and sigma1 */
+ sigma0.allocate(pb, FMT(this->annotation_prefix, " sigma0"));
+ sigma1.allocate(pb, FMT(this->annotation_prefix, " sigma1"));
+ compute_sigma0.reset(new big_sigma_gadget<FieldT>(pb, a, sigma0, 2, 13, 22, FMT(this->annotation_prefix, " compute_sigma0")));
+ compute_sigma1.reset(new big_sigma_gadget<FieldT>(pb, e, sigma1, 6, 11, 25, FMT(this->annotation_prefix, " compute_sigma1")));
+
+ /* compute choice */
+ choice.allocate(pb, FMT(this->annotation_prefix, " choice"));
+ compute_choice.reset(new choice_gadget<FieldT>(pb, e, f, g, choice, FMT(this->annotation_prefix, " compute_choice")));
+
+ /* compute majority */
+ majority.allocate(pb, FMT(this->annotation_prefix, " majority"));
+ compute_majority.reset(new majority_gadget<FieldT>(pb, a, b, c, majority, FMT(this->annotation_prefix, " compute_majority")));
+
+ /* pack d */
+ packed_d.allocate(pb, FMT(this->annotation_prefix, " packed_d"));
+ pack_d.reset(new packing_gadget<FieldT>(pb, d, packed_d, FMT(this->annotation_prefix, " pack_d")));
+
+ /* pack h */
+ packed_h.allocate(pb, FMT(this->annotation_prefix, " packed_h"));
+ pack_h.reset(new packing_gadget<FieldT>(pb, h, packed_h, FMT(this->annotation_prefix, " pack_h")));
+
+ /* compute the actual results for the round */
+ unreduced_new_a.allocate(pb, FMT(this->annotation_prefix, " unreduced_new_a"));
+ unreduced_new_e.allocate(pb, FMT(this->annotation_prefix, " unreduced_new_e"));
+
+ packed_new_a.allocate(pb, FMT(this->annotation_prefix, " packed_new_a"));
+ packed_new_e.allocate(pb, FMT(this->annotation_prefix, " packed_new_e"));
+
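+ /* 32+3 bits suffice for the unreduced values: new_a sums seven 32-bit words
+ (h, Sigma1, Ch, K, W, Sigma0, Maj) and new_e sums six, both below 2^35. */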
+ mod_reduce_new_a.reset(new lastbits_gadget<FieldT>(pb, unreduced_new_a, 32+3, packed_new_a, new_a, FMT(this->annotation_prefix, " mod_reduce_new_a")));
+ mod_reduce_new_e.reset(new lastbits_gadget<FieldT>(pb, unreduced_new_e, 32+3, packed_new_e, new_e, FMT(this->annotation_prefix, " mod_reduce_new_e")));
+}
+
+template<typename FieldT>
+void sha256_round_function_gadget<FieldT>::generate_r1cs_constraints()
+{
+ compute_sigma0->generate_r1cs_constraints();
+ compute_sigma1->generate_r1cs_constraints();
+
+ compute_choice->generate_r1cs_constraints();
+ compute_majority->generate_r1cs_constraints();
+
+ pack_d->generate_r1cs_constraints(false);
+ pack_h->generate_r1cs_constraints(false);
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1,
+ packed_h + sigma1 + choice + K + W + sigma0 + majority,
+ unreduced_new_a),
+ FMT(this->annotation_prefix, " unreduced_new_a"));
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1,
+ packed_d + packed_h + sigma1 + choice + K + W,
+ unreduced_new_e),
+ FMT(this->annotation_prefix, " unreduced_new_e"));
+
+ mod_reduce_new_a->generate_r1cs_constraints();
+ mod_reduce_new_e->generate_r1cs_constraints();
+}
+
+template<typename FieldT>
+void sha256_round_function_gadget<FieldT>::generate_r1cs_witness()
+{
+ compute_sigma0->generate_r1cs_witness();
+ compute_sigma1->generate_r1cs_witness();
+
+ compute_choice->generate_r1cs_witness();
+ compute_majority->generate_r1cs_witness();
+
+ pack_d->generate_r1cs_witness_from_bits();
+ pack_h->generate_r1cs_witness_from_bits();
+
+ this->pb.val(unreduced_new_a) = this->pb.val(packed_h) + this->pb.val(sigma1) + this->pb.val(choice) + FieldT(K) + this->pb.val(W) + this->pb.val(sigma0) + this->pb.val(majority);
+ this->pb.val(unreduced_new_e) = this->pb.val(packed_d) + this->pb.val(packed_h) + this->pb.val(sigma1) + this->pb.val(choice) + FieldT(K) + this->pb.val(W);
+
+ mod_reduce_new_a->generate_r1cs_witness();
+ mod_reduce_new_e->generate_r1cs_witness();
+}
+
+} // libsnark
+
+#endif // SHA256_COMPONENTS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for top-level SHA256 gadgets.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_GADGET_HPP_
+#define SHA256_GADGET_HPP_
+
+#include "common/data_structures/merkle_tree.hpp"
+#include "gadgetlib1/gadgets/basic_gadgets.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_components.hpp"
+
+namespace libsnark {
+
+/**
+ * Gadget for the SHA256 compression function.
+ */
+template<typename FieldT>
+class sha256_compression_function_gadget : public gadget<FieldT> {
+public:
+ std::vector<pb_linear_combination_array<FieldT> > round_a;
+ std::vector<pb_linear_combination_array<FieldT> > round_b;
+ std::vector<pb_linear_combination_array<FieldT> > round_c;
+ std::vector<pb_linear_combination_array<FieldT> > round_d;
+ std::vector<pb_linear_combination_array<FieldT> > round_e;
+ std::vector<pb_linear_combination_array<FieldT> > round_f;
+ std::vector<pb_linear_combination_array<FieldT> > round_g;
+ std::vector<pb_linear_combination_array<FieldT> > round_h;
+
+ pb_variable_array<FieldT> packed_W;
+ std::shared_ptr<sha256_message_schedule_gadget<FieldT> > message_schedule;
+ std::vector<sha256_round_function_gadget<FieldT> > round_functions;
+
+ pb_variable_array<FieldT> unreduced_output;
+ pb_variable_array<FieldT> reduced_output;
+ std::vector<lastbits_gadget<FieldT> > reduce_output;
+public:
+ pb_linear_combination_array<FieldT> prev_output;
+ pb_variable_array<FieldT> new_block;
+ digest_variable<FieldT> output;
+
+ sha256_compression_function_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &prev_output,
+ const pb_variable_array<FieldT> &new_block,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix);
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+};
+
+/**
+ * Gadget for the SHA256 compression function, viewed as a 2-to-1 hash
+ * function, and using the same initialization vector as in the SHA256
+ * specification. Thus, any collision for
+ * sha256_two_to_one_hash_gadget trivially extends to a collision for
+ * full SHA256 (by appending the same padding).
+ */
+template<typename FieldT>
+class sha256_two_to_one_hash_gadget : public gadget<FieldT> {
+public:
+ typedef bit_vector hash_value_type;
+ typedef merkle_authentication_path merkle_authentication_path_type;
+
+ std::shared_ptr<sha256_compression_function_gadget<FieldT> > f;
+
+ sha256_two_to_one_hash_gadget(protoboard<FieldT> &pb,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix);
+ sha256_two_to_one_hash_gadget(protoboard<FieldT> &pb,
+ const size_t block_length,
+ const block_variable<FieldT> &input_block,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints(const bool ensure_output_bitness=true); // TODO: ignored for now
+ void generate_r1cs_witness();
+
+ static size_t get_block_len();
+ static size_t get_digest_len();
+ static bit_vector get_hash(const bit_vector &input);
+
+ static size_t expected_constraints(const bool ensure_output_bitness=true); // TODO: ignored for now
+};
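+
+/*
+ A minimal usage sketch (illustrative only; see the SHA256 gadget test for a
+ complete, working example with concrete test vectors):
+
+ protoboard<FieldT> pb;
+ digest_variable<FieldT> left(pb, SHA256_digest_size, "left");
+ digest_variable<FieldT> right(pb, SHA256_digest_size, "right");
+ digest_variable<FieldT> output(pb, SHA256_digest_size, "output");
+ sha256_two_to_one_hash_gadget<FieldT> f(pb, left, right, output, "f");
+ f.generate_r1cs_constraints();
+ // ... assign witnesses for left and right, then:
+ f.generate_r1cs_witness();
+*/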
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.tcc"
+
+#endif // SHA256_GADGET_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for top-level SHA256 gadgets.
+
+ See sha256_gadget.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef SHA256_GADGET_TCC_
+#define SHA256_GADGET_TCC_
+
+namespace libsnark {
+
+template<typename FieldT>
+sha256_compression_function_gadget<FieldT>::sha256_compression_function_gadget(protoboard<FieldT> &pb,
+ const pb_linear_combination_array<FieldT> &prev_output,
+ const pb_variable_array<FieldT> &new_block,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ prev_output(prev_output),
+ new_block(new_block),
+ output(output)
+{
+ /* message schedule and inputs for it */
+ packed_W.allocate(pb, 64, FMT(this->annotation_prefix, " packed_W"));
+ message_schedule.reset(new sha256_message_schedule_gadget<FieldT>(pb, new_block, packed_W, FMT(this->annotation_prefix, " message_schedule")));
+
+ /* initialize */
+ round_a.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 7*32, prev_output.rbegin() + 8*32));
+ round_b.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 6*32, prev_output.rbegin() + 7*32));
+ round_c.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 5*32, prev_output.rbegin() + 6*32));
+ round_d.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 4*32, prev_output.rbegin() + 5*32));
+ round_e.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 3*32, prev_output.rbegin() + 4*32));
+ round_f.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 2*32, prev_output.rbegin() + 3*32));
+ round_g.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 1*32, prev_output.rbegin() + 2*32));
+ round_h.push_back(pb_linear_combination_array<FieldT>(prev_output.rbegin() + 0*32, prev_output.rbegin() + 1*32));
+
+ /* do the rounds */
+ for (size_t i = 0; i < 64; ++i)
+ {
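+ /* the working variables rotate: b..d and f..h of round i+1 are simply the
+ previous round's a..c and e..g; only a and e get fresh variables, since
+ they are the values computed by the round function below */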
+ round_h.push_back(round_g[i]);
+ round_g.push_back(round_f[i]);
+ round_f.push_back(round_e[i]);
+ round_d.push_back(round_c[i]);
+ round_c.push_back(round_b[i]);
+ round_b.push_back(round_a[i]);
+
+ pb_variable_array<FieldT> new_round_a_variables;
+ new_round_a_variables.allocate(pb, 32, FMT(this->annotation_prefix, " new_round_a_variables_%zu", i+1));
+ round_a.emplace_back(new_round_a_variables);
+
+ pb_variable_array<FieldT> new_round_e_variables;
+ new_round_e_variables.allocate(pb, 32, FMT(this->annotation_prefix, " new_round_e_variables_%zu", i+1));
+ round_e.emplace_back(new_round_e_variables);
+
+ round_functions.push_back(sha256_round_function_gadget<FieldT>(pb,
+ round_a[i], round_b[i], round_c[i], round_d[i],
+ round_e[i], round_f[i], round_g[i], round_h[i],
+ packed_W[i], SHA256_K[i], round_a[i+1], round_e[i+1],
+ FMT(this->annotation_prefix, " round_functions_%zu", i)));
+ }
+
+ /* finalize */
+ unreduced_output.allocate(pb, 8, FMT(this->annotation_prefix, " unreduced_output"));
+ reduced_output.allocate(pb, 8, FMT(this->annotation_prefix, " reduced_output"));
+ for (size_t i = 0; i < 8; ++i)
+ {
+ reduce_output.push_back(lastbits_gadget<FieldT>(pb,
+ unreduced_output[i],
+ 32+1,
+ reduced_output[i],
+ pb_variable_array<FieldT>(output.bits.rbegin() + (7-i) * 32, output.bits.rbegin() + (8-i) * 32),
+ FMT(this->annotation_prefix, " reduce_output_%zu", i)));
+ }
+}
+
+template<typename FieldT>
+void sha256_compression_function_gadget<FieldT>::generate_r1cs_constraints()
+{
+ message_schedule->generate_r1cs_constraints();
+ for (size_t i = 0; i < 64; ++i)
+ {
+ round_functions[i].generate_r1cs_constraints();
+ }
+
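+ /* The final additions output[j] = H[j] + state[j] reuse values that were
+ already packed above: due to the a->b->c->d register shift, the d input
+ of round 3-j is still the initial word H[j], while the new_a output of
+ round 63-j equals the final value of word j; analogously for the
+ e->f->g->h chain and words 4..7. This avoids a second packing step. */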
+ for (size_t i = 0; i < 4; ++i)
+ {
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1,
+ round_functions[3-i].packed_d + round_functions[63-i].packed_new_a,
+ unreduced_output[i]),
+ FMT(this->annotation_prefix, " unreduced_output_%zu", i));
+
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(1,
+ round_functions[3-i].packed_h + round_functions[63-i].packed_new_e,
+ unreduced_output[4+i]),
+ FMT(this->annotation_prefix, " unreduced_output_%zu", 4+i));
+ }
+
+ for (size_t i = 0; i < 8; ++i)
+ {
+ reduce_output[i].generate_r1cs_constraints();
+ }
+}
+
+template<typename FieldT>
+void sha256_compression_function_gadget<FieldT>::generate_r1cs_witness()
+{
+ message_schedule->generate_r1cs_witness();
+
+#ifdef DEBUG
+ printf("Input:\n");
+ for (size_t j = 0; j < 16; ++j)
+ {
+ printf("%lx ", this->pb.val(packed_W[j]).as_ulong());
+ }
+ printf("\n");
+#endif
+
+ for (size_t i = 0; i < 64; ++i)
+ {
+ round_functions[i].generate_r1cs_witness();
+ }
+
+ for (size_t i = 0; i < 4; ++i)
+ {
+ this->pb.val(unreduced_output[i]) = this->pb.val(round_functions[3-i].packed_d) + this->pb.val(round_functions[63-i].packed_new_a);
+ this->pb.val(unreduced_output[4+i]) = this->pb.val(round_functions[3-i].packed_h) + this->pb.val(round_functions[63-i].packed_new_e);
+ }
+
+ for (size_t i = 0; i < 8; ++i)
+ {
+ reduce_output[i].generate_r1cs_witness();
+ }
+
+#ifdef DEBUG
+ printf("Output:\n");
+ for (size_t j = 0; j < 8; ++j)
+ {
+ printf("%lx ", this->pb.val(reduced_output[j]).as_ulong());
+ }
+ printf("\n");
+#endif
+}
+
+template<typename FieldT>
+sha256_two_to_one_hash_gadget<FieldT>::sha256_two_to_one_hash_gadget(protoboard<FieldT> &pb,
+ const digest_variable<FieldT> &left,
+ const digest_variable<FieldT> &right,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix)
+{
+ /* concatenate block = left || right */
+ pb_variable_array<FieldT> block;
+ block.insert(block.end(), left.bits.begin(), left.bits.end());
+ block.insert(block.end(), right.bits.begin(), right.bits.end());
+
+ /* compute the hash itself */
+ f.reset(new sha256_compression_function_gadget<FieldT>(pb, SHA256_default_IV<FieldT>(pb), block, output, FMT(this->annotation_prefix, " f")));
+}
+
+template<typename FieldT>
+sha256_two_to_one_hash_gadget<FieldT>::sha256_two_to_one_hash_gadget(protoboard<FieldT> &pb,
+ const size_t block_length,
+ const block_variable<FieldT> &input_block,
+ const digest_variable<FieldT> &output,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix)
+{
+ assert(block_length == SHA256_block_size);
+ assert(input_block.bits.size() == block_length);
+ f.reset(new sha256_compression_function_gadget<FieldT>(pb, SHA256_default_IV<FieldT>(pb), input_block.bits, output, FMT(this->annotation_prefix, " f")));
+}
+
+template<typename FieldT>
+void sha256_two_to_one_hash_gadget<FieldT>::generate_r1cs_constraints(const bool ensure_output_bitness)
+{
+ UNUSED(ensure_output_bitness);
+ f->generate_r1cs_constraints();
+}
+
+template<typename FieldT>
+void sha256_two_to_one_hash_gadget<FieldT>::generate_r1cs_witness()
+{
+ f->generate_r1cs_witness();
+}
+
+template<typename FieldT>
+size_t sha256_two_to_one_hash_gadget<FieldT>::get_block_len()
+{
+ return SHA256_block_size;
+}
+
+template<typename FieldT>
+size_t sha256_two_to_one_hash_gadget<FieldT>::get_digest_len()
+{
+ return SHA256_digest_size;
+}
+
+template<typename FieldT>
+bit_vector sha256_two_to_one_hash_gadget<FieldT>::get_hash(const bit_vector &input)
+{
+ protoboard<FieldT> pb;
+
+ block_variable<FieldT> input_variable(pb, SHA256_block_size, "input");
+ digest_variable<FieldT> output_variable(pb, SHA256_digest_size, "output");
+ sha256_two_to_one_hash_gadget<FieldT> f(pb, SHA256_block_size, input_variable, output_variable, "f");
+
+ input_variable.generate_r1cs_witness(input);
+ f.generate_r1cs_witness();
+
+ return output_variable.get_digest();
+}
+
+template<typename FieldT>
+size_t sha256_two_to_one_hash_gadget<FieldT>::expected_constraints(const bool ensure_output_bitness)
+{
+ UNUSED(ensure_output_bitness);
+ return 27280; /* hardcoded for now */
+}
+
+} // libsnark
+
+#endif // SHA256_GADGET_TCC_
--- /dev/null
+#!/usr/bin/env python
+##
+# @author This file is part of libsnark, developed by SCIPR Lab
+# and contributors (see AUTHORS).
+# @copyright MIT license (see LICENSE file)
+
+import random
+import pypy_sha256 # PyPy's implementation of SHA256 compression function; see copyright and authorship notice within.
+
+BLOCK_LEN = 512
+BLOCK_BYTES = BLOCK_LEN // 8
+HASH_LEN = 256
+HASH_BYTES = HASH_LEN // 8
+
+def gen_random_bytes(n):
+ return [random.randint(0, 255) for i in xrange(n)]
+
+def words_to_bytes(arr):
+ return sum(([x >> 24, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff] for x in arr), [])
+
+def bytes_to_words(arr):
+ l = len(arr)
+ assert l % 4 == 0
+ return [(arr[i*4 + 3] << 24) + (arr[i*4+2] << 16) + (arr[i*4+1] << 8) + arr[i*4] for i in xrange(l//4)]
+
+def cpp_val(s, log_radix=32):
+ if log_radix == 8:
+ hexfmt = '0x%02x'
+ elif log_radix == 32:
+ hexfmt = '0x%08x'
+ s = bytes_to_words(s)
+ else:
+ raise ValueError('unsupported log_radix: %d' % log_radix)
+ return 'int_list_to_bits({%s}, %d)' % (', '.join(hexfmt % x for x in s), log_radix)
+
+def H_bytes(x):
+ assert len(x) == BLOCK_BYTES
+ state = pypy_sha256.sha_init()
+ state['data'] = words_to_bytes(bytes_to_words(x))
+ pypy_sha256.sha_transform(state)
+ return words_to_bytes(bytes_to_words(words_to_bytes(state['digest'])))
+
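+# The generated constants below use the same int_list_to_bits({...}, 32)
+# format as the bit_vector test vectors in the SHA256 gadget test
+# (test_sha256_gadget.cpp), so the printed lines are presumably meant to be
+# pasted into such tests.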
+def generate_sha256_gadget_tests():
+ left = gen_random_bytes(HASH_BYTES)
+ right = gen_random_bytes(HASH_BYTES)
+ hash = H_bytes(left + right)
+
+ print "const bit_vector left_bv = %s;" % cpp_val(left)
+ print "const bit_vector right_bv = %s;" % cpp_val(right)
+ print "const bit_vector hash_bv = %s;" % cpp_val(hash)
+
+if __name__ == '__main__':
+ random.seed(0) # for reproducibility
+ generate_sha256_gadget_tests()
+
--- /dev/null
+#!/usr/bin/env python
+#
+# SHA256 compression function implementation below is a verbatim copy of PyPy's implementation from
+# https://bitbucket.org/pypy/pypy/raw/f1f064b3faf1e012f7a9a9ab08f18074637ebe8a/lib_pypy/_sha256.py .
+#
+# It is licensed under the MIT license; copyright (c) 2003-2015 PyPy copyright holders.
+# See https://bitbucket.org/pypy/pypy/src/tip/LICENSE for the full copyright notice.
+#
+
+SHA_BLOCKSIZE = 64
+SHA_DIGESTSIZE = 32
+
+
+def new_shaobject():
+ return {
+ 'digest': [0]*8,
+ 'count_lo': 0,
+ 'count_hi': 0,
+ 'data': [0]* SHA_BLOCKSIZE,
+ 'local': 0,
+ 'digestsize': 0
+ }
+
+ROR = lambda x, y: (((x & 0xffffffff) >> (y & 31)) | (x << (32 - (y & 31)))) & 0xffffffff
+Ch = lambda x, y, z: (z ^ (x & (y ^ z)))
+Maj = lambda x, y, z: (((x | y) & z) | (x & y))
+S = lambda x, n: ROR(x, n)
+R = lambda x, n: (x & 0xffffffff) >> n
+Sigma0 = lambda x: (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+Sigma1 = lambda x: (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+Gamma0 = lambda x: (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+Gamma1 = lambda x: (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+
+def sha_transform(sha_info):
+ W = []
+
+ d = sha_info['data']
+ for i in range(0,16):
+ W.append( (d[4*i]<<24) + (d[4*i+1]<<16) + (d[4*i+2]<<8) + d[4*i+3])
+
+ for i in range(16,64):
+ W.append( (Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]) & 0xffffffff )
+
+ ss = sha_info['digest'][:]
+
+ def RND(a,b,c,d,e,f,g,h,i,ki):
+ t0 = h + Sigma1(e) + Ch(e, f, g) + ki + W[i];
+ t1 = Sigma0(a) + Maj(a, b, c);
+ d += t0;
+ h = t0 + t1;
+ return d & 0xffffffff, h & 0xffffffff
+
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],0,0x428a2f98);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],1,0x71374491);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],2,0xb5c0fbcf);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],3,0xe9b5dba5);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],4,0x3956c25b);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],5,0x59f111f1);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],6,0x923f82a4);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],7,0xab1c5ed5);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],8,0xd807aa98);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],9,0x12835b01);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],10,0x243185be);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],11,0x550c7dc3);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],12,0x72be5d74);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],13,0x80deb1fe);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],14,0x9bdc06a7);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],15,0xc19bf174);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],16,0xe49b69c1);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],17,0xefbe4786);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],18,0x0fc19dc6);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],19,0x240ca1cc);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],20,0x2de92c6f);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],21,0x4a7484aa);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],22,0x5cb0a9dc);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],23,0x76f988da);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],24,0x983e5152);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],25,0xa831c66d);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],26,0xb00327c8);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],27,0xbf597fc7);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],28,0xc6e00bf3);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],29,0xd5a79147);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],30,0x06ca6351);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],31,0x14292967);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],32,0x27b70a85);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],33,0x2e1b2138);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],34,0x4d2c6dfc);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],35,0x53380d13);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],36,0x650a7354);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],37,0x766a0abb);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],38,0x81c2c92e);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],39,0x92722c85);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],40,0xa2bfe8a1);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],41,0xa81a664b);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],42,0xc24b8b70);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],43,0xc76c51a3);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],44,0xd192e819);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],45,0xd6990624);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],46,0xf40e3585);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],47,0x106aa070);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],48,0x19a4c116);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],49,0x1e376c08);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],50,0x2748774c);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],51,0x34b0bcb5);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],52,0x391c0cb3);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],53,0x4ed8aa4a);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],54,0x5b9cca4f);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],55,0x682e6ff3);
+ ss[3], ss[7] = RND(ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],56,0x748f82ee);
+ ss[2], ss[6] = RND(ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],57,0x78a5636f);
+ ss[1], ss[5] = RND(ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],ss[5],58,0x84c87814);
+ ss[0], ss[4] = RND(ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],ss[4],59,0x8cc70208);
+ ss[7], ss[3] = RND(ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],ss[3],60,0x90befffa);
+ ss[6], ss[2] = RND(ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],ss[2],61,0xa4506ceb);
+ ss[5], ss[1] = RND(ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],ss[1],62,0xbef9a3f7);
+ ss[4], ss[0] = RND(ss[1],ss[2],ss[3],ss[4],ss[5],ss[6],ss[7],ss[0],63,0xc67178f2);
+
+ dig = []
+ for i, x in enumerate(sha_info['digest']):
+ dig.append( (x + ss[i]) & 0xffffffff )
+ sha_info['digest'] = dig
+
+def sha_init():
+ sha_info = new_shaobject()
+ sha_info['digest'] = [0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19]
+ sha_info['count_lo'] = 0
+ sha_info['count_hi'] = 0
+ sha_info['local'] = 0
+ sha_info['digestsize'] = 32
+ return sha_info
+
+def sha224_init():
+ sha_info = new_shaobject()
+ sha_info['digest'] = [0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4]
+ sha_info['count_lo'] = 0
+ sha_info['count_hi'] = 0
+ sha_info['local'] = 0
+ sha_info['digestsize'] = 28
+ return sha_info
+
+def sha_update(sha_info, buffer):
+ if isinstance(buffer, str):
+ raise TypeError("Unicode strings must be encoded before hashing")
+ count = len(buffer)
+ buffer_idx = 0
+ clo = (sha_info['count_lo'] + (count << 3)) & 0xffffffff
+ if clo < sha_info['count_lo']:
+ sha_info['count_hi'] += 1
+ sha_info['count_lo'] = clo
+
+ sha_info['count_hi'] += (count >> 29)
+
+ if sha_info['local']:
+ i = SHA_BLOCKSIZE - sha_info['local']
+ if i > count:
+ i = count
+
+ # copy buffer
+ sha_info['data'][sha_info['local']:sha_info['local']+i] = buffer[buffer_idx:buffer_idx+i]
+
+ count -= i
+ buffer_idx += i
+
+ sha_info['local'] += i
+ if sha_info['local'] == SHA_BLOCKSIZE:
+ sha_transform(sha_info)
+ sha_info['local'] = 0
+ else:
+ return
+
+ while count >= SHA_BLOCKSIZE:
+ # copy buffer
+ sha_info['data'] = list(buffer[buffer_idx:buffer_idx + SHA_BLOCKSIZE])
+ count -= SHA_BLOCKSIZE
+ buffer_idx += SHA_BLOCKSIZE
+ sha_transform(sha_info)
+
+
+ # copy buffer
+ pos = sha_info['local']
+ sha_info['data'][pos:pos+count] = buffer[buffer_idx:buffer_idx + count]
+ sha_info['local'] = count
+
+def sha_final(sha_info):
+ lo_bit_count = sha_info['count_lo']
+ hi_bit_count = sha_info['count_hi']
+ count = (lo_bit_count >> 3) & 0x3f
+ sha_info['data'][count] = 0x80;
+ count += 1
+ if count > SHA_BLOCKSIZE - 8:
+ # zero the bytes in data after the count
+ sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+ sha_transform(sha_info)
+ # zero bytes in data
+ sha_info['data'] = [0] * SHA_BLOCKSIZE
+ else:
+ sha_info['data'] = sha_info['data'][:count] + ([0] * (SHA_BLOCKSIZE - count))
+
+ sha_info['data'][56] = (hi_bit_count >> 24) & 0xff
+ sha_info['data'][57] = (hi_bit_count >> 16) & 0xff
+ sha_info['data'][58] = (hi_bit_count >> 8) & 0xff
+ sha_info['data'][59] = (hi_bit_count >> 0) & 0xff
+ sha_info['data'][60] = (lo_bit_count >> 24) & 0xff
+ sha_info['data'][61] = (lo_bit_count >> 16) & 0xff
+ sha_info['data'][62] = (lo_bit_count >> 8) & 0xff
+ sha_info['data'][63] = (lo_bit_count >> 0) & 0xff
+
+ sha_transform(sha_info)
+
+ dig = []
+ for i in sha_info['digest']:
+ dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ])
+ return ''.join([chr(i) for i in dig])
+
+class sha256(object):
+ digest_size = digestsize = SHA_DIGESTSIZE
+ block_size = SHA_BLOCKSIZE
+
+ def __init__(self, s=None):
+ self._sha = sha_init()
+ if s:
+ sha_update(self._sha, s)
+
+ def update(self, s):
+ sha_update(self._sha, s)
+
+ def digest(self):
+ return sha_final(self._sha.copy())[:self._sha['digestsize']]
+
+ def hexdigest(self):
+ return ''.join(['%.2x' % ord(i) for i in self.digest()])
+
+ def copy(self):
+ new = sha256.__new__(sha256)
+ new._sha = self._sha.copy()
+ return new
+
+class sha224(sha256):
+ digest_size = digestsize = 28
+
+ def __init__(self, s=None):
+ self._sha = sha224_init()
+ if s:
+ sha_update(self._sha, s)
+
+ def copy(self):
+ new = sha224.__new__(sha224)
+ new._sha = self._sha.copy()
+ return new
+
+def test():
+ a_str = "just a test string"
+
+ assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest()
+ assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest()
+ assert '8113ebf33c97daa9998762aacafe750c7cefc2b2f173c90c59663a57fe626f21' == sha256(a_str*7).hexdigest()
+
+ s = sha256(a_str)
+ s.update(a_str)
+ assert '03d9963e05a094593190b6fc794cb1a3e1ac7d7883f0b5855268afeccc70d461' == s.hexdigest()
+
+if __name__ == "__main__":
+ test()
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#include "common/default_types/ec_pp.hpp"
+#include "common/utils.hpp"
+#include "common/profiling.hpp"
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp"
+
+using namespace libsnark;
+
+template<typename FieldT>
+void test_two_to_one()
+{
+ protoboard<FieldT> pb;
+
+ digest_variable<FieldT> left(pb, SHA256_digest_size, "left");
+ digest_variable<FieldT> right(pb, SHA256_digest_size, "right");
+ digest_variable<FieldT> output(pb, SHA256_digest_size, "output");
+
+ sha256_two_to_one_hash_gadget<FieldT> f(pb, left, right, output, "f");
+ f.generate_r1cs_constraints();
+ printf("Number of constraints for sha256_two_to_one_hash_gadget: %zu\n", pb.num_constraints());
+
+ const bit_vector left_bv = int_list_to_bits({0x426bc2d8, 0x4dc86782, 0x81e8957a, 0x409ec148, 0xe6cffbe8, 0xafe6ba4f, 0x9c6f1978, 0xdd7af7e9}, 32);
+ const bit_vector right_bv = int_list_to_bits({0x038cce42, 0xabd366b8, 0x3ede7e00, 0x9130de53, 0x72cdf73d, 0xee825114, 0x8cb48d1b, 0x9af68ad0}, 32);
+ const bit_vector hash_bv = int_list_to_bits({0xeffd0b7f, 0x1ccba116, 0x2ee816f7, 0x31c62b48, 0x59305141, 0x990e5c0a, 0xce40d33d, 0x0b1167d1}, 32);
+
+ left.generate_r1cs_witness(left_bv);
+ right.generate_r1cs_witness(right_bv);
+
+ f.generate_r1cs_witness();
+ output.generate_r1cs_witness(hash_bv);
+
+ assert(pb.is_satisfied());
+}
+
+int main(void)
+{
+ start_profiling();
+ default_ec_pp::init_public_params();
+ test_two_to_one<Fr<default_ec_pp> >();
+}
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP_
+#define MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP_
+
+#include "common/data_structures/merkle_tree.hpp"
+#include "gadgetlib1/gadget.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+class merkle_authentication_path_variable : public gadget<FieldT> {
+public:
+
+ const size_t tree_depth;
+ std::vector<digest_variable<FieldT> > left_digests;
+ std::vector<digest_variable<FieldT> > right_digests;
+
+ merkle_authentication_path_variable(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness(const size_t address, const merkle_authentication_path &path);
+ merkle_authentication_path get_authentication_path(const size_t address) const;
+};
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.tcc"
+
+#endif // MERKLE_AUTHENTICATION_PATH_VARIABLE_HPP_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC_
+#define MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC_
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+merkle_authentication_path_variable<FieldT, HashT>::merkle_authentication_path_variable(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ tree_depth(tree_depth)
+{
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ left_digests.emplace_back(digest_variable<FieldT>(pb, HashT::get_digest_len(), FMT(annotation_prefix, " left_digests_%zu", i)));
+ right_digests.emplace_back(digest_variable<FieldT>(pb, HashT::get_digest_len(), FMT(annotation_prefix, " right_digests_%zu", i)));
+ }
+}
+
+template<typename FieldT, typename HashT>
+void merkle_authentication_path_variable<FieldT, HashT>::generate_r1cs_constraints()
+{
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ left_digests[i].generate_r1cs_constraints();
+ right_digests[i].generate_r1cs_constraints();
+ }
+}
+
+template<typename FieldT, typename HashT>
+void merkle_authentication_path_variable<FieldT, HashT>::generate_r1cs_witness(const size_t address, const merkle_authentication_path &path)
+{
+ assert(path.size() == tree_depth);
+
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ if (address & (1ul << (tree_depth-1-i)))
+ {
+ left_digests[i].generate_r1cs_witness(path[i]);
+ }
+ else
+ {
+ right_digests[i].generate_r1cs_witness(path[i]);
+ }
+ }
+}
+
+template<typename FieldT, typename HashT>
+merkle_authentication_path merkle_authentication_path_variable<FieldT, HashT>::get_authentication_path(const size_t address) const
+{
+ merkle_authentication_path result;
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ if (address & (1ul << (tree_depth-1-i)))
+ {
+ result.emplace_back(left_digests[i].get_digest());
+ }
+ else
+ {
+ result.emplace_back(right_digests[i].get_digest());
+ }
+ }
+
+ return result;
+}
+
+} // libsnark
+
+#endif // MERKLE_AUTHENTICATION_PATH_VARIABLE_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for the Merkle tree check read gadget.
+
+ The gadget checks the following: given a root R, address A, value V, and
+ authentication path P, check that P is a valid authentication path for the
+ value V as the A-th leaf in a Merkle tree with root R.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_CHECK_READ_GADGET_HPP_
+#define MERKLE_TREE_CHECK_READ_GADGET_HPP_
+
+#include "common/data_structures/merkle_tree.hpp"
+#include "gadgetlib1/gadget.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp"
+#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp"
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+class merkle_tree_check_read_gadget : public gadget<FieldT> {
+private:
+
+ std::vector<HashT> hashers;
+ std::vector<block_variable<FieldT> > hasher_inputs;
+ std::vector<digest_selector_gadget<FieldT> > propagators;
+ std::vector<digest_variable<FieldT> > internal_output;
+
+ std::shared_ptr<digest_variable<FieldT> > computed_root;
+ std::shared_ptr<bit_vector_copy_gadget<FieldT> > check_root;
+
+public:
+
+ const size_t digest_size;
+ const size_t tree_depth;
+ pb_linear_combination_array<FieldT> address_bits;
+ digest_variable<FieldT> leaf;
+ digest_variable<FieldT> root;
+ merkle_authentication_path_variable<FieldT, HashT> path;
+ pb_linear_combination<FieldT> read_successful;
+
+ merkle_tree_check_read_gadget(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const pb_linear_combination_array<FieldT> &address_bits,
+ const digest_variable<FieldT> &leaf_digest,
+ const digest_variable<FieldT> &root_digest,
+ const merkle_authentication_path_variable<FieldT, HashT> &path,
+ const pb_linear_combination<FieldT> &read_successful,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+
+ static size_t root_size_in_bits();
+ /* for debugging purposes */
+ static size_t expected_constraints(const size_t tree_depth);
+};
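+
+/*
+ For intuition, the native computation that this gadget constrains is roughly
+ the following sketch (HashT::get_hash hashes the concatenation of two
+ digests, with the sibling on the left whenever the corresponding address bit
+ is 1, exactly as in the test routine below):
+
+ bit_vector cur = leaf;
+ for (long i = tree_depth-1; i >= 0; --i)
+ {
+ const bool cur_is_right = (address >> (tree_depth-1-i)) & 1;
+ bit_vector block = cur;
+ block.insert(cur_is_right ? block.begin() : block.end(), path[i].begin(), path[i].end());
+ cur = HashT::get_hash(block);
+ }
+ // accept iff cur == root (enforced only when read_successful is set)
+*/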
+
+template<typename FieldT, typename HashT>
+void test_merkle_tree_check_read_gadget();
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.tcc"
+
+#endif // MERKLE_TREE_CHECK_READ_GADGET_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for the Merkle tree check read gadget.
+
+ See merkle_tree_check_read_gadget.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_CHECK_READ_GADGET_TCC_
+#define MERKLE_TREE_CHECK_READ_GADGET_TCC_
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+merkle_tree_check_read_gadget<FieldT, HashT>::merkle_tree_check_read_gadget(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const pb_linear_combination_array<FieldT> &address_bits,
+ const digest_variable<FieldT> &leaf,
+ const digest_variable<FieldT> &root,
+ const merkle_authentication_path_variable<FieldT, HashT> &path,
+ const pb_linear_combination<FieldT> &read_successful,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ digest_size(HashT::get_digest_len()),
+ tree_depth(tree_depth),
+ address_bits(address_bits),
+ leaf(leaf),
+ root(root),
+ path(path),
+ read_successful(read_successful)
+{
+ /*
+ The tricky part here is ordering. For Merkle tree
+ authentication paths, path[0] corresponds to one layer below
+ the root (and path[tree_depth-1] corresponds to the layer
+ containing the leaf), while address_bits has the reverse order:
+ address_bits[0] is LSB, and corresponds to layer containing the
+ leaf, and address_bits[tree_depth-1] is MSB, and corresponds to
+ the subtree directly under the root.
+ */
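+ /* Illustration: for tree_depth = 3 and address = 5 = 0b101, address_bits is
+ {1, 0, 1} (LSB first). The leaf layer uses path[2] together with
+ address_bits[0] = 1 (the leaf is the right child), the middle layer uses
+ path[1] with address_bits[1] = 0 (the computed hash is the left child),
+ and the layer below the root uses path[0] with address_bits[2] = 1. */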
+ assert(tree_depth > 0);
+ assert(tree_depth == address_bits.size());
+
+ for (size_t i = 0; i < tree_depth-1; ++i)
+ {
+ internal_output.emplace_back(digest_variable<FieldT>(pb, digest_size, FMT(this->annotation_prefix, " internal_output_%zu", i)));
+ }
+
+ computed_root.reset(new digest_variable<FieldT>(pb, digest_size, FMT(this->annotation_prefix, " computed_root")));
+
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ block_variable<FieldT> inp(pb, path.left_digests[i], path.right_digests[i], FMT(this->annotation_prefix, " inp_%zu", i));
+ hasher_inputs.emplace_back(inp);
+ hashers.emplace_back(HashT(pb, 2*digest_size, inp, (i == 0 ? *computed_root : internal_output[i-1]),
+ FMT(this->annotation_prefix, " load_hashers_%zu", i)));
+ }
+
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ /*
+ The propagators take a computed hash value (or leaf in the
+ base case) and propagate it one layer up, either in the left
+ or the right slot of authentication_path_variable.
+ */
+ propagators.emplace_back(digest_selector_gadget<FieldT>(pb, digest_size, i < tree_depth - 1 ? internal_output[i] : leaf,
+ address_bits[tree_depth-1-i], path.left_digests[i], path.right_digests[i],
+ FMT(this->annotation_prefix, " digest_selector_%zu", i)));
+ }
+
+ check_root.reset(new bit_vector_copy_gadget<FieldT>(pb, computed_root->bits, root.bits, read_successful, FieldT::capacity(), FMT(annotation_prefix, " check_root")));
+}
+
+template<typename FieldT, typename HashT>
+void merkle_tree_check_read_gadget<FieldT, HashT>::generate_r1cs_constraints()
+{
+ /* ensure correct hash computations */
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ // Note that the root is checked separately, and booleanity of path.left_digests/path.right_digests is already enforced by path.generate_r1cs_constraints
+ hashers[i].generate_r1cs_constraints(false);
+ }
+
+ /* ensure consistency of path.left_digests/path.right_digests with internal_output */
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ propagators[i].generate_r1cs_constraints();
+ }
+
+ check_root->generate_r1cs_constraints(false, false);
+}
+
+template<typename FieldT, typename HashT>
+void merkle_tree_check_read_gadget<FieldT, HashT>::generate_r1cs_witness()
+{
+ /* do the hash computations bottom-up */
+ for (int i = tree_depth-1; i >= 0; --i)
+ {
+ /* propagate previous input */
+ propagators[i].generate_r1cs_witness();
+
+ /* compute hash */
+ hashers[i].generate_r1cs_witness();
+ }
+
+ check_root->generate_r1cs_witness();
+}
+
+template<typename FieldT, typename HashT>
+size_t merkle_tree_check_read_gadget<FieldT, HashT>::root_size_in_bits()
+{
+ return HashT::get_digest_len();
+}
+
+template<typename FieldT, typename HashT>
+size_t merkle_tree_check_read_gadget<FieldT, HashT>::expected_constraints(const size_t tree_depth)
+{
+ /* NB: this includes path constraints */
+ const size_t hasher_constraints = tree_depth * HashT::expected_constraints(false);
+ const size_t propagator_constraints = tree_depth * HashT::get_digest_len();
+ const size_t authentication_path_constraints = 2 * tree_depth * HashT::get_digest_len();
+ const size_t check_root_constraints = 3 * div_ceil(HashT::get_digest_len(), FieldT::capacity());
+
+ return hasher_constraints + propagator_constraints + authentication_path_constraints + check_root_constraints;
+}
+
+template<typename FieldT, typename HashT>
+void test_merkle_tree_check_read_gadget()
+{
+ /* prepare test */
+ const size_t digest_len = HashT::get_digest_len();
+ const size_t tree_depth = 16;
+ std::vector<merkle_authentication_node> path(tree_depth);
+
+ bit_vector prev_hash(digest_len);
+ std::generate(prev_hash.begin(), prev_hash.end(), [&]() { return std::rand() % 2; });
+ bit_vector leaf = prev_hash;
+
+ bit_vector address_bits;
+
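+ /* Build a random authentication path bottom-up: start from a random leaf
+ and, at each level, pick a random sibling and a random side for the
+ computed node, recording the side choices as address bits (LSB = leaf
+ layer). */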
+ size_t address = 0;
+ for (long level = tree_depth-1; level >= 0; --level)
+ {
+ const bool computed_is_right = (std::rand() % 2);
+ address |= (computed_is_right ? 1ul << (tree_depth-1-level) : 0);
+ address_bits.push_back(computed_is_right);
+ bit_vector other(digest_len);
+ std::generate(other.begin(), other.end(), [&]() { return std::rand() % 2; });
+
+ bit_vector block = prev_hash;
+ block.insert(computed_is_right ? block.begin() : block.end(), other.begin(), other.end());
+ bit_vector h = HashT::get_hash(block);
+
+ path[level] = other;
+
+ prev_hash = h;
+ }
+ bit_vector root = prev_hash;
+
+ /* execute test */
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> address_bits_va;
+ address_bits_va.allocate(pb, tree_depth, "address_bits");
+ digest_variable<FieldT> leaf_digest(pb, digest_len, "input_block");
+ digest_variable<FieldT> root_digest(pb, digest_len, "output_digest");
+ merkle_authentication_path_variable<FieldT, HashT> path_var(pb, tree_depth, "path_var");
+ merkle_tree_check_read_gadget<FieldT, HashT> ml(pb, tree_depth, address_bits_va, leaf_digest, root_digest, path_var, ONE, "ml");
+
+ path_var.generate_r1cs_constraints();
+ ml.generate_r1cs_constraints();
+
+ address_bits_va.fill_with_bits(pb, address_bits);
+ assert(address_bits_va.get_field_element_from_bits(pb).as_ulong() == address);
+ leaf_digest.generate_r1cs_witness(leaf);
+ path_var.generate_r1cs_witness(address, path);
+ ml.generate_r1cs_witness();
+
+ /* make sure that read checker didn't accidentally overwrite anything */
+ address_bits_va.fill_with_bits(pb, address_bits);
+ leaf_digest.generate_r1cs_witness(leaf);
+ root_digest.generate_r1cs_witness(root);
+ assert(pb.is_satisfied());
+
+ const size_t num_constraints = pb.num_constraints();
+ const size_t expected_constraints = merkle_tree_check_read_gadget<FieldT, HashT>::expected_constraints(tree_depth);
+ assert(num_constraints == expected_constraints);
+}
+
+} // libsnark
+
+#endif // MERKLE_TREE_CHECK_READ_GADGET_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for the Merkle tree check update gadget.
+
+ The gadget checks the following: given two roots R1 and R2, address A, two
+ values V1 and V2, and authentication path P, check that
+ - P is a valid authentication path for the value V1 as the A-th leaf in a Merkle tree with root R1, and
+ - P is a valid authentication path for the value V2 as the A-th leaf in a Merkle tree with root R2.
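+
+ In other words, the gadget certifies an update of the A-th leaf from V1 to V2
+ that changes the Merkle root from R1 to R2 along the same authentication path.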
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_
+#define MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_
+
+#include "common/data_structures/merkle_tree.hpp"
+#include "gadgetlib1/gadget.hpp"
+#include "gadgetlib1/gadgets/hashes/crh_gadget.hpp"
+#include "gadgetlib1/gadgets/hashes/hash_io.hpp"
+#include "gadgetlib1/gadgets/hashes/digest_selector_gadget.hpp"
+#include "gadgetlib1/gadgets/merkle_tree/merkle_authentication_path_variable.hpp"
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+class merkle_tree_check_update_gadget : public gadget<FieldT> {
+private:
+
+ std::vector<HashT> prev_hashers;
+ std::vector<block_variable<FieldT> > prev_hasher_inputs;
+ std::vector<digest_selector_gadget<FieldT> > prev_propagators;
+ std::vector<digest_variable<FieldT> > prev_internal_output;
+
+ std::vector<HashT> next_hashers;
+ std::vector<block_variable<FieldT> > next_hasher_inputs;
+ std::vector<digest_selector_gadget<FieldT> > next_propagators;
+ std::vector<digest_variable<FieldT> > next_internal_output;
+
+ std::shared_ptr<digest_variable<FieldT> > computed_next_root;
+ std::shared_ptr<bit_vector_copy_gadget<FieldT> > check_next_root;
+
+public:
+
+ const size_t digest_size;
+ const size_t tree_depth;
+
+ pb_variable_array<FieldT> address_bits;
+ digest_variable<FieldT> prev_leaf_digest;
+ digest_variable<FieldT> prev_root_digest;
+ merkle_authentication_path_variable<FieldT, HashT> prev_path;
+ digest_variable<FieldT> next_leaf_digest;
+ digest_variable<FieldT> next_root_digest;
+ merkle_authentication_path_variable<FieldT, HashT> next_path;
+ pb_linear_combination<FieldT> update_successful;
+
+ /* Note that while it is necessary to generate R1CS constraints
+ for prev_path, it is not necessary to do so for next_path. See
+ comment in the implementation of generate_r1cs_constraints() */
+
+ merkle_tree_check_update_gadget(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const pb_variable_array<FieldT> &address_bits,
+ const digest_variable<FieldT> &prev_leaf_digest,
+ const digest_variable<FieldT> &prev_root_digest,
+ const merkle_authentication_path_variable<FieldT, HashT> &prev_path,
+ const digest_variable<FieldT> &next_leaf_digest,
+ const digest_variable<FieldT> &next_root_digest,
+ const merkle_authentication_path_variable<FieldT, HashT> &next_path,
+ const pb_linear_combination<FieldT> &update_successful,
+ const std::string &annotation_prefix);
+
+ void generate_r1cs_constraints();
+ void generate_r1cs_witness();
+
+ static size_t root_size_in_bits();
+ /* for debugging purposes */
+ static size_t expected_constraints(const size_t tree_depth);
+};
+
+template<typename FieldT, typename HashT>
+void test_merkle_tree_check_update_gadget();
+
+} // libsnark
+
+#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.tcc"
+
+#endif // MERKLE_TREE_CHECK_UPDATE_GADGET_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for the Merkle tree check update gadget.
+
+ See merkle_tree_check_update_gadget.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_
+#define MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_
+
+namespace libsnark {
+
+template<typename FieldT, typename HashT>
+merkle_tree_check_update_gadget<FieldT, HashT>::merkle_tree_check_update_gadget(protoboard<FieldT> &pb,
+ const size_t tree_depth,
+ const pb_variable_array<FieldT> &address_bits,
+ const digest_variable<FieldT> &prev_leaf_digest,
+ const digest_variable<FieldT> &prev_root_digest,
+ const merkle_authentication_path_variable<FieldT, HashT> &prev_path,
+ const digest_variable<FieldT> &next_leaf_digest,
+ const digest_variable<FieldT> &next_root_digest,
+ const merkle_authentication_path_variable<FieldT, HashT> &next_path,
+ const pb_linear_combination<FieldT> &update_successful,
+ const std::string &annotation_prefix) :
+ gadget<FieldT>(pb, annotation_prefix),
+ digest_size(HashT::get_digest_len()),
+ tree_depth(tree_depth),
+ address_bits(address_bits),
+ prev_leaf_digest(prev_leaf_digest),
+ prev_root_digest(prev_root_digest),
+ prev_path(prev_path),
+ next_leaf_digest(next_leaf_digest),
+ next_root_digest(next_root_digest),
+ next_path(next_path),
+ update_successful(update_successful)
+{
+ assert(tree_depth > 0);
+ assert(tree_depth == address_bits.size());
+
+ for (size_t i = 0; i < tree_depth-1; ++i)
+ {
+ prev_internal_output.emplace_back(digest_variable<FieldT>(pb, digest_size, FMT(this->annotation_prefix, " prev_internal_output_%zu", i)));
+ next_internal_output.emplace_back(digest_variable<FieldT>(pb, digest_size, FMT(this->annotation_prefix, " next_internal_output_%zu", i)));
+ }
+
+ computed_next_root.reset(new digest_variable<FieldT>(pb, digest_size, FMT(this->annotation_prefix, " computed_root")));
+
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ block_variable<FieldT> prev_inp(pb, prev_path.left_digests[i], prev_path.right_digests[i], FMT(this->annotation_prefix, " prev_inp_%zu", i));
+ prev_hasher_inputs.emplace_back(prev_inp);
+ prev_hashers.emplace_back(HashT(pb, 2*digest_size, prev_inp, (i == 0 ? prev_root_digest : prev_internal_output[i-1]),
+ FMT(this->annotation_prefix, " prev_hashers_%zu", i)));
+
+ block_variable<FieldT> next_inp(pb, next_path.left_digests[i], next_path.right_digests[i], FMT(this->annotation_prefix, " next_inp_%zu", i));
+ next_hasher_inputs.emplace_back(next_inp);
+ next_hashers.emplace_back(HashT(pb, 2*digest_size, next_inp, (i == 0 ? *computed_next_root : next_internal_output[i-1]),
+ FMT(this->annotation_prefix, " next_hashers_%zu", i)));
+ }
+
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ prev_propagators.emplace_back(digest_selector_gadget<FieldT>(pb, digest_size, i < tree_depth -1 ? prev_internal_output[i] : prev_leaf_digest,
+ address_bits[tree_depth-1-i], prev_path.left_digests[i], prev_path.right_digests[i],
+ FMT(this->annotation_prefix, " prev_propagators_%zu", i)));
+ next_propagators.emplace_back(digest_selector_gadget<FieldT>(pb, digest_size, i < tree_depth -1 ? next_internal_output[i] : next_leaf_digest,
+ address_bits[tree_depth-1-i], next_path.left_digests[i], next_path.right_digests[i],
+ FMT(this->annotation_prefix, " next_propagators_%zu", i)));
+ }
+
+ check_next_root.reset(new bit_vector_copy_gadget<FieldT>(pb, computed_next_root->bits, next_root_digest.bits, update_successful, FieldT::capacity(), FMT(annotation_prefix, " check_next_root")));
+}
+
+template<typename FieldT, typename HashT>
+void merkle_tree_check_update_gadget<FieldT, HashT>::generate_r1cs_constraints()
+{
+ /* ensure correct hash computations */
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ prev_hashers[i].generate_r1cs_constraints(false); // the prev root is checked separately, and booleanity of prev_left/prev_right is enforced via prev_path
+ next_hashers[i].generate_r1cs_constraints(true); // for next_path, however, bitness of the hash outputs must be enforced here
+ }
+
+ /* ensure consistency of internal_left/internal_right with internal_output */
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ prev_propagators[i].generate_r1cs_constraints();
+ next_propagators[i].generate_r1cs_constraints();
+ }
+
+ /* ensure that prev auxiliary input and next auxiliary input match */
+ for (size_t i = 0; i < tree_depth; ++i)
+ {
+ for (size_t j = 0; j < digest_size; ++j)
+ {
+ /*
+ addr * (prev_left - next_left) + (1 - addr) * (prev_right - next_right) = 0
+ addr * (prev_left - next_left - prev_right + next_right) = next_right - prev_right
+ */
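+ /* Case analysis: if addr = 1 the constraint reduces to prev_left =
+ next_left (the shared sibling sits in the left slot), and if addr = 0
+ it reduces to prev_right = next_right. */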
+ this->pb.add_r1cs_constraint(r1cs_constraint<FieldT>(address_bits[tree_depth-1-i],
+ prev_path.left_digests[i].bits[j] - next_path.left_digests[i].bits[j] - prev_path.right_digests[i].bits[j] + next_path.right_digests[i].bits[j],
+ next_path.right_digests[i].bits[j] - prev_path.right_digests[i].bits[j]),
+ FMT(this->annotation_prefix, " aux_check_%zu_%zu", i, j));
+ }
+ }
+
+ /* Note that while it is necessary to generate R1CS constraints
+ for prev_path, it is not necessary to do so for next_path.
+
+ This holds because { next_path.left_digests[i],
+ next_path.right_digests[i] } is a pair { hash_output,
+ auxiliary_input }. The bitness of hash_output is enforced
+ above by next_hashers[i].generate_r1cs_constraints.
+
+ Because the auxiliary input is the same for prev_path and
+ next_path (enforced above), the auxiliary_input part is also
+ constrained to be boolean, since all of prev_path is
+ constrained to be boolean. */
+
+ check_next_root->generate_r1cs_constraints(false, false);
+}
+
+template<typename FieldT, typename HashT>
+void merkle_tree_check_update_gadget<FieldT, HashT>::generate_r1cs_witness()
+{
+ /* do the hash computations bottom-up */
+ for (int i = tree_depth-1; i >= 0; --i)
+ {
+ /* ensure consistency of prev_path and next_path */
+ if (this->pb.val(address_bits[tree_depth-1-i]) == FieldT::one())
+ {
+ next_path.left_digests[i].generate_r1cs_witness(prev_path.left_digests[i].get_digest());
+ }
+ else
+ {
+ next_path.right_digests[i].generate_r1cs_witness(prev_path.right_digests[i].get_digest());
+ }
+
+ /* propagate previous input */
+ prev_propagators[i].generate_r1cs_witness();
+ next_propagators[i].generate_r1cs_witness();
+
+ /* compute hash */
+ prev_hashers[i].generate_r1cs_witness();
+ next_hashers[i].generate_r1cs_witness();
+ }
+
+ check_next_root->generate_r1cs_witness();
+}
+
+template<typename FieldT, typename HashT>
+size_t merkle_tree_check_update_gadget<FieldT, HashT>::root_size_in_bits()
+{
+ return HashT::get_digest_len();
+}
+
+template<typename FieldT, typename HashT>
+size_t merkle_tree_check_update_gadget<FieldT, HashT>::expected_constraints(const size_t tree_depth)
+{
+ /* NB: this includes path constraints */
+ const size_t prev_hasher_constraints = tree_depth * HashT::expected_constraints(false);
+ const size_t next_hasher_constraints = tree_depth * HashT::expected_constraints(true);
+ const size_t prev_authentication_path_constraints = 2 * tree_depth * HashT::get_digest_len();
+ const size_t prev_propagator_constraints = tree_depth * HashT::get_digest_len();
+ const size_t next_propagator_constraints = tree_depth * HashT::get_digest_len();
+ const size_t check_next_root_constraints = 3 * div_ceil(HashT::get_digest_len(), FieldT::capacity());
+ const size_t aux_equality_constraints = tree_depth * HashT::get_digest_len();
+
+ return (prev_hasher_constraints + next_hasher_constraints + prev_authentication_path_constraints +
+ prev_propagator_constraints + next_propagator_constraints + check_next_root_constraints +
+ aux_equality_constraints);
+}
+
+template<typename FieldT, typename HashT>
+void test_merkle_tree_check_update_gadget()
+{
+ /* prepare test */
+ const size_t digest_len = HashT::get_digest_len();
+
+ const size_t tree_depth = 16;
+ std::vector<merkle_authentication_node> prev_path(tree_depth);
+
+ bit_vector prev_load_hash(digest_len);
+ std::generate(prev_load_hash.begin(), prev_load_hash.end(), [&]() { return std::rand() % 2; });
+ bit_vector prev_store_hash(digest_len);
+ std::generate(prev_store_hash.begin(), prev_store_hash.end(), [&]() { return std::rand() % 2; });
+
+ bit_vector loaded_leaf = prev_load_hash;
+ bit_vector stored_leaf = prev_store_hash;
+
+ bit_vector address_bits;
+
+ size_t address = 0;
+ for (long level = tree_depth-1; level >= 0; --level)
+ {
+ const bool computed_is_right = (std::rand() % 2);
+ address |= (computed_is_right ? 1ul << (tree_depth-1-level) : 0);
+ address_bits.push_back(computed_is_right);
+ bit_vector other(digest_len);
+ std::generate(other.begin(), other.end(), [&]() { return std::rand() % 2; });
+
+ bit_vector load_block = prev_load_hash;
+ load_block.insert(computed_is_right ? load_block.begin() : load_block.end(), other.begin(), other.end());
+ bit_vector store_block = prev_store_hash;
+ store_block.insert(computed_is_right ? store_block.begin() : store_block.end(), other.begin(), other.end());
+
+ bit_vector load_h = HashT::get_hash(load_block);
+ bit_vector store_h = HashT::get_hash(store_block);
+
+ prev_path[level] = other;
+
+ prev_load_hash = load_h;
+ prev_store_hash = store_h;
+ }
+
+ bit_vector load_root = prev_load_hash;
+ bit_vector store_root = prev_store_hash;
+
+ /* execute the test */
+ protoboard<FieldT> pb;
+ pb_variable_array<FieldT> address_bits_va;
+ address_bits_va.allocate(pb, tree_depth, "address_bits");
+ digest_variable<FieldT> prev_leaf_digest(pb, digest_len, "prev_leaf_digest");
+ digest_variable<FieldT> prev_root_digest(pb, digest_len, "prev_root_digest");
+ merkle_authentication_path_variable<FieldT, HashT> prev_path_var(pb, tree_depth, "prev_path_var");
+ digest_variable<FieldT> next_leaf_digest(pb, digest_len, "next_leaf_digest");
+ digest_variable<FieldT> next_root_digest(pb, digest_len, "next_root_digest");
+ merkle_authentication_path_variable<FieldT, HashT> next_path_var(pb, tree_depth, "next_path_var");
+ merkle_tree_check_update_gadget<FieldT, HashT> mls(pb, tree_depth, address_bits_va,
+ prev_leaf_digest, prev_root_digest, prev_path_var,
+ next_leaf_digest, next_root_digest, next_path_var, ONE, "mls");
+
+ prev_path_var.generate_r1cs_constraints();
+ mls.generate_r1cs_constraints();
+
+ address_bits_va.fill_with_bits(pb, address_bits);
+ assert(address_bits_va.get_field_element_from_bits(pb).as_ulong() == address);
+ prev_leaf_digest.generate_r1cs_witness(loaded_leaf);
+ prev_path_var.generate_r1cs_witness(address, prev_path);
+ next_leaf_digest.generate_r1cs_witness(stored_leaf);
+ address_bits_va.fill_with_bits(pb, address_bits);
+ mls.generate_r1cs_witness();
+
+ /* make sure that update check will check for the right things */
+ prev_leaf_digest.generate_r1cs_witness(loaded_leaf);
+ next_leaf_digest.generate_r1cs_witness(stored_leaf);
+ prev_root_digest.generate_r1cs_witness(load_root);
+ next_root_digest.generate_r1cs_witness(store_root);
+ address_bits_va.fill_with_bits(pb, address_bits);
+ assert(pb.is_satisfied());
+
+ const size_t num_constraints = pb.num_constraints();
+ const size_t expected_constraints = merkle_tree_check_update_gadget<FieldT, HashT>::expected_constraints(tree_depth);
+ assert(num_constraints == expected_constraints);
+}
+
+} // libsnark
+
+#endif // MERKLE_TREE_CHECK_UPDATE_GADGET_TCC_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifdef CURVE_BN128
+#include "algebra/curves/bn128/bn128_pp.hpp"
+#endif
+#include "algebra/curves/edwards/edwards_pp.hpp"
+#include "algebra/curves/mnt/mnt4/mnt4_pp.hpp"
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_read_gadget.hpp"
+#include "gadgetlib1/gadgets/merkle_tree/merkle_tree_check_update_gadget.hpp"
+#include "gadgetlib1/gadgets/hashes/sha256/sha256_gadget.hpp"
+
+using namespace libsnark;
+
+template<typename ppT>
+void test_all_merkle_tree_gadgets()
+{
+ typedef Fr<ppT> FieldT;
+ test_merkle_tree_check_read_gadget<FieldT, CRH_with_bit_out_gadget<FieldT> >();
+ test_merkle_tree_check_read_gadget<FieldT, sha256_two_to_one_hash_gadget<FieldT> >();
+
+ test_merkle_tree_check_update_gadget<FieldT, CRH_with_bit_out_gadget<FieldT> >();
+ test_merkle_tree_check_update_gadget<FieldT, sha256_two_to_one_hash_gadget<FieldT> >();
+}
+
+int main(void)
+{
+ start_profiling();
+
+#ifdef CURVE_BN128 // BN128 has fancy dependencies so it may be disabled
+ bn128_pp::init_public_params();
+ test_all_merkle_tree_gadgets<bn128_pp>();
+#endif
+
+ edwards_pp::init_public_params();
+ test_all_merkle_tree_gadgets<edwards_pp>();
+
+ mnt4_pp::init_public_params();
+ test_all_merkle_tree_gadgets<mnt4_pp>();
+
+ mnt6_pp::init_public_params();
+ test_all_merkle_tree_gadgets<mnt6_pp>();
+}
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PB_VARIABLE_HPP_
+#define PB_VARIABLE_HPP_
+
+#include <cstddef>
+#include <string>
+#include <vector>
+#include "common/utils.hpp"
+#include "relations/variable.hpp"
+
+namespace libsnark {
+
+typedef size_t lc_index_t;
+
+template<typename FieldT>
+class protoboard;
+
+template<typename FieldT>
+class pb_variable : public variable<FieldT> {
+public:
+ pb_variable(const var_index_t index = 0) : variable<FieldT>(index) {};
+
+ void allocate(protoboard<FieldT> &pb, const std::string &annotation="");
+};
+
+template<typename FieldT>
+class pb_variable_array : private std::vector<pb_variable<FieldT> >
+{
+ typedef std::vector<pb_variable<FieldT> > contents;
+public:
+ using typename contents::iterator;
+ using typename contents::const_iterator;
+ using typename contents::reverse_iterator;
+ using typename contents::const_reverse_iterator;
+
+ using contents::begin;
+ using contents::end;
+ using contents::rbegin;
+ using contents::rend;
+ using contents::emplace_back;
+ using contents::insert;
+ using contents::reserve;
+ using contents::size;
+ using contents::empty;
+ using contents::operator[];
+ using contents::resize;
+
+ pb_variable_array() : contents() {};
+ pb_variable_array(size_t count, const pb_variable<FieldT> &value) : contents(count, value) {};
+ pb_variable_array(typename contents::const_iterator first, typename contents::const_iterator last) : contents(first, last) {};
+ pb_variable_array(typename contents::const_reverse_iterator first, typename contents::const_reverse_iterator last) : contents(first, last) {};
+ void allocate(protoboard<FieldT> &pb, const size_t n, const std::string &annotation_prefix="");
+
+ void fill_with_field_elements(protoboard<FieldT> &pb, const std::vector<FieldT>& vals) const;
+ void fill_with_bits(protoboard<FieldT> &pb, const bit_vector& bits) const;
+ void fill_with_bits_of_ulong(protoboard<FieldT> &pb, const unsigned long i) const;
+ void fill_with_bits_of_field_element(protoboard<FieldT> &pb, const FieldT &r) const;
+
+ std::vector<FieldT> get_vals(const protoboard<FieldT> &pb) const;
+ bit_vector get_bits(const protoboard<FieldT> &pb) const;
+
+ FieldT get_field_element_from_bits(const protoboard<FieldT> &pb) const;
+};
+
+/* index 0 corresponds to the constant term (used in legacy code) */
+#define ONE pb_variable<FieldT>(0)
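+
+/*
+ Illustrative sketch (editorial note, not part of the library): typical use of
+ pb_variable and pb_variable_array together with a protoboard. The names x and
+ bits, the 8-bit width, and the value 42 are assumptions chosen only for this
+ example; it also assumes FieldT is constructible from a small integer, as the
+ field types used elsewhere in this codebase are.
+
+   protoboard<FieldT> pb;
+
+   pb_variable<FieldT> x;
+   x.allocate(pb, "x");                      // a single variable
+
+   pb_variable_array<FieldT> bits;
+   bits.allocate(pb, 8, "bits");             // 8 variables annotated bits_0 .. bits_7
+
+   pb.val(x) = FieldT(42);                   // assign a witness value
+   bits.fill_with_bits_of_ulong(pb, 42);     // bits[i] gets the i-th bit of 42
+
+   // packing the bit values back into a field element recovers 42
+   const FieldT packed = bits.get_field_element_from_bits(pb);
+
+ ONE above denotes the special variable with index 0, whose value is the
+ constant 1 in every assignment.
+*/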
+
+template<typename FieldT>
+class pb_linear_combination : public linear_combination<FieldT> {
+public:
+ bool is_variable;
+ lc_index_t index;
+
+ pb_linear_combination();
+ pb_linear_combination(const pb_variable<FieldT> &var);
+
+ void assign(protoboard<FieldT> &pb, const linear_combination<FieldT> &lc);
+ void evaluate(protoboard<FieldT> &pb) const;
+
+ bool is_constant() const;
+ FieldT constant_term() const;
+};
+
+template<typename FieldT>
+class pb_linear_combination_array : private std::vector<pb_linear_combination<FieldT> >
+{
+ typedef std::vector<pb_linear_combination<FieldT> > contents;
+public:
+ using typename contents::iterator;
+ using typename contents::const_iterator;
+ using typename contents::reverse_iterator;
+ using typename contents::const_reverse_iterator;
+
+ using contents::begin;
+ using contents::end;
+ using contents::rbegin;
+ using contents::rend;
+ using contents::emplace_back;
+ using contents::insert;
+ using contents::reserve;
+ using contents::size;
+ using contents::empty;
+ using contents::operator[];
+ using contents::resize;
+
+ pb_linear_combination_array() : contents() {};
+ pb_linear_combination_array(const pb_variable_array<FieldT> &arr) { for (auto &v : arr) this->emplace_back(pb_linear_combination<FieldT>(v)); };
+ pb_linear_combination_array(size_t count) : contents(count) {};
+ pb_linear_combination_array(size_t count, const pb_linear_combination<FieldT> &value) : contents(count, value) {};
+ pb_linear_combination_array(typename contents::const_iterator first, typename contents::const_iterator last) : contents(first, last) {};
+ pb_linear_combination_array(typename contents::const_reverse_iterator first, typename contents::const_reverse_iterator last) : contents(first, last) {};
+
+ void evaluate(protoboard<FieldT> &pb) const;
+
+ void fill_with_field_elements(protoboard<FieldT> &pb, const std::vector<FieldT>& vals) const;
+ void fill_with_bits(protoboard<FieldT> &pb, const bit_vector& bits) const;
+ void fill_with_bits_of_ulong(protoboard<FieldT> &pb, const unsigned long i) const;
+ void fill_with_bits_of_field_element(protoboard<FieldT> &pb, const FieldT &r) const;
+
+ std::vector<FieldT> get_vals(const protoboard<FieldT> &pb) const;
+ bit_vector get_bits(const protoboard<FieldT> &pb) const;
+
+ FieldT get_field_element_from_bits(const protoboard<FieldT> &pb) const;
+};
+
+template<typename FieldT>
+linear_combination<FieldT> pb_sum(const pb_linear_combination_array<FieldT> &v);
+
+template<typename FieldT>
+linear_combination<FieldT> pb_packing_sum(const pb_linear_combination_array<FieldT> &v);
+
+template<typename FieldT>
+linear_combination<FieldT> pb_coeff_sum(const pb_linear_combination_array<FieldT> &v, const std::vector<FieldT> &coeffs);
+
+} // libsnark
+#include "gadgetlib1/pb_variable.tcc"
+
+#endif // PB_VARIABLE_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PB_VARIABLE_TCC_
+#define PB_VARIABLE_TCC_
+#include <cassert>
+#include "gadgetlib1/protoboard.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+void pb_variable<FieldT>::allocate(protoboard<FieldT> &pb, const std::string &annotation)
+{
+ this->index = pb.allocate_var_index(annotation);
+}
+
+/* allocates pb_variable<FieldT> array in MSB->LSB order */
+template<typename FieldT>
+void pb_variable_array<FieldT>::allocate(protoboard<FieldT> &pb, const size_t n, const std::string &annotation_prefix)
+{
+#ifdef DEBUG
+ assert(annotation_prefix != "");
+#endif
+ (*this).resize(n);
+
+ for (size_t i = 0; i < n; ++i)
+ {
+ (*this)[i].allocate(pb, FMT(annotation_prefix, "_%zu", i));
+ }
+}
+
+template<typename FieldT>
+void pb_variable_array<FieldT>::fill_with_field_elements(protoboard<FieldT> &pb, const std::vector<FieldT>& vals) const
+{
+ assert(this->size() == vals.size());
+ for (size_t i = 0; i < vals.size(); ++i)
+ {
+ pb.val((*this)[i]) = vals[i];
+ }
+}
+
+template<typename FieldT>
+void pb_variable_array<FieldT>::fill_with_bits(protoboard<FieldT> &pb, const bit_vector& bits) const
+{
+ assert(this->size() == bits.size());
+ for (size_t i = 0; i < bits.size(); ++i)
+ {
+ pb.val((*this)[i]) = (bits[i] ? FieldT::one() : FieldT::zero());
+ }
+}
+
+template<typename FieldT>
+void pb_variable_array<FieldT>::fill_with_bits_of_field_element(protoboard<FieldT> &pb, const FieldT &r) const
+{
+ const bigint<FieldT::num_limbs> rint = r.as_bigint();
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ pb.val((*this)[i]) = rint.test_bit(i) ? FieldT::one() : FieldT::zero();
+ }
+}
+
+template<typename FieldT>
+void pb_variable_array<FieldT>::fill_with_bits_of_ulong(protoboard<FieldT> &pb, const unsigned long i) const
+{
+ this->fill_with_bits_of_field_element(pb, FieldT(i, true));
+}
+
+template<typename FieldT>
+std::vector<FieldT> pb_variable_array<FieldT>::get_vals(const protoboard<FieldT> &pb) const
+{
+ std::vector<FieldT> result(this->size());
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ result[i] = pb.val((*this)[i]);
+ }
+ return result;
+}
+
+template<typename FieldT>
+bit_vector pb_variable_array<FieldT>::get_bits(const protoboard<FieldT> &pb) const
+{
+ bit_vector result;
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ const FieldT v = pb.val((*this)[i]);
+ assert(v == FieldT::zero() || v == FieldT::one());
+ result.push_back(v == FieldT::one());
+ }
+ return result;
+}
+
+template<typename FieldT>
+FieldT pb_variable_array<FieldT>::get_field_element_from_bits(const protoboard<FieldT> &pb) const
+{
+ FieldT result = FieldT::zero();
+
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ /* push in the new bit */
+ const FieldT v = pb.val((*this)[this->size()-1-i]);
+ assert(v == FieldT::zero() || v == FieldT::one());
+ result += result + v;
+ }
+
+ return result;
+}
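+
+/* Editorial note: the loop above computes result = 2*result + v from the last
+   index down to index 0, i.e., it returns sum_j v[j] * 2^j. For example, the
+   values (1, 0, 1) stored at indices (0, 1, 2) yield 1 + 0*2 + 1*4 = 5. For
+   values that fit in the array, this inverts fill_with_bits_of_field_element. */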
+
+template<typename FieldT>
+pb_linear_combination<FieldT>::pb_linear_combination()
+{
+ this->is_variable = false;
+ this->index = 0;
+}
+
+template<typename FieldT>
+pb_linear_combination<FieldT>::pb_linear_combination(const pb_variable<FieldT> &var)
+{
+ this->is_variable = true;
+ this->index = var.index;
+ this->terms.emplace_back(linear_term<FieldT>(var));
+}
+
+template<typename FieldT>
+void pb_linear_combination<FieldT>::assign(protoboard<FieldT> &pb, const linear_combination<FieldT> &lc)
+{
+ assert(this->is_variable == false);
+ this->index = pb.allocate_lc_index();
+ this->terms = lc.terms;
+}
+
+template<typename FieldT>
+void pb_linear_combination<FieldT>::evaluate(protoboard<FieldT> &pb) const
+{
+ if (this->is_variable)
+ {
+ return; // do nothing
+ }
+
+ FieldT sum = 0;
+ for (auto term : this->terms)
+ {
+ sum += term.coeff * pb.val(pb_variable<FieldT>(term.index));
+ }
+
+ pb.lc_val(*this) = sum;
+}
+
+template<typename FieldT>
+bool pb_linear_combination<FieldT>::is_constant() const
+{
+ if (is_variable)
+ {
+ return (index == 0);
+ }
+ else
+ {
+ for (auto term : this->terms)
+ {
+ if (term.index != 0)
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+}
+
+template<typename FieldT>
+FieldT pb_linear_combination<FieldT>::constant_term() const
+{
+ if (is_variable)
+ {
+ return (index == 0 ? FieldT::one() : FieldT::zero());
+ }
+ else
+ {
+ FieldT result = FieldT::zero();
+ for (auto term : this->terms)
+ {
+ if (term.index == 0)
+ {
+ result += term.coeff;
+ }
+ }
+ return result;
+ }
+}
+
+template<typename FieldT>
+void pb_linear_combination_array<FieldT>::evaluate(protoboard<FieldT> &pb) const
+{
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ (*this)[i].evaluate(pb);
+ }
+}
+
+template<typename FieldT>
+void pb_linear_combination_array<FieldT>::fill_with_field_elements(protoboard<FieldT> &pb, const std::vector<FieldT>& vals) const
+{
+ assert(this->size() == vals.size());
+ for (size_t i = 0; i < vals.size(); ++i)
+ {
+ pb.lc_val((*this)[i]) = vals[i];
+ }
+}
+
+template<typename FieldT>
+void pb_linear_combination_array<FieldT>::fill_with_bits(protoboard<FieldT> &pb, const bit_vector& bits) const
+{
+ assert(this->size() == bits.size());
+ for (size_t i = 0; i < bits.size(); ++i)
+ {
+ pb.lc_val((*this)[i]) = (bits[i] ? FieldT::one() : FieldT::zero());
+ }
+}
+
+template<typename FieldT>
+void pb_linear_combination_array<FieldT>::fill_with_bits_of_field_element(protoboard<FieldT> &pb, const FieldT &r) const
+{
+ const bigint<FieldT::num_limbs> rint = r.as_bigint();
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ pb.lc_val((*this)[i]) = rint.test_bit(i) ? FieldT::one() : FieldT::zero();
+ }
+}
+
+template<typename FieldT>
+void pb_linear_combination_array<FieldT>::fill_with_bits_of_ulong(protoboard<FieldT> &pb, const unsigned long i) const
+{
+ this->fill_with_bits_of_field_element(pb, FieldT(i));
+}
+
+template<typename FieldT>
+std::vector<FieldT> pb_linear_combination_array<FieldT>::get_vals(const protoboard<FieldT> &pb) const
+{
+ std::vector<FieldT> result(this->size());
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ result[i] = pb.lc_val((*this)[i]);
+ }
+ return result;
+}
+
+template<typename FieldT>
+bit_vector pb_linear_combination_array<FieldT>::get_bits(const protoboard<FieldT> &pb) const
+{
+ bit_vector result;
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ const FieldT v = pb.lc_val((*this)[i]);
+ assert(v == FieldT::zero() || v == FieldT::one());
+ result.push_back(v == FieldT::one());
+ }
+ return result;
+}
+
+template<typename FieldT>
+FieldT pb_linear_combination_array<FieldT>::get_field_element_from_bits(const protoboard<FieldT> &pb) const
+{
+ FieldT result = FieldT::zero();
+
+ for (size_t i = 0; i < this->size(); ++i)
+ {
+ /* push in the new bit */
+ const FieldT v = pb.lc_val((*this)[this->size()-1-i]);
+ assert(v == FieldT::zero() || v == FieldT::one());
+ result += result + v;
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> pb_sum(const pb_linear_combination_array<FieldT> &v)
+{
+ linear_combination<FieldT> result;
+ for (auto &term : v)
+ {
+ result = result + term;
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> pb_packing_sum(const pb_linear_combination_array<FieldT> &v)
+{
+ FieldT twoi = FieldT::one(); // will hold 2^i entering each iteration
+ std::vector<linear_term<FieldT> > all_terms;
+ for (auto &lc : v)
+ {
+ for (auto &term : lc.terms)
+ {
+ all_terms.emplace_back(twoi * term);
+ }
+ twoi += twoi;
+ }
+
+ return linear_combination<FieldT>(all_terms);
+}
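+
+/* Editorial note: pb_packing_sum(v) returns the linear combination
+   sum_i 2^i * v[i]; for a 3-element array this is v[0] + 2*v[1] + 4*v[2],
+   i.e., the field element obtained by treating v as a bit array with v[0]
+   as the least significant bit. */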
+
+template<typename FieldT>
+linear_combination<FieldT> pb_coeff_sum(const pb_linear_combination_array<FieldT> &v, const std::vector<FieldT> &coeffs)
+{
+ assert(v.size() == coeffs.size());
+ std::vector<linear_term<FieldT> > all_terms;
+
+ auto coeff_it = coeffs.begin();
+ for (auto &lc : v)
+ {
+ for (auto &term : lc.terms)
+ {
+ all_terms.emplace_back((*coeff_it) * term);
+ }
+ ++coeff_it;
+ }
+
+ return linear_combination<FieldT>(all_terms);
+}
+
+
+} // libsnark
+#endif // PB_VARIABLE_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PROTOBOARD_HPP_
+#define PROTOBOARD_HPP_
+
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <string>
+#include <vector>
+#include "gadgetlib1/pb_variable.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+class r1cs_constraint;
+
+template<typename FieldT>
+class r1cs_constraint_system;
+
+template<typename FieldT>
+class protoboard {
+private:
+ FieldT constant_term; /* only here, because pb.val() needs to be able to return reference to the constant 1 term */
+ r1cs_variable_assignment<FieldT> values; /* values[0] will hold the value of the first allocated variable of the protoboard, *NOT* constant 1 */
+ var_index_t next_free_var;
+ lc_index_t next_free_lc;
+ std::vector<FieldT> lc_values;
+public:
+ r1cs_constraint_system<FieldT> constraint_system;
+
+ protoboard();
+
+ void clear_values();
+
+ FieldT& val(const pb_variable<FieldT> &var);
+ FieldT val(const pb_variable<FieldT> &var) const;
+
+ FieldT& lc_val(const pb_linear_combination<FieldT> &lc);
+ FieldT lc_val(const pb_linear_combination<FieldT> &lc) const;
+
+ void add_r1cs_constraint(const r1cs_constraint<FieldT> &constr, const std::string &annotation="");
+ void augment_variable_annotation(const pb_variable<FieldT> &v, const std::string &postfix);
+ bool is_satisfied() const;
+ void dump_variables() const;
+
+ size_t num_constraints() const;
+ size_t num_inputs() const;
+ size_t num_variables() const;
+
+ void set_input_sizes(const size_t primary_input_size);
+
+ r1cs_variable_assignment<FieldT> full_variable_assignment() const;
+ r1cs_primary_input<FieldT> primary_input() const;
+ r1cs_auxiliary_input<FieldT> auxiliary_input() const;
+ r1cs_constraint_system<FieldT> get_constraint_system() const;
+
+ friend class pb_variable<FieldT>;
+ friend class pb_linear_combination<FieldT>;
+
+private:
+ var_index_t allocate_var_index(const std::string &annotation="");
+ lc_index_t allocate_lc_index();
+};
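+
+/*
+ Illustrative sketch (editorial note, not part of the library): a minimal
+ protoboard workflow. The variable names, values, and the single constraint are
+ assumptions chosen only for this example; it assumes FieldT is constructible
+ from a small integer and that r1cs_constraint can be built directly from
+ variables (as it is from linear combinations elsewhere in this codebase).
+
+   protoboard<FieldT> pb;
+
+   pb_variable<FieldT> x, y, z;
+   x.allocate(pb, "x");
+   y.allocate(pb, "y");
+   z.allocate(pb, "z");
+   pb.set_input_sizes(1);                    // the first variable, x, is the primary input
+
+   // enforce x * y = z
+   pb.add_r1cs_constraint(r1cs_constraint<FieldT>(x, y, z), "x*y=z");
+
+   pb.val(x) = FieldT(3);
+   pb.val(y) = FieldT(4);
+   pb.val(z) = FieldT(12);
+
+   assert(pb.is_satisfied());
+   const r1cs_constraint_system<FieldT> cs = pb.get_constraint_system();
+*/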
+
+} // libsnark
+#include "gadgetlib1/protoboard.tcc"
+#endif // PROTOBOARD_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef PROTOBOARD_TCC_
+#define PROTOBOARD_TCC_
+
+#include <cstdio>
+#include <cstdarg>
+#include "common/profiling.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+protoboard<FieldT>::protoboard()
+{
+ constant_term = FieldT::one();
+
+#ifdef DEBUG
+ constraint_system.variable_annotations[0] = "ONE";
+#endif
+
+ next_free_var = 1; /* to account for constant 1 term */
+ next_free_lc = 0;
+}
+
+template<typename FieldT>
+void protoboard<FieldT>::clear_values()
+{
+ std::fill(values.begin(), values.end(), FieldT::zero());
+}
+
+template<typename FieldT>
+var_index_t protoboard<FieldT>::allocate_var_index(const std::string &annotation)
+{
+#ifdef DEBUG
+ assert(annotation != "");
+ constraint_system.variable_annotations[next_free_var] = annotation;
+#else
+ UNUSED(annotation);
+#endif
+ ++constraint_system.auxiliary_input_size;
+ values.emplace_back(FieldT::zero());
+ return next_free_var++;
+}
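+
+/* Editorial note: newly allocated variables are counted as auxiliary here;
+   set_input_sizes() below later designates the first primary_input_size
+   allocated variables as the primary input and recomputes the split. */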
+
+template<typename FieldT>
+lc_index_t protoboard<FieldT>::allocate_lc_index()
+{
+ lc_values.emplace_back(FieldT::zero());
+ return next_free_lc++;
+}
+
+template<typename FieldT>
+FieldT& protoboard<FieldT>::val(const pb_variable<FieldT> &var)
+{
+ assert(var.index <= values.size());
+ return (var.index == 0 ? constant_term : values[var.index-1]);
+}
+
+template<typename FieldT>
+FieldT protoboard<FieldT>::val(const pb_variable<FieldT> &var) const
+{
+ assert(var.index <= values.size());
+ return (var.index == 0 ? constant_term : values[var.index-1]);
+}
+
+template<typename FieldT>
+FieldT& protoboard<FieldT>::lc_val(const pb_linear_combination<FieldT> &lc)
+{
+ if (lc.is_variable)
+ {
+ return this->val(pb_variable<FieldT>(lc.index));
+ }
+ else
+ {
+ assert(lc.index < lc_values.size());
+ return lc_values[lc.index];
+ }
+}
+
+template<typename FieldT>
+FieldT protoboard<FieldT>::lc_val(const pb_linear_combination<FieldT> &lc) const
+{
+ if (lc.is_variable)
+ {
+ return this->val(pb_variable<FieldT>(lc.index));
+ }
+ else
+ {
+ assert(lc.index < lc_values.size());
+ return lc_values[lc.index];
+ }
+}
+
+template<typename FieldT>
+void protoboard<FieldT>::add_r1cs_constraint(const r1cs_constraint<FieldT> &constr, const std::string &annotation)
+{
+#ifdef DEBUG
+ assert(annotation != "");
+ constraint_system.constraint_annotations[constraint_system.constraints.size()] = annotation;
+#else
+ UNUSED(annotation);
+#endif
+ constraint_system.constraints.emplace_back(constr);
+}
+
+template<typename FieldT>
+void protoboard<FieldT>::augment_variable_annotation(const pb_variable<FieldT> &v, const std::string &postfix)
+{
+#ifdef DEBUG
+ auto it = constraint_system.variable_annotations.find(v.index);
+ constraint_system.variable_annotations[v.index] = (it == constraint_system.variable_annotations.end() ? "" : it->second + " ") + postfix;
+#endif
+}
+
+template<typename FieldT>
+bool protoboard<FieldT>::is_satisfied() const
+{
+ return constraint_system.is_satisfied(primary_input(), auxiliary_input());
+}
+
+template<typename FieldT>
+void protoboard<FieldT>::dump_variables() const
+{
+#ifdef DEBUG
+ for (size_t i = 0; i < constraint_system.num_variables(); ++i)
+ {
+ printf("%-40s --> ", constraint_system.variable_annotations[i].c_str());
+ values[i].as_bigint().print_hex();
+ }
+#endif
+}
+
+template<typename FieldT>
+size_t protoboard<FieldT>::num_constraints() const
+{
+ return constraint_system.num_constraints();
+}
+
+template<typename FieldT>
+size_t protoboard<FieldT>::num_inputs() const
+{
+ return constraint_system.num_inputs();
+}
+
+template<typename FieldT>
+size_t protoboard<FieldT>::num_variables() const
+{
+ return next_free_var - 1;
+}
+
+template<typename FieldT>
+void protoboard<FieldT>::set_input_sizes(const size_t primary_input_size)
+{
+ assert(primary_input_size <= num_variables());
+ constraint_system.primary_input_size = primary_input_size;
+ constraint_system.auxiliary_input_size = num_variables() - primary_input_size;
+}
+
+template<typename FieldT>
+r1cs_variable_assignment<FieldT> protoboard<FieldT>::full_variable_assignment() const
+{
+ return values;
+}
+
+template<typename FieldT>
+r1cs_primary_input<FieldT> protoboard<FieldT>::primary_input() const
+{
+ return r1cs_primary_input<FieldT>(values.begin(), values.begin() + num_inputs());
+}
+
+template<typename FieldT>
+r1cs_auxiliary_input<FieldT> protoboard<FieldT>::auxiliary_input() const
+{
+ return r1cs_auxiliary_input<FieldT>(values.begin() + num_inputs(), values.end());
+}
+
+template<typename FieldT>
+r1cs_constraint_system<FieldT> protoboard<FieldT>::get_constraint_system() const
+{
+ return constraint_system;
+}
+
+} // libsnark
+#endif // PROTOBOARD_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a R1CS-to-QAP reduction, that is, constructing
+ a QAP ("Quadratic Arithmetic Program") from a R1CS ("Rank-1 Constraint System").
+
+ QAPs are defined in \[GGPR13], and the construction of a QAP from a R1CS instance is also described in \[GGPR13].
+
+ The implementation of the reduction follows, extends, and optimizes
+ the efficient approach described in Appendix E of \[BCGTV13].
+
+ References:
+
+ \[BCGTV13]
+ "SNARKs for C: Verifying Program Executions Succinctly and in Zero Knowledge",
+ Eli Ben-Sasson, Alessandro Chiesa, Daniel Genkin, Eran Tromer, Madars Virza,
+ CRYPTO 2013,
+ <http://eprint.iacr.org/2013/507>
+
+ \[GGPR13]:
+ "Quadratic span programs and succinct NIZKs without PCPs",
+ Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova,
+ EUROCRYPT 2013,
+ <http://eprint.iacr.org/2012/215>
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_TO_QAP_HPP_
+#define R1CS_TO_QAP_HPP_
+
+#include "relations/arithmetic_programs/qap/qap.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+
+namespace libsnark {
+
+/**
+ * Instance map for the R1CS-to-QAP reduction.
+ */
+template<typename FieldT>
+qap_instance<FieldT> r1cs_to_qap_instance_map(const r1cs_constraint_system<FieldT> &cs);
+
+/**
+ * Instance map for the R1CS-to-QAP reduction followed by evaluation of the resulting QAP instance.
+ */
+template<typename FieldT>
+qap_instance_evaluation<FieldT> r1cs_to_qap_instance_map_with_evaluation(const r1cs_constraint_system<FieldT> &cs,
+ const FieldT &t);
+
+/**
+ * Witness map for the R1CS-to-QAP reduction.
+ *
+ * The witness map takes zero knowledge into account when d1,d2,d3 are random.
+ */
+template<typename FieldT>
+qap_witness<FieldT> r1cs_to_qap_witness_map(const r1cs_constraint_system<FieldT> &cs,
+ const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3);
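+
+/*
+ Illustrative sketch (editorial note, not part of the library): how the three
+ maps above are typically combined, following the pattern of the QAP tests in
+ this codebase. The names cs, primary_input, and auxiliary_input stand for an
+ assumed satisfiable R1CS instance; t and d1,d2,d3 are fresh random elements.
+
+   const FieldT t  = FieldT::random_element();
+   const FieldT d1 = FieldT::random_element();
+   const FieldT d2 = FieldT::random_element();
+   const FieldT d3 = FieldT::random_element();
+
+   qap_instance<FieldT> inst = r1cs_to_qap_instance_map(cs);
+   qap_instance_evaluation<FieldT> inst_at_t =
+       r1cs_to_qap_instance_map_with_evaluation(cs, t);
+   qap_witness<FieldT> wit =
+       r1cs_to_qap_witness_map(cs, primary_input, auxiliary_input, d1, d2, d3);
+
+   // both views of the instance accept the resulting witness
+   assert(inst.is_satisfied(wit) && inst_at_t.is_satisfied(wit));
+*/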
+
+} // libsnark
+
+#include "reductions/r1cs_to_qap/r1cs_to_qap.tcc"
+
+#endif // R1CS_TO_QAP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for a R1CS-to-QAP reduction.
+
+ See r1cs_to_qap.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_TO_QAP_TCC_
+#define R1CS_TO_QAP_TCC_
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "algebra/evaluation_domain/evaluation_domain.hpp"
+
+namespace libsnark {
+
+/**
+ * Instance map for the R1CS-to-QAP reduction.
+ *
+ * Namely, given a R1CS constraint system cs, construct a QAP instance for which:
+ * A := (A_0(z),A_1(z),...,A_m(z))
+ * B := (B_0(z),B_1(z),...,B_m(z))
+ * C := (C_0(z),C_1(z),...,C_m(z))
+ * where
+ * m = number of variables of the QAP
+ * and
+ * each A_i,B_i,C_i is expressed in the Lagrange basis.
+ */
+template<typename FieldT>
+qap_instance<FieldT> r1cs_to_qap_instance_map(const r1cs_constraint_system<FieldT> &cs)
+{
+ enter_block("Call to r1cs_to_qap_instance_map");
+
+ const std::shared_ptr<evaluation_domain<FieldT> > domain = get_evaluation_domain<FieldT>(cs.num_constraints() + cs.num_inputs() + 1);
+
+ std::vector<std::map<size_t, FieldT> > A_in_Lagrange_basis(cs.num_variables()+1);
+ std::vector<std::map<size_t, FieldT> > B_in_Lagrange_basis(cs.num_variables()+1);
+ std::vector<std::map<size_t, FieldT> > C_in_Lagrange_basis(cs.num_variables()+1);
+
+ enter_block("Compute polynomials A, B, C in Lagrange basis");
+ /**
+ * add and process the constraints
+ * input_i * 0 = 0
+ * to ensure soundness of input consistency
+ */
+ for (size_t i = 0; i <= cs.num_inputs(); ++i)
+ {
+ A_in_Lagrange_basis[i][cs.num_constraints() + i] = FieldT::one();
+ }
+ /* process all other constraints */
+ for (size_t i = 0; i < cs.num_constraints(); ++i)
+ {
+ for (size_t j = 0; j < cs.constraints[i].a.terms.size(); ++j)
+ {
+ A_in_Lagrange_basis[cs.constraints[i].a.terms[j].index][i] +=
+ cs.constraints[i].a.terms[j].coeff;
+ }
+
+ for (size_t j = 0; j < cs.constraints[i].b.terms.size(); ++j)
+ {
+ B_in_Lagrange_basis[cs.constraints[i].b.terms[j].index][i] +=
+ cs.constraints[i].b.terms[j].coeff;
+ }
+
+ for (size_t j = 0; j < cs.constraints[i].c.terms.size(); ++j)
+ {
+ C_in_Lagrange_basis[cs.constraints[i].c.terms[j].index][i] +=
+ cs.constraints[i].c.terms[j].coeff;
+ }
+ }
+ leave_block("Compute polynomials A, B, C in Lagrange basis");
+
+ leave_block("Call to r1cs_to_qap_instance_map");
+
+ return qap_instance<FieldT>(domain,
+ cs.num_variables(),
+ domain->m,
+ cs.num_inputs(),
+ std::move(A_in_Lagrange_basis),
+ std::move(B_in_Lagrange_basis),
+ std::move(C_in_Lagrange_basis));
+}
+
+/**
+ * Instance map for the R1CS-to-QAP reduction followed by evaluation of the resulting QAP instance.
+ *
+ * Namely, given a R1CS constraint system cs and a field element t, construct
+ * a QAP instance (evaluated at t) for which:
+ * At := (A_0(t),A_1(t),...,A_m(t))
+ * Bt := (B_0(t),B_1(t),...,B_m(t))
+ * Ct := (C_0(t),C_1(t),...,C_m(t))
+ * Ht := (1,t,t^2,...,t^n)
+ * Zt := Z(t) = "vanishing polynomial of a certain set S, evaluated at t"
+ * where
+ * m = number of variables of the QAP
+ * n = degree of the QAP
+ */
+template<typename FieldT>
+qap_instance_evaluation<FieldT> r1cs_to_qap_instance_map_with_evaluation(const r1cs_constraint_system<FieldT> &cs,
+ const FieldT &t)
+{
+ enter_block("Call to r1cs_to_qap_instance_map_with_evaluation");
+
+ const std::shared_ptr<evaluation_domain<FieldT> > domain = get_evaluation_domain<FieldT>(cs.num_constraints() + cs.num_inputs() + 1);
+
+ std::vector<FieldT> At, Bt, Ct, Ht;
+
+ At.resize(cs.num_variables()+1, FieldT::zero());
+ Bt.resize(cs.num_variables()+1, FieldT::zero());
+ Ct.resize(cs.num_variables()+1, FieldT::zero());
+ Ht.reserve(domain->m+1);
+
+ const FieldT Zt = domain->compute_Z(t);
+
+ enter_block("Compute evaluations of A, B, C, H at t");
+ const std::vector<FieldT> u = domain->lagrange_coeffs(t);
+ /**
+ * add and process the constraints
+ * input_i * 0 = 0
+ * to ensure soundness of input consistency
+ */
+ for (size_t i = 0; i <= cs.num_inputs(); ++i)
+ {
+ At[i] = u[cs.num_constraints() + i];
+ }
+ /* process all other constraints */
+ for (size_t i = 0; i < cs.num_constraints(); ++i)
+ {
+ for (size_t j = 0; j < cs.constraints[i].a.terms.size(); ++j)
+ {
+ At[cs.constraints[i].a.terms[j].index] +=
+ u[i]*cs.constraints[i].a.terms[j].coeff;
+ }
+
+ for (size_t j = 0; j < cs.constraints[i].b.terms.size(); ++j)
+ {
+ Bt[cs.constraints[i].b.terms[j].index] +=
+ u[i]*cs.constraints[i].b.terms[j].coeff;
+ }
+
+ for (size_t j = 0; j < cs.constraints[i].c.terms.size(); ++j)
+ {
+ Ct[cs.constraints[i].c.terms[j].index] +=
+ u[i]*cs.constraints[i].c.terms[j].coeff;
+ }
+ }
+
+ FieldT ti = FieldT::one();
+ for (size_t i = 0; i < domain->m+1; ++i)
+ {
+ Ht.emplace_back(ti);
+ ti *= t;
+ }
+ leave_block("Compute evaluations of A, B, C, H at t");
+
+ leave_block("Call to r1cs_to_qap_instance_map_with_evaluation");
+
+ return qap_instance_evaluation<FieldT>(domain,
+ cs.num_variables(),
+ domain->m,
+ cs.num_inputs(),
+ t,
+ std::move(At),
+ std::move(Bt),
+ std::move(Ct),
+ std::move(Ht),
+ Zt);
+}
+
+/**
+ * Witness map for the R1CS-to-QAP reduction.
+ *
+ * The witness map takes zero knowledge into account when d1,d2,d3 are random.
+ *
+ * More precisely, compute the coefficients
+ * h_0,h_1,...,h_n
+ * of the polynomial
+ * H(z) := (A(z)*B(z)-C(z))/Z(z)
+ * where
+ * A(z) := A_0(z) + \sum_{k=1}^{m} w_k A_k(z) + d1 * Z(z)
+ * B(z) := B_0(z) + \sum_{k=1}^{m} w_k B_k(z) + d2 * Z(z)
+ * C(z) := C_0(z) + \sum_{k=1}^{m} w_k C_k(z) + d3 * Z(z)
+ * Z(z) := "vanishing polynomial of set S"
+ * and
+ * m = number of variables of the QAP
+ * n = degree of the QAP
+ *
+ * This is done as follows:
+ * (1) compute evaluations of A,B,C on S = {sigma_1,...,sigma_n}
+ * (2) compute coefficients of A,B,C
+ * (3) compute evaluations of A,B,C on T = "coset of S"
+ * (4) compute evaluation of H on T
+ * (5) compute coefficients of H
+ * (6) patch H to account for d1,d2,d3 (i.e., add coefficients of the polynomial (A d2 + B d1 - d3) + d1*d2*Z )
+ *
+ * The code below is not as simple as the above high-level description due to
+ * some reshuffling to save space.
+ */
+template<typename FieldT>
+qap_witness<FieldT> r1cs_to_qap_witness_map(const r1cs_constraint_system<FieldT> &cs,
+ const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3)
+{
+ enter_block("Call to r1cs_to_qap_witness_map");
+
+ /* sanity check */
+ assert(cs.is_satisfied(primary_input, auxiliary_input));
+
+ const std::shared_ptr<evaluation_domain<FieldT> > domain = get_evaluation_domain<FieldT>(cs.num_constraints() + cs.num_inputs() + 1);
+
+ r1cs_variable_assignment<FieldT> full_variable_assignment = primary_input;
+ full_variable_assignment.insert(full_variable_assignment.end(), auxiliary_input.begin(), auxiliary_input.end());
+
+ enter_block("Compute evaluation of polynomials A, B on set S");
+ std::vector<FieldT> aA(domain->m, FieldT::zero()), aB(domain->m, FieldT::zero());
+
+ /* account for the additional constraints input_i * 0 = 0 */
+ for (size_t i = 0; i <= cs.num_inputs(); ++i)
+ {
+ aA[i+cs.num_constraints()] = (i > 0 ? full_variable_assignment[i-1] : FieldT::one());
+ }
+ /* account for all other constraints */
+ for (size_t i = 0; i < cs.num_constraints(); ++i)
+ {
+ aA[i] += cs.constraints[i].a.evaluate(full_variable_assignment);
+ aB[i] += cs.constraints[i].b.evaluate(full_variable_assignment);
+ }
+ leave_block("Compute evaluation of polynomials A, B on set S");
+
+ enter_block("Compute coefficients of polynomial A");
+ domain->iFFT(aA);
+ leave_block("Compute coefficients of polynomial A");
+
+ enter_block("Compute coefficients of polynomial B");
+ domain->iFFT(aB);
+ leave_block("Compute coefficients of polynomial B");
+
+ enter_block("Compute ZK-patch");
+ std::vector<FieldT> coefficients_for_H(domain->m+1, FieldT::zero());
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ /* add coefficients of the polynomial (d2*A + d1*B - d3) + d1*d2*Z */
+ for (size_t i = 0; i < domain->m; ++i)
+ {
+ coefficients_for_H[i] = d2*aA[i] + d1*aB[i];
+ }
+ coefficients_for_H[0] -= d3;
+ domain->add_poly_Z(d1*d2, coefficients_for_H);
+ leave_block("Compute ZK-patch");
+
+ enter_block("Compute evaluation of polynomial A on set T");
+ domain->cosetFFT(aA, FieldT::multiplicative_generator);
+ leave_block("Compute evaluation of polynomial A on set T");
+
+ enter_block("Compute evaluation of polynomial B on set T");
+ domain->cosetFFT(aB, FieldT::multiplicative_generator);
+ leave_block("Compute evaluation of polynomial B on set T");
+
+ enter_block("Compute evaluation of polynomial H on set T");
+ std::vector<FieldT> &H_tmp = aA; // can overwrite aA because it is not used later
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < domain->m; ++i)
+ {
+ H_tmp[i] = aA[i]*aB[i];
+ }
+ std::vector<FieldT>().swap(aB); // destroy aB
+
+ enter_block("Compute evaluation of polynomial C on set S");
+ std::vector<FieldT> aC(domain->m, FieldT::zero());
+ for (size_t i = 0; i < cs.num_constraints(); ++i)
+ {
+ aC[i] += cs.constraints[i].c.evaluate(full_variable_assignment);
+ }
+ leave_block("Compute evaluation of polynomial C on set S");
+
+ enter_block("Compute coefficients of polynomial C");
+ domain->iFFT(aC);
+ leave_block("Compute coefficients of polynomial C");
+
+ enter_block("Compute evaluation of polynomial C on set T");
+ domain->cosetFFT(aC, FieldT::multiplicative_generator);
+ leave_block("Compute evaluation of polynomial C on set T");
+
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < domain->m; ++i)
+ {
+ H_tmp[i] = (H_tmp[i]-aC[i]);
+ }
+
+ enter_block("Divide by Z on set T");
+ domain->divide_by_Z_on_coset(H_tmp);
+ leave_block("Divide by Z on set T");
+
+ leave_block("Compute evaluation of polynomial H on set T");
+
+ enter_block("Compute coefficients of polynomial H");
+ domain->icosetFFT(H_tmp, FieldT::multiplicative_generator);
+ leave_block("Compute coefficients of polynomial H");
+
+ enter_block("Compute sum of H and ZK-patch");
+#ifdef MULTICORE
+#pragma omp parallel for
+#endif
+ for (size_t i = 0; i < domain->m; ++i)
+ {
+ coefficients_for_H[i] += H_tmp[i];
+ }
+ leave_block("Compute sum of H and ZK-patch");
+
+ leave_block("Call to r1cs_to_qap_witness_map");
+
+ return qap_witness<FieldT>(cs.num_variables(),
+ domain->m,
+ cs.num_inputs(),
+ d1,
+ d2,
+ d3,
+ full_variable_assignment,
+ std::move(coefficients_for_H));
+}
+
+} // libsnark
+
+#endif // R1CS_TO_QAP_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a QAP ("Quadratic Arithmetic Program").
+
+ QAPs are defined in \[GGPR13].
+
+ References:
+
+ \[GGPR13]:
+ "Quadratic span programs and succinct NIZKs without PCPs",
+ Rosario Gennaro, Craig Gentry, Bryan Parno, Mariana Raykova,
+ EUROCRYPT 2013,
+ <http://eprint.iacr.org/2012/215>
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef QAP_HPP_
+#define QAP_HPP_
+
+#include "algebra/evaluation_domain/evaluation_domain.hpp"
+
+namespace libsnark {
+
+/* forward declaration */
+template<typename FieldT>
+class qap_witness;
+
+/**
+ * A QAP instance.
+ *
+ * Specifically, the datastructure stores:
+ * - a choice of domain (corresponding to a certain subset of the field);
+ * - the number of variables, the degree, and the number of inputs; and
+ * - coefficients of the A,B,C polynomials in the Lagrange basis.
+ *
+ * There is no need to store the Z polynomial because it is uniquely
+ * determined by the domain (as Z is its vanishing polynomial).
+ */
+template<typename FieldT>
+class qap_instance {
+private:
+ size_t num_variables_;
+ size_t degree_;
+ size_t num_inputs_;
+
+public:
+ std::shared_ptr<evaluation_domain<FieldT> > domain;
+
+ std::vector<std::map<size_t, FieldT> > A_in_Lagrange_basis;
+ std::vector<std::map<size_t, FieldT> > B_in_Lagrange_basis;
+ std::vector<std::map<size_t, FieldT> > C_in_Lagrange_basis;
+
+ qap_instance(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const std::vector<std::map<size_t, FieldT> > &A_in_Lagrange_basis,
+ const std::vector<std::map<size_t, FieldT> > &B_in_Lagrange_basis,
+ const std::vector<std::map<size_t, FieldT> > &C_in_Lagrange_basis);
+
+ qap_instance(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ std::vector<std::map<size_t, FieldT> > &&A_in_Lagrange_basis,
+ std::vector<std::map<size_t, FieldT> > &&B_in_Lagrange_basis,
+ std::vector<std::map<size_t, FieldT> > &&C_in_Lagrange_basis);
+
+ qap_instance(const qap_instance<FieldT> &other) = default;
+ qap_instance(qap_instance<FieldT> &&other) = default;
+ qap_instance& operator=(const qap_instance<FieldT> &other) = default;
+ qap_instance& operator=(qap_instance<FieldT> &&other) = default;
+
+ size_t num_variables() const;
+ size_t degree() const;
+ size_t num_inputs() const;
+
+ bool is_satisfied(const qap_witness<FieldT> &witness) const;
+};
+
+/**
+ * A QAP instance evaluation is a QAP instance that is evaluated at a field element t.
+ *
+ * Specifically, the datastructure stores:
+ * - a choice of domain (corresponding to a certain subset of the field);
+ * - the number of variables, the degree, and the number of inputs;
+ * - a field element t;
+ * - evaluations of the A,B,C (and Z) polynomials at t;
+ * - evaluations of all monomials of t (i.e., the powers 1, t, ..., t^n, stored in Ht).
+ */
+template<typename FieldT>
+class qap_instance_evaluation {
+private:
+ size_t num_variables_;
+ size_t degree_;
+ size_t num_inputs_;
+public:
+ std::shared_ptr<evaluation_domain<FieldT> > domain;
+
+ FieldT t;
+
+ std::vector<FieldT> At, Bt, Ct, Ht;
+
+ FieldT Zt;
+
+ qap_instance_evaluation(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &t,
+ const std::vector<FieldT> &At,
+ const std::vector<FieldT> &Bt,
+ const std::vector<FieldT> &Ct,
+ const std::vector<FieldT> &Ht,
+ const FieldT &Zt);
+ qap_instance_evaluation(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &t,
+ std::vector<FieldT> &&At,
+ std::vector<FieldT> &&Bt,
+ std::vector<FieldT> &&Ct,
+ std::vector<FieldT> &&Ht,
+ const FieldT &Zt);
+
+ qap_instance_evaluation(const qap_instance_evaluation<FieldT> &other) = default;
+ qap_instance_evaluation(qap_instance_evaluation<FieldT> &&other) = default;
+ qap_instance_evaluation& operator=(const qap_instance_evaluation<FieldT> &other) = default;
+ qap_instance_evaluation& operator=(qap_instance_evaluation<FieldT> &&other) = default;
+
+ size_t num_variables() const;
+ size_t degree() const;
+ size_t num_inputs() const;
+
+ bool is_satisfied(const qap_witness<FieldT> &witness) const;
+};
+
+/**
+ * A QAP witness.
+ */
+template<typename FieldT>
+class qap_witness {
+private:
+ size_t num_variables_;
+ size_t degree_;
+ size_t num_inputs_;
+
+public:
+ FieldT d1, d2, d3;
+
+ std::vector<FieldT> coefficients_for_ABCs;
+ std::vector<FieldT> coefficients_for_H;
+
+ qap_witness(const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3,
+ const std::vector<FieldT> &coefficients_for_ABCs,
+ const std::vector<FieldT> &coefficients_for_H);
+
+ qap_witness(const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3,
+ const std::vector<FieldT> &coefficients_for_ABCs,
+ std::vector<FieldT> &&coefficients_for_H);
+
+ qap_witness(const qap_witness<FieldT> &other) = default;
+ qap_witness(qap_witness<FieldT> &&other) = default;
+ qap_witness& operator=(const qap_witness<FieldT> &other) = default;
+ qap_witness& operator=(qap_witness<FieldT> &&other) = default;
+
+ size_t num_variables() const;
+ size_t degree() const;
+ size_t num_inputs() const;
+};
+
+} // libsnark
+
+#include "relations/arithmetic_programs/qap/qap.tcc"
+
+#endif // QAP_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for a QAP ("Quadratic Arithmetic Program").
+
+ See qap.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef QAP_TCC_
+#define QAP_TCC_
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "algebra/evaluation_domain/evaluation_domain.hpp"
+#include "algebra/scalar_multiplication/multiexp.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+qap_instance<FieldT>::qap_instance(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const std::vector<std::map<size_t, FieldT> > &A_in_Lagrange_basis,
+ const std::vector<std::map<size_t, FieldT> > &B_in_Lagrange_basis,
+ const std::vector<std::map<size_t, FieldT> > &C_in_Lagrange_basis) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ domain(domain),
+ A_in_Lagrange_basis(A_in_Lagrange_basis),
+ B_in_Lagrange_basis(B_in_Lagrange_basis),
+ C_in_Lagrange_basis(C_in_Lagrange_basis)
+{
+}
+
+template<typename FieldT>
+qap_instance<FieldT>::qap_instance(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ std::vector<std::map<size_t, FieldT> > &&A_in_Lagrange_basis,
+ std::vector<std::map<size_t, FieldT> > &&B_in_Lagrange_basis,
+ std::vector<std::map<size_t, FieldT> > &&C_in_Lagrange_basis) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ domain(domain),
+ A_in_Lagrange_basis(std::move(A_in_Lagrange_basis)),
+ B_in_Lagrange_basis(std::move(B_in_Lagrange_basis)),
+ C_in_Lagrange_basis(std::move(C_in_Lagrange_basis))
+{
+}
+
+template<typename FieldT>
+size_t qap_instance<FieldT>::num_variables() const
+{
+ return num_variables_;
+}
+
+template<typename FieldT>
+size_t qap_instance<FieldT>::degree() const
+{
+ return degree_;
+}
+
+template<typename FieldT>
+size_t qap_instance<FieldT>::num_inputs() const
+{
+ return num_inputs_;
+}
+
+template<typename FieldT>
+bool qap_instance<FieldT>::is_satisfied(const qap_witness<FieldT> &witness) const
+{
+ const FieldT t = FieldT::random_element();
+
+ std::vector<FieldT> At(this->num_variables()+1, FieldT::zero());
+ std::vector<FieldT> Bt(this->num_variables()+1, FieldT::zero());
+ std::vector<FieldT> Ct(this->num_variables()+1, FieldT::zero());
+ std::vector<FieldT> Ht(this->degree()+1);
+
+ const FieldT Zt = this->domain->compute_Z(t);
+
+ const std::vector<FieldT> u = this->domain->lagrange_coeffs(t);
+
+ for (size_t i = 0; i < this->num_variables()+1; ++i)
+ {
+ for (auto &el : A_in_Lagrange_basis[i])
+ {
+ At[i] += u[el.first] * el.second;
+ }
+
+ for (auto &el : B_in_Lagrange_basis[i])
+ {
+ Bt[i] += u[el.first] * el.second;
+ }
+
+ for (auto &el : C_in_Lagrange_basis[i])
+ {
+ Ct[i] += u[el.first] * el.second;
+ }
+ }
+
+ FieldT ti = FieldT::one();
+ for (size_t i = 0; i < this->degree()+1; ++i)
+ {
+ Ht[i] = ti;
+ ti *= t;
+ }
+
+ const qap_instance_evaluation<FieldT> eval_qap_inst(this->domain,
+ this->num_variables(),
+ this->degree(),
+ this->num_inputs(),
+ t,
+ std::move(At),
+ std::move(Bt),
+ std::move(Ct),
+ std::move(Ht),
+ Zt);
+ return eval_qap_inst.is_satisfied(witness);
+}
+
+template<typename FieldT>
+qap_instance_evaluation<FieldT>::qap_instance_evaluation(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &t,
+ const std::vector<FieldT> &At,
+ const std::vector<FieldT> &Bt,
+ const std::vector<FieldT> &Ct,
+ const std::vector<FieldT> &Ht,
+ const FieldT &Zt) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ domain(domain),
+ t(t),
+ At(At),
+ Bt(Bt),
+ Ct(Ct),
+ Ht(Ht),
+ Zt(Zt)
+{
+}
+
+template<typename FieldT>
+qap_instance_evaluation<FieldT>::qap_instance_evaluation(const std::shared_ptr<evaluation_domain<FieldT> > &domain,
+ const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &t,
+ std::vector<FieldT> &&At,
+ std::vector<FieldT> &&Bt,
+ std::vector<FieldT> &&Ct,
+ std::vector<FieldT> &&Ht,
+ const FieldT &Zt) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ domain(domain),
+ t(t),
+ At(std::move(At)),
+ Bt(std::move(Bt)),
+ Ct(std::move(Ct)),
+ Ht(std::move(Ht)),
+ Zt(Zt)
+{
+}
+
+template<typename FieldT>
+size_t qap_instance_evaluation<FieldT>::num_variables() const
+{
+ return num_variables_;
+}
+
+template<typename FieldT>
+size_t qap_instance_evaluation<FieldT>::degree() const
+{
+ return degree_;
+}
+
+template<typename FieldT>
+size_t qap_instance_evaluation<FieldT>::num_inputs() const
+{
+ return num_inputs_;
+}
+
+template<typename FieldT>
+bool qap_instance_evaluation<FieldT>::is_satisfied(const qap_witness<FieldT> &witness) const
+{
+
+ if (this->num_variables() != witness.num_variables())
+ {
+ return false;
+ }
+
+ if (this->degree() != witness.degree())
+ {
+ return false;
+ }
+
+ if (this->num_inputs() != witness.num_inputs())
+ {
+ return false;
+ }
+
+ if (this->num_variables() != witness.coefficients_for_ABCs.size())
+ {
+ return false;
+ }
+
+ if (this->degree()+1 != witness.coefficients_for_H.size())
+ {
+ return false;
+ }
+
+ if (this->At.size() != this->num_variables()+1 || this->Bt.size() != this->num_variables()+1 || this->Ct.size() != this->num_variables()+1)
+ {
+ return false;
+ }
+
+ if (this->Ht.size() != this->degree()+1)
+ {
+ return false;
+ }
+
+ if (this->Zt != this->domain->compute_Z(this->t))
+ {
+ return false;
+ }
+
+ FieldT ans_A = this->At[0] + witness.d1*this->Zt;
+ FieldT ans_B = this->Bt[0] + witness.d2*this->Zt;
+ FieldT ans_C = this->Ct[0] + witness.d3*this->Zt;
+ FieldT ans_H = FieldT::zero();
+
+ ans_A = ans_A + naive_plain_exp<FieldT, FieldT>(this->At.begin()+1, this->At.begin()+1+this->num_variables(),
+ witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables());
+ ans_B = ans_B + naive_plain_exp<FieldT, FieldT>(this->Bt.begin()+1, this->Bt.begin()+1+this->num_variables(),
+ witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables());
+ ans_C = ans_C + naive_plain_exp<FieldT, FieldT>(this->Ct.begin()+1, this->Ct.begin()+1+this->num_variables(),
+ witness.coefficients_for_ABCs.begin(), witness.coefficients_for_ABCs.begin()+this->num_variables());
+ ans_H = ans_H + naive_plain_exp<FieldT, FieldT>(this->Ht.begin(), this->Ht.begin()+this->degree()+1,
+ witness.coefficients_for_H.begin(), witness.coefficients_for_H.begin()+this->degree()+1);
+
+ if (ans_A * ans_B - ans_C != ans_H * this->Zt)
+ {
+ return false;
+ }
+
+ return true;
+}
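+
+/* Editorial note: the check above verifies the defining QAP identity at the
+   evaluation point t, namely
+       A(t) * B(t) - C(t) = H(t) * Z(t),
+   where A(t) = A_0(t) + sum_k w_k A_k(t) + d1*Z(t) (and similarly for B and C),
+   matching the description of the witness map in r1cs_to_qap.tcc. */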
+
+template<typename FieldT>
+qap_witness<FieldT>::qap_witness(const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3,
+ const std::vector<FieldT> &coefficients_for_ABCs,
+ const std::vector<FieldT> &coefficients_for_H) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ d1(d1),
+ d2(d2),
+ d3(d3),
+ coefficients_for_ABCs(coefficients_for_ABCs),
+ coefficients_for_H(coefficients_for_H)
+{
+}
+
+template<typename FieldT>
+qap_witness<FieldT>::qap_witness(const size_t num_variables,
+ const size_t degree,
+ const size_t num_inputs,
+ const FieldT &d1,
+ const FieldT &d2,
+ const FieldT &d3,
+ const std::vector<FieldT> &coefficients_for_ABCs,
+ std::vector<FieldT> &&coefficients_for_H) :
+ num_variables_(num_variables),
+ degree_(degree),
+ num_inputs_(num_inputs),
+ d1(d1),
+ d2(d2),
+ d3(d3),
+ coefficients_for_ABCs(coefficients_for_ABCs),
+ coefficients_for_H(std::move(coefficients_for_H))
+{
+}
+
+
+template<typename FieldT>
+size_t qap_witness<FieldT>::num_variables() const
+{
+ return num_variables_;
+}
+
+template<typename FieldT>
+size_t qap_witness<FieldT>::degree() const
+{
+ return degree_;
+}
+
+template<typename FieldT>
+size_t qap_witness<FieldT>::num_inputs() const
+{
+ return num_inputs_;
+}
+
+
+} // libsnark
+
+#endif // QAP_TCC_
--- /dev/null
+/**
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <vector>
+
+#include "algebra/curves/mnt/mnt6/mnt6_pp.hpp"
+#include "algebra/fields/field_utils.hpp"
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "reductions/r1cs_to_qap/r1cs_to_qap.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp"
+
+using namespace libsnark;
+
+template<typename FieldT>
+void test_qap(const size_t qap_degree, const size_t num_inputs, const bool binary_input)
+{
+ /*
+ We construct an instance where the QAP degree is qap_degree.
+ To do so, we generate a R1CS instance whose number of constraints is qap_degree - num_inputs - 1
+ (see the transformation from R1CS to QAP for why this is the case).
+ Hence we need qap_degree >= num_inputs + 1.
+ */
+ assert(num_inputs + 1 <= qap_degree);
+ enter_block("Call to test_qap");
+
+ const size_t num_constraints = qap_degree - num_inputs - 1;
+
+ print_indent(); printf("* QAP degree: %zu\n", qap_degree);
+ print_indent(); printf("* Number of inputs: %zu\n", num_inputs);
+ print_indent(); printf("* Number of R1CS constraints: %zu\n", num_constraints);
+ print_indent(); printf("* Input type: %s\n", binary_input ? "binary" : "field");
+
+ enter_block("Generate constraint system and assignment");
+ r1cs_example<FieldT> example;
+ if (binary_input)
+ {
+ example = generate_r1cs_example_with_binary_input<FieldT>(num_constraints, num_inputs);
+ }
+ else
+ {
+ example = generate_r1cs_example_with_field_input<FieldT>(num_constraints, num_inputs);
+ }
+ leave_block("Generate constraint system and assignment");
+
+ enter_block("Check satisfiability of constraint system");
+ assert(example.constraint_system.is_satisfied(example.primary_input, example.auxiliary_input));
+ leave_block("Check satisfiability of constraint system");
+
+ const FieldT t = FieldT::random_element(),
+ d1 = FieldT::random_element(),
+ d2 = FieldT::random_element(),
+ d3 = FieldT::random_element();
+
+ enter_block("Compute QAP instance 1");
+ qap_instance<FieldT> qap_inst_1 = r1cs_to_qap_instance_map(example.constraint_system);
+ leave_block("Compute QAP instance 1");
+
+ enter_block("Compute QAP instance 2");
+ qap_instance_evaluation<FieldT> qap_inst_2 = r1cs_to_qap_instance_map_with_evaluation(example.constraint_system, t);
+ leave_block("Compute QAP instance 2");
+
+ enter_block("Compute QAP witness");
+ qap_witness<FieldT> qap_wit = r1cs_to_qap_witness_map(example.constraint_system, example.primary_input, example.auxiliary_input, d1, d2, d3);
+ leave_block("Compute QAP witness");
+
+ enter_block("Check satisfiability of QAP instance 1");
+ assert(qap_inst_1.is_satisfied(qap_wit));
+ leave_block("Check satisfiability of QAP instance 1");
+
+ enter_block("Check satisfiability of QAP instance 2");
+ assert(qap_inst_2.is_satisfied(qap_wit));
+ leave_block("Check satisfiability of QAP instance 2");
+
+ leave_block("Call to test_qap");
+}
+
+int main()
+{
+ start_profiling();
+
+ mnt6_pp::init_public_params();
+
+ const size_t num_inputs = 10;
+
+ const size_t basic_domain_size = 1ul<<mnt6_Fr::s;
+ const size_t step_domain_size = (1ul<<10) + (1ul<<8);
+ const size_t extended_domain_size = 1ul<<(mnt6_Fr::s+1);
+ const size_t extended_domain_size_special = extended_domain_size-1;
+
+ enter_block("Test QAP with binary input");
+
+ test_qap<Fr<mnt6_pp> >(basic_domain_size, num_inputs, true);
+ test_qap<Fr<mnt6_pp> >(step_domain_size, num_inputs, true);
+ test_qap<Fr<mnt6_pp> >(extended_domain_size, num_inputs, true);
+ test_qap<Fr<mnt6_pp> >(extended_domain_size_special, num_inputs, true);
+
+ leave_block("Test QAP with binary input");
+
+ enter_block("Test QAP with field input");
+
+ test_qap<Fr<mnt6_pp> >(basic_domain_size, num_inputs, false);
+ test_qap<Fr<mnt6_pp> >(step_domain_size, num_inputs, false);
+ test_qap<Fr<mnt6_pp> >(extended_domain_size, num_inputs, false);
+ test_qap<Fr<mnt6_pp> >(extended_domain_size_special, num_inputs, false);
+
+ leave_block("Test QAP with field input");
+}
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a R1CS example, as well as functions to sample
+ R1CS examples with prescribed parameters (according to some distribution).
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_EXAMPLES_HPP_
+#define R1CS_EXAMPLES_HPP_
+
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+
+namespace libsnark {
+
+/**
+ * A R1CS example comprises a R1CS constraint system, R1CS input, and R1CS witness.
+ */
+template<typename FieldT>
+struct r1cs_example {
+ r1cs_constraint_system<FieldT> constraint_system;
+ r1cs_primary_input<FieldT> primary_input;
+ r1cs_auxiliary_input<FieldT> auxiliary_input;
+
+ r1cs_example<FieldT>() = default;
+ r1cs_example<FieldT>(const r1cs_example<FieldT> &other) = default;
+ r1cs_example<FieldT>(const r1cs_constraint_system<FieldT> &constraint_system,
+ const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input) :
+ constraint_system(constraint_system),
+ primary_input(primary_input),
+ auxiliary_input(auxiliary_input)
+ {};
+ r1cs_example<FieldT>(r1cs_constraint_system<FieldT> &&constraint_system,
+ r1cs_primary_input<FieldT> &&primary_input,
+ r1cs_auxiliary_input<FieldT> &&auxiliary_input) :
+ constraint_system(std::move(constraint_system)),
+ primary_input(std::move(primary_input)),
+ auxiliary_input(std::move(auxiliary_input))
+ {};
+};
+
+/**
+ * Generate a R1CS example such that:
+ * - the number of constraints of the R1CS constraint system is num_constraints;
+ * - the number of variables of the R1CS constraint system is (approximately) num_constraints;
+ * - the number of inputs of the R1CS constraint system is num_inputs;
+ * - the R1CS input consists of ``full'' field elements (which typically require the full log|Field| bits to represent).
+ */
+template<typename FieldT>
+r1cs_example<FieldT> generate_r1cs_example_with_field_input(const size_t num_constraints,
+ const size_t num_inputs);
+
+/**
+ * Generate a R1CS example such that:
+ * - the number of constraints of the R1CS constraint system is num_constraints;
+ * - the number of variables of the R1CS constraint system is (approximately) num_constraints;
+ * - the number of inputs of the R1CS constraint system is num_inputs;
+ * - the R1CS input consists of binary values (as opposed to ``full'' field elements).
+ */
+template<typename FieldT>
+r1cs_example<FieldT> generate_r1cs_example_with_binary_input(const size_t num_constraints,
+ const size_t num_inputs);
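+
+/*
+ Illustrative sketch (editorial note, not part of the library): sampling an
+ example and checking it, following the pattern used by the QAP tests in this
+ codebase. The sizes 1000 and 10 are assumptions chosen only for this example.
+
+   r1cs_example<FieldT> example =
+       generate_r1cs_example_with_field_input<FieldT>(1000, 10);
+   assert(example.constraint_system.is_satisfied(example.primary_input,
+                                                 example.auxiliary_input));
+*/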
+
+} // libsnark
+
+#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.tcc"
+
+#endif // R1CS_EXAMPLES_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of functions to sample R1CS examples with prescribed parameters
+ (according to some distribution).
+
+ See r1cs_examples.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_EXAMPLES_TCC_
+#define R1CS_EXAMPLES_TCC_
+
+#include <cassert>
+
+#include "common/utils.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+r1cs_example<FieldT> generate_r1cs_example_with_field_input(const size_t num_constraints,
+ const size_t num_inputs)
+{
+ enter_block("Call to generate_r1cs_example_with_field_input");
+
+ assert(num_inputs <= num_constraints + 2);
+
+ r1cs_constraint_system<FieldT> cs;
+ cs.primary_input_size = num_inputs;
+ cs.auxiliary_input_size = 2 + num_constraints - num_inputs; /* the circuit below uses 2 + num_constraints variables in total (a, b, and one new variable per constraint); num_inputs of them are primary */
+
+ r1cs_variable_assignment<FieldT> full_variable_assignment;
+ FieldT a = FieldT::random_element();
+ FieldT b = FieldT::random_element();
+ full_variable_assignment.push_back(a);
+ full_variable_assignment.push_back(b);
+
+ for (size_t i = 0; i < num_constraints-1; ++i)
+ {
+ linear_combination<FieldT> A, B, C;
+
+ if (i % 2)
+ {
+ // a * b = c
+ A.add_term(i+1, 1);
+ B.add_term(i+2, 1);
+ C.add_term(i+3, 1);
+ FieldT tmp = a*b;
+ full_variable_assignment.push_back(tmp);
+ a = b; b = tmp;
+ }
+ else
+ {
+ // a + b = c
+ B.add_term(0, 1);
+ A.add_term(i+1, 1);
+ A.add_term(i+2, 1);
+ C.add_term(i+3, 1);
+ FieldT tmp = a+b;
+ full_variable_assignment.push_back(tmp);
+ a = b; b = tmp;
+ }
+
+ cs.add_constraint(r1cs_constraint<FieldT>(A, B, C));
+ }
+
+ linear_combination<FieldT> A, B, C;
+ FieldT fin = FieldT::zero();
+ for (size_t i = 1; i < cs.num_variables(); ++i)
+ {
+ A.add_term(i, 1);
+ B.add_term(i, 1);
+ fin = fin + full_variable_assignment[i-1];
+ }
+ C.add_term(cs.num_variables(), 1);
+ cs.add_constraint(r1cs_constraint<FieldT>(A, B, C));
+ full_variable_assignment.push_back(fin.squared());
+
+ /* split variable assignment */
+ r1cs_primary_input<FieldT> primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + num_inputs);
+ r1cs_auxiliary_input<FieldT> auxiliary_input(full_variable_assignment.begin() + num_inputs, full_variable_assignment.end());
+
+ /* sanity checks */
+ assert(cs.num_variables() == full_variable_assignment.size());
+ assert(cs.num_variables() >= num_inputs);
+ assert(cs.num_inputs() == num_inputs);
+ assert(cs.num_constraints() == num_constraints);
+ assert(cs.is_satisfied(primary_input, auxiliary_input));
+
+ leave_block("Call to generate_r1cs_example_with_field_input");
+
+ return r1cs_example<FieldT>(std::move(cs), std::move(primary_input), std::move(auxiliary_input));
+}
+
+template<typename FieldT>
+r1cs_example<FieldT> generate_r1cs_example_with_binary_input(const size_t num_constraints,
+ const size_t num_inputs)
+{
+ enter_block("Call to generate_r1cs_example_with_binary_input");
+
+ assert(num_inputs >= 1);
+
+ r1cs_constraint_system<FieldT> cs;
+ cs.primary_input_size = num_inputs;
+ cs.auxiliary_input_size = num_constraints; /* we will add one auxiliary variable per constraint */
+
+ r1cs_variable_assignment<FieldT> full_variable_assignment;
+ for (size_t i = 0; i < num_inputs; ++i)
+ {
+ full_variable_assignment.push_back(FieldT(std::rand() % 2));
+ }
+
+ size_t lastvar = num_inputs-1;
+ for (size_t i = 0; i < num_constraints; ++i)
+ {
+ ++lastvar;
+ const size_t u = (i == 0 ? std::rand() % num_inputs : std::rand() % i);
+ const size_t v = (i == 0 ? std::rand() % num_inputs : std::rand() % i);
+
+ /* choose two random bits and XOR them together:
+ res = u + v - 2 * u * v
+ 2 * u * v = u + v - res
+ */
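+ /* in the constraint below, A = 2 * x_u, B = x_v, and C = x_u + x_v - res;
+ when u == v the two C-terms for x_u and x_v collapse into a single term
+ with coefficient 2, hence the special case */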
+ linear_combination<FieldT> A, B, C;
+ A.add_term(u+1, 2);
+ B.add_term(v+1, 1);
+ if (u == v)
+ {
+ C.add_term(u+1, 2);
+ }
+ else
+ {
+ C.add_term(u+1, 1);
+ C.add_term(v+1, 1);
+ }
+ C.add_term(lastvar+1, -FieldT::one());
+
+ cs.add_constraint(r1cs_constraint<FieldT>(A, B, C));
+ full_variable_assignment.push_back(full_variable_assignment[u] + full_variable_assignment[v] - full_variable_assignment[u] * full_variable_assignment[v] - full_variable_assignment[u] * full_variable_assignment[v]);
+ }
+
+ /* split variable assignment */
+ r1cs_primary_input<FieldT> primary_input(full_variable_assignment.begin(), full_variable_assignment.begin() + num_inputs);
+ r1cs_auxiliary_input<FieldT> auxiliary_input(full_variable_assignment.begin() + num_inputs, full_variable_assignment.end());
+
+ /* sanity checks */
+ assert(cs.num_variables() == full_variable_assignment.size());
+ assert(cs.num_variables() >= num_inputs);
+ assert(cs.num_inputs() == num_inputs);
+ assert(cs.num_constraints() == num_constraints);
+ assert(cs.is_satisfied(primary_input, auxiliary_input));
+
+ leave_block("Call to generate_r1cs_example_with_binary_input");
+
+ return r1cs_example<FieldT>(std::move(cs), std::move(primary_input), std::move(auxiliary_input));
+}
+
+} // libsnark
+
+#endif // R1CS_EXAMPLES_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for:
+ - a R1CS constraint,
+ - a R1CS variable assignment, and
+ - a R1CS constraint system.
+
+ Above, R1CS stands for "Rank-1 Constraint System".
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_HPP_
+#define R1CS_HPP_
+
+#include <cstdlib>
+#include <iostream>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "relations/variable.hpp"
+
+namespace libsnark {
+
+/************************* R1CS constraint ***********************************/
+
+template<typename FieldT>
+class r1cs_constraint;
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const r1cs_constraint<FieldT> &c);
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, r1cs_constraint<FieldT> &c);
+
+/**
+ * A R1CS constraint is a formal expression of the form
+ *
+ * < A , X > * < B , X > = < C , X > ,
+ *
+ * where X = (x_0,x_1,...,x_m) is a vector of formal variables and A,B,C each
+ * consist of 1+m elements in <FieldT>.
+ *
+ * A R1CS constraint is used to construct a R1CS constraint system (see below).
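+ *
+ * For example, the constraint x_1 * x_2 = x_3 (over variables x_1, x_2, x_3)
+ * can be expressed as
+ *
+ * r1cs_constraint<FieldT> c(variable<FieldT>(1), variable<FieldT>(2), variable<FieldT>(3));
+ *
+ * relying on the implicit conversion from variable<FieldT> (declared in
+ * relations/variable.hpp) to linear_combination<FieldT>.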
+ */
+template<typename FieldT>
+class r1cs_constraint {
+public:
+
+ linear_combination<FieldT> a, b, c;
+
+ r1cs_constraint() {};
+ r1cs_constraint(const linear_combination<FieldT> &a,
+ const linear_combination<FieldT> &b,
+ const linear_combination<FieldT> &c);
+
+ r1cs_constraint(const std::initializer_list<linear_combination<FieldT> > &A,
+ const std::initializer_list<linear_combination<FieldT> > &B,
+ const std::initializer_list<linear_combination<FieldT> > &C);
+
+ bool operator==(const r1cs_constraint<FieldT> &other) const;
+
+ friend std::ostream& operator<< <FieldT>(std::ostream &out, const r1cs_constraint<FieldT> &c);
+ friend std::istream& operator>> <FieldT>(std::istream &in, r1cs_constraint<FieldT> &c);
+};
+
+/************************* R1CS variable assignment **************************/
+
+/**
+ * A R1CS variable assignment is a vector of <FieldT> elements that represents
+ * a candidate solution to a R1CS constraint system (see below).
+ */
+
+/* TODO: specify that it does *NOT* include the constant 1 */
+template<typename FieldT>
+using r1cs_primary_input = std::vector<FieldT>;
+
+template<typename FieldT>
+using r1cs_auxiliary_input = std::vector<FieldT>;
+
+template<typename FieldT>
+using r1cs_variable_assignment = std::vector<FieldT>; /* note the changed name! (TODO: remove this comment after primary_input transition is complete) */
+
+/************************* R1CS constraint system ****************************/
+
+template<typename FieldT>
+class r1cs_constraint_system;
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const r1cs_constraint_system<FieldT> &cs);
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, r1cs_constraint_system<FieldT> &cs);
+
+/**
+ * A system of R1CS constraints looks like
+ *
+ * { < A_k , X > * < B_k , X > = < C_k , X > }_{k=1}^{n} .
+ *
+ * In other words, the system is satisfied if and only if there exists a
+ * R1CS variable assignment for which each R1CS constraint is satisfied.
+ *
+ * NOTE:
+ * The 0-th variable (i.e., "x_{0}") always represents the constant 1.
+ * Thus, the 0-th variable is not included in num_variables.
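+ *
+ * A usage sketch (enforcing x_1 * x_1 = x_2, with x_1 as the primary input
+ * and x_2 as the auxiliary witness):
+ *
+ * r1cs_constraint_system<FieldT> cs;
+ * cs.primary_input_size = 1;
+ * cs.auxiliary_input_size = 1;
+ * cs.add_constraint(r1cs_constraint<FieldT>(variable<FieldT>(1), variable<FieldT>(1), variable<FieldT>(2)));
+ * // satisfied by primary input {3} and auxiliary input {9}, since 3 * 3 = 9
+ * assert(cs.is_satisfied(r1cs_primary_input<FieldT>({FieldT(3)}), r1cs_auxiliary_input<FieldT>({FieldT(9)})));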
+ */
+template<typename FieldT>
+class r1cs_constraint_system {
+public:
+ size_t primary_input_size;
+ size_t auxiliary_input_size;
+
+ std::vector<r1cs_constraint<FieldT> > constraints;
+
+ r1cs_constraint_system() : primary_input_size(0), auxiliary_input_size(0) {}
+
+ size_t num_inputs() const;
+ size_t num_variables() const;
+ size_t num_constraints() const;
+
+#ifdef DEBUG
+ std::map<size_t, std::string> constraint_annotations;
+ std::map<size_t, std::string> variable_annotations;
+#endif
+
+ bool is_valid() const;
+ bool is_satisfied(const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input) const;
+
+ void add_constraint(const r1cs_constraint<FieldT> &c);
+ void add_constraint(const r1cs_constraint<FieldT> &c, const std::string &annotation);
+
+ void swap_AB_if_beneficial();
+
+ bool operator==(const r1cs_constraint_system<FieldT> &other) const;
+
+ friend std::ostream& operator<< <FieldT>(std::ostream &out, const r1cs_constraint_system<FieldT> &cs);
+ friend std::istream& operator>> <FieldT>(std::istream &in, r1cs_constraint_system<FieldT> &cs);
+
+ void report_linear_constraint_statistics() const;
+};
+
+
+} // libsnark
+
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.tcc"
+
+#endif // R1CS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for:
+ - a R1CS constraint,
+ - a R1CS variable assignment, and
+ - a R1CS constraint system.
+
+ See r1cs.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_TCC_
+#define R1CS_TCC_
+
+#include <algorithm>
+#include <cassert>
+#include <set>
+#include "common/utils.hpp"
+#include "common/profiling.hpp"
+#include "algebra/fields/bigint.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+r1cs_constraint<FieldT>::r1cs_constraint(const linear_combination<FieldT> &a,
+ const linear_combination<FieldT> &b,
+ const linear_combination<FieldT> &c) :
+ a(a), b(b), c(c)
+{
+}
+
+template<typename FieldT>
+r1cs_constraint<FieldT>::r1cs_constraint(const std::initializer_list<linear_combination<FieldT> > &A,
+ const std::initializer_list<linear_combination<FieldT> > &B,
+ const std::initializer_list<linear_combination<FieldT> > &C)
+{
+ for (auto lc_A : A)
+ {
+ a.terms.insert(a.terms.end(), lc_A.terms.begin(), lc_A.terms.end());
+ }
+ for (auto lc_B : B)
+ {
+ b.terms.insert(b.terms.end(), lc_B.terms.begin(), lc_B.terms.end());
+ }
+ for (auto lc_C : C)
+ {
+ c.terms.insert(c.terms.end(), lc_C.terms.begin(), lc_C.terms.end());
+ }
+}
+
+template<typename FieldT>
+bool r1cs_constraint<FieldT>::operator==(const r1cs_constraint<FieldT> &other) const
+{
+ return (this->a == other.a &&
+ this->b == other.b &&
+ this->c == other.c);
+}
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const r1cs_constraint<FieldT> &c)
+{
+ out << c.a;
+ out << c.b;
+ out << c.c;
+
+ return out;
+}
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, r1cs_constraint<FieldT> &c)
+{
+ in >> c.a;
+ in >> c.b;
+ in >> c.c;
+
+ return in;
+}
+
+template<typename FieldT>
+size_t r1cs_constraint_system<FieldT>::num_inputs() const
+{
+ return primary_input_size;
+}
+
+template<typename FieldT>
+size_t r1cs_constraint_system<FieldT>::num_variables() const
+{
+ return primary_input_size + auxiliary_input_size;
+}
+
+
+template<typename FieldT>
+size_t r1cs_constraint_system<FieldT>::num_constraints() const
+{
+ return constraints.size();
+}
+
+template<typename FieldT>
+bool r1cs_constraint_system<FieldT>::is_valid() const
+{
+ if (this->num_inputs() > this->num_variables()) return false;
+
+ for (size_t c = 0; c < constraints.size(); ++c)
+ {
+ if (!(constraints[c].a.is_valid(this->num_variables()) &&
+ constraints[c].b.is_valid(this->num_variables()) &&
+ constraints[c].c.is_valid(this->num_variables())))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template<typename FieldT>
+void dump_r1cs_constraint(const r1cs_constraint<FieldT> &constraint,
+ const r1cs_variable_assignment<FieldT> &full_variable_assignment,
+ const std::map<size_t, std::string> &variable_annotations)
+{
+ printf("terms for a:\n"); constraint.a.print_with_assignment(full_variable_assignment, variable_annotations);
+ printf("terms for b:\n"); constraint.b.print_with_assignment(full_variable_assignment, variable_annotations);
+ printf("terms for c:\n"); constraint.c.print_with_assignment(full_variable_assignment, variable_annotations);
+}
+
+template<typename FieldT>
+bool r1cs_constraint_system<FieldT>::is_satisfied(const r1cs_primary_input<FieldT> &primary_input,
+ const r1cs_auxiliary_input<FieldT> &auxiliary_input) const
+{
+ assert(primary_input.size() == num_inputs());
+ assert(primary_input.size() + auxiliary_input.size() == num_variables());
+
+ r1cs_variable_assignment<FieldT> full_variable_assignment = primary_input;
+ full_variable_assignment.insert(full_variable_assignment.end(), auxiliary_input.begin(), auxiliary_input.end());
+
+ for (size_t c = 0; c < constraints.size(); ++c)
+ {
+ const FieldT ares = constraints[c].a.evaluate(full_variable_assignment);
+ const FieldT bres = constraints[c].b.evaluate(full_variable_assignment);
+ const FieldT cres = constraints[c].c.evaluate(full_variable_assignment);
+
+ if (!(ares*bres == cres))
+ {
+#ifdef DEBUG
+ auto it = constraint_annotations.find(c);
+ printf("constraint %zu (%s) unsatisfied\n", c, (it == constraint_annotations.end() ? "no annotation" : it->second.c_str()));
+ printf("<a,(1,x)> = "); ares.print();
+ printf("<b,(1,x)> = "); bres.print();
+ printf("<c,(1,x)> = "); cres.print();
+ printf("constraint was:\n");
+ dump_r1cs_constraint(constraints[c], full_variable_assignment, variable_annotations);
+#endif // DEBUG
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template<typename FieldT>
+void r1cs_constraint_system<FieldT>::add_constraint(const r1cs_constraint<FieldT> &c)
+{
+ constraints.emplace_back(c);
+}
+
+template<typename FieldT>
+void r1cs_constraint_system<FieldT>::add_constraint(const r1cs_constraint<FieldT> &c, const std::string &annotation)
+{
+#ifdef DEBUG
+ constraint_annotations[constraints.size()] = annotation;
+#endif
+ constraints.emplace_back(c);
+}
+
+template<typename FieldT>
+void r1cs_constraint_system<FieldT>::swap_AB_if_beneficial()
+{
+ enter_block("Call to r1cs_constraint_system::swap_AB_if_beneficial");
+
+ enter_block("Estimate densities");
+ bit_vector touched_by_A(this->num_variables() + 1, false), touched_by_B(this->num_variables() + 1, false);
+
+ for (size_t i = 0; i < this->constraints.size(); ++i)
+ {
+ for (size_t j = 0; j < this->constraints[i].a.terms.size(); ++j)
+ {
+ touched_by_A[this->constraints[i].a.terms[j].index] = true;
+ }
+
+ for (size_t j = 0; j < this->constraints[i].b.terms.size(); ++j)
+ {
+ touched_by_B[this->constraints[i].b.terms[j].index] = true;
+ }
+ }
+
+ size_t non_zero_A_count = 0, non_zero_B_count = 0;
+ for (size_t i = 0; i < this->num_variables() + 1; ++i)
+ {
+ non_zero_A_count += touched_by_A[i] ? 1 : 0;
+ non_zero_B_count += touched_by_B[i] ? 1 : 0;
+ }
+
+ if (!inhibit_profiling_info)
+ {
+ print_indent(); printf("* Non-zero A-count (estimate): %zu\n", non_zero_A_count);
+ print_indent(); printf("* Non-zero B-count (estimate): %zu\n", non_zero_B_count);
+ }
+ leave_block("Estimate densities");
+
+ if (non_zero_B_count > non_zero_A_count)
+ {
+ enter_block("Perform the swap");
+ for (size_t i = 0; i < this->constraints.size(); ++i)
+ {
+ std::swap(this->constraints[i].a, this->constraints[i].b);
+ }
+ leave_block("Perform the swap");
+ }
+ else
+ {
+ print_indent(); printf("Swap is not beneficial, not performing\n");
+ }
+
+ leave_block("Call to r1cs_constraint_system::swap_AB_if_beneficial");
+}
+
+template<typename FieldT>
+bool r1cs_constraint_system<FieldT>::operator==(const r1cs_constraint_system<FieldT> &other) const
+{
+ return (this->constraints == other.constraints &&
+ this->primary_input_size == other.primary_input_size &&
+ this->auxiliary_input_size == other.auxiliary_input_size);
+}
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const r1cs_constraint_system<FieldT> &cs)
+{
+ out << cs.primary_input_size << "\n";
+ out << cs.auxiliary_input_size << "\n";
+
+ out << cs.num_constraints() << "\n";
+ for (const r1cs_constraint<FieldT>& c : cs.constraints)
+ {
+ out << c;
+ }
+
+ return out;
+}
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, r1cs_constraint_system<FieldT> &cs)
+{
+ in >> cs.primary_input_size;
+ in >> cs.auxiliary_input_size;
+
+ cs.constraints.clear();
+
+ size_t s;
+ in >> s;
+
+ char b;
+ in.read(&b, 1);
+
+ cs.constraints.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ r1cs_constraint<FieldT> c;
+ in >> c;
+ cs.constraints.emplace_back(c);
+ }
+
+ return in;
+}
+
+template<typename FieldT>
+void r1cs_constraint_system<FieldT>::report_linear_constraint_statistics() const
+{
+#ifdef DEBUG
+ for (size_t i = 0; i < constraints.size(); ++i)
+ {
+ auto &constr = constraints[i];
+ bool a_is_const = true;
+ for (auto &t : constr.a.terms)
+ {
+ a_is_const = a_is_const && (t.index == 0);
+ }
+
+ bool b_is_const = true;
+ for (auto &t : constr.b.terms)
+ {
+ b_is_const = b_is_const && (t.index == 0);
+ }
+
+ if (a_is_const || b_is_const)
+ {
+ auto it = constraint_annotations.find(i);
+ printf("%s\n", (it == constraint_annotations.end() ? FORMAT("", "constraint_%zu", i) : it->second).c_str());
+ }
+ }
+#endif
+}
+
+} // libsnark
+#endif // R1CS_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for:
+ - a variable (i.e., x_i),
+ - a linear term (i.e., a_i * x_i), and
+ - a linear combination (i.e., sum_i a_i * x_i).
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef VARIABLE_HPP_
+#define VARIABLE_HPP_
+
+#include <cstddef>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace libsnark {
+
+/**
+ * Mnemonic typedefs.
+ */
+typedef size_t var_index_t;
+typedef long integer_coeff_t;
+
+/**
+ * Forward declaration.
+ */
+template<typename FieldT>
+class linear_term;
+
+/**
+ * Forward declaration.
+ */
+template<typename FieldT>
+class linear_combination;
+
+/********************************* Variable **********************************/
+
+/**
+ * A variable represents a formal expression of the form "x_{index}".
+ */
+template<typename FieldT>
+class variable {
+public:
+
+ var_index_t index;
+
+ variable(const var_index_t index = 0) : index(index) {};
+
+ linear_term<FieldT> operator*(const integer_coeff_t int_coeff) const;
+ linear_term<FieldT> operator*(const FieldT &field_coeff) const;
+
+ linear_combination<FieldT> operator+(const linear_combination<FieldT> &other) const;
+ linear_combination<FieldT> operator-(const linear_combination<FieldT> &other) const;
+
+ linear_term<FieldT> operator-() const;
+
+ bool operator==(const variable<FieldT> &other) const;
+};
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const integer_coeff_t int_coeff, const variable<FieldT> &var);
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const FieldT &field_coeff, const variable<FieldT> &var);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const variable<FieldT> &var);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const variable<FieldT> &var);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const variable<FieldT> &var);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const variable<FieldT> &var);
+
+
+/****************************** Linear term **********************************/
+
+/**
+ * A linear term represents a formal expression of the form "coeff * x_{index}".
+ */
+template<typename FieldT>
+class linear_term {
+public:
+
+ var_index_t index = 0;
+ FieldT coeff;
+
+ linear_term() {};
+ linear_term(const variable<FieldT> &var);
+ linear_term(const variable<FieldT> &var, const integer_coeff_t int_coeff);
+ linear_term(const variable<FieldT> &var, const FieldT &field_coeff);
+
+ linear_term<FieldT> operator*(const integer_coeff_t int_coeff) const;
+ linear_term<FieldT> operator*(const FieldT &field_coeff) const;
+
+ linear_combination<FieldT> operator+(const linear_combination<FieldT> &other) const;
+ linear_combination<FieldT> operator-(const linear_combination<FieldT> &other) const;
+
+ linear_term<FieldT> operator-() const;
+
+ bool operator==(const linear_term<FieldT> &other) const;
+};
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt);
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const FieldT &field_coeff, const linear_term<FieldT> &lt);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const linear_term<FieldT> &lt);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const linear_term<FieldT> &lt);
+
+
+/***************************** Linear combination ****************************/
+
+template<typename FieldT>
+class linear_combination;
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, linear_combination<FieldT> &lc);
+
+/**
+ * A linear combination represents a formal expression of the form "sum_i coeff_i * x_{index_i}".
+ */
+template<typename FieldT>
+class linear_combination {
+public:
+
+ std::vector<linear_term<FieldT> > terms;
+
+ linear_combination() {};
+ linear_combination(const integer_coeff_t int_coeff);
+ linear_combination(const FieldT &field_coeff);
+ linear_combination(const variable<FieldT> &var);
+ linear_combination(const linear_term<FieldT> &lt);
+ linear_combination(const std::vector<linear_term<FieldT> > &all_terms);
+
+ /* for supporting range-based for loops over linear_combination */
+ typename std::vector<linear_term<FieldT> >::const_iterator begin() const;
+ typename std::vector<linear_term<FieldT> >::const_iterator end() const;
+
+ void add_term(const variable<FieldT> &var);
+ void add_term(const variable<FieldT> &var, const integer_coeff_t int_coeff);
+ void add_term(const variable<FieldT> &var, const FieldT &field_coeff);
+
+ void add_term(const linear_term<FieldT> &lt);
+
+ FieldT evaluate(const std::vector<FieldT> &assignment) const;
+
+ linear_combination<FieldT> operator*(const integer_coeff_t int_coeff) const;
+ linear_combination<FieldT> operator*(const FieldT &field_coeff) const;
+
+ linear_combination<FieldT> operator+(const linear_combination<FieldT> &other) const;
+
+ linear_combination<FieldT> operator-(const linear_combination<FieldT> &other) const;
+ linear_combination<FieldT> operator-() const;
+
+ bool operator==(const linear_combination<FieldT> &other) const;
+
+ bool is_valid(const size_t num_variables) const;
+
+ void print(const std::map<size_t, std::string> &variable_annotations = std::map<size_t, std::string>()) const;
+ void print_with_assignment(const std::vector<FieldT> &full_assignment, const std::map<size_t, std::string> &variable_annotations = std::map<size_t, std::string>()) const;
+
+ friend std::ostream& operator<< <FieldT>(std::ostream &out, const linear_combination<FieldT> &lc);
+ friend std::istream& operator>> <FieldT>(std::istream &in, linear_combination<FieldT> &lc);
+};
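+
+/* A usage sketch (assuming a field type FieldT exposing the interface above):
+ *
+ * variable<FieldT> x1(1), x2(2);
+ * linear_combination<FieldT> lc = 2 * x1 + 3 * x2 + 1;
+ * // assignments omit the constant x_0 = 1, so variable x_i maps to assignment[i-1]
+ * FieldT val = lc.evaluate(std::vector<FieldT>({FieldT(5), FieldT(7)})); // 2*5 + 3*7 + 1 = 32
+ */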
+
+template<typename FieldT>
+linear_combination<FieldT> operator*(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+linear_combination<FieldT> operator*(const FieldT &field_coeff, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc);
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const linear_combination<FieldT> &lc);
+
+} // libsnark
+
+#include "relations/variable.tcc"
+
+#endif // VARIABLE_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of interfaces for:
+ - a variable (i.e., x_i),
+ - a linear term (i.e., a_i * x_i), and
+ - a linear combination (i.e., sum_i a_i * x_i).
+
+ See variable.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef VARIABLE_TCC_
+#define VARIABLE_TCC_
+
+#include <algorithm>
+#include <cassert>
+
+#include "algebra/fields/bigint.hpp"
+
+namespace libsnark {
+
+template<typename FieldT>
+linear_term<FieldT> variable<FieldT>::operator*(const integer_coeff_t int_coeff) const
+{
+ return linear_term<FieldT>(*this, int_coeff);
+}
+
+template<typename FieldT>
+linear_term<FieldT> variable<FieldT>::operator*(const FieldT &field_coeff) const
+{
+ return linear_term<FieldT>(*this, field_coeff);
+}
+
+template<typename FieldT>
+linear_combination<FieldT> variable<FieldT>::operator+(const linear_combination<FieldT> &other) const
+{
+ linear_combination<FieldT> result;
+
+ result.add_term(*this);
+ result.terms.insert(result.terms.begin(), other.terms.begin(), other.terms.end());
+
+ return result;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> variable<FieldT>::operator-(const linear_combination<FieldT> &other) const
+{
+ return (*this) + (-other);
+}
+
+template<typename FieldT>
+linear_term<FieldT> variable<FieldT>::operator-() const
+{
+ return linear_term<FieldT>(*this, -FieldT::one());
+}
+
+template<typename FieldT>
+bool variable<FieldT>::operator==(const variable<FieldT> &other) const
+{
+ return (this->index == other.index);
+}
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const integer_coeff_t int_coeff, const variable<FieldT> &var)
+{
+ return linear_term<FieldT>(var, int_coeff);
+}
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const FieldT &field_coeff, const variable<FieldT> &var)
+{
+ return linear_term<FieldT>(var, field_coeff);
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const variable<FieldT> &var)
+{
+ return linear_combination<FieldT>(int_coeff) + var;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const variable<FieldT> &var)
+{
+ return linear_combination<FieldT>(field_coeff) + var;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const variable<FieldT> &var)
+{
+ return linear_combination<FieldT>(int_coeff) - var;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const variable<FieldT> &var)
+{
+ return linear_combination<FieldT>(field_coeff) - var;
+}
+
+template<typename FieldT>
+linear_term<FieldT>::linear_term(const variable<FieldT> &var) :
+ index(var.index), coeff(FieldT::one())
+{
+}
+
+template<typename FieldT>
+linear_term<FieldT>::linear_term(const variable<FieldT> &var, const integer_coeff_t int_coeff) :
+ index(var.index), coeff(FieldT(int_coeff))
+{
+}
+
+template<typename FieldT>
+linear_term<FieldT>::linear_term(const variable<FieldT> &var, const FieldT &coeff) :
+ index(var.index), coeff(coeff)
+{
+}
+
+template<typename FieldT>
+linear_term<FieldT> linear_term<FieldT>::operator*(const integer_coeff_t int_coeff) const
+{
+ return (this->operator*(FieldT(int_coeff)));
+}
+
+template<typename FieldT>
+linear_term<FieldT> linear_term<FieldT>::operator*(const FieldT &field_coeff) const
+{
+ return linear_term<FieldT>(this->index, field_coeff * this->coeff);
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt)
+{
+ return linear_combination<FieldT>(int_coeff) + lt;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const linear_term<FieldT> &lt)
+{
+ return linear_combination<FieldT>(field_coeff) + lt;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt)
+{
+ return linear_combination<FieldT>(int_coeff) - lt;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const linear_term<FieldT> &lt)
+{
+ return linear_combination<FieldT>(field_coeff) - lt;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_term<FieldT>::operator+(const linear_combination<FieldT> &other) const
+{
+ return linear_combination<FieldT>(*this) + other;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_term<FieldT>::operator-(const linear_combination<FieldT> &other) const
+{
+ return (*this) + (-other);
+}
+
+template<typename FieldT>
+linear_term<FieldT> linear_term<FieldT>::operator-() const
+{
+ return linear_term<FieldT>(this->index, -this->coeff);
+}
+
+template<typename FieldT>
+bool linear_term<FieldT>::operator==(const linear_term<FieldT> &other) const
+{
+ return (this->index == other.index &&
+ this->coeff == other.coeff);
+}
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const integer_coeff_t int_coeff, const linear_term<FieldT> &lt)
+{
+ return FieldT(int_coeff) * lt;
+}
+
+template<typename FieldT>
+linear_term<FieldT> operator*(const FieldT &field_coeff, const linear_term<FieldT> &lt)
+{
+ return linear_term<FieldT>(lt.index, field_coeff * lt.coeff);
+}
+
+template<typename FieldT>
+linear_combination<FieldT>::linear_combination(const integer_coeff_t int_coeff)
+{
+ this->add_term(linear_term<FieldT>(0, int_coeff));
+}
+
+template<typename FieldT>
+linear_combination<FieldT>::linear_combination(const FieldT &field_coeff)
+{
+ this->add_term(linear_term<FieldT>(0, field_coeff));
+}
+
+template<typename FieldT>
+linear_combination<FieldT>::linear_combination(const variable<FieldT> &var)
+{
+ this->add_term(var);
+}
+
+template<typename FieldT>
+linear_combination<FieldT>::linear_combination(const linear_term<FieldT> &lt)
+{
+ this->add_term(lt);
+}
+
+template<typename FieldT>
+typename std::vector<linear_term<FieldT> >::const_iterator linear_combination<FieldT>::begin() const
+{
+ return terms.begin();
+}
+
+template<typename FieldT>
+typename std::vector<linear_term<FieldT> >::const_iterator linear_combination<FieldT>::end() const
+{
+ return terms.end();
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::add_term(const variable<FieldT> &var)
+{
+ this->terms.emplace_back(linear_term<FieldT>(var.index, FieldT::one()));
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::add_term(const variable<FieldT> &var, const integer_coeff_t int_coeff)
+{
+ this->terms.emplace_back(linear_term<FieldT>(var.index, int_coeff));
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::add_term(const variable<FieldT> &var, const FieldT &coeff)
+{
+ this->terms.emplace_back(linear_term<FieldT>(var.index, coeff));
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::add_term(const linear_term<FieldT> &other)
+{
+ this->terms.emplace_back(other);
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_combination<FieldT>::operator*(const integer_coeff_t int_coeff) const
+{
+ return (*this) * FieldT(int_coeff);
+}
+
+template<typename FieldT>
+FieldT linear_combination<FieldT>::evaluate(const std::vector<FieldT> &assignment) const
+{
+ FieldT acc = FieldT::zero();
+ for (auto &lt : terms)
+ {
+ acc += (lt.index == 0 ? FieldT::one() : assignment[lt.index-1]) * lt.coeff;
+ }
+ return acc;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_combination<FieldT>::operator*(const FieldT &field_coeff) const
+{
+ linear_combination<FieldT> result;
+ result.terms.reserve(this->terms.size());
+ for (const linear_term<FieldT> &lt : this->terms)
+ {
+ result.terms.emplace_back(lt * field_coeff);
+ }
+ return result;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_combination<FieldT>::operator+(const linear_combination<FieldT> &other) const
+{
+ linear_combination<FieldT> result;
+
+ auto it1 = this->terms.begin();
+ auto it2 = other.terms.begin();
+
+ /* invariant: it1 and it2 always point to unprocessed items in the corresponding linear combinations */
+ while (it1 != this->terms.end() && it2 != other.terms.end())
+ {
+ if (it1->index < it2->index)
+ {
+ result.terms.emplace_back(*it1);
+ ++it1;
+ }
+ else if (it1->index > it2->index)
+ {
+ result.terms.emplace_back(*it2);
+ ++it2;
+ }
+ else
+ {
+ /* it1->index == it2->index */
+ result.terms.emplace_back(linear_term<FieldT>(variable<FieldT>(it1->index), it1->coeff + it2->coeff));
+ ++it1;
+ ++it2;
+ }
+ }
+
+ if (it1 != this->terms.end())
+ {
+ result.terms.insert(result.terms.end(), it1, this->terms.end());
+ }
+ else
+ {
+ result.terms.insert(result.terms.end(), it2, other.terms.end());
+ }
+
+ return result;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_combination<FieldT>::operator-(const linear_combination<FieldT> &other) const
+{
+ return (*this) + (-other);
+}
+
+template<typename FieldT>
+linear_combination<FieldT> linear_combination<FieldT>::operator-() const
+{
+ return (*this) * (-FieldT::one());
+}
+
+template<typename FieldT>
+bool linear_combination<FieldT>::operator==(const linear_combination<FieldT> &other) const
+{
+ return (this->terms == other.terms);
+}
+
+template<typename FieldT>
+bool linear_combination<FieldT>::is_valid(const size_t num_variables) const
+{
+ /* check that all terms in linear combination are sorted */
+ for (size_t i = 1; i < terms.size(); ++i)
+ {
+ if (terms[i-1].index >= terms[i].index)
+ {
+ return false;
+ }
+ }
+
+ /* check that the variables are in proper range. as the variables
+ are sorted, it suffices to check the last term */
+ if (!terms.empty() && (--terms.end())->index >= num_variables)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::print(const std::map<size_t, std::string> &variable_annotations) const
+{
+ for (auto &lt : terms)
+ {
+ if (lt.index == 0)
+ {
+ printf(" 1 * ");
+ lt.coeff.print();
+ }
+ else
+ {
+ auto it = variable_annotations.find(lt.index);
+ printf(" x_%zu (%s) * ", lt.index, (it == variable_annotations.end() ? "no annotation" : it->second.c_str()));
+ lt.coeff.print();
+ }
+ }
+}
+
+template<typename FieldT>
+void linear_combination<FieldT>::print_with_assignment(const std::vector<FieldT> &full_assignment, const std::map<size_t, std::string> &variable_annotations) const
+{
+ for (auto &lt : terms)
+ {
+ if (lt.index == 0)
+ {
+ printf(" 1 * ");
+ lt.coeff.print();
+ }
+ else
+ {
+ printf(" x_%zu * ", lt.index);
+ lt.coeff.print();
+
+ auto it = variable_annotations.find(lt.index);
+ printf(" where x_%zu (%s) was assigned value ", lt.index,
+ (it == variable_annotations.end() ? "no annotation" : it->second.c_str()));
+ full_assignment[lt.index-1].print();
+ printf(" i.e. negative of ");
+ (-full_assignment[lt.index-1]).print();
+ }
+ }
+}
+
+template<typename FieldT>
+std::ostream& operator<<(std::ostream &out, const linear_combination<FieldT> &lc)
+{
+ out << lc.terms.size() << "\n";
+ for (const linear_term<FieldT>& lt : lc.terms)
+ {
+ out << lt.index << "\n";
+ out << lt.coeff << OUTPUT_NEWLINE;
+ }
+
+ return out;
+}
+
+template<typename FieldT>
+std::istream& operator>>(std::istream &in, linear_combination<FieldT> &lc)
+{
+ lc.terms.clear();
+
+ size_t s;
+ in >> s;
+
+ consume_newline(in);
+
+ lc.terms.reserve(s);
+
+ for (size_t i = 0; i < s; ++i)
+ {
+ linear_term<FieldT> lt;
+ in >> lt.index;
+ consume_newline(in);
+ in >> lt.coeff;
+ consume_OUTPUT_NEWLINE(in);
+ lc.terms.emplace_back(lt);
+ }
+
+ return in;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator*(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc)
+{
+ return lc * int_coeff;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator*(const FieldT &field_coeff, const linear_combination<FieldT> &lc)
+{
+ return lc * field_coeff;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc)
+{
+ return linear_combination<FieldT>(int_coeff) + lc;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator+(const FieldT &field_coeff, const linear_combination<FieldT> &lc)
+{
+ return linear_combination<FieldT>(field_coeff) + lc;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const integer_coeff_t int_coeff, const linear_combination<FieldT> &lc)
+{
+ return linear_combination<FieldT>(int_coeff) - lc;
+}
+
+template<typename FieldT>
+linear_combination<FieldT> operator-(const FieldT &field_coeff, const linear_combination<FieldT> &lc)
+{
+ return linear_combination<FieldT>(field_coeff) - lc;
+}
+
+template<typename FieldT>
+linear_combination<FieldT>::linear_combination(const std::vector<linear_term<FieldT> > &all_terms)
+{
+ if (all_terms.empty())
+ {
+ return;
+ }
+
+ terms = all_terms;
+ std::sort(terms.begin(), terms.end(), [](linear_term<FieldT> a, linear_term<FieldT> b) { return a.index < b.index; });
+
+ auto result_it = terms.begin();
+ for (auto it = ++terms.begin(); it != terms.end(); ++it)
+ {
+ if (it->index == result_it->index)
+ {
+ result_it->coeff += it->coeff;
+ }
+ else
+ {
+ *(++result_it) = *it;
+ }
+ }
+ terms.resize((result_it - terms.begin()) + 1);
+}
+
+} // libsnark
+
+#endif // VARIABLE_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of functionality that runs the R1CS ppzkSNARK for
+ a given R1CS example.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef RUN_R1CS_PPZKSNARK_HPP_
+#define RUN_R1CS_PPZKSNARK_HPP_
+
+#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp"
+
+namespace libsnark {
+
+/**
+ * Runs the ppzkSNARK (generator, prover, and verifier) for a given
+ * R1CS example (specified by a constraint system, input, and witness).
+ *
+ * Optionally, also test the serialization routines for keys and proofs.
+ * (This takes additional time.)
+ */
+template<typename ppT>
+bool run_r1cs_ppzksnark(const r1cs_example<Fr<ppT> > &example,
+ const bool test_serialization);
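+
+/* A usage sketch (mirroring the profiling program in
+ * zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/; default_r1cs_ppzksnark_pp
+ * is defined in common/default_types/r1cs_ppzksnark_pp.hpp):
+ *
+ * typedef default_r1cs_ppzksnark_pp ppT;
+ * ppT::init_public_params();
+ * const r1cs_example<Fr<ppT> > example =
+ * generate_r1cs_example_with_field_input<Fr<ppT> >(1000, 10);
+ * const bool ok = run_r1cs_ppzksnark<ppT>(example, true); // true => also test (de)serialization
+ */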
+
+} // libsnark
+
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.tcc"
+
+#endif // RUN_R1CS_PPZKSNARK_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Implementation of functionality that runs the R1CS ppzkSNARK for
+ a given R1CS example.
+
+ See run_r1cs_ppzksnark.hpp .
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef RUN_R1CS_PPZKSNARK_TCC_
+#define RUN_R1CS_PPZKSNARK_TCC_
+
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.hpp"
+
+#include <sstream>
+#include <type_traits>
+
+#include "common/profiling.hpp"
+
+namespace libsnark {
+
+template<typename ppT>
+typename std::enable_if<ppT::has_affine_pairing, void>::type
+test_affine_verifier(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof,
+ const bool expected_answer)
+{
+ print_header("R1CS ppzkSNARK Affine Verifier");
+ const bool answer = r1cs_ppzksnark_affine_verifier_weak_IC<ppT>(vk, primary_input, proof);
+ assert(answer == expected_answer);
+}
+
+template<typename ppT>
+typename std::enable_if<!ppT::has_affine_pairing, void>::type
+test_affine_verifier(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof,
+ const bool expected_answer)
+{
+ UNUSED(vk, primary_input, proof, expected_answer);
+ print_header("R1CS ppzkSNARK Affine Verifier");
+ printf("Affine verifier is not supported; not testing anything.\n");
+}
+
+/**
+ * The code below provides an example of all stages of running a R1CS ppzkSNARK.
+ *
+ * Of course, in a real-life scenario, we would have three distinct entities,
+ * mangled into one in the demonstration below. The three entities are as follows.
+ * (1) The "generator", which runs the ppzkSNARK generator on input a given
+ * constraint system CS to create a proving and a verification key for CS.
+ * (2) The "prover", which runs the ppzkSNARK prover on input the proving key,
+ * a primary input for CS, and an auxiliary input for CS.
+ * (3) The "verifier", which runs the ppzkSNARK verifier on input the verification key,
+ * a primary input for CS, and a proof.
+ */
+template<typename ppT>
+bool run_r1cs_ppzksnark(const r1cs_example<Fr<ppT> > &example,
+ const bool test_serialization)
+{
+ enter_block("Call to run_r1cs_ppzksnark");
+
+ print_header("R1CS ppzkSNARK Generator");
+ r1cs_ppzksnark_keypair<ppT> keypair = r1cs_ppzksnark_generator<ppT>(example.constraint_system);
+ printf("\n"); print_indent(); print_mem("after generator");
+
+ print_header("Preprocess verification key");
+ r1cs_ppzksnark_processed_verification_key<ppT> pvk = r1cs_ppzksnark_verifier_process_vk<ppT>(keypair.vk);
+
+ if (test_serialization)
+ {
+ enter_block("Test serialization of keys");
+ keypair.pk = reserialize<r1cs_ppzksnark_proving_key<ppT> >(keypair.pk);
+ keypair.vk = reserialize<r1cs_ppzksnark_verification_key<ppT> >(keypair.vk);
+ pvk = reserialize<r1cs_ppzksnark_processed_verification_key<ppT> >(pvk);
+ leave_block("Test serialization of keys");
+ }
+
+ print_header("R1CS ppzkSNARK Prover");
+ r1cs_ppzksnark_proof<ppT> proof = r1cs_ppzksnark_prover<ppT>(keypair.pk, example.primary_input, example.auxiliary_input);
+ printf("\n"); print_indent(); print_mem("after prover");
+
+ if (test_serialization)
+ {
+ enter_block("Test serialization of proof");
+ proof = reserialize<r1cs_ppzksnark_proof<ppT> >(proof);
+ leave_block("Test serialization of proof");
+ }
+
+ print_header("R1CS ppzkSNARK Verifier");
+ const bool ans = r1cs_ppzksnark_verifier_strong_IC<ppT>(keypair.vk, example.primary_input, proof);
+ printf("\n"); print_indent(); print_mem("after verifier");
+ printf("* The verification result is: %s\n", (ans ? "PASS" : "FAIL"));
+
+ print_header("R1CS ppzkSNARK Online Verifier");
+ const bool ans2 = r1cs_ppzksnark_online_verifier_strong_IC<ppT>(pvk, example.primary_input, proof);
+ assert(ans == ans2);
+
+ test_affine_verifier<ppT>(keypair.vk, example.primary_input, proof, ans);
+
+ leave_block("Call to run_r1cs_ppzksnark");
+
+ return ans;
+}
+
+} // libsnark
+
+#endif // RUN_R1CS_PPZKSNARK_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Profiling program that exercises the ppzkSNARK (first generator, then prover,
+ then verifier) on a synthetic R1CS instance.
+
+ The command
+
+ $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 Fr
+
+ exercises the ppzkSNARK (first generator, then prover, then verifier) on an R1CS instance with 1000 equations and an input consisting of 10 field elements.
+
+ (If you get the error `zmInit ERR:can't protect`, see the discussion of elliptic-curve choices in the README.)
+
+ The command
+
+ $ src/zk_proof_systems/ppzksnark/r1cs_ppzksnark/profiling/profile_r1cs_ppzksnark 1000 10 bytes
+
+ does the same but now the input consists of 10 bytes.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "common/default_types/r1cs_ppzksnark_pp.hpp"
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp"
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp"
+
+using namespace libsnark;
+
+int main(int argc, const char * argv[])
+{
+ default_r1cs_ppzksnark_pp::init_public_params();
+ start_profiling();
+
+ if (argc == 2 && strcmp(argv[1], "-v") == 0)
+ {
+ print_compilation_info();
+ return 0;
+ }
+
+ if (argc != 3 && argc != 4)
+ {
+ printf("usage: %s num_constraints input_size [Fr|bytes]\n", argv[0]);
+ return 1;
+ }
+ const int num_constraints = atoi(argv[1]);
+ int input_size = atoi(argv[2]);
+ if (argc == 4)
+ {
+ assert(strcmp(argv[3], "Fr") == 0 || strcmp(argv[3], "bytes") == 0);
+ if (strcmp(argv[3], "bytes") == 0)
+ {
+ input_size = div_ceil(8 * input_size, Fr<default_r1cs_ppzksnark_pp>::capacity());
+ }
+ }
+
+ enter_block("Generate R1CS example");
+ r1cs_example<Fr<default_r1cs_ppzksnark_pp> > example = generate_r1cs_example_with_field_input<Fr<default_r1cs_ppzksnark_pp> >(num_constraints, input_size);
+ leave_block("Generate R1CS example");
+
+ print_header("(enter) Profile R1CS ppzkSNARK");
+ const bool test_serialization = true;
+ run_r1cs_ppzksnark<default_r1cs_ppzksnark_pp>(example, test_serialization);
+ print_header("(leave) Profile R1CS ppzkSNARK");
+}
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of interfaces for a ppzkSNARK for R1CS.
+
+ This includes:
+ - class for proving key
+ - class for verification key
+ - class for processed verification key
+ - class for key pair (proving key & verification key)
+ - class for proof
+ - generator algorithm
+ - prover algorithm
+ - verifier algorithm (with strong or weak input consistency)
+ - online verifier algorithm (with strong or weak input consistency)
+
+ The implementation instantiates (a modification of) the protocol of \[PGHR13],
+ by following, extending, and optimizing the approach described in \[BCTV14].
+
+
+ Acronyms:
+
+ - R1CS = "Rank-1 Constraint Systems"
+ - ppzkSNARK = "PreProcessing Zero-Knowledge Succinct Non-interactive ARgument of Knowledge"
+
+ References:
+
+ \[BCTV14]:
+ "Succinct Non-Interactive Zero Knowledge for a von Neumann Architecture",
+ Eli Ben-Sasson, Alessandro Chiesa, Eran Tromer, Madars Virza,
+ USENIX Security 2014,
+ <http://eprint.iacr.org/2013/879>
+
+ \[PGHR13]:
+ "Pinocchio: Nearly practical verifiable computation",
+ Bryan Parno, Craig Gentry, Jon Howell, Mariana Raykova,
+ IEEE S&P 2013,
+ <https://eprint.iacr.org/2013/279>
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_PPZKSNARK_HPP_
+#define R1CS_PPZKSNARK_HPP_
+
+#include <memory>
+
+#include "algebra/curves/public_params.hpp"
+#include "common/data_structures/accumulation_vector.hpp"
+#include "algebra/knowledge_commitment/knowledge_commitment.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark_params.hpp"
+
+namespace libsnark {
+
+/******************************** Proving key ********************************/
+
+template<typename ppT>
+class r1cs_ppzksnark_proving_key;
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proving_key<ppT> &pk);
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proving_key<ppT> &pk);
+
+/**
+ * A proving key for the R1CS ppzkSNARK.
+ */
+template<typename ppT>
+class r1cs_ppzksnark_proving_key {
+public:
+ knowledge_commitment_vector<G1<ppT>, G1<ppT> > A_query;
+ knowledge_commitment_vector<G2<ppT>, G1<ppT> > B_query;
+ knowledge_commitment_vector<G1<ppT>, G1<ppT> > C_query;
+ G1_vector<ppT> H_query;
+ G1_vector<ppT> K_query;
+
+ r1cs_ppzksnark_proving_key() {};
+ r1cs_ppzksnark_proving_key<ppT>& operator=(const r1cs_ppzksnark_proving_key<ppT> &other) = default;
+ r1cs_ppzksnark_proving_key(const r1cs_ppzksnark_proving_key<ppT> &other) = default;
+ r1cs_ppzksnark_proving_key(r1cs_ppzksnark_proving_key<ppT> &&other) = default;
+ r1cs_ppzksnark_proving_key(knowledge_commitment_vector<G1<ppT>, G1<ppT> > &&A_query,
+ knowledge_commitment_vector<G2<ppT>, G1<ppT> > &&B_query,
+ knowledge_commitment_vector<G1<ppT>, G1<ppT> > &&C_query,
+ G1_vector<ppT> &&H_query,
+ G1_vector<ppT> &&K_query) :
+ A_query(std::move(A_query)),
+ B_query(std::move(B_query)),
+ C_query(std::move(C_query)),
+ H_query(std::move(H_query)),
+ K_query(std::move(K_query))
+ {};
+
+ size_t G1_size() const
+ {
+ return 2*(A_query.domain_size() + C_query.domain_size()) + B_query.domain_size() + H_query.size() + K_query.size();
+ }
+
+ size_t G2_size() const
+ {
+ return B_query.domain_size();
+ }
+
+ size_t G1_sparse_size() const
+ {
+ return 2*(A_query.size() + C_query.size()) + B_query.size() + H_query.size() + K_query.size();
+ }
+
+ size_t G2_sparse_size() const
+ {
+ return B_query.size();
+ }
+
+ size_t size_in_bits() const
+ {
+ return A_query.size_in_bits() + B_query.size_in_bits() + C_query.size_in_bits() + libsnark::size_in_bits(H_query) + libsnark::size_in_bits(K_query);
+ }
+
+ void print_size() const
+ {
+ print_indent(); printf("* G1 elements in PK: %zu\n", this->G1_size());
+ print_indent(); printf("* Non-zero G1 elements in PK: %zu\n", this->G1_sparse_size());
+ print_indent(); printf("* G2 elements in PK: %zu\n", this->G2_size());
+ print_indent(); printf("* Non-zero G2 elements in PK: %zu\n", this->G2_sparse_size());
+ print_indent(); printf("* PK size in bits: %zu\n", this->size_in_bits());
+ }
+
+ bool operator==(const r1cs_ppzksnark_proving_key<ppT> &other) const;
+ friend std::ostream& operator<< <ppT>(std::ostream &out, const r1cs_ppzksnark_proving_key<ppT> &pk);
+ friend std::istream& operator>> <ppT>(std::istream &in, r1cs_ppzksnark_proving_key<ppT> &pk);
+};
+
+
+/******************************* Verification key ****************************/
+
+template<typename ppT>
+class r1cs_ppzksnark_verification_key;
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_verification_key<ppT> &vk);
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_verification_key<ppT> &vk);
+
+/**
+ * A verification key for the R1CS ppzkSNARK.
+ */
+template<typename ppT>
+class r1cs_ppzksnark_verification_key {
+public:
+ G2<ppT> alphaA_g2;
+ G1<ppT> alphaB_g1;
+ G2<ppT> alphaC_g2;
+ G2<ppT> gamma_g2;
+ G1<ppT> gamma_beta_g1;
+ G2<ppT> gamma_beta_g2;
+ G2<ppT> rC_Z_g2;
+
+ accumulation_vector<G1<ppT> > encoded_IC_query;
+
+ r1cs_ppzksnark_verification_key() = default;
+ r1cs_ppzksnark_verification_key(const G2<ppT> &alphaA_g2,
+ const G1<ppT> &alphaB_g1,
+ const G2<ppT> &alphaC_g2,
+ const G2<ppT> &gamma_g2,
+ const G1<ppT> &gamma_beta_g1,
+ const G2<ppT> &gamma_beta_g2,
+ const G2<ppT> &rC_Z_g2,
+ const accumulation_vector<G1<ppT> > &eIC) :
+ alphaA_g2(alphaA_g2),
+ alphaB_g1(alphaB_g1),
+ alphaC_g2(alphaC_g2),
+ gamma_g2(gamma_g2),
+ gamma_beta_g1(gamma_beta_g1),
+ gamma_beta_g2(gamma_beta_g2),
+ rC_Z_g2(rC_Z_g2),
+ encoded_IC_query(eIC)
+ {};
+
+ size_t G1_size() const
+ {
+ return 2 + encoded_IC_query.size();
+ }
+
+ size_t G2_size() const
+ {
+ return 5;
+ }
+
+ size_t size_in_bits() const
+ {
+ return (2 * G1<ppT>::size_in_bits() + encoded_IC_query.size_in_bits() + 5 * G2<ppT>::size_in_bits());
+ }
+
+ void print_size() const
+ {
+ print_indent(); printf("* G1 elements in VK: %zu\n", this->G1_size());
+ print_indent(); printf("* G2 elements in VK: %zu\n", this->G2_size());
+ print_indent(); printf("* VK size in bits: %zu\n", this->size_in_bits());
+ }
+
+ bool operator==(const r1cs_ppzksnark_verification_key<ppT> &other) const;
+ friend std::ostream& operator<< <ppT>(std::ostream &out, const r1cs_ppzksnark_verification_key<ppT> &vk);
+ friend std::istream& operator>> <ppT>(std::istream &in, r1cs_ppzksnark_verification_key<ppT> &vk);
+
+ static r1cs_ppzksnark_verification_key<ppT> dummy_verification_key(const size_t input_size);
+};
+
+
+/************************ Processed verification key *************************/
+
+template<typename ppT>
+class r1cs_ppzksnark_processed_verification_key;
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_processed_verification_key<ppT> &pvk);
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_processed_verification_key<ppT> &pvk);
+
+/**
+ * A processed verification key for the R1CS ppzkSNARK.
+ *
+ * Compared to a (non-processed) verification key, a processed verification key
+ * contains a small constant amount of additional pre-computed information that
+ * enables a faster verification time.
+ */
+template<typename ppT>
+class r1cs_ppzksnark_processed_verification_key {
+public:
+ G2_precomp<ppT> pp_G2_one_precomp;
+ G2_precomp<ppT> vk_alphaA_g2_precomp;
+ G1_precomp<ppT> vk_alphaB_g1_precomp;
+ G2_precomp<ppT> vk_alphaC_g2_precomp;
+ G2_precomp<ppT> vk_rC_Z_g2_precomp;
+ G2_precomp<ppT> vk_gamma_g2_precomp;
+ G1_precomp<ppT> vk_gamma_beta_g1_precomp;
+ G2_precomp<ppT> vk_gamma_beta_g2_precomp;
+
+ accumulation_vector<G1<ppT> > encoded_IC_query;
+
+ bool operator==(const r1cs_ppzksnark_processed_verification_key &other) const;
+ friend std::ostream& operator<< <ppT>(std::ostream &out, const r1cs_ppzksnark_processed_verification_key<ppT> &pvk);
+ friend std::istream& operator>> <ppT>(std::istream &in, r1cs_ppzksnark_processed_verification_key<ppT> &pvk);
+};
+
+
+/********************************** Key pair *********************************/
+
+/**
+ * A key pair for the R1CS ppzkSNARK, which consists of a proving key and a verification key.
+ */
+template<typename ppT>
+class r1cs_ppzksnark_keypair {
+public:
+ r1cs_ppzksnark_proving_key<ppT> pk;
+ r1cs_ppzksnark_verification_key<ppT> vk;
+
+ r1cs_ppzksnark_keypair() = default;
+ r1cs_ppzksnark_keypair(const r1cs_ppzksnark_keypair<ppT> &other) = default;
+ r1cs_ppzksnark_keypair(r1cs_ppzksnark_proving_key<ppT> &&pk,
+ r1cs_ppzksnark_verification_key<ppT> &&vk) :
+ pk(std::move(pk)),
+ vk(std::move(vk))
+ {}
+
+ r1cs_ppzksnark_keypair(r1cs_ppzksnark_keypair<ppT> &&other) = default;
+};
+
+
+/*********************************** Proof ***********************************/
+
+template<typename ppT>
+class r1cs_ppzksnark_proof;
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proof<ppT> &proof);
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proof<ppT> &proof);
+
+/**
+ * A proof for the R1CS ppzkSNARK.
+ *
+ * While the proof has a structure, externally one merely opaquely produces,
+ * serializes/deserializes, and verifies proofs. We only expose some information
+ * about the structure for statistics purposes.
+ */
+template<typename ppT>
+class r1cs_ppzksnark_proof {
+public:
+ knowledge_commitment<G1<ppT>, G1<ppT> > g_A;
+ knowledge_commitment<G2<ppT>, G1<ppT> > g_B;
+ knowledge_commitment<G1<ppT>, G1<ppT> > g_C;
+ G1<ppT> g_H;
+ G1<ppT> g_K;
+
+ r1cs_ppzksnark_proof()
+ {
+ // invalid proof with valid curve points
+ this->g_A.g = G1<ppT>::one();
+ this->g_A.h = G1<ppT>::one();
+ this->g_B.g = G2<ppT>::one();
+ this->g_B.h = G1<ppT>::one();
+ this->g_C.g = G1<ppT>::one();
+ this->g_C.h = G1<ppT>::one();
+ this->g_H = G1<ppT>::one();
+ this->g_K = G1<ppT>::one();
+ }
+ r1cs_ppzksnark_proof(knowledge_commitment<G1<ppT>, G1<ppT> > &&g_A,
+ knowledge_commitment<G2<ppT>, G1<ppT> > &&g_B,
+ knowledge_commitment<G1<ppT>, G1<ppT> > &&g_C,
+ G1<ppT> &&g_H,
+ G1<ppT> &&g_K) :
+ g_A(std::move(g_A)),
+ g_B(std::move(g_B)),
+ g_C(std::move(g_C)),
+ g_H(std::move(g_H)),
+ g_K(std::move(g_K))
+ {}
+
+ size_t G1_size() const
+ {
+ return 7;
+ }
+
+ size_t G2_size() const
+ {
+ return 1;
+ }
+
+ size_t size_in_bits() const
+ {
+ return G1_size() * G1<ppT>::size_in_bits() + G2_size() * G2<ppT>::size_in_bits();
+ }
+
+ void print_size() const
+ {
+ print_indent(); printf("* G1 elements in proof: %zu\n", this->G1_size());
+ print_indent(); printf("* G2 elements in proof: %zu\n", this->G2_size());
+ print_indent(); printf("* Proof size in bits: %zu\n", this->size_in_bits());
+ }
+
+ bool is_well_formed() const
+ {
+ return (g_A.g.is_well_formed() && g_A.h.is_well_formed() &&
+ g_B.g.is_well_formed() && g_B.h.is_well_formed() &&
+ g_C.g.is_well_formed() && g_C.h.is_well_formed() &&
+ g_H.is_well_formed() &&
+ g_K.is_well_formed());
+ }
+
+ bool operator==(const r1cs_ppzksnark_proof<ppT> &other) const;
+ friend std::ostream& operator<< <ppT>(std::ostream &out, const r1cs_ppzksnark_proof<ppT> &proof);
+ friend std::istream& operator>> <ppT>(std::istream &in, r1cs_ppzksnark_proof<ppT> &proof);
+};
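+
+/*
+ Serialization sketch (illustrative only): proofs round-trip through the
+ stream operators declared above; `proof` stands for a value returned by the
+ prover, and std::stringstream is used purely for illustration.
+
+   std::stringstream ss;
+   ss << proof;
+   r1cs_ppzksnark_proof<ppT> proof2;
+   ss >> proof2;
+   assert(proof == proof2);
+ */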
+
+
+/***************************** Main algorithms *******************************/
+
+/**
+ * A generator algorithm for the R1CS ppzkSNARK.
+ *
+ * Given an R1CS constraint system CS, this algorithm produces proving and verification keys for CS.
+ */
+template<typename ppT>
+r1cs_ppzksnark_keypair<ppT> r1cs_ppzksnark_generator(const r1cs_ppzksnark_constraint_system<ppT> &cs);
+
+template<typename ppT>
+r1cs_ppzksnark_keypair<ppT> r1cs_ppzksnark_generator(
+ const r1cs_ppzksnark_constraint_system<ppT> &cs,
+ const Fr<ppT>& t,
+ const Fr<ppT>& alphaA,
+ const Fr<ppT>& alphaB,
+ const Fr<ppT>& alphaC,
+ const Fr<ppT>& rA,
+ const Fr<ppT>& rB,
+ const Fr<ppT>& beta,
+ const Fr<ppT>& gamma
+);
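+
+/*
+ Usage sketch (illustrative only): the first overload samples the setup
+ randomness internally; the second takes it as explicit arguments (e.g. when
+ the randomness is produced externally). `cs` stands for an existing
+ r1cs_ppzksnark_constraint_system<ppT>.
+
+   const r1cs_ppzksnark_keypair<ppT> keypair = r1cs_ppzksnark_generator<ppT>(cs);
+ */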
+
+/**
+ * A prover algorithm for the R1CS ppzkSNARK.
+ *
+ * Given an R1CS primary input X and an R1CS auxiliary input Y, this algorithm
+ * produces a proof (of knowledge) that attests to the following statement:
+ * ``there exists Y such that CS(X,Y)=0''.
+ * Above, CS is the R1CS constraint system that was given as input to the generator algorithm and is passed to the prover as well.
+ */
+template<typename ppT>
+r1cs_ppzksnark_proof<ppT> r1cs_ppzksnark_prover(const r1cs_ppzksnark_proving_key<ppT> &pk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_auxiliary_input<ppT> &auxiliary_input,
+ const r1cs_ppzksnark_constraint_system<ppT> &constraint_system);
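+
+/*
+ Usage sketch (illustrative only): `cs`, `keypair`, `primary_input` and
+ `auxiliary_input` stand for values constructed elsewhere.
+
+   const r1cs_ppzksnark_proof<ppT> proof =
+       r1cs_ppzksnark_prover<ppT>(keypair.pk, primary_input, auxiliary_input, cs);
+ */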
+
+/*
+ Below are four variants of the verifier algorithm for the R1CS ppzkSNARK.
+
+ These are the four cases that arise from the following two choices:
+
+ (1) The verifier accepts a (non-processed) verification key or, instead, a processed verification key.
+ In the latter case, we call the algorithm an "online verifier".
+
+ (2) The verifier checks for "weak" input consistency or, instead, "strong" input consistency.
+ Strong input consistency requires that |primary_input| = CS.num_inputs, whereas
+ weak input consistency requires that |primary_input| <= CS.num_inputs (and
+ the primary input is implicitly padded with zeros up to length CS.num_inputs).
+ A small illustrative sketch of choice (2) follows this comment.
+ */
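+
+/*
+ Illustrative sketch of the two input-consistency modes (not part of the API);
+ x1, x2 are placeholder field elements and CS.num_inputs is assumed to be 3:
+
+   r1cs_ppzksnark_primary_input<ppT> short_input = {x1, x2};       // length 2
+   // weak IC: short_input is implicitly padded to (x1, x2, 0) before checking
+   const bool w = r1cs_ppzksnark_verifier_weak_IC<ppT>(vk, short_input, proof);
+   // strong IC: rejected immediately, since |short_input| != CS.num_inputs
+   const bool s = r1cs_ppzksnark_verifier_strong_IC<ppT>(vk, short_input, proof);
+ */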
+
+/**
+ * A verifier algorithm for the R1CS ppzkSNARK that:
+ * (1) accepts a non-processed verification key, and
+ * (2) has weak input consistency.
+ */
+template<typename ppT>
+bool r1cs_ppzksnark_verifier_weak_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof);
+
+/**
+ * A verifier algorithm for the R1CS ppzkSNARK that:
+ * (1) accepts a non-processed verification key, and
+ * (2) has strong input consistency.
+ */
+template<typename ppT>
+bool r1cs_ppzksnark_verifier_strong_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof);
+
+/**
+ * Convert a (non-processed) verification key into a processed verification key.
+ */
+template<typename ppT>
+r1cs_ppzksnark_processed_verification_key<ppT> r1cs_ppzksnark_verifier_process_vk(const r1cs_ppzksnark_verification_key<ppT> &vk);
+
+/**
+ * A verifier algorithm for the R1CS ppzkSNARK that:
+ * (1) accepts a processed verification key, and
+ * (2) has weak input consistency.
+ */
+template<typename ppT>
+bool r1cs_ppzksnark_online_verifier_weak_IC(const r1cs_ppzksnark_processed_verification_key<ppT> &pvk,
+ const r1cs_ppzksnark_primary_input<ppT> &input,
+ const r1cs_ppzksnark_proof<ppT> &proof);
+
+/**
+ * A verifier algorithm for the R1CS ppzkSNARK that:
+ * (1) accepts a processed verification key, and
+ * (2) has strong input consistency.
+ */
+template<typename ppT>
+bool r1cs_ppzksnark_online_verifier_strong_IC(const r1cs_ppzksnark_processed_verification_key<ppT> &pvk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof);
+
+/****************************** Miscellaneous ********************************/
+
+/**
+ * For debugging purposes (of r1cs_ppzksnark_verifier_gadget):
+ *
+ * A verifier algorithm for the R1CS ppzkSNARK that:
+ * (1) accepts a non-processed verification key,
+ * (2) has weak input consistency, and
+ * (3) uses affine coordinates for elliptic-curve computations.
+ */
+template<typename ppT>
+bool r1cs_ppzksnark_affine_verifier_weak_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof);
+
+
+} // libsnark
+
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/r1cs_ppzksnark.tcc"
+
+#endif // R1CS_PPZKSNARK_HPP_
--- /dev/null
+/** @file
+*****************************************************************************
+
+Implementation of interfaces for a ppzkSNARK for R1CS.
+
+See r1cs_ppzksnark.hpp .
+
+*****************************************************************************
+* @author This file is part of libsnark, developed by SCIPR Lab
+* and contributors (see AUTHORS).
+* @copyright MIT license (see LICENSE file)
+*****************************************************************************/
+
+#ifndef R1CS_PPZKSNARK_TCC_
+#define R1CS_PPZKSNARK_TCC_
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <sstream>
+
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "algebra/scalar_multiplication/multiexp.hpp"
+#include "algebra/scalar_multiplication/kc_multiexp.hpp"
+#include "reductions/r1cs_to_qap/r1cs_to_qap.hpp"
+
+namespace libsnark {
+
+template<typename ppT>
+bool r1cs_ppzksnark_proving_key<ppT>::operator==(const r1cs_ppzksnark_proving_key<ppT> &other) const
+{
+ return (this->A_query == other.A_query &&
+ this->B_query == other.B_query &&
+ this->C_query == other.C_query &&
+ this->H_query == other.H_query &&
+ this->K_query == other.K_query);
+}
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proving_key<ppT> &pk)
+{
+ out << pk.A_query;
+ out << pk.B_query;
+ out << pk.C_query;
+ out << pk.H_query;
+ out << pk.K_query;
+
+ return out;
+}
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proving_key<ppT> &pk)
+{
+ in >> pk.A_query;
+ in >> pk.B_query;
+ in >> pk.C_query;
+ in >> pk.H_query;
+ in >> pk.K_query;
+
+ return in;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_verification_key<ppT>::operator==(const r1cs_ppzksnark_verification_key<ppT> &other) const
+{
+ return (this->alphaA_g2 == other.alphaA_g2 &&
+ this->alphaB_g1 == other.alphaB_g1 &&
+ this->alphaC_g2 == other.alphaC_g2 &&
+ this->gamma_g2 == other.gamma_g2 &&
+ this->gamma_beta_g1 == other.gamma_beta_g1 &&
+ this->gamma_beta_g2 == other.gamma_beta_g2 &&
+ this->rC_Z_g2 == other.rC_Z_g2 &&
+ this->encoded_IC_query == other.encoded_IC_query);
+}
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_verification_key<ppT> &vk)
+{
+ out << vk.alphaA_g2 << OUTPUT_NEWLINE;
+ out << vk.alphaB_g1 << OUTPUT_NEWLINE;
+ out << vk.alphaC_g2 << OUTPUT_NEWLINE;
+ out << vk.gamma_g2 << OUTPUT_NEWLINE;
+ out << vk.gamma_beta_g1 << OUTPUT_NEWLINE;
+ out << vk.gamma_beta_g2 << OUTPUT_NEWLINE;
+ out << vk.rC_Z_g2 << OUTPUT_NEWLINE;
+ out << vk.encoded_IC_query << OUTPUT_NEWLINE;
+
+ return out;
+}
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_verification_key<ppT> &vk)
+{
+ in >> vk.alphaA_g2;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.alphaB_g1;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.alphaC_g2;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.gamma_g2;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.gamma_beta_g1;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.gamma_beta_g2;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.rC_Z_g2;
+ consume_OUTPUT_NEWLINE(in);
+ in >> vk.encoded_IC_query;
+ consume_OUTPUT_NEWLINE(in);
+
+ return in;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_processed_verification_key<ppT>::operator==(const r1cs_ppzksnark_processed_verification_key<ppT> &other) const
+{
+ return (this->pp_G2_one_precomp == other.pp_G2_one_precomp &&
+ this->vk_alphaA_g2_precomp == other.vk_alphaA_g2_precomp &&
+ this->vk_alphaB_g1_precomp == other.vk_alphaB_g1_precomp &&
+ this->vk_alphaC_g2_precomp == other.vk_alphaC_g2_precomp &&
+ this->vk_rC_Z_g2_precomp == other.vk_rC_Z_g2_precomp &&
+ this->vk_gamma_g2_precomp == other.vk_gamma_g2_precomp &&
+ this->vk_gamma_beta_g1_precomp == other.vk_gamma_beta_g1_precomp &&
+ this->vk_gamma_beta_g2_precomp == other.vk_gamma_beta_g2_precomp &&
+ this->encoded_IC_query == other.encoded_IC_query);
+}
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_processed_verification_key<ppT> &pvk)
+{
+ out << pvk.pp_G2_one_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_alphaA_g2_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_alphaB_g1_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_alphaC_g2_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_rC_Z_g2_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_gamma_g2_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_gamma_beta_g1_precomp << OUTPUT_NEWLINE;
+ out << pvk.vk_gamma_beta_g2_precomp << OUTPUT_NEWLINE;
+ out << pvk.encoded_IC_query << OUTPUT_NEWLINE;
+
+ return out;
+}
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_processed_verification_key<ppT> &pvk)
+{
+ in >> pvk.pp_G2_one_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_alphaA_g2_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_alphaB_g1_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_alphaC_g2_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_rC_Z_g2_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_gamma_g2_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_gamma_beta_g1_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.vk_gamma_beta_g2_precomp;
+ consume_OUTPUT_NEWLINE(in);
+ in >> pvk.encoded_IC_query;
+ consume_OUTPUT_NEWLINE(in);
+
+ return in;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_proof<ppT>::operator==(const r1cs_ppzksnark_proof<ppT> &other) const
+{
+ return (this->g_A == other.g_A &&
+ this->g_B == other.g_B &&
+ this->g_C == other.g_C &&
+ this->g_H == other.g_H &&
+ this->g_K == other.g_K);
+}
+
+template<typename ppT>
+std::ostream& operator<<(std::ostream &out, const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ out << proof.g_A << OUTPUT_NEWLINE;
+ out << proof.g_B << OUTPUT_NEWLINE;
+ out << proof.g_C << OUTPUT_NEWLINE;
+ out << proof.g_H << OUTPUT_NEWLINE;
+ out << proof.g_K << OUTPUT_NEWLINE;
+
+ return out;
+}
+
+template<typename ppT>
+std::istream& operator>>(std::istream &in, r1cs_ppzksnark_proof<ppT> &proof)
+{
+ in >> proof.g_A;
+ consume_OUTPUT_NEWLINE(in);
+ in >> proof.g_B;
+ consume_OUTPUT_NEWLINE(in);
+ in >> proof.g_C;
+ consume_OUTPUT_NEWLINE(in);
+ in >> proof.g_H;
+ consume_OUTPUT_NEWLINE(in);
+ in >> proof.g_K;
+ consume_OUTPUT_NEWLINE(in);
+
+ return in;
+}
+
+template<typename ppT>
+r1cs_ppzksnark_verification_key<ppT> r1cs_ppzksnark_verification_key<ppT>::dummy_verification_key(const size_t input_size)
+{
+ r1cs_ppzksnark_verification_key<ppT> result;
+ result.alphaA_g2 = Fr<ppT>::random_element() * G2<ppT>::one();
+ result.alphaB_g1 = Fr<ppT>::random_element() * G1<ppT>::one();
+ result.alphaC_g2 = Fr<ppT>::random_element() * G2<ppT>::one();
+ result.gamma_g2 = Fr<ppT>::random_element() * G2<ppT>::one();
+ result.gamma_beta_g1 = Fr<ppT>::random_element() * G1<ppT>::one();
+ result.gamma_beta_g2 = Fr<ppT>::random_element() * G2<ppT>::one();
+ result.rC_Z_g2 = Fr<ppT>::random_element() * G2<ppT>::one();
+
+ G1<ppT> base = Fr<ppT>::random_element() * G1<ppT>::one();
+ G1_vector<ppT> v;
+ for (size_t i = 0; i < input_size; ++i)
+ {
+ v.emplace_back(Fr<ppT>::random_element() * G1<ppT>::one());
+ }
+
+ result.encoded_IC_query = accumulation_vector<G1<ppT> >(std::move(base), std::move(v));
+
+ return result;
+}
+
+template <typename ppT>
+r1cs_ppzksnark_keypair<ppT> r1cs_ppzksnark_generator(const r1cs_ppzksnark_constraint_system<ppT> &cs)
+{
+ /* draw random element at which the QAP is evaluated */
+ const Fr<ppT> t = Fr<ppT>::random_element();
+
+ const Fr<ppT> alphaA = Fr<ppT>::random_element(),
+ alphaB = Fr<ppT>::random_element(),
+ alphaC = Fr<ppT>::random_element(),
+ rA = Fr<ppT>::random_element(),
+ rB = Fr<ppT>::random_element(),
+ beta = Fr<ppT>::random_element(),
+ gamma = Fr<ppT>::random_element();
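+ /* Note: t and the values above constitute the setup's secret randomness
+    ("toxic waste"); they must not be retained after key generation, since
+    knowledge of them would allow forging proofs. */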
+
+ return r1cs_ppzksnark_generator<ppT>(cs, t, alphaA, alphaB, alphaC, rA, rB, beta, gamma);
+}
+
+template <typename ppT>
+r1cs_ppzksnark_keypair<ppT> r1cs_ppzksnark_generator(
+ const r1cs_ppzksnark_constraint_system<ppT> &cs,
+ const Fr<ppT>& t,
+ const Fr<ppT>& alphaA,
+ const Fr<ppT>& alphaB,
+ const Fr<ppT>& alphaC,
+ const Fr<ppT>& rA,
+ const Fr<ppT>& rB,
+ const Fr<ppT>& beta,
+ const Fr<ppT>& gamma
+)
+{
+ enter_block("Call to r1cs_ppzksnark_generator");
+
+ /* make the B_query "lighter" if possible */
+ r1cs_ppzksnark_constraint_system<ppT> cs_copy(cs);
+ cs_copy.swap_AB_if_beneficial();
+
+ qap_instance_evaluation<Fr<ppT> > qap_inst = r1cs_to_qap_instance_map_with_evaluation(cs_copy, t);
+
+ print_indent(); printf("* QAP number of variables: %zu\n", qap_inst.num_variables());
+ print_indent(); printf("* QAP pre degree: %zu\n", cs_copy.constraints.size());
+ print_indent(); printf("* QAP degree: %zu\n", qap_inst.degree());
+ print_indent(); printf("* QAP number of input variables: %zu\n", qap_inst.num_inputs());
+
+ enter_block("Compute query densities");
+ size_t non_zero_At = 0, non_zero_Bt = 0, non_zero_Ct = 0, non_zero_Ht = 0;
+ for (size_t i = 0; i < qap_inst.num_variables()+1; ++i)
+ {
+ if (!qap_inst.At[i].is_zero())
+ {
+ ++non_zero_At;
+ }
+ if (!qap_inst.Bt[i].is_zero())
+ {
+ ++non_zero_Bt;
+ }
+ if (!qap_inst.Ct[i].is_zero())
+ {
+ ++non_zero_Ct;
+ }
+ }
+ for (size_t i = 0; i < qap_inst.degree()+1; ++i)
+ {
+ if (!qap_inst.Ht[i].is_zero())
+ {
+ ++non_zero_Ht;
+ }
+ }
+ leave_block("Compute query densities");
+
+ Fr_vector<ppT> At = std::move(qap_inst.At); // qap_inst.At is now in unspecified state, but we do not use it later
+ Fr_vector<ppT> Bt = std::move(qap_inst.Bt); // qap_inst.Bt is now in unspecified state, but we do not use it later
+ Fr_vector<ppT> Ct = std::move(qap_inst.Ct); // qap_inst.Ct is now in unspecified state, but we do not use it later
+ Fr_vector<ppT> Ht = std::move(qap_inst.Ht); // qap_inst.Ht is now in unspecified state, but we do not use it later
+
+ /* append Zt to At, Bt, Ct; the prover uses these extra entries (scaled by d1, d2, d3) to randomize the proof */
+ At.emplace_back(qap_inst.Zt);
+ Bt.emplace_back(qap_inst.Zt);
+ Ct.emplace_back(qap_inst.Zt);
+
+ const Fr<ppT> rC = rA * rB;
+
+ // construct the same-coefficient-check query (must happen before zeroing out the prefix of At)
+ Fr_vector<ppT> Kt;
+ Kt.reserve(qap_inst.num_variables()+4);
+ for (size_t i = 0; i < qap_inst.num_variables()+1; ++i)
+ {
+ Kt.emplace_back( beta * (rA * At[i] + rB * Bt[i] + rC * Ct[i] ) );
+ }
+ Kt.emplace_back(beta * rA * qap_inst.Zt);
+ Kt.emplace_back(beta * rB * qap_inst.Zt);
+ Kt.emplace_back(beta * rC * qap_inst.Zt);
+
+ /* zero out prefix of At and stick it into IC coefficients */
+ Fr_vector<ppT> IC_coefficients;
+ IC_coefficients.reserve(qap_inst.num_inputs() + 1);
+ for (size_t i = 0; i < qap_inst.num_inputs() + 1; ++i)
+ {
+ IC_coefficients.emplace_back(At[i]);
+ assert(!IC_coefficients[i].is_zero());
+ At[i] = Fr<ppT>::zero();
+ }
+
+ const size_t g1_exp_count = 2*(non_zero_At - qap_inst.num_inputs() + non_zero_Ct) + non_zero_Bt + non_zero_Ht + Kt.size();
+ const size_t g2_exp_count = non_zero_Bt;
+
+ size_t g1_window = get_exp_window_size<G1<ppT> >(g1_exp_count);
+ size_t g2_window = get_exp_window_size<G2<ppT> >(g2_exp_count);
+ print_indent(); printf("* G1 window: %zu\n", g1_window);
+ print_indent(); printf("* G2 window: %zu\n", g2_window);
+
+#ifdef MULTICORE
+ const size_t chunks = omp_get_max_threads(); // to override, set OMP_NUM_THREADS env var or call omp_set_num_threads()
+#else
+ const size_t chunks = 1;
+#endif
+
+ enter_block("Generating G1 multiexp table");
+ window_table<G1<ppT> > g1_table = get_window_table(Fr<ppT>::size_in_bits(), g1_window, G1<ppT>::one());
+ leave_block("Generating G1 multiexp table");
+
+ enter_block("Generating G2 multiexp table");
+ window_table<G2<ppT> > g2_table = get_window_table(Fr<ppT>::size_in_bits(), g2_window, G2<ppT>::one());
+ leave_block("Generating G2 multiexp table");
+
+ enter_block("Generate R1CS proving key");
+
+ enter_block("Generate knowledge commitments");
+ enter_block("Compute the A-query", false);
+ knowledge_commitment_vector<G1<ppT>, G1<ppT> > A_query = kc_batch_exp(Fr<ppT>::size_in_bits(), g1_window, g1_window, g1_table, g1_table, rA, rA*alphaA, At, chunks);
+ leave_block("Compute the A-query", false);
+
+ enter_block("Compute the B-query", false);
+ knowledge_commitment_vector<G2<ppT>, G1<ppT> > B_query = kc_batch_exp(Fr<ppT>::size_in_bits(), g2_window, g1_window, g2_table, g1_table, rB, rB*alphaB, Bt, chunks);
+ leave_block("Compute the B-query", false);
+
+ enter_block("Compute the C-query", false);
+ knowledge_commitment_vector<G1<ppT>, G1<ppT> > C_query = kc_batch_exp(Fr<ppT>::size_in_bits(), g1_window, g1_window, g1_table, g1_table, rC, rC*alphaC, Ct, chunks);
+ leave_block("Compute the C-query", false);
+
+ enter_block("Compute the H-query", false);
+ G1_vector<ppT> H_query = batch_exp(Fr<ppT>::size_in_bits(), g1_window, g1_table, Ht);
+ leave_block("Compute the H-query", false);
+
+ enter_block("Compute the K-query", false);
+ G1_vector<ppT> K_query = batch_exp(Fr<ppT>::size_in_bits(), g1_window, g1_table, Kt);
+#ifdef USE_MIXED_ADDITION
+ batch_to_special<G1<ppT> >(K_query);
+#endif
+ leave_block("Compute the K-query", false);
+
+ leave_block("Generate knowledge commitments");
+
+ leave_block("Generate R1CS proving key");
+
+ enter_block("Generate R1CS verification key");
+ G2<ppT> alphaA_g2 = alphaA * G2<ppT>::one();
+ G1<ppT> alphaB_g1 = alphaB * G1<ppT>::one();
+ G2<ppT> alphaC_g2 = alphaC * G2<ppT>::one();
+ G2<ppT> gamma_g2 = gamma * G2<ppT>::one();
+ G1<ppT> gamma_beta_g1 = (gamma * beta) * G1<ppT>::one();
+ G2<ppT> gamma_beta_g2 = (gamma * beta) * G2<ppT>::one();
+ G2<ppT> rC_Z_g2 = (rC * qap_inst.Zt) * G2<ppT>::one();
+
+ enter_block("Encode IC query for R1CS verification key");
+ G1<ppT> encoded_IC_base = (rA * IC_coefficients[0]) * G1<ppT>::one();
+ Fr_vector<ppT> multiplied_IC_coefficients;
+ multiplied_IC_coefficients.reserve(qap_inst.num_inputs());
+ for (size_t i = 1; i < qap_inst.num_inputs() + 1; ++i)
+ {
+ multiplied_IC_coefficients.emplace_back(rA * IC_coefficients[i]);
+ }
+ G1_vector<ppT> encoded_IC_values = batch_exp(Fr<ppT>::size_in_bits(), g1_window, g1_table, multiplied_IC_coefficients);
+
+ leave_block("Encode IC query for R1CS verification key");
+ leave_block("Generate R1CS verification key");
+
+ leave_block("Call to r1cs_ppzksnark_generator");
+
+ accumulation_vector<G1<ppT> > encoded_IC_query(std::move(encoded_IC_base), std::move(encoded_IC_values));
+
+ r1cs_ppzksnark_verification_key<ppT> vk = r1cs_ppzksnark_verification_key<ppT>(alphaA_g2,
+ alphaB_g1,
+ alphaC_g2,
+ gamma_g2,
+ gamma_beta_g1,
+ gamma_beta_g2,
+ rC_Z_g2,
+ encoded_IC_query);
+ r1cs_ppzksnark_proving_key<ppT> pk = r1cs_ppzksnark_proving_key<ppT>(std::move(A_query),
+ std::move(B_query),
+ std::move(C_query),
+ std::move(H_query),
+ std::move(K_query));
+
+ pk.print_size();
+ vk.print_size();
+
+ return r1cs_ppzksnark_keypair<ppT>(std::move(pk), std::move(vk));
+}
+
+template <typename ppT>
+r1cs_ppzksnark_proof<ppT> r1cs_ppzksnark_prover(const r1cs_ppzksnark_proving_key<ppT> &pk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_auxiliary_input<ppT> &auxiliary_input,
+ const r1cs_ppzksnark_constraint_system<ppT> &constraint_system)
+{
+ enter_block("Call to r1cs_ppzksnark_prover");
+
+#ifdef DEBUG
+ assert(constraint_system.is_satisfied(primary_input, auxiliary_input));
+#endif
+
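+ /* d1, d2 and d3 randomize the proof (they scale the Zt entries appended to
+    the A-, B- and C-queries), which is what makes the proof zero-knowledge. */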
+ const Fr<ppT> d1 = Fr<ppT>::random_element(),
+ d2 = Fr<ppT>::random_element(),
+ d3 = Fr<ppT>::random_element();
+
+ enter_block("Compute the polynomial H");
+ const qap_witness<Fr<ppT> > qap_wit = r1cs_to_qap_witness_map(constraint_system, primary_input, auxiliary_input, d1, d2, d3);
+ leave_block("Compute the polynomial H");
+
+#ifdef DEBUG
+ const Fr<ppT> t = Fr<ppT>::random_element();
+ qap_instance_evaluation<Fr<ppT> > qap_inst = r1cs_to_qap_instance_map_with_evaluation(constraint_system, t);
+ assert(qap_inst.is_satisfied(qap_wit));
+#endif
+
+ knowledge_commitment<G1<ppT>, G1<ppT> > g_A = pk.A_query[0] + qap_wit.d1*pk.A_query[qap_wit.num_variables()+1];
+ knowledge_commitment<G2<ppT>, G1<ppT> > g_B = pk.B_query[0] + qap_wit.d2*pk.B_query[qap_wit.num_variables()+1];
+ knowledge_commitment<G1<ppT>, G1<ppT> > g_C = pk.C_query[0] + qap_wit.d3*pk.C_query[qap_wit.num_variables()+1];
+
+ G1<ppT> g_H = G1<ppT>::zero();
+ G1<ppT> g_K = (pk.K_query[0] +
+ qap_wit.d1*pk.K_query[qap_wit.num_variables()+1] +
+ qap_wit.d2*pk.K_query[qap_wit.num_variables()+2] +
+ qap_wit.d3*pk.K_query[qap_wit.num_variables()+3]);
+
+#ifdef DEBUG
+ for (size_t i = 0; i < qap_wit.num_inputs() + 1; ++i)
+ {
+ assert(pk.A_query[i].g == G1<ppT>::zero());
+ }
+ assert(pk.A_query.domain_size() == qap_wit.num_variables()+2);
+ assert(pk.B_query.domain_size() == qap_wit.num_variables()+2);
+ assert(pk.C_query.domain_size() == qap_wit.num_variables()+2);
+ assert(pk.H_query.size() == qap_wit.degree()+1);
+ assert(pk.K_query.size() == qap_wit.num_variables()+4);
+#endif
+
+#ifdef MULTICORE
+ const size_t chunks = omp_get_max_threads(); // to override, set OMP_NUM_THREADS env var or call omp_set_num_threads()
+#else
+ const size_t chunks = 1;
+#endif
+
+ enter_block("Compute the proof");
+
+ enter_block("Compute answer to A-query", false);
+ g_A = g_A + kc_multi_exp_with_mixed_addition<G1<ppT>, G1<ppT>, Fr<ppT> >(pk.A_query,
+ 1, 1+qap_wit.num_variables(),
+ qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(),
+ chunks, true);
+ leave_block("Compute answer to A-query", false);
+
+ enter_block("Compute answer to B-query", false);
+ g_B = g_B + kc_multi_exp_with_mixed_addition<G2<ppT>, G1<ppT>, Fr<ppT> >(pk.B_query,
+ 1, 1+qap_wit.num_variables(),
+ qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(),
+ chunks, true);
+ leave_block("Compute answer to B-query", false);
+
+ enter_block("Compute answer to C-query", false);
+ g_C = g_C + kc_multi_exp_with_mixed_addition<G1<ppT>, G1<ppT>, Fr<ppT> >(pk.C_query,
+ 1, 1+qap_wit.num_variables(),
+ qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(),
+ chunks, true);
+ leave_block("Compute answer to C-query", false);
+
+ enter_block("Compute answer to H-query", false);
+ g_H = g_H + multi_exp<G1<ppT>, Fr<ppT> >(pk.H_query.begin(), pk.H_query.begin()+qap_wit.degree()+1,
+ qap_wit.coefficients_for_H.begin(), qap_wit.coefficients_for_H.begin()+qap_wit.degree()+1,
+ chunks, true);
+ leave_block("Compute answer to H-query", false);
+
+ enter_block("Compute answer to K-query", false);
+ g_K = g_K + multi_exp_with_mixed_addition<G1<ppT>, Fr<ppT> >(pk.K_query.begin()+1, pk.K_query.begin()+1+qap_wit.num_variables(),
+ qap_wit.coefficients_for_ABCs.begin(), qap_wit.coefficients_for_ABCs.begin()+qap_wit.num_variables(),
+ chunks, true);
+ leave_block("Compute answer to K-query", false);
+
+ leave_block("Compute the proof");
+
+ leave_block("Call to r1cs_ppzksnark_prover");
+
+ r1cs_ppzksnark_proof<ppT> proof = r1cs_ppzksnark_proof<ppT>(std::move(g_A), std::move(g_B), std::move(g_C), std::move(g_H), std::move(g_K));
+ //proof.print_size();
+
+ return proof;
+}
+
+template <typename ppT>
+r1cs_ppzksnark_processed_verification_key<ppT> r1cs_ppzksnark_verifier_process_vk(const r1cs_ppzksnark_verification_key<ppT> &vk)
+{
+ enter_block("Call to r1cs_ppzksnark_verifier_process_vk");
+
+ r1cs_ppzksnark_processed_verification_key<ppT> pvk;
+ pvk.pp_G2_one_precomp = ppT::precompute_G2(G2<ppT>::one());
+ pvk.vk_alphaA_g2_precomp = ppT::precompute_G2(vk.alphaA_g2);
+ pvk.vk_alphaB_g1_precomp = ppT::precompute_G1(vk.alphaB_g1);
+ pvk.vk_alphaC_g2_precomp = ppT::precompute_G2(vk.alphaC_g2);
+ pvk.vk_rC_Z_g2_precomp = ppT::precompute_G2(vk.rC_Z_g2);
+ pvk.vk_gamma_g2_precomp = ppT::precompute_G2(vk.gamma_g2);
+ pvk.vk_gamma_beta_g1_precomp = ppT::precompute_G1(vk.gamma_beta_g1);
+ pvk.vk_gamma_beta_g2_precomp = ppT::precompute_G2(vk.gamma_beta_g2);
+
+ pvk.encoded_IC_query = vk.encoded_IC_query;
+
+ leave_block("Call to r1cs_ppzksnark_verifier_process_vk");
+
+ return pvk;
+}
+
+template <typename ppT>
+bool r1cs_ppzksnark_online_verifier_weak_IC(const r1cs_ppzksnark_processed_verification_key<ppT> &pvk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ assert(pvk.encoded_IC_query.domain_size() >= primary_input.size());
+
+ const accumulation_vector<G1<ppT> > accumulated_IC = pvk.encoded_IC_query.template accumulate_chunk<Fr<ppT> >(primary_input.begin(), primary_input.end(), 0);
+ const G1<ppT> &acc = accumulated_IC.first;
+
+ if (!proof.is_well_formed())
+ {
+ return false;
+ }
+
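+ /* check that the knowledge commitment for A is valid */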
+ G1_precomp<ppT> proof_g_A_g_precomp = ppT::precompute_G1(proof.g_A.g);
+ G1_precomp<ppT> proof_g_A_h_precomp = ppT::precompute_G1(proof.g_A.h);
+ Fqk<ppT> kc_A_1 = ppT::miller_loop(proof_g_A_g_precomp, pvk.vk_alphaA_g2_precomp);
+ Fqk<ppT> kc_A_2 = ppT::miller_loop(proof_g_A_h_precomp, pvk.pp_G2_one_precomp);
+ GT<ppT> kc_A = ppT::final_exponentiation(kc_A_1 * kc_A_2.unitary_inverse());
+ if (kc_A != GT<ppT>::one())
+ {
+ return false;
+ }
+
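+ /* check that the knowledge commitment for B is valid */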
+ G2_precomp<ppT> proof_g_B_g_precomp = ppT::precompute_G2(proof.g_B.g);
+ G1_precomp<ppT> proof_g_B_h_precomp = ppT::precompute_G1(proof.g_B.h);
+ Fqk<ppT> kc_B_1 = ppT::miller_loop(pvk.vk_alphaB_g1_precomp, proof_g_B_g_precomp);
+ Fqk<ppT> kc_B_2 = ppT::miller_loop(proof_g_B_h_precomp, pvk.pp_G2_one_precomp);
+ GT<ppT> kc_B = ppT::final_exponentiation(kc_B_1 * kc_B_2.unitary_inverse());
+ if (kc_B != GT<ppT>::one())
+ {
+ return false;
+ }
+
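+ /* check that the knowledge commitment for C is valid */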
+ G1_precomp<ppT> proof_g_C_g_precomp = ppT::precompute_G1(proof.g_C.g);
+ G1_precomp<ppT> proof_g_C_h_precomp = ppT::precompute_G1(proof.g_C.h);
+ Fqk<ppT> kc_C_1 = ppT::miller_loop(proof_g_C_g_precomp, pvk.vk_alphaC_g2_precomp);
+ Fqk<ppT> kc_C_2 = ppT::miller_loop(proof_g_C_h_precomp, pvk.pp_G2_one_precomp);
+ GT<ppT> kc_C = ppT::final_exponentiation(kc_C_1 * kc_C_2.unitary_inverse());
+ if (kc_C != GT<ppT>::one())
+ {
+ return false;
+ }
+
+ // QAP divisibility check: g^((A+acc)*B) = g^(H*Z(t)+C), where Z(t) = \Prod(t-\sigma);
+ // equivalently, via pairings, e(g^(A+acc), g^B) = e(g^H, g^Z) * e(g^C, g^1)
+ G1_precomp<ppT> proof_g_A_g_acc_precomp = ppT::precompute_G1(proof.g_A.g + acc);
+ G1_precomp<ppT> proof_g_H_precomp = ppT::precompute_G1(proof.g_H);
+ Fqk<ppT> QAP_1 = ppT::miller_loop(proof_g_A_g_acc_precomp, proof_g_B_g_precomp);
+ Fqk<ppT> QAP_23 = ppT::double_miller_loop(proof_g_H_precomp, pvk.vk_rC_Z_g2_precomp, proof_g_C_g_precomp, pvk.pp_G2_one_precomp);
+ GT<ppT> QAP = ppT::final_exponentiation(QAP_1 * QAP_23.unitary_inverse());
+ if (QAP != GT<ppT>::one())
+ {
+ return false;
+ }
+
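+ /* check that the same coefficients were used in the A-, B- and C-queries */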
+ G1_precomp<ppT> proof_g_K_precomp = ppT::precompute_G1(proof.g_K);
+ G1_precomp<ppT> proof_g_A_g_acc_C_precomp = ppT::precompute_G1((proof.g_A.g + acc) + proof.g_C.g);
+ Fqk<ppT> K_1 = ppT::miller_loop(proof_g_K_precomp, pvk.vk_gamma_g2_precomp);
+ Fqk<ppT> K_23 = ppT::double_miller_loop(proof_g_A_g_acc_C_precomp, pvk.vk_gamma_beta_g2_precomp, pvk.vk_gamma_beta_g1_precomp, proof_g_B_g_precomp);
+ GT<ppT> K = ppT::final_exponentiation(K_1 * K_23.unitary_inverse());
+ if (K != GT<ppT>::one())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_verifier_weak_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ enter_block("Call to r1cs_ppzksnark_verifier_weak_IC");
+ r1cs_ppzksnark_processed_verification_key<ppT> pvk = r1cs_ppzksnark_verifier_process_vk<ppT>(vk);
+ bool result = r1cs_ppzksnark_online_verifier_weak_IC<ppT>(pvk, primary_input, proof);
+ leave_block("Call to r1cs_ppzksnark_verifier_weak_IC");
+ return result;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_online_verifier_strong_IC(const r1cs_ppzksnark_processed_verification_key<ppT> &pvk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ bool result = true;
+ enter_block("Call to r1cs_ppzksnark_online_verifier_strong_IC");
+
+ if (pvk.encoded_IC_query.domain_size() != primary_input.size())
+ {
+ print_indent(); printf("Input length differs from expected (got %zu, expected %zu).\n", primary_input.size(), pvk.encoded_IC_query.domain_size());
+ result = false;
+ }
+ else
+ {
+ result = r1cs_ppzksnark_online_verifier_weak_IC(pvk, primary_input, proof);
+ }
+
+ leave_block("Call to r1cs_ppzksnark_online_verifier_strong_IC");
+ return result;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_verifier_strong_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ enter_block("Call to r1cs_ppzksnark_verifier_strong_IC");
+ r1cs_ppzksnark_processed_verification_key<ppT> pvk = r1cs_ppzksnark_verifier_process_vk<ppT>(vk);
+ bool result = r1cs_ppzksnark_online_verifier_strong_IC<ppT>(pvk, primary_input, proof);
+ leave_block("Call to r1cs_ppzksnark_verifier_strong_IC");
+ return result;
+}
+
+template<typename ppT>
+bool r1cs_ppzksnark_affine_verifier_weak_IC(const r1cs_ppzksnark_verification_key<ppT> &vk,
+ const r1cs_ppzksnark_primary_input<ppT> &primary_input,
+ const r1cs_ppzksnark_proof<ppT> &proof)
+{
+ enter_block("Call to r1cs_ppzksnark_affine_verifier_weak_IC");
+ assert(vk.encoded_IC_query.domain_size() >= primary_input.size());
+
+ affine_ate_G2_precomp<ppT> pvk_pp_G2_one_precomp = ppT::affine_ate_precompute_G2(G2<ppT>::one());
+ affine_ate_G2_precomp<ppT> pvk_vk_alphaA_g2_precomp = ppT::affine_ate_precompute_G2(vk.alphaA_g2);
+ affine_ate_G1_precomp<ppT> pvk_vk_alphaB_g1_precomp = ppT::affine_ate_precompute_G1(vk.alphaB_g1);
+ affine_ate_G2_precomp<ppT> pvk_vk_alphaC_g2_precomp = ppT::affine_ate_precompute_G2(vk.alphaC_g2);
+ affine_ate_G2_precomp<ppT> pvk_vk_rC_Z_g2_precomp = ppT::affine_ate_precompute_G2(vk.rC_Z_g2);
+ affine_ate_G2_precomp<ppT> pvk_vk_gamma_g2_precomp = ppT::affine_ate_precompute_G2(vk.gamma_g2);
+ affine_ate_G1_precomp<ppT> pvk_vk_gamma_beta_g1_precomp = ppT::affine_ate_precompute_G1(vk.gamma_beta_g1);
+ affine_ate_G2_precomp<ppT> pvk_vk_gamma_beta_g2_precomp = ppT::affine_ate_precompute_G2(vk.gamma_beta_g2);
+
+ enter_block("Compute input-dependent part of A");
+ const accumulation_vector<G1<ppT> > accumulated_IC = vk.encoded_IC_query.template accumulate_chunk<Fr<ppT> >(primary_input.begin(), primary_input.end(), 0);
+ assert(accumulated_IC.is_fully_accumulated());
+ const G1<ppT> &acc = accumulated_IC.first;
+ leave_block("Compute input-dependent part of A");
+
+ bool result = true;
+ enter_block("Check knowledge commitment for A is valid");
+ affine_ate_G1_precomp<ppT> proof_g_A_g_precomp = ppT::affine_ate_precompute_G1(proof.g_A.g);
+ affine_ate_G1_precomp<ppT> proof_g_A_h_precomp = ppT::affine_ate_precompute_G1(proof.g_A.h);
+ Fqk<ppT> kc_A_miller = ppT::affine_ate_e_over_e_miller_loop(proof_g_A_g_precomp, pvk_vk_alphaA_g2_precomp, proof_g_A_h_precomp, pvk_pp_G2_one_precomp);
+ GT<ppT> kc_A = ppT::final_exponentiation(kc_A_miller);
+
+ if (kc_A != GT<ppT>::one())
+ {
+ print_indent(); printf("Knowledge commitment for A query incorrect.\n");
+ result = false;
+ }
+ leave_block("Check knowledge commitment for A is valid");
+
+ enter_block("Check knowledge commitment for B is valid");
+ affine_ate_G2_precomp<ppT> proof_g_B_g_precomp = ppT::affine_ate_precompute_G2(proof.g_B.g);
+ affine_ate_G1_precomp<ppT> proof_g_B_h_precomp = ppT::affine_ate_precompute_G1(proof.g_B.h);
+ Fqk<ppT> kc_B_miller = ppT::affine_ate_e_over_e_miller_loop(pvk_vk_alphaB_g1_precomp, proof_g_B_g_precomp, proof_g_B_h_precomp, pvk_pp_G2_one_precomp);
+ GT<ppT> kc_B = ppT::final_exponentiation(kc_B_miller);
+ if (kc_B != GT<ppT>::one())
+ {
+ print_indent(); printf("Knowledge commitment for B query incorrect.\n");
+ result = false;
+ }
+ leave_block("Check knowledge commitment for B is valid");
+
+ enter_block("Check knowledge commitment for C is valid");
+ affine_ate_G1_precomp<ppT> proof_g_C_g_precomp = ppT::affine_ate_precompute_G1(proof.g_C.g);
+ affine_ate_G1_precomp<ppT> proof_g_C_h_precomp = ppT::affine_ate_precompute_G1(proof.g_C.h);
+ Fqk<ppT> kc_C_miller = ppT::affine_ate_e_over_e_miller_loop(proof_g_C_g_precomp, pvk_vk_alphaC_g2_precomp, proof_g_C_h_precomp, pvk_pp_G2_one_precomp);
+ GT<ppT> kc_C = ppT::final_exponentiation(kc_C_miller);
+ if (kc_C != GT<ppT>::one())
+ {
+ print_indent(); printf("Knowledge commitment for C query incorrect.\n");
+ result = false;
+ }
+ leave_block("Check knowledge commitment for C is valid");
+
+ enter_block("Check QAP divisibility");
+ affine_ate_G1_precomp<ppT> proof_g_A_g_acc_precomp = ppT::affine_ate_precompute_G1(proof.g_A.g + acc);
+ affine_ate_G1_precomp<ppT> proof_g_H_precomp = ppT::affine_ate_precompute_G1(proof.g_H);
+ Fqk<ppT> QAP_miller = ppT::affine_ate_e_times_e_over_e_miller_loop(proof_g_H_precomp, pvk_vk_rC_Z_g2_precomp, proof_g_C_g_precomp, pvk_pp_G2_one_precomp, proof_g_A_g_acc_precomp, proof_g_B_g_precomp);
+ GT<ppT> QAP = ppT::final_exponentiation(QAP_miller);
+ if (QAP != GT<ppT>::one())
+ {
+ print_indent(); printf("QAP divisibility check failed.\n");
+ result = false;
+ }
+ leave_block("Check QAP divisibility");
+
+ enter_block("Check same coefficients were used");
+ affine_ate_G1_precomp<ppT> proof_g_K_precomp = ppT::affine_ate_precompute_G1(proof.g_K);
+ affine_ate_G1_precomp<ppT> proof_g_A_g_acc_C_precomp = ppT::affine_ate_precompute_G1((proof.g_A.g + acc) + proof.g_C.g);
+ Fqk<ppT> K_miller = ppT::affine_ate_e_times_e_over_e_miller_loop(proof_g_A_g_acc_C_precomp, pvk_vk_gamma_beta_g2_precomp, pvk_vk_gamma_beta_g1_precomp, proof_g_B_g_precomp, proof_g_K_precomp, pvk_vk_gamma_g2_precomp);
+ GT<ppT> K = ppT::final_exponentiation(K_miller);
+ if (K != GT<ppT>::one())
+ {
+ print_indent(); printf("Same-coefficient check failed.\n");
+ result = false;
+ }
+ leave_block("Check same coefficients were used");
+
+ leave_block("Call to r1cs_ppzksnark_affine_verifier_weak_IC");
+
+ return result;
+}
+
+} // libsnark
+#endif // R1CS_PPZKSNARK_TCC_
--- /dev/null
+/** @file
+ *****************************************************************************
+
+ Declaration of public-parameter selector for the R1CS ppzkSNARK.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+
+#ifndef R1CS_PPZKSNARK_PARAMS_HPP_
+#define R1CS_PPZKSNARK_PARAMS_HPP_
+
+#include "relations/constraint_satisfaction_problems/r1cs/r1cs.hpp"
+
+namespace libsnark {
+
+/**
+ * Below are various template aliases (used for convenience).
+ */
+
+template<typename ppT>
+using r1cs_ppzksnark_constraint_system = r1cs_constraint_system<Fr<ppT> >;
+
+template<typename ppT>
+using r1cs_ppzksnark_primary_input = r1cs_primary_input<Fr<ppT> >;
+
+template<typename ppT>
+using r1cs_ppzksnark_auxiliary_input = r1cs_auxiliary_input<Fr<ppT> >;
+
+} // libsnark
+
+#endif // R1CS_PPZKSNARK_PARAMS_HPP_
--- /dev/null
+/** @file
+ *****************************************************************************
+ Test program that exercises the ppzkSNARK (first generator, then
+ prover, then verifier) on a synthetic R1CS instance.
+
+ *****************************************************************************
+ * @author This file is part of libsnark, developed by SCIPR Lab
+ * and contributors (see AUTHORS).
+ * @copyright MIT license (see LICENSE file)
+ *****************************************************************************/
+#include <cassert>
+#include <cstdio>
+
+#include "common/default_types/r1cs_ppzksnark_pp.hpp"
+#include "common/profiling.hpp"
+#include "common/utils.hpp"
+#include "relations/constraint_satisfaction_problems/r1cs/examples/r1cs_examples.hpp"
+#include "zk_proof_systems/ppzksnark/r1cs_ppzksnark/examples/run_r1cs_ppzksnark.hpp"
+
+using namespace libsnark;
+
+template<typename ppT>
+void test_r1cs_ppzksnark(size_t num_constraints,
+ size_t input_size)
+{
+ print_header("(enter) Test R1CS ppzkSNARK");
+
+ const bool test_serialization = true;
+ r1cs_example<Fr<ppT> > example = generate_r1cs_example_with_binary_input<Fr<ppT> >(num_constraints, input_size);
+ const bool bit = run_r1cs_ppzksnark<ppT>(example, test_serialization);
+ assert(bit);
+
+ print_header("(leave) Test R1CS ppzkSNARK");
+}
+
+int main()
+{
+ default_r1cs_ppzksnark_pp::init_public_params();
+ start_profiling();
+
+ test_r1cs_ppzksnark<default_r1cs_ppzksnark_pp>(1000, 100);
+}