Update secp256k1 subtree to latest master

fanquake 2024-01-04 14:40:28 +00:00
commit e2cdeb5925
GPG Key ID: 2EEB9F5CC09526C1
46 changed files with 1476 additions and 1925 deletions

src/secp256k1/.cirrus.yml (new file, 95 lines)

@ -0,0 +1,95 @@
env:
### cirrus config
CIRRUS_CLONE_DEPTH: 1
### compiler options
HOST:
WRAPPER_CMD:
# Specific warnings can be disabled with -Wno-error=foo.
# -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual.
WERROR_CFLAGS: -Werror -pedantic-errors
MAKEFLAGS: -j4
BUILD: check
### secp256k1 config
ECMULTWINDOW: auto
ECMULTGENPRECISION: auto
ASM: no
WIDEMUL: auto
WITH_VALGRIND: yes
EXTRAFLAGS:
### secp256k1 modules
EXPERIMENTAL: no
ECDH: no
RECOVERY: no
SCHNORRSIG: no
ELLSWIFT: no
### test options
SECP256K1_TEST_ITERS:
BENCH: yes
SECP256K1_BENCH_ITERS: 2
CTIMETESTS: yes
# Compile and run the examples
EXAMPLES: yes
cat_logs_snippet: &CAT_LOGS
always:
cat_tests_log_script:
- cat tests.log || true
cat_noverify_tests_log_script:
- cat noverify_tests.log || true
cat_exhaustive_tests_log_script:
- cat exhaustive_tests.log || true
cat_ctime_tests_log_script:
- cat ctime_tests.log || true
cat_bench_log_script:
- cat bench.log || true
cat_config_log_script:
- cat config.log || true
cat_test_env_script:
- cat test_env.log || true
cat_ci_env_script:
- env
linux_arm64_container_snippet: &LINUX_ARM64_CONTAINER
env_script:
- env | tee /tmp/env
build_script:
- DOCKER_BUILDKIT=1 docker build --file "ci/linux-debian.Dockerfile" --tag="ci_secp256k1_arm" .
- docker image prune --force # Cleanup stale layers
test_script:
- docker run --rm --mount "type=bind,src=./,dst=/ci_secp256k1" --env-file /tmp/env --replace --name "ci_secp256k1_arm" "ci_secp256k1_arm" bash -c "cd /ci_secp256k1/ && ./ci/ci.sh"
task:
name: "ARM64: Linux (Debian stable)"
persistent_worker:
labels:
type: arm64
env:
ECDH: yes
RECOVERY: yes
SCHNORRSIG: yes
ELLSWIFT: yes
matrix:
# Currently only gcc-snapshot, the other compilers are tested on GHA with QEMU
- env: { CC: 'gcc-snapshot' }
<< : *LINUX_ARM64_CONTAINER
<< : *CAT_LOGS
task:
name: "ARM64: Linux (Debian stable), Valgrind"
persistent_worker:
labels:
type: arm64
env:
ECDH: yes
RECOVERY: yes
SCHNORRSIG: yes
ELLSWIFT: yes
WRAPPER_CMD: 'valgrind --error-exitcode=42'
SECP256K1_TEST_ITERS: 2
matrix:
- env: { CC: 'gcc' }
- env: { CC: 'clang' }
- env: { CC: 'gcc-snapshot' }
- env: { CC: 'clang-snapshot' }
<< : *LINUX_ARM64_CONTAINER
<< : *CAT_LOGS
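(Note: `&CAT_LOGS` and `&LINUX_ARM64_CONTAINER` above are standard YAML anchors; each `<< : *NAME` merge key pastes the anchored mapping into the task, so the log-dumping scripts and the container build/test steps are defined once and reused by both ARM64 tasks.)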

src/secp256k1/.github/actions/run-in-docker-action/action.yml

@ -14,9 +14,9 @@ inputs:
runs:
using: "composite"
steps:
- uses: docker/setup-buildx-action@v2
- uses: docker/setup-buildx-action@v3
- uses: docker/build-push-action@v4
- uses: docker/build-push-action@v5
id: main_builder
continue-on-error: true
with:
@ -26,7 +26,7 @@ runs:
load: true
cache-from: type=gha
- uses: docker/build-push-action@v4
- uses: docker/build-push-action@v5
id: retry_builder
if: steps.main_builder.outcome == 'failure'
with:

src/secp256k1/.github/workflows/ci.yml

@ -47,14 +47,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
# See: https://github.com/moby/buildkit/issues/3969.
driver-opts: |
network=host
- name: Build container
uses: docker/build-push-action@v4
uses: docker/build-push-action@v5
with:
file: ./ci/linux-debian.Dockerfile
tags: linux-debian-image
@ -792,7 +792,7 @@ jobs:
- name: Check installation with Autotools
env:
CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}
CI_INSTALL: ${{ runner.temp }}/${{ github.run_id }}${{ github.action }}/install
run: |
./autogen.sh && ./configure --prefix=${{ env.CI_INSTALL }} && make clean && make install && ls -RlAh ${{ env.CI_INSTALL }}
gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=${{ env.CI_INSTALL }}/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"${{ env.CI_INSTALL }}/lib" && ./ecdsa

src/secp256k1/CHANGELOG.md

@ -5,6 +5,17 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.4.1] - 2023-12-21
#### Changed
- The point multiplication algorithm used for ECDH operations (module `ecdh`) was replaced with a slightly faster one.
- Optional handwritten x86_64 assembly for field operations was removed because modern C compilers are able to output more efficient assembly. This change results in a significant speedup of some library functions when x86_64 assembly is enabled (`--with-asm=x86_64` in GNU Autotools, `-DSECP256K1_ASM=x86_64` in CMake), which is the default on x86_64. Benchmarks with GCC 10.5.0 show a 10% speedup for `secp256k1_ecdsa_verify` and `secp256k1_schnorrsig_verify`.
#### ABI Compatibility
The ABI is backward compatible with versions 0.4.0 and 0.3.x.
## [0.4.0] - 2023-09-04
#### Added
@ -104,7 +115,8 @@ This version was in fact never released.
The number was given by the build system since the introduction of autotools in Jan 2014 (ea0fe5a5bf0c04f9cc955b2966b614f5f378c6f6).
Therefore, this version number does not uniquely identify a set of source files.
[unreleased]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...HEAD
[unreleased]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.1...HEAD
[0.4.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.2...v0.4.0
[0.3.2]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.1...v0.3.2
[0.3.1]: https://github.com/bitcoin-core/secp256k1/compare/v0.3.0...v0.3.1

src/secp256k1/CMakeLists.txt

@ -11,7 +11,7 @@ project(libsecp256k1
# The package (a.k.a. release) version is based on semantic versioning 2.0.0 of
# the API. All changes in experimental modules are treated as
# backwards-compatible and therefore at most increase the minor version.
VERSION 0.4.0
VERSION 0.4.2
DESCRIPTION "Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1."
HOMEPAGE_URL "https://github.com/bitcoin-core/secp256k1"
LANGUAGES C
@ -35,7 +35,7 @@ endif()
# All changes in experimental modules are treated as if they don't affect the
# interface and therefore only increase the revision.
set(${PROJECT_NAME}_LIB_VERSION_CURRENT 3)
set(${PROJECT_NAME}_LIB_VERSION_REVISION 0)
set(${PROJECT_NAME}_LIB_VERSION_REVISION 2)
set(${PROJECT_NAME}_LIB_VERSION_AGE 1)
set(CMAKE_C_STANDARD 90)
@ -107,7 +107,7 @@ if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
endif()
mark_as_advanced(FORCE SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
set(SECP256K1_ASM "AUTO" CACHE STRING "Assembly optimizations to use: \"AUTO\", \"OFF\", \"x86_64\" or \"arm32\" (experimental). [default=AUTO]")
set(SECP256K1_ASM "AUTO" CACHE STRING "Assembly to use: \"AUTO\", \"OFF\", \"x86_64\" or \"arm32\" (experimental). [default=AUTO]")
set_property(CACHE SECP256K1_ASM PROPERTY STRINGS "AUTO" "OFF" "x86_64" "arm32")
check_string_option_value(SECP256K1_ASM)
if(SECP256K1_ASM STREQUAL "arm32")
@ -117,7 +117,7 @@ if(SECP256K1_ASM STREQUAL "arm32")
if(HAVE_ARM32_ASM)
add_compile_definitions(USE_EXTERNAL_ASM=1)
else()
message(FATAL_ERROR "ARM32 assembly optimization requested but not available.")
message(FATAL_ERROR "ARM32 assembly requested but not available.")
endif()
elseif(SECP256K1_ASM)
include(CheckX86_64Assembly)
@ -128,14 +128,14 @@ elseif(SECP256K1_ASM)
elseif(SECP256K1_ASM STREQUAL "AUTO")
set(SECP256K1_ASM "OFF")
else()
message(FATAL_ERROR "x86_64 assembly optimization requested but not available.")
message(FATAL_ERROR "x86_64 assembly requested but not available.")
endif()
endif()
option(SECP256K1_EXPERIMENTAL "Allow experimental configuration options." OFF)
if(NOT SECP256K1_EXPERIMENTAL)
if(SECP256K1_ASM STREQUAL "arm32")
message(FATAL_ERROR "ARM32 assembly optimization is experimental. Use -DSECP256K1_EXPERIMENTAL=ON to allow.")
message(FATAL_ERROR "ARM32 assembly is experimental. Use -DSECP256K1_EXPERIMENTAL=ON to allow.")
endif()
endif()
@ -280,7 +280,7 @@ message("Parameters:")
message(" ecmult window size .................. ${SECP256K1_ECMULT_WINDOW_SIZE}")
message(" ecmult gen precision bits ........... ${SECP256K1_ECMULT_GEN_PREC_BITS}")
message("Optional features:")
message(" assembly optimization ............... ${SECP256K1_ASM}")
message(" assembly ............................ ${SECP256K1_ASM}")
message(" external callbacks .................. ${SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS}")
if(SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY)
message(" wide multiplication (test-only) ..... ${SECP256K1_TEST_OVERRIDE_WIDE_MULTIPLY}")

src/secp256k1/CONTRIBUTING.md (new file, 107 lines)

@ -0,0 +1,107 @@
# Contributing to libsecp256k1
## Scope
libsecp256k1 is a library for elliptic curve cryptography on the curve secp256k1, not a general-purpose cryptography library.
The library primarily serves the needs of the Bitcoin Core project but provides additional functionality for the benefit of the wider Bitcoin ecosystem.
## Adding new functionality or modules
The libsecp256k1 project welcomes contributions in the form of new functionality or modules, provided they are within the project's scope.
It is the responsibility of the contributors to convince the maintainers that the proposed functionality is within the project's scope, high-quality and maintainable.
Contributors are recommended to provide the following in addition to the new code:
* **Specification:**
A specification can help significantly in reviewing the new code as it provides documentation and context.
It may justify various design decisions, give a motivation and outline security goals.
If the specification contains pseudocode, a reference implementation or test vectors, these can be used to compare with the proposed libsecp256k1 code.
* **Security Arguments:**
In addition to defining the security goals, it should be argued that the new functionality meets these goals.
Depending on the nature of the new functionality, a wide range of security arguments are acceptable, ranging from being "obviously secure" to rigorous proofs of security.
* **Relevance Arguments:**
The relevance of the new functionality for the Bitcoin ecosystem should be argued by outlining clear use cases.
These are not the only factors taken into account when considering to add new functionality.
The proposed new libsecp256k1 code must be of high quality, including API documentation and tests, as well as featuring a misuse-resistant API design.
We recommend reaching out to other contributors (see [Communication Channels](#communication-channels)) and getting feedback before implementing new functionality.
## Communication channels
Most communication about libsecp256k1 occurs on the GitHub repository: in issues, pull requests, or on the discussion board.
Additionally, there is an IRC channel dedicated to libsecp256k1, with biweekly meetings (see channel topic).
The channel is `#secp256k1` on Libera Chat.
The easiest way to participate on IRC is with the web client, [web.libera.chat](https://web.libera.chat/#secp256k1).
Chat history logs can be found at https://gnusha.org/secp256k1/.
## Contributor workflow & peer review
The Contributor Workflow & Peer Review in libsecp256k1 are similar to Bitcoin Core's workflow and review processes described in its [CONTRIBUTING.md](https://github.com/bitcoin/bitcoin/blob/master/CONTRIBUTING.md).
### Coding conventions
In addition, libsecp256k1 tries to maintain the following coding conventions:
* No runtime heap allocation (e.g., no `malloc`) unless explicitly requested by the caller (via `secp256k1_context_create` or `secp256k1_scratch_space_create`, for example). Moreover, it should be possible to use the library without any heap allocations.
* The tests should cover all lines and branches of the library (see [Test coverage](#coverage)).
* Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)).
* Local variables containing secret data should be cleared explicitly to try to delete secrets from memory.
* Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)).
#### Style conventions
* Commits should be atomic and diffs should be easy to read. For this reason, do not mix any formatting fixes or code moves with actual code changes. Make sure each individual commit is hygienic: that it builds successfully on its own without warnings, errors, regressions, or test failures.
* New code should adhere to the style of existing, in particular surrounding, code. Other than that, we do not enforce strict rules for code formatting.
* The code conforms to C89. Most notably, that means that only `/* ... */` comments are allowed (no `//` line comments). Moreover, any declarations in a `{ ... }` block (e.g., a function) must appear at the beginning of the block before any statements. When you would like to declare a variable in the middle of a block, you can open a new block:
```C
void secp256k_foo(void) {
    unsigned int x = 4;          /* declaration */
    int y = 2*x;                 /* declaration */
    x = 17;                      /* statement */
    {
        int a, b;                /* declaration */
        a = x + y;               /* statement */
        secp256k_bar(x, &b);     /* statement */
    }
}
```
* Use `unsigned int` instead of just `unsigned`.
* Use `void *ptr` instead of `void* ptr`.
* Arguments of the publicly-facing API must have a specific order defined in [include/secp256k1.h](include/secp256k1.h).
* User-facing comment lines in headers should be limited to 80 chars if possible.
* All identifiers in file scope should start with `secp256k1_`.
* Avoid trailing whitespace.
### Tests
#### Coverage
This library aims to have full coverage of reachable lines and branches.
To create a test coverage report, configure with `--enable-coverage` (use of GCC is necessary):
    $ ./configure --enable-coverage

Run the tests:

    $ make check

To create a report, `gcovr` is recommended, as it includes branch coverage reporting:

    $ gcovr --exclude 'src/bench*' --print-summary

To create an HTML report with coloured and annotated source code:

    $ mkdir -p coverage
    $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html
#### Exhaustive tests
There are tests of several functions in which a small group replaces secp256k1.
These tests are *exhaustive* since they provide all elements and scalars of the small group as input arguments (see [src/tests_exhaustive.c](src/tests_exhaustive.c)).
### Benchmarks
See `src/bench*.c` for examples of benchmarks.

src/secp256k1/Makefile.am

@ -37,7 +37,6 @@ noinst_HEADERS += src/field_10x26_impl.h
noinst_HEADERS += src/field_5x52.h
noinst_HEADERS += src/field_5x52_impl.h
noinst_HEADERS += src/field_5x52_int128_impl.h
noinst_HEADERS += src/field_5x52_asm_impl.h
noinst_HEADERS += src/modinv32.h
noinst_HEADERS += src/modinv32_impl.h
noinst_HEADERS += src/modinv64.h
@ -46,6 +45,7 @@ noinst_HEADERS += src/precomputed_ecmult.h
noinst_HEADERS += src/precomputed_ecmult_gen.h
noinst_HEADERS += src/assumptions.h
noinst_HEADERS += src/checkmem.h
noinst_HEADERS += src/testutil.h
noinst_HEADERS += src/util.h
noinst_HEADERS += src/int128.h
noinst_HEADERS += src/int128_impl.h

src/secp256k1/README.md

@ -1,11 +1,10 @@
libsecp256k1
============
[![Build Status](https://api.cirrus-ci.com/github/bitcoin-core/secp256k1.svg?branch=master)](https://cirrus-ci.com/github/bitcoin-core/secp256k1)
![Dependencies: None](https://img.shields.io/badge/dependencies-none-success)
[![irc.libera.chat #secp256k1](https://img.shields.io/badge/irc.libera.chat-%23secp256k1-success)](https://web.libera.chat/#secp256k1)
Optimized C library for ECDSA signatures and secret/public key operations on curve secp256k1.
High-performance high-assurance C library for digital signatures and other cryptographic primitives on the secp256k1 elliptic curve.
This library is intended to be the highest quality publicly available library for cryptography on the secp256k1 curve. However, the primary focus of its development has been for usage in the Bitcoin system and usage unlike Bitcoin's may be less well tested, verified, or suffer from a less well thought out interface. Correct usage requires some care and consideration that the library is fit for your application's purpose.
@ -34,7 +33,7 @@ Implementation details
* Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.")
* Field operations
* Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1).
* Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys).
* Using 5 52-bit limbs.
* Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan).
* This is an experimental feature that has not received enough scrutiny to satisfy the standard of quality of this library but is made available for testing and review by the community.
* Scalar operations
@ -117,28 +116,6 @@ Usage examples can be found in the [examples](examples) directory. To compile th
To compile the Schnorr signature and ECDH examples, you also need to configure with `--enable-module-schnorrsig` and `--enable-module-ecdh`.
Test coverage
-----------
This library aims to have full coverage of the reachable lines and branches.
To create a test coverage report, configure with `--enable-coverage` (use of GCC is necessary):
    $ ./configure --enable-coverage

Run the tests:

    $ make check

To create a report, `gcovr` is recommended, as it includes branch coverage reporting:

    $ gcovr --exclude 'src/bench*' --print-summary

To create a HTML report with coloured and annotated source code:

    $ mkdir -p coverage
    $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html
Benchmark
------------
If configured with `--enable-benchmark` (which is the default), binaries for benchmarking the libsecp256k1 functions will be present in the root directory after the build.
@ -155,3 +132,8 @@ Reporting a vulnerability
------------
See [SECURITY.md](SECURITY.md)
Contributing to libsecp256k1
------------
See [CONTRIBUTING.md](CONTRIBUTING.md)

src/secp256k1/ci/ci.sh

@ -83,7 +83,21 @@ esac
--host="$HOST" $EXTRAFLAGS
# We have set "-j<n>" in MAKEFLAGS.
make
build_exit_code=0
make > make.log 2>&1 || build_exit_code=$?
cat make.log
if [ $build_exit_code -ne 0 ]; then
    case "${CC:-undefined}" in
        *snapshot*)
            # Ignore internal compiler errors in gcc-snapshot and clang-snapshot
            grep -e "internal compiler error:" -e "PLEASE submit a bug report" make.log
            return $?;
            ;;
        *)
            return 1;
            ;;
    esac
fi
# Print information about binaries so that we can see that the architecture is correct
file *tests* || true

src/secp256k1/ci/linux-debian.Dockerfile

@ -29,11 +29,15 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan8:i386 \
gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \
gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \
gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \
gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \
gcc-mingw-w64-x86-64-win32 wine64 wine \
gcc-mingw-w64-i686-win32 wine32 \
python3
    python3 && \
    if ! ( dpkg --print-architecture | grep --quiet "arm64" ) ; then \
        apt-get install --no-install-recommends -y \
            gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 ;\
    fi && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
# Build and install gcc snapshot
ARG GCC_SNAPSHOT_MAJOR=14
@ -44,7 +48,7 @@ RUN apt-get update && apt-get install --no-install-recommends -y wget libgmp-dev
sha512sum --check --ignore-missing sha512.sum && \
# We should have downloaded exactly one tar.xz file
ls && \
[[ $(ls *.tar.xz | wc -l) -eq "1" ]] && \
[ $(ls *.tar.xz | wc -l) -eq "1" ] && \
tar xf *.tar.xz && \
mkdir gcc-build && cd gcc-build && \
../*/configure --prefix=/opt/gcc-snapshot --enable-languages=c --disable-bootstrap --disable-multilib --without-isl && \

src/secp256k1/cmake/GeneratePkgConfigFile.cmake (new file, 8 lines)

@ -0,0 +1,8 @@
function(generate_pkg_config_file in_file)
  set(prefix ${CMAKE_INSTALL_PREFIX})
  set(exec_prefix \${prefix})
  set(libdir \${exec_prefix}/${CMAKE_INSTALL_LIBDIR})
  set(includedir \${prefix}/${CMAKE_INSTALL_INCLUDEDIR})
  set(PACKAGE_VERSION ${PROJECT_VERSION})
  configure_file(${in_file} ${PROJECT_NAME}.pc @ONLY)
endfunction()
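The `libsecp256k1.pc` file this function generates is what the `pkg-config --cflags --libs libsecp256k1` commands in the installation checks elsewhere in this commit consume.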

src/secp256k1/configure.ac

@ -5,8 +5,8 @@ AC_PREREQ([2.60])
# backwards-compatible and therefore at most increase the minor version.
define(_PKG_VERSION_MAJOR, 0)
define(_PKG_VERSION_MINOR, 4)
define(_PKG_VERSION_PATCH, 0)
define(_PKG_VERSION_IS_RELEASE, true)
define(_PKG_VERSION_PATCH, 2)
define(_PKG_VERSION_IS_RELEASE, false)
# The library version is based on libtool versioning of the ABI. The set of
# rules for updating the version can be found here:
@ -14,7 +14,7 @@ define(_PKG_VERSION_IS_RELEASE, true)
# All changes in experimental modules are treated as if they don't affect the
# interface and therefore only increase the revision.
define(_LIB_VERSION_CURRENT, 3)
define(_LIB_VERSION_REVISION, 0)
define(_LIB_VERSION_REVISION, 2)
define(_LIB_VERSION_AGE, 1)
AC_INIT([libsecp256k1],m4_join([.], _PKG_VERSION_MAJOR, _PKG_VERSION_MINOR, _PKG_VERSION_PATCH)m4_if(_PKG_VERSION_IS_RELEASE, [true], [], [-dev]),[https://github.com/bitcoin-core/secp256k1/issues],[libsecp256k1],[https://github.com/bitcoin-core/secp256k1])
@ -201,7 +201,7 @@ AC_ARG_ENABLE(external_default_callbacks,
AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto])
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm32|no|auto],
[assembly optimizations to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto])
[assembly to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto])
AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
@ -279,24 +279,24 @@ else
x86_64)
SECP_X86_64_ASM_CHECK
if test x"$has_x86_64_asm" != x"yes"; then
AC_MSG_ERROR([x86_64 assembly optimization requested but not available])
AC_MSG_ERROR([x86_64 assembly requested but not available])
fi
;;
arm32)
SECP_ARM32_ASM_CHECK
if test x"$has_arm32_asm" != x"yes"; then
AC_MSG_ERROR([ARM32 assembly optimization requested but not available])
AC_MSG_ERROR([ARM32 assembly requested but not available])
fi
;;
no)
;;
*)
AC_MSG_ERROR([invalid assembly optimization selection])
AC_MSG_ERROR([invalid assembly selection])
;;
esac
fi
# Select assembly optimization
# Select assembly
enable_external_asm=no
case $set_asm in
@ -309,7 +309,7 @@ arm32)
no)
;;
*)
AC_MSG_ERROR([invalid assembly optimizations])
AC_MSG_ERROR([invalid assembly selection])
;;
esac
@ -425,7 +425,7 @@ if test x"$enable_experimental" = x"yes"; then
AC_MSG_NOTICE([******])
else
if test x"$set_asm" = x"arm32"; then
AC_MSG_ERROR([ARM32 assembly optimization is experimental. Use --enable-experimental to allow.])
AC_MSG_ERROR([ARM32 assembly is experimental. Use --enable-experimental to allow.])
fi
fi

src/secp256k1/doc/release-process.md

@ -24,16 +24,21 @@ Perform these checks before creating a release:
2. Check installation with autotools:
```shell
dir=$(mktemp -d)
./autogen.sh && ./configure --prefix=$dir && make clean && make install && ls -l $dir/include $dir/lib
./autogen.sh && ./configure --prefix=$dir && make clean && make install && ls -RlAh $dir
gcc -o ecdsa examples/ecdsa.c $(PKG_CONFIG_PATH=$dir/lib/pkgconfig pkg-config --cflags --libs libsecp256k1) -Wl,-rpath,"$dir/lib" && ./ecdsa
```
3. Check installation with CMake:
```shell
dir=$(mktemp -d)
build=$(mktemp -d)
cmake -B $build -DCMAKE_INSTALL_PREFIX=$dir && cmake --build $build --target install && ls -l $dir/include $dir/lib*
cmake -B $build -DCMAKE_INSTALL_PREFIX=$dir && cmake --build $build --target install && ls -RlAh $dir
gcc -o ecdsa examples/ecdsa.c -I $dir/include -L $dir/lib*/ -l secp256k1 -Wl,-rpath,"$dir/lib",-rpath,"$dir/lib64" && ./ecdsa
```
4. Use the [`check-abi.sh`](/tools/check-abi.sh) tool to ensure there are no unexpected ABI incompatibilities and that the version number and release notes accurately reflect all potential ABI changes. To run this tool, the `abi-dumper` and `abi-compliance-checker` packages are required.
```shell
tools/check-abi.sh
```
## Regular release
@ -41,7 +46,7 @@ gcc -o ecdsa examples/ecdsa.c -I $dir/include -L $dir/lib*/ -l secp256k1 -Wl,-rp
* finalizes the release notes in [CHANGELOG.md](../CHANGELOG.md) by
* adding a section for the release (make sure that the version number is a link to a diff between the previous and new version),
* removing the `[Unreleased]` section header, and
* including an entry for `### ABI Compatibility` if it doesn't exist that mentions the library soname of the release,
* including an entry for `### ABI Compatibility` if it doesn't exist,
* sets `_PKG_VERSION_IS_RELEASE` to `true` in `configure.ac`, and
* if this is not a patch release
* updates `_PKG_VERSION_*` and `_LIB_VERSION_*` in `configure.ac` and

src/secp256k1/src/CMakeLists.txt

@ -161,5 +161,13 @@ if(SECP256K1_INSTALL)
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake
${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}
)
)
  include(GeneratePkgConfigFile)
  generate_pkg_config_file(${PROJECT_SOURCE_DIR}/libsecp256k1.pc.in)
  install(
    FILES
      ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc
    DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
  )
endif()

src/secp256k1/src/asm/field_10x26_arm.s

@ -913,3 +913,4 @@ secp256k1_fe_sqr_inner:
ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
.size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner
.section .note.GNU-stack,"",%progbits

src/secp256k1/src/bench_internal.c

@ -14,10 +14,28 @@
#include "field_impl.h"
#include "group_impl.h"
#include "scalar_impl.h"
#include "ecmult_const_impl.h"
#include "ecmult_impl.h"
#include "bench.h"
static void help(int default_iters) {
    printf("Benchmarks various internal routines.\n");
    printf("\n");
    printf("The default number of iterations for each benchmark is %d. This can be\n", default_iters);
    printf("customized using the SECP256K1_BENCH_ITERS environment variable.\n");
    printf("\n");
    printf("Usage: ./bench_internal [args]\n");
    printf("By default, all benchmarks will be run.\n");
    printf("args:\n");
    printf("    help    : display this help and exit\n");
    printf("    scalar  : all scalar operations (add, half, inverse, mul, negate, split)\n");
    printf("    field   : all field operations (half, inverse, issquare, mul, normalize, sqr, sqrt)\n");
    printf("    group   : all group operations (add, double, to_affine)\n");
    printf("    ecmult  : all point multiplication operations (ecmult_wnaf)\n");
    printf("    hash    : all hash algorithms (hmac, rng6979, sha256)\n");
    printf("    context : all context object operations (context_create)\n");
    printf("\n");
}
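/* Usage sketch (illustrative, not part of the file): "./bench_internal scalar"
 * runs only the scalar benchmarks via the have_flag() checks in main() below,
 * and "SECP256K1_BENCH_ITERS=5000 ./bench_internal" overrides the default
 * iteration count, as the help text above describes. */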
typedef struct {
secp256k1_scalar scalar[2];
secp256k1_fe fe[4];
@ -98,6 +116,18 @@ static void bench_scalar_negate(void* arg, int iters) {
}
}
static void bench_scalar_half(void* arg, int iters) {
    int i;
    bench_inv *data = (bench_inv*)arg;
    secp256k1_scalar s = data->scalar[0];

    for (i = 0; i < iters; i++) {
        secp256k1_scalar_half(&s, &s);
    }

    data->scalar[0] = s;
}
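/* Note on the benchmark pattern above: the scalar is copied out of the shared
 * state, the operation is looped, and the result is written back; the
 * write-back keeps the compiler from optimizing the loop away. The
 * run_benchmark() calls in main() below do the timing. */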
static void bench_scalar_mul(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
@ -309,18 +339,6 @@ static void bench_ecmult_wnaf(void* arg, int iters) {
CHECK(bits <= 256*iters);
}
static void bench_wnaf_const(void* arg, int iters) {
int i, bits = 0, overflow = 0;
bench_inv *data = (bench_inv*)arg;
for (i = 0; i < iters; i++) {
bits += secp256k1_wnaf_const(data->wnaf, &data->scalar[0], WINDOW_A, 256);
overflow += secp256k1_scalar_add(&data->scalar[0], &data->scalar[0], &data->scalar[1]);
}
CHECK(overflow >= 0);
CHECK(bits <= 256*iters);
}
static void bench_sha256(void* arg, int iters) {
int i;
bench_inv *data = (bench_inv*)arg;
@ -366,10 +384,22 @@ static void bench_context(void* arg, int iters) {
int main(int argc, char **argv) {
bench_inv data;
int iters = get_iters(20000);
int default_iters = 20000;
int iters = get_iters(default_iters);
int d = argc == 1; /* default */
if (argc > 1) {
if (have_flag(argc, argv, "-h")
|| have_flag(argc, argv, "--help")
|| have_flag(argc, argv, "help")) {
help(default_iters);
return 0;
}
}
print_output_table_header_row();
if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "half")) run_benchmark("scalar_half", bench_scalar_half, bench_setup, NULL, &data, 10, iters*100);
if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100);
if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100);
if (d || have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10);
@ -394,7 +424,6 @@ int main(int argc, char **argv) {
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_zinv_var", bench_group_add_zinv_var, bench_setup, NULL, &data, 10, iters*10);
if (d || have_flag(argc, argv, "group") || have_flag(argc, argv, "to_affine")) run_benchmark("group_to_affine_var", bench_group_to_affine_var, bench_setup, NULL, &data, 10, iters);
if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, iters);
if (d || have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, iters);
if (d || have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, iters);

src/secp256k1/src/ecdsa_impl.h

@ -66,8 +66,7 @@ static int secp256k1_der_read_len(size_t *len, const unsigned char **sigp, const
}
if (lenleft > sizeof(size_t)) {
/* The resulting length would exceed the range of a size_t, so
* certainly longer than the passed array size.
*/
* it is certainly longer than the passed array size. */
return 0;
}
while (lenleft > 0) {
@ -76,7 +75,9 @@ static int secp256k1_der_read_len(size_t *len, const unsigned char **sigp, const
lenleft--;
}
if (*len > (size_t)(sigend - *sigp)) {
/* Result exceeds the length of the passed array. */
/* Result exceeds the length of the passed array.
(Checking this is the responsibility of the caller but it
can't hurt to do it here, too.) */
return 0;
}
if (*len < 128) {
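To make the length-decoding logic above concrete, here is a minimal standalone sketch of DER definite-length parsing (an illustration only, not the library's code; `demo_der_read_len` is a hypothetical name, and the minimal-encoding checks that the real `secp256k1_der_read_len` performs are omitted):

```C
#include <stddef.h>

/* Decode a DER definite-form length: one byte < 0x80 (short form), or
 * 0x80|k followed by k big-endian length bytes (long form). Returns 1 on
 * success, 0 on malformed or oversized input. */
static int demo_der_read_len(size_t *len, const unsigned char **p, const unsigned char *end) {
    size_t lenleft;
    unsigned char b;
    if (*p >= end) return 0;
    b = *((*p)++);
    if (b < 0x80) {
        /* Short form: the byte itself is the length. */
        *len = b;
        return 1;
    }
    lenleft = b & 0x7F;
    if (lenleft == 0 || lenleft > sizeof(size_t)) return 0;
    if ((size_t)(end - *p) < lenleft) return 0;
    *len = 0;
    while (lenleft > 0) {
        /* Accumulate big-endian length bytes. */
        *len = (*len << 8) | **p;
        (*p)++;
        lenleft--;
    }
    /* The decoded length must not exceed the remaining input (mirrors the
     * check in the hunk above). */
    return *len <= (size_t)(end - *p);
}
```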

src/secp256k1/src/ecmult_const_impl.h

@ -1,5 +1,5 @@
/***********************************************************************
* Copyright (c) 2015 Pieter Wuille, Andrew Poelstra *
* Copyright (c) 2015, 2022 Pieter Wuille, Andrew Poelstra *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
@ -12,208 +12,259 @@
#include "ecmult_const.h"
#include "ecmult_impl.h"
#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need 2^ECMULT_CONST_GROUP_SIZE - 1 to be less than EXHAUSTIVE_TEST_ORDER, because
* the tables cannot have infinities in them (this breaks the effective-affine technique's
* z-ratio tracking) */
# if EXHAUSTIVE_TEST_ORDER == 199
# define ECMULT_CONST_GROUP_SIZE 4
# elif EXHAUSTIVE_TEST_ORDER == 13
# define ECMULT_CONST_GROUP_SIZE 3
# elif EXHAUSTIVE_TEST_ORDER == 7
# define ECMULT_CONST_GROUP_SIZE 2
# else
# error "Unknown EXHAUSTIVE_TEST_ORDER"
# endif
#else
/* Group size 4 or 5 appears optimal. */
# define ECMULT_CONST_GROUP_SIZE 5
#endif
#define ECMULT_CONST_TABLE_SIZE (1L << (ECMULT_CONST_GROUP_SIZE - 1))
#define ECMULT_CONST_GROUPS ((129 + ECMULT_CONST_GROUP_SIZE - 1) / ECMULT_CONST_GROUP_SIZE)
#define ECMULT_CONST_BITS (ECMULT_CONST_GROUPS * ECMULT_CONST_GROUP_SIZE)
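/* For example, with the default ECMULT_CONST_GROUP_SIZE of 5 this gives
 * ECMULT_CONST_TABLE_SIZE = 1 << 4 = 16 odd multiples per table,
 * ECMULT_CONST_GROUPS = (129 + 4) / 5 = 26 groups, and
 * ECMULT_CONST_BITS = 26 * 5 = 130 (matching the K constant for
 * ECMULT_CONST_BITS == 130 further down). */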
/** Fill a table 'pre' with precomputed odd multiples of a.
*
* The resulting point set is brought to a single constant Z denominator, stores the X and Y
* coordinates as ge_storage points in pre, and stores the global Z in globalz.
* It only operates on tables sized for WINDOW_A wnaf multiples.
* coordinates as ge points in pre, and stores the global Z in globalz.
*
* 'pre' must be an array of size ECMULT_CONST_TABLE_SIZE.
*/
static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
static void secp256k1_ecmult_const_odd_multiples_table_globalz(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
secp256k1_fe zr[ECMULT_CONST_TABLE_SIZE];
secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr, globalz, a);
secp256k1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A), pre, zr);
secp256k1_ecmult_odd_multiples_table(ECMULT_CONST_TABLE_SIZE, pre, zr, globalz, a);
secp256k1_ge_table_set_globalz(ECMULT_CONST_TABLE_SIZE, pre, zr);
}
/* This is like `ECMULT_TABLE_GET_GE` but is constant time */
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \
int m = 0; \
/* Extract the sign-bit for a constant time absolute-value. */ \
int volatile mask = (n) >> (sizeof(n) * CHAR_BIT - 1); \
int abs_n = ((n) + mask) ^ mask; \
int idx_n = abs_n >> 1; \
/* Given a table 'pre' with odd multiples of a point, put in r the signed-bit multiplication of n with that point.
*
* For example, if ECMULT_CONST_GROUP_SIZE is 4, then pre is expected to contain 8 entries:
* [1*P, 3*P, 5*P, 7*P, 9*P, 11*P, 13*P, 15*P]. n is then expected to be a 4-bit integer (range 0-15), and its
* bits are interpreted as signs of powers of two to look up.
*
* For example, if n=4, which is 0100 in binary, which is interpreted as [- + - -], so the looked up value is
* [ -(2^3) + (2^2) - (2^1) - (2^0) ]*P = -7*P. Every valid n translates to an odd number in range [-15,15],
* which means we just need to look up one of the precomputed values, and optionally negate it.
*/
#define ECMULT_CONST_TABLE_GET_GE(r,pre,n) do { \
unsigned int m = 0; \
/* If the top bit of n is 0, we want the negation. */ \
volatile unsigned int negative = ((n) >> (ECMULT_CONST_GROUP_SIZE - 1)) ^ 1; \
/* Let n[i] be the i-th bit of n, then the index is
* sum(cnot(n[i]) * 2^i, i=0..l-2)
* where cnot(b) = b if n[l-1] = 1 and 1 - b otherwise.
* For example, if n = 4, in binary 0100, the index is 3, in binary 011.
*
* Proof:
* Let
* x = sum((2*n[i] - 1)*2^i, i=0..l-1)
* = 2*sum(n[i] * 2^i, i=0..l-1) - 2^l + 1
* be the value represented by n.
* The index is (x - 1)/2 if x > 0 and -(x + 1)/2 otherwise.
* Case x > 0:
* n[l-1] = 1
* index = sum(n[i] * 2^i, i=0..l-1) - 2^(l-1)
* = sum(n[i] * 2^i, i=0..l-2)
* Case x <= 0:
* n[l-1] = 0
* index = -(2*sum(n[i] * 2^i, i=0..l-1) - 2^l + 2)/2
* = 2^(l-1) - 1 - sum(n[i] * 2^i, i=0..l-1)
* = sum((1 - n[i]) * 2^i, i=0..l-2)
*/ \
unsigned int index = ((unsigned int)(-negative) ^ n) & ((1U << (ECMULT_CONST_GROUP_SIZE - 1)) - 1U); \
secp256k1_fe neg_y; \
VERIFY_CHECK(((n) & 1) == 1); \
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
/* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \
VERIFY_CHECK((n) < (1U << ECMULT_CONST_GROUP_SIZE)); \
VERIFY_CHECK(index < (1U << (ECMULT_CONST_GROUP_SIZE - 1))); \
/* Unconditionally set r->x = (pre)[m].x, r->y = (pre)[m].y, because it's either the correct one
* or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \
(r)->x = (pre)[m].x; \
(r)->y = (pre)[m].y; \
for (m = 1; m < ECMULT_TABLE_SIZE(w); m++) { \
for (m = 1; m < ECMULT_CONST_TABLE_SIZE; m++) { \
/* This loop is used to avoid secret data in array indices. See
* the comment in ecmult_gen_impl.h for rationale. */ \
secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \
secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \
secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == index); \
secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == index); \
} \
(r)->infinity = 0; \
secp256k1_fe_negate(&neg_y, &(r)->y, 1); \
secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
secp256k1_fe_cmov(&(r)->y, &neg_y, negative); \
} while(0)
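As a concrete check of the sign/index decoding above, here is a standalone sketch (illustrative only; the `main` driver is hypothetical, not library code) that decodes every 4-bit n into the odd multiple it selects, reproducing the n=4 -> -7*P example from the comment:

```C
#include <stdio.h>

/* Decode every signed-bit group value n (GROUP_SIZE = 4) into the odd
 * multiple of P it selects, the same way ECMULT_CONST_TABLE_GET_GE does. */
int main(void) {
    unsigned int n;
    for (n = 0; n < 16; n++) {
        /* If the top bit of n is 0, we want the negation. */
        unsigned int negative = (n >> 3) ^ 1;
        /* Table index into [1*P, 3*P, 5*P, ..., 15*P]. */
        unsigned int index = ((0U - negative) ^ n) & 7U;
        int multiple = (int)(2 * index + 1);
        if (negative) multiple = -multiple;
        /* multiple always equals 2*(int)n + 1 - 16; e.g. n=4 gives -7. */
        printf("n=%2u -> %3d*P (table index %u)\n", n, multiple, index);
    }
    return 0;
}
```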
/** Convert a number to WNAF notation.
* The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
* It has the following guarantees:
* - each wnaf[i] an odd integer between -(1 << w) and (1 << w)
* - each wnaf[i] is nonzero
* - the number of words set is always WNAF_SIZE(w) + 1
*
* Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar
* Multiplications Secure against Side Channel Attacks`, Okeya and Tagaki. M. Joye (Ed.)
* CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlag Berlin Heidelberg 2003
*
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
*/
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
int global_sign;
int skew;
int word = 0;
/* For K as defined in the comment of secp256k1_ecmult_const, we have several precomputed
* formulas/constants.
* - in exhaustive test mode, we give an explicit expression to compute it at compile time: */
#ifdef EXHAUSTIVE_TEST_ORDER
static const secp256k1_scalar secp256k1_ecmult_const_K = ((SECP256K1_SCALAR_CONST(0, 0, 0, (1U << (ECMULT_CONST_BITS - 128)) - 2U, 0, 0, 0, 0) + EXHAUSTIVE_TEST_ORDER - 1U) * (1U + EXHAUSTIVE_TEST_LAMBDA)) % EXHAUSTIVE_TEST_ORDER;
/* - for the real secp256k1 group we have constants for various ECMULT_CONST_BITS values. */
#elif ECMULT_CONST_BITS == 129
/* For GROUP_SIZE = 1,3. */
static const secp256k1_scalar secp256k1_ecmult_const_K = SECP256K1_SCALAR_CONST(0xac9c52b3ul, 0x3fa3cf1ful, 0x5ad9e3fdul, 0x77ed9ba4ul, 0xa880b9fcul, 0x8ec739c2ul, 0xe0cfc810ul, 0xb51283ceul);
#elif ECMULT_CONST_BITS == 130
/* For GROUP_SIZE = 2,5. */
static const secp256k1_scalar secp256k1_ecmult_const_K = SECP256K1_SCALAR_CONST(0xa4e88a7dul, 0xcb13034eul, 0xc2bdd6bful, 0x7c118d6bul, 0x589ae848ul, 0x26ba29e4ul, 0xb5c2c1dcul, 0xde9798d9ul);
#elif ECMULT_CONST_BITS == 132
/* For GROUP_SIZE = 4,6 */
static const secp256k1_scalar secp256k1_ecmult_const_K = SECP256K1_SCALAR_CONST(0x76b1d93dul, 0x0fae3c6bul, 0x3215874bul, 0x94e93813ul, 0x7937fe0dul, 0xb66bcaaful, 0xb3749ca5ul, 0xd7b6171bul);
#else
# error "Unknown ECMULT_CONST_BITS"
#endif
/* 1 2 3 */
int u_last;
int u;
int flip;
secp256k1_scalar s = *scalar;
VERIFY_CHECK(w > 0);
VERIFY_CHECK(size > 0);
/* Note that we cannot handle even numbers by negating them to be odd, as is
* done in other implementations, since if our scalars were specified to have
* width < 256 for performance reasons, their negations would have width 256
* and we'd lose any performance benefit. Instead, we use a variation of a
* technique from Section 4.2 of the Okeya/Tagaki paper, which is to add 1 to the
* number we are encoding when it is even, returning a skew value indicating
* this, and having the caller compensate after doing the multiplication.
static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q) {
/* The approach below combines the signed-digit logic from Mike Hamburg's
* "Fast and compact elliptic-curve cryptography" (https://eprint.iacr.org/2012/309)
* Section 3.3, with the GLV endomorphism.
*
* In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
* particular, to ensure that the outputs from the endomorphism-split fit into
* 128 bits). If we negate, the parity of our number flips, affecting whether
* we want to add to the scalar to ensure that it's odd. */
flip = secp256k1_scalar_is_high(&s);
skew = flip ^ secp256k1_scalar_is_even(&s);
secp256k1_scalar_cadd_bit(&s, 0, skew);
global_sign = secp256k1_scalar_cond_negate(&s, flip);
* The idea there is to interpret the bits of a scalar as signs (1 = +, 0 = -), and compute a
* point multiplication in that fashion. Let v be an n-bit non-negative integer (0 <= v < 2^n),
* and v[i] its i'th bit (so v = sum(v[i] * 2^i, i=0..n-1)). Then define:
*
* C_l(v, A) = sum((2*v[i] - 1) * 2^i*A, i=0..l-1)
*
* Then it holds that C_l(v, A) = sum((2*v[i] - 1) * 2^i*A, i=0..l-1)
* = (2*sum(v[i] * 2^i, i=0..l-1) + 1 - 2^l) * A
* = (2*v + 1 - 2^l) * A
*
* Thus, one can compute q*A as C_256((q + 2^256 - 1) / 2, A). This is the basis for the
* paper's signed-digit multi-comb algorithm for multiplication using a precomputed table.
*
* It is appealing to try to combine this with the GLV optimization: the idea that a scalar
* s can be written as s1 + lambda*s2, where lambda is a curve-specific constant such that
* lambda*A is easy to compute, and where s1 and s2 are small. In particular we have the
* secp256k1_scalar_split_lambda function which performs such a split with the resulting s1
* and s2 in range (-2^128, 2^128) mod n. This does work, but is uninteresting:
*
* To compute q*A:
* - Let s1, s2 = split_lambda(q)
* - Let R1 = C_256((s1 + 2^256 - 1) / 2, A)
* - Let R2 = C_256((s2 + 2^256 - 1) / 2, lambda*A)
* - Return R1 + R2
*
* The issue is that while s1 and s2 are small-range numbers, (s1 + 2^256 - 1) / 2 (mod n)
* and (s2 + 2^256 - 1) / 2 (mod n) are not, undoing the benefit of the splitting.
*
* To make it work, we want to modify the input scalar q first, before splitting, and then only
* add a 2^128 offset of the split results (so that they end up in the single 129-bit range
* [0,2^129]). A slightly smaller offset would work due to the bounds on the split, but we pick
* 2^128 for simplicity. Let s be the scalar fed to split_lambda, and f(q) the function to
* compute it from q:
*
* To compute q*A:
* - Compute s = f(q)
* - Let s1, s2 = split_lambda(s)
* - Let v1 = s1 + 2^128 (mod n)
* - Let v2 = s2 + 2^128 (mod n)
* - Let R1 = C_l(v1, A)
* - Let R2 = C_l(v2, lambda*A)
* - Return R1 + R2
*
* l will thus need to be at least 129, but we may overshoot by a few bits (see
* further), so keep it as a variable.
*
* To solve for s, we reason:
* q*A = R1 + R2
* <=> q*A = C_l(s1 + 2^128, A) + C_l(s2 + 2^128, lambda*A)
* <=> q*A = (2*(s1 + 2^128) + 1 - 2^l) * A + (2*(s2 + 2^128) + 1 - 2^l) * lambda*A
* <=> q*A = (2*(s1 + s2*lambda) + (2^129 + 1 - 2^l) * (1 + lambda)) * A
* <=> q = 2*(s1 + s2*lambda) + (2^129 + 1 - 2^l) * (1 + lambda) (mod n)
* <=> q = 2*s + (2^129 + 1 - 2^l) * (1 + lambda) (mod n)
* <=> s = (q + (2^l - 2^129 - 1) * (1 + lambda)) / 2 (mod n)
* <=> f(q) = (q + K) / 2 (mod n)
* where K = (2^l - 2^129 - 1)*(1 + lambda) (mod n)
*
* We will process the computation of C_l(v1, A) and C_l(v2, lambda*A) in groups of
* ECMULT_CONST_GROUP_SIZE, so we set l to the smallest multiple of ECMULT_CONST_GROUP_SIZE
* that is not less than 129; this equals ECMULT_CONST_BITS.
*/
/* 4 */
u_last = secp256k1_scalar_shr_int(&s, w);
do {
int even;
/* 4.1 4.4 */
u = secp256k1_scalar_shr_int(&s, w);
/* 4.2 */
even = ((u & 1) == 0);
/* In contrast to the original algorithm, u_last is always > 0 and
* therefore we do not need to check its sign. In particular, it's easy
* to see that u_last is never < 0 because u is never < 0. Moreover,
* u_last is never = 0 because u is never even after a loop
* iteration. The same holds analogously for the initial value of
* u_last (in the first loop iteration). */
VERIFY_CHECK(u_last > 0);
VERIFY_CHECK((u_last & 1) == 1);
u += even;
u_last -= even * (1 << w);
/* 4.3, adapted for global sign change */
wnaf[word++] = u_last * global_sign;
u_last = u;
} while (word * w < size);
wnaf[word] = u * global_sign;
VERIFY_CHECK(secp256k1_scalar_is_zero(&s));
VERIFY_CHECK(word == WNAF_SIZE_BITS(size, w));
return skew;
}
static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar) {
secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
secp256k1_ge tmpa;
secp256k1_fe Z;
int skew_1;
secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)];
int skew_lam;
secp256k1_scalar q_1, q_lam;
int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
int i;
/* The offset to add to s1 and s2 to make them non-negative. Equal to 2^128. */
static const secp256k1_scalar S_OFFSET = SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0);
secp256k1_scalar s, v1, v2;
secp256k1_ge pre_a[ECMULT_CONST_TABLE_SIZE];
secp256k1_ge pre_a_lam[ECMULT_CONST_TABLE_SIZE];
secp256k1_fe global_z;
int group, i;
/* We're allowed to be non-constant time in the point, and the code below (in particular,
* secp256k1_ecmult_const_odd_multiples_table_globalz) cannot deal with infinity in a
* constant-time manner anyway. */
if (secp256k1_ge_is_infinity(a)) {
secp256k1_gej_set_infinity(r);
return;
}
/* build wnaf representation for q. */
/* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
/* Compute v1 and v2. */
secp256k1_scalar_add(&s, q, &secp256k1_ecmult_const_K);
secp256k1_scalar_half(&s, &s);
secp256k1_scalar_split_lambda(&v1, &v2, &s);
secp256k1_scalar_add(&v1, &v1, &S_OFFSET);
secp256k1_scalar_add(&v2, &v2, &S_OFFSET);
/* Calculate odd multiples of a.
#ifdef VERIFY
/* Verify that v1 and v2 are in range [0, 2^129-1]. */
for (i = 129; i < 256; ++i) {
VERIFY_CHECK(secp256k1_scalar_get_bits(&v1, i, 1) == 0);
VERIFY_CHECK(secp256k1_scalar_get_bits(&v2, i, 1) == 0);
}
#endif
/* Calculate odd multiples of A and A*lambda.
* All multiples are brought to the same Z 'denominator', which is stored
in Z. Due to secp256k1's isomorphism we can do all operations pretending
in global_z. Due to secp256k1's isomorphism we can do all operations pretending
* that the Z coordinate was 1, use affine addition formulae, and correct
* the Z coordinate of the result once at the end.
*/
VERIFY_CHECK(!a->infinity);
secp256k1_gej_set_ge(r, a);
secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r);
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_fe_normalize_weak(&pre_a[i].y);
}
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_ecmult_const_odd_multiples_table_globalz(pre_a, &global_z, r);
for (i = 0; i < ECMULT_CONST_TABLE_SIZE; i++) {
secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
/* first loop iteration (separated out so we can directly set r, rather
* than having it start at infinity, get doubled several times, then have
* its new value added to it) */
i = wnaf_1[WNAF_SIZE_BITS(128, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A);
secp256k1_gej_set_ge(r, &tmpa);
i = wnaf_lam[WNAF_SIZE_BITS(128, WINDOW_A - 1)];
VERIFY_CHECK(i != 0);
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A);
secp256k1_gej_add_ge(r, r, &tmpa);
/* remaining loop iterations */
for (i = WNAF_SIZE_BITS(128, WINDOW_A - 1) - 1; i >= 0; i--) {
int n;
/* Next, we compute r = C_l(v1, A) + C_l(v2, lambda*A).
*
* We proceed in groups of ECMULT_CONST_GROUP_SIZE bits, operating on that many bits
* at a time, from high in v1, v2 to low. Call these bits1 (from v1) and bits2 (from v2).
*
* Now note that ECMULT_CONST_TABLE_GET_GE(&t, pre_a, bits1) loads into t a point equal
* to C_{ECMULT_CONST_GROUP_SIZE}(bits1, A), and analogously for pre_lam_a / bits2.
* This means that all we need to do is add these looked up values together, multiplied
* by 2^(ECMULT_CONST_GROUP_SIZE * group).
*/
for (group = ECMULT_CONST_GROUPS - 1; group >= 0; --group) {
/* Using the _var get_bits function is ok here, since it's only variable in offset and count, not in the scalar. */
unsigned int bits1 = secp256k1_scalar_get_bits_var(&v1, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE);
unsigned int bits2 = secp256k1_scalar_get_bits_var(&v2, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE);
secp256k1_ge t;
int j;
for (j = 0; j < WINDOW_A - 1; ++j) {
secp256k1_gej_double(r, r);
ECMULT_CONST_TABLE_GET_GE(&t, pre_a, bits1);
if (group == ECMULT_CONST_GROUPS - 1) {
/* Directly set r in the first iteration. */
secp256k1_gej_set_ge(r, &t);
} else {
/* Shift the result so far up. */
for (j = 0; j < ECMULT_CONST_GROUP_SIZE; ++j) {
secp256k1_gej_double(r, r);
}
secp256k1_gej_add_ge(r, r, &t);
}
n = wnaf_1[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
n = wnaf_lam[i];
ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A);
VERIFY_CHECK(n != 0);
secp256k1_gej_add_ge(r, r, &tmpa);
ECMULT_CONST_TABLE_GET_GE(&t, pre_a_lam, bits2);
secp256k1_gej_add_ge(r, r, &t);
}
{
/* Correct for wNAF skew */
secp256k1_gej tmpj;
secp256k1_ge_neg(&tmpa, &pre_a[0]);
secp256k1_gej_add_ge(&tmpj, r, &tmpa);
secp256k1_gej_cmov(r, &tmpj, skew_1);
secp256k1_ge_neg(&tmpa, &pre_a_lam[0]);
secp256k1_gej_add_ge(&tmpj, r, &tmpa);
secp256k1_gej_cmov(r, &tmpj, skew_lam);
}
secp256k1_fe_mul(&r->z, &r->z, &Z);
/* Map the result back to the secp256k1 curve from the isomorphic curve. */
secp256k1_fe_mul(&r->z, &r->z, &global_z);
}
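The construction above rests on the identity C_l(v, A) = (2*v + 1 - 2^l)*A derived in the comment. A standalone sanity sketch (illustrative only; plain integers stand in for curve points, which is valid here because the identity is linear in A):

```C
#include <assert.h>

/* C_l(v, A) = sum((2*v_i - 1) * 2^i * A, i=0..l-1), v_i = i-th bit of v. */
static long C_l(unsigned int v, int l, long A) {
    long acc = 0;
    int i;
    for (i = 0; i < l; i++) {
        acc += (((v >> i) & 1) ? 1L : -1L) * (1L << i) * A;
    }
    return acc;
}

int main(void) {
    long A = 7; /* arbitrary stand-in for the point A */
    unsigned int v;
    for (v = 0; v < 64; v++) {
        /* Check C_l(v, A) = (2*v + 1 - 2^l)*A for l = 6. */
        assert(C_l(v, 6, A) == (2L * (long)v + 1 - 64) * A);
    }
    return 0;
}
```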
static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n, const secp256k1_fe *d, const secp256k1_scalar *q, int known_on_curve) {
@ -296,9 +347,7 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
secp256k1_fe_mul(&g, &g, n);
if (d) {
secp256k1_fe b;
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero(d));
#endif
secp256k1_fe_sqr(&b, d);
VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */
secp256k1_fe_mul_int(&b, SECP256K1_B);
@ -331,13 +380,9 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
p.infinity = 0;
/* Perform x-only EC multiplication of P with q. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_scalar_is_zero(q));
#endif
secp256k1_ecmult_const(&rj, &p, q);
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_gej_is_infinity(&rj));
#endif
/* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to
* (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate

src/secp256k1/src/field.h

@ -184,7 +184,8 @@ static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b);
*/
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
/** Set a field element equal to a provided 32-byte big endian value, reducing it.
/** Set a field element equal to the element represented by a provided 32-byte big endian value
* interpreted modulo p.
*
* On input, r does not need to be initialized. a must be a pointer to an initialized 32-byte array.
* On output, r = a (mod p). It will have magnitude 1, and not be normalized.
@ -345,8 +346,10 @@ static int secp256k1_fe_is_square_var(const secp256k1_fe *a);
/** Check invariants on a field element (no-op unless VERIFY is enabled). */
static void secp256k1_fe_verify(const secp256k1_fe *a);
#define SECP256K1_FE_VERIFY(a) secp256k1_fe_verify(a)
/** Check that magnitude of a is at most m (no-op unless VERIFY is enabled). */
static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m);
#define SECP256K1_FE_VERIFY_MAGNITUDE(a, m) secp256k1_fe_verify_magnitude(a, m)
#endif /* SECP256K1_FIELD_H */

src/secp256k1/src/field_10x26_impl.h

@ -403,11 +403,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
#else
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#else
#define VERIFY_BITS(x, n) do { } while(0)
#endif
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
uint64_t c, d;

src/secp256k1/src/field_5x52_asm_impl.h (file deleted, 504 lines)

@ -1,504 +0,0 @@
/***********************************************************************
* Copyright (c) 2013-2014 Diederik Huys, Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
/**
* Changelog:
* - March 2013, Diederik Huys: original version
* - November 2014, Pieter Wuille: updated to use Peter Dettman's parallel multiplication algorithm
* - December 2014, Pieter Wuille: converted from YASM to GCC inline assembly
*/
#ifndef SECP256K1_FIELD_INNER5X52_IMPL_H
#define SECP256K1_FIELD_INNER5X52_IMPL_H
#include "util.h"
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c
* r15:rcx = d
* r10-r14 = a0-a4
* rbx = b
* rdi = r
* rsi = a / t?
*/
uint64_t tmp1, tmp2, tmp3;
__asm__ __volatile__(
"movq 0(%%rsi),%%r10\n"
"movq 8(%%rsi),%%r11\n"
"movq 16(%%rsi),%%r12\n"
"movq 24(%%rsi),%%r13\n"
"movq 32(%%rsi),%%r14\n"
/* d += a3 * b0 */
"movq 0(%%rbx),%%rax\n"
"mulq %%r13\n"
"movq %%rax,%%rcx\n"
"movq %%rdx,%%r15\n"
/* d += a2 * b1 */
"movq 8(%%rbx),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a1 * b2 */
"movq 16(%%rbx),%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d = a0 * b3 */
"movq 24(%%rbx),%%rax\n"
"mulq %%r10\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* c = a4 * b4 */
"movq 32(%%rbx),%%rax\n"
"mulq %%r14\n"
"movq %%rax,%%r8\n"
"movq %%rdx,%%r9\n"
/* d += (c & M) * R */
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* c >>= 52 (%%r8 only) */
"shrdq $52,%%r9,%%r8\n"
/* t3 (tmp1) = d & M */
"movq %%rcx,%%rsi\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rsi\n"
"movq %%rsi,%q1\n"
/* d >>= 52 */
"shrdq $52,%%r15,%%rcx\n"
"xorq %%r15,%%r15\n"
/* d += a4 * b0 */
"movq 0(%%rbx),%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a3 * b1 */
"movq 8(%%rbx),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a2 * b2 */
"movq 16(%%rbx),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a1 * b3 */
"movq 24(%%rbx),%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a0 * b4 */
"movq 32(%%rbx),%%rax\n"
"mulq %%r10\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += c * R */
"movq %%r8,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* t4 = d & M (%%rsi) */
"movq %%rcx,%%rsi\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rsi\n"
/* d >>= 52 */
"shrdq $52,%%r15,%%rcx\n"
"xorq %%r15,%%r15\n"
/* tx = t4 >> 48 (tmp3) */
"movq %%rsi,%%rax\n"
"shrq $48,%%rax\n"
"movq %%rax,%q3\n"
/* t4 &= (M >> 4) (tmp2) */
"movq $0xffffffffffff,%%rax\n"
"andq %%rax,%%rsi\n"
"movq %%rsi,%q2\n"
/* c = a0 * b0 */
"movq 0(%%rbx),%%rax\n"
"mulq %%r10\n"
"movq %%rax,%%r8\n"
"movq %%rdx,%%r9\n"
/* d += a4 * b1 */
"movq 8(%%rbx),%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a3 * b2 */
"movq 16(%%rbx),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a2 * b3 */
"movq 24(%%rbx),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a1 * b4 */
"movq 32(%%rbx),%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* u0 = d & M (%%rsi) */
"movq %%rcx,%%rsi\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rsi\n"
/* d >>= 52 */
"shrdq $52,%%r15,%%rcx\n"
"xorq %%r15,%%r15\n"
/* u0 = (u0 << 4) | tx (%%rsi) */
"shlq $4,%%rsi\n"
"movq %q3,%%rax\n"
"orq %%rax,%%rsi\n"
/* c += u0 * (R >> 4) */
"movq $0x1000003d1,%%rax\n"
"mulq %%rsi\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* r[0] = c & M */
"movq %%r8,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq %%rax,0(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* c += a1 * b0 */
"movq 0(%%rbx),%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* c += a0 * b1 */
"movq 8(%%rbx),%%rax\n"
"mulq %%r10\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d += a4 * b2 */
"movq 16(%%rbx),%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a3 * b3 */
"movq 24(%%rbx),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a2 * b4 */
"movq 32(%%rbx),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* c += (d & M) * R */
"movq %%rcx,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d >>= 52 */
"shrdq $52,%%r15,%%rcx\n"
"xorq %%r15,%%r15\n"
/* r[1] = c & M */
"movq %%r8,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq %%rax,8(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* c += a2 * b0 */
"movq 0(%%rbx),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* c += a1 * b1 */
"movq 8(%%rbx),%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* c += a0 * b2 (last use of %%r10 = a0) */
"movq 16(%%rbx),%%rax\n"
"mulq %%r10\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */
"movq %q2,%%rsi\n"
"movq %q1,%%r10\n"
/* d += a4 * b3 */
"movq 24(%%rbx),%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* d += a3 * b4 */
"movq 32(%%rbx),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rcx\n"
"adcq %%rdx,%%r15\n"
/* c += (d & M) * R */
"movq %%rcx,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d >>= 52 (%%rcx only) */
"shrdq $52,%%r15,%%rcx\n"
/* r[2] = c & M */
"movq %%r8,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq %%rax,16(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* c += t3 */
"addq %%r10,%%r8\n"
/* c += d * R */
"movq %%rcx,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* r[3] = c & M */
"movq %%r8,%%rax\n"
"movq $0xfffffffffffff,%%rdx\n"
"andq %%rdx,%%rax\n"
"movq %%rax,24(%%rdi)\n"
/* c >>= 52 (%%r8 only) */
"shrdq $52,%%r9,%%r8\n"
/* c += t4 (%%r8 only) */
"addq %%rsi,%%r8\n"
/* r[4] = c */
"movq %%r8,32(%%rdi)\n"
: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3)
: "b"(b), "D"(r)
: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
);
}
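
(For orientation, not part of the file: the magic constants above follow from the shape of
the field prime. With p = 2^256 - 0x1000003D1 and field elements stored as five 52-bit limbs,

    2^256 == 0x1000003D1 (mod p)   =>   2^260 == 0x1000003D1 * 2^4 = 0x1000003D10 (mod p),

so the overflow limb of weight 2^260 folds back in at each "d += c * R" step, while
"c += u0 * (R >> 4)" uses R >> 4 = 0x1000003D1 to compensate for u0 having been shifted left
by 4 bits beforehand.)
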
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
/**
* Registers: rdx:rax = multiplication accumulator
* r9:r8 = c
* rcx:rbx = d
* r10-r14 = a0-a4
* r15 = M (0xfffffffffffff)
* rdi = r
* rsi = a / t?
*/
uint64_t tmp1, tmp2, tmp3;
__asm__ __volatile__(
"movq 0(%%rsi),%%r10\n"
"movq 8(%%rsi),%%r11\n"
"movq 16(%%rsi),%%r12\n"
"movq 24(%%rsi),%%r13\n"
"movq 32(%%rsi),%%r14\n"
"movq $0xfffffffffffff,%%r15\n"
/* d = (a0*2) * a3 */
"leaq (%%r10,%%r10,1),%%rax\n"
"mulq %%r13\n"
"movq %%rax,%%rbx\n"
"movq %%rdx,%%rcx\n"
/* d += (a1*2) * a2 */
"leaq (%%r11,%%r11,1),%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* c = a4 * a4 */
"movq %%r14,%%rax\n"
"mulq %%r14\n"
"movq %%rax,%%r8\n"
"movq %%rdx,%%r9\n"
/* d += (c & M) * R */
"andq %%r15,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* c >>= 52 (%%r8 only) */
"shrdq $52,%%r9,%%r8\n"
/* t3 (tmp1) = d & M */
"movq %%rbx,%%rsi\n"
"andq %%r15,%%rsi\n"
"movq %%rsi,%q1\n"
/* d >>= 52 */
"shrdq $52,%%rcx,%%rbx\n"
"xorq %%rcx,%%rcx\n"
/* a4 *= 2 */
"addq %%r14,%%r14\n"
/* d += a0 * a4 */
"movq %%r10,%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* d += (a1*2) * a3 */
"leaq (%%r11,%%r11,1),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* d += a2 * a2 */
"movq %%r12,%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* d += c * R */
"movq %%r8,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* t4 = d & M (%%rsi) */
"movq %%rbx,%%rsi\n"
"andq %%r15,%%rsi\n"
/* d >>= 52 */
"shrdq $52,%%rcx,%%rbx\n"
"xorq %%rcx,%%rcx\n"
/* tx = t4 >> 48 (tmp3) */
"movq %%rsi,%%rax\n"
"shrq $48,%%rax\n"
"movq %%rax,%q3\n"
/* t4 &= (M >> 4) (tmp2) */
"movq $0xffffffffffff,%%rax\n"
"andq %%rax,%%rsi\n"
"movq %%rsi,%q2\n"
/* c = a0 * a0 */
"movq %%r10,%%rax\n"
"mulq %%r10\n"
"movq %%rax,%%r8\n"
"movq %%rdx,%%r9\n"
/* d += a1 * a4 */
"movq %%r11,%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* d += (a2*2) * a3 */
"leaq (%%r12,%%r12,1),%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* u0 = d & M (%%rsi) */
"movq %%rbx,%%rsi\n"
"andq %%r15,%%rsi\n"
/* d >>= 52 */
"shrdq $52,%%rcx,%%rbx\n"
"xorq %%rcx,%%rcx\n"
/* u0 = (u0 << 4) | tx (%%rsi) */
"shlq $4,%%rsi\n"
"movq %q3,%%rax\n"
"orq %%rax,%%rsi\n"
/* c += u0 * (R >> 4) */
"movq $0x1000003d1,%%rax\n"
"mulq %%rsi\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* r[0] = c & M */
"movq %%r8,%%rax\n"
"andq %%r15,%%rax\n"
"movq %%rax,0(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* a0 *= 2 */
"addq %%r10,%%r10\n"
/* c += a0 * a1 */
"movq %%r10,%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d += a2 * a4 */
"movq %%r12,%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* d += a3 * a3 */
"movq %%r13,%%rax\n"
"mulq %%r13\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* c += (d & M) * R */
"movq %%rbx,%%rax\n"
"andq %%r15,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d >>= 52 */
"shrdq $52,%%rcx,%%rbx\n"
"xorq %%rcx,%%rcx\n"
/* r[1] = c & M */
"movq %%r8,%%rax\n"
"andq %%r15,%%rax\n"
"movq %%rax,8(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* c += a0 * a2 (last use of %%r10) */
"movq %%r10,%%rax\n"
"mulq %%r12\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */
"movq %q2,%%rsi\n"
"movq %q1,%%r10\n"
/* c += a1 * a1 */
"movq %%r11,%%rax\n"
"mulq %%r11\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d += a3 * a4 */
"movq %%r13,%%rax\n"
"mulq %%r14\n"
"addq %%rax,%%rbx\n"
"adcq %%rdx,%%rcx\n"
/* c += (d & M) * R */
"movq %%rbx,%%rax\n"
"andq %%r15,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* d >>= 52 (%%rbx only) */
"shrdq $52,%%rcx,%%rbx\n"
/* r[2] = c & M */
"movq %%r8,%%rax\n"
"andq %%r15,%%rax\n"
"movq %%rax,16(%%rdi)\n"
/* c >>= 52 */
"shrdq $52,%%r9,%%r8\n"
"xorq %%r9,%%r9\n"
/* c += t3 */
"addq %%r10,%%r8\n"
/* c += d * R */
"movq %%rbx,%%rax\n"
"movq $0x1000003d10,%%rdx\n"
"mulq %%rdx\n"
"addq %%rax,%%r8\n"
"adcq %%rdx,%%r9\n"
/* r[3] = c & M */
"movq %%r8,%%rax\n"
"andq %%r15,%%rax\n"
"movq %%rax,24(%%rdi)\n"
/* c >>= 52 (%%r8 only) */
"shrdq $52,%%r9,%%r8\n"
/* c += t4 (%%r8 only) */
"addq %%rsi,%%r8\n"
/* r[4] = c */
"movq %%r8,32(%%rdi)\n"
: "+S"(a), "=&m"(tmp1), "=&m"(tmp2), "=&m"(tmp3)
: "D"(r)
: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"
);
}
#endif /* SECP256K1_FIELD_INNER5X52_IMPL_H */
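
(A side note on the squaring routine: since a_i*a_j = a_j*a_i, the 25 limb products of the
general multiplication collapse to

    a^2 = sum_i a_i^2 + sum_{i<j} 2*a_i*a_j,

i.e. 5 squares plus 10 doubled cross terms -- 15 multiplications in total. The doubling is
visible above as the "leaq (%%rX,%%rX,1)" operands and the "a4 *= 2" / "a0 *= 2" additions.)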

View File

@ -12,11 +12,7 @@
#include "field.h"
#include "modinv64_impl.h"
#if defined(USE_ASM_X86_64)
#include "field_5x52_asm_impl.h"
#else
#include "field_5x52_int128_impl.h"
#endif
#ifdef VERIFY
static void secp256k1_fe_impl_verify(const secp256k1_fe *a) {

View File

@ -12,13 +12,8 @@
#include "int128.h"
#include "util.h"
#ifdef VERIFY
#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
#define VERIFY_BITS_128(x, n) VERIFY_CHECK(secp256k1_u128_check_bits((x), (n)))
#else
#define VERIFY_BITS(x, n) do { } while(0)
#define VERIFY_BITS_128(x, n) do { } while(0)
#endif
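
(After this hunk the two macros are defined unconditionally; presumably VERIFY_CHECK itself
compiles to nothing outside VERIFY builds, so no empty #else branch is needed. As a reading
aid, a self-contained sketch of what VERIFY_BITS asserts -- hypothetical names, not library
code:)

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for VERIFY_BITS: x must fit in n bits, i.e. x < 2^n. */
#define SKETCH_VERIFY_BITS(x, n) assert(((x) >> (n)) == 0)

int main(void) {
    const uint64_t M = 0xFFFFFFFFFFFFFULL; /* 2^52 - 1, the 52-bit limb mask */
    uint64_t d = 0x123456789ABCDEF0ULL;    /* arbitrary accumulator value */
    uint64_t u0 = d & M;                   /* keep the low 52 bits */
    SKETCH_VERIFY_BITS(u0, 52);            /* holds by construction */
    return 0;
}
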
SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
secp256k1_uint128 c, d;
@ -89,18 +84,18 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
secp256k1_u128_accum_mul(&d, a2, b[3]);
secp256k1_u128_accum_mul(&d, a3, b[2]);
secp256k1_u128_accum_mul(&d, a4, b[1]);
VERIFY_BITS_128(&d, 115);
VERIFY_BITS_128(&d, 114);
/* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
u0 = secp256k1_u128_to_u64(&d) & M; secp256k1_u128_rshift(&d, 52);
VERIFY_BITS(u0, 52);
VERIFY_BITS_128(&d, 63);
VERIFY_BITS_128(&d, 62);
/* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
/* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
u0 = (u0 << 4) | tx;
VERIFY_BITS(u0, 56);
/* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
secp256k1_u128_accum_mul(&c, u0, R >> 4);
VERIFY_BITS_128(&c, 115);
VERIFY_BITS_128(&c, 113);
/* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */
r[0] = secp256k1_u128_to_u64(&c) & M; secp256k1_u128_rshift(&c, 52);
VERIFY_BITS(r[0], 52);
@ -159,7 +154,7 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t
SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) {
secp256k1_uint128 c, d;
uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4];
int64_t t3, t4, tx, u0;
uint64_t t3, t4, tx, u0;
const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL;
VERIFY_BITS(a[0], 56);

View File

@ -20,12 +20,11 @@
SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe na;
#ifdef VERIFY
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
secp256k1_fe_verify_magnitude(a, 1);
secp256k1_fe_verify_magnitude(b, 31);
#endif
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY(b);
SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
SECP256K1_FE_VERIFY_MAGNITUDE(b, 31);
secp256k1_fe_negate(&na, a, 1);
secp256k1_fe_add(&na, b);
return secp256k1_fe_normalizes_to_zero(&na);
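
(Reading aid: the equality test is algebraic -- it computes b + (-a) and checks that the sum
normalizes to zero mod p, so neither input needs to be fully normalized beforehand.)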
@ -44,11 +43,9 @@ static int secp256k1_fe_sqrt(secp256k1_fe * SECP256K1_RESTRICT r, const secp256k
secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
int j, ret;
#ifdef VERIFY
VERIFY_CHECK(r != a);
secp256k1_fe_verify(a);
secp256k1_fe_verify_magnitude(a, 8);
#endif
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
/** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
* { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
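
(Reading aid: the exponent (p + 1)/4 works because p == 3 (mod 4). For a nonzero square a,
Euler's criterion gives a^((p-1)/2) = 1, hence (a^((p+1)/4))^2 = a * a^((p-1)/2) = a, so
exponentiation by (p + 1)/4 yields a square root.)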
@ -151,11 +148,11 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) { (void)
static void secp256k1_fe_impl_verify(const secp256k1_fe *a);
static void secp256k1_fe_verify(const secp256k1_fe *a) {
/* Magnitude between 0 and 32. */
secp256k1_fe_verify_magnitude(a, 32);
SECP256K1_FE_VERIFY_MAGNITUDE(a, 32);
/* Normalized is 0 or 1. */
VERIFY_CHECK((a->normalized == 0) || (a->normalized == 1));
/* If normalized, magnitude must be 0 or 1. */
if (a->normalized) secp256k1_fe_verify_magnitude(a, 1);
if (a->normalized) SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
/* Invoke implementation-specific checks. */
secp256k1_fe_impl_verify(a);
}
@ -168,59 +165,71 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) {
static void secp256k1_fe_impl_normalize(secp256k1_fe *r);
SECP256K1_INLINE static void secp256k1_fe_normalize(secp256k1_fe *r) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
secp256k1_fe_impl_normalize(r);
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r);
SECP256K1_INLINE static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
secp256k1_fe_impl_normalize_weak(r);
r->magnitude = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r);
SECP256K1_INLINE static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
secp256k1_fe_impl_normalize_var(r);
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r);
SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
return secp256k1_fe_impl_normalizes_to_zero(r);
}
static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r);
SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
return secp256k1_fe_impl_normalizes_to_zero_var(r);
}
static void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a);
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
secp256k1_fe_impl_set_int(r, a);
r->magnitude = (a != 0);
r->normalized = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_add_int(secp256k1_fe *r, int a);
SECP256K1_INLINE static void secp256k1_fe_add_int(secp256k1_fe *r, int a) {
VERIFY_CHECK(0 <= a && a <= 0x7FFF);
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
secp256k1_fe_impl_add_int(r, a);
r->magnitude += 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_clear(secp256k1_fe *a);
@ -228,29 +237,33 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
a->magnitude = 0;
a->normalized = 1;
secp256k1_fe_impl_clear(a);
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
}
static int secp256k1_fe_impl_is_zero(const secp256k1_fe *a);
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(a->normalized);
return secp256k1_fe_impl_is_zero(a);
}
static int secp256k1_fe_impl_is_odd(const secp256k1_fe *a);
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(a->normalized);
return secp256k1_fe_impl_is_odd(a);
}
static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
SECP256K1_INLINE static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY(b);
VERIFY_CHECK(a->normalized);
VERIFY_CHECK(b->normalized);
return secp256k1_fe_impl_cmp_var(a, b);
}
@ -259,7 +272,8 @@ SECP256K1_INLINE static void secp256k1_fe_set_b32_mod(secp256k1_fe *r, const uns
secp256k1_fe_impl_set_b32_mod(r, a);
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a);
@ -267,7 +281,7 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
if (secp256k1_fe_impl_set_b32_limit(r, a)) {
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
return 1;
} else {
/* Mark the output field element as invalid. */
@ -278,83 +292,97 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a);
SECP256K1_INLINE static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(a->normalized);
secp256k1_fe_impl_get_b32(r, a);
}
static void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m);
SECP256K1_INLINE static void secp256k1_fe_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m) {
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(m >= 0 && m <= 31);
secp256k1_fe_verify_magnitude(a, m);
SECP256K1_FE_VERIFY_MAGNITUDE(a, m);
secp256k1_fe_impl_negate_unchecked(r, a, m);
r->magnitude = m + 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_mul_int_unchecked(secp256k1_fe *r, int a);
SECP256K1_INLINE static void secp256k1_fe_mul_int_unchecked(secp256k1_fe *r, int a) {
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
VERIFY_CHECK(a >= 0 && a <= 32);
VERIFY_CHECK(a*r->magnitude <= 32);
secp256k1_fe_impl_mul_int_unchecked(r, a);
r->magnitude *= a;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_add(secp256k1_fe *r, const secp256k1_fe *a);
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
secp256k1_fe_verify(r);
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(r);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(r->magnitude + a->magnitude <= 32);
secp256k1_fe_impl_add(r, a);
r->magnitude += a->magnitude;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
SECP256K1_INLINE static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
secp256k1_fe_verify(a);
secp256k1_fe_verify(b);
secp256k1_fe_verify_magnitude(a, 8);
secp256k1_fe_verify_magnitude(b, 8);
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY(b);
SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
SECP256K1_FE_VERIFY_MAGNITUDE(b, 8);
VERIFY_CHECK(r != b);
VERIFY_CHECK(a != b);
secp256k1_fe_impl_mul(r, a, b);
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_sqr(secp256k1_fe *r, const secp256k1_fe *a);
SECP256K1_INLINE static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
secp256k1_fe_verify(a);
secp256k1_fe_verify_magnitude(a, 8);
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
secp256k1_fe_impl_sqr(r, a);
r->magnitude = 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
SECP256K1_INLINE static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
VERIFY_CHECK(flag == 0 || flag == 1);
secp256k1_fe_verify(a);
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(a);
SECP256K1_FE_VERIFY(r);
secp256k1_fe_impl_cmov(r, a, flag);
if (a->magnitude > r->magnitude) r->magnitude = a->magnitude;
if (!a->normalized) r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
SECP256K1_INLINE static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
secp256k1_fe_verify(a);
SECP256K1_FE_VERIFY(a);
VERIFY_CHECK(a->normalized);
secp256k1_fe_impl_to_storage(r, a);
}
@ -363,36 +391,42 @@ SECP256K1_INLINE static void secp256k1_fe_from_storage(secp256k1_fe *r, const se
secp256k1_fe_impl_from_storage(r, a);
r->magnitude = 1;
r->normalized = 1;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_inv(secp256k1_fe *r, const secp256k1_fe *x);
SECP256K1_INLINE static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x) {
int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
secp256k1_fe_verify(x);
SECP256K1_FE_VERIFY(x);
secp256k1_fe_impl_inv(r, x);
r->magnitude = x->magnitude > 0;
r->normalized = 1;
VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_inv_var(secp256k1_fe *r, const secp256k1_fe *x);
SECP256K1_INLINE static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
secp256k1_fe_verify(x);
SECP256K1_FE_VERIFY(x);
secp256k1_fe_impl_inv_var(r, x);
r->magnitude = x->magnitude > 0;
r->normalized = 1;
VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static int secp256k1_fe_impl_is_square_var(const secp256k1_fe *x);
SECP256K1_INLINE static int secp256k1_fe_is_square_var(const secp256k1_fe *x) {
int ret;
secp256k1_fe tmp = *x, sqrt;
secp256k1_fe_verify(x);
SECP256K1_FE_VERIFY(x);
ret = secp256k1_fe_impl_is_square_var(x);
secp256k1_fe_normalize_weak(&tmp);
VERIFY_CHECK(ret == secp256k1_fe_sqrt(&sqrt, &tmp));
@ -403,20 +437,24 @@ static void secp256k1_fe_impl_get_bounds(secp256k1_fe* r, int m);
SECP256K1_INLINE static void secp256k1_fe_get_bounds(secp256k1_fe* r, int m) {
VERIFY_CHECK(m >= 0);
VERIFY_CHECK(m <= 32);
secp256k1_fe_impl_get_bounds(r, m);
r->magnitude = m;
r->normalized = (m == 0);
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
static void secp256k1_fe_impl_half(secp256k1_fe *r);
SECP256K1_INLINE static void secp256k1_fe_half(secp256k1_fe *r) {
secp256k1_fe_verify(r);
secp256k1_fe_verify_magnitude(r, 31);
SECP256K1_FE_VERIFY(r);
SECP256K1_FE_VERIFY_MAGNITUDE(r, 31);
secp256k1_fe_impl_half(r);
r->magnitude = (r->magnitude >> 1) + 1;
r->normalized = 0;
secp256k1_fe_verify(r);
SECP256K1_FE_VERIFY(r);
}
#endif /* defined(VERIFY) */

View File

@ -102,6 +102,9 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
*/
static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const secp256k1_fe *zr);
/** Check two group elements (affine) for equality in variable time. */
static int secp256k1_ge_eq_var(const secp256k1_ge *a, const secp256k1_ge *b);
/** Set a group element (affine) equal to the point at infinity. */
static void secp256k1_ge_set_infinity(secp256k1_ge *r);
@ -114,6 +117,9 @@ static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a);
/** Check two group elements (jacobian) for equality in variable time. */
static int secp256k1_gej_eq_var(const secp256k1_gej *a, const secp256k1_gej *b);
/** Check two group elements (jacobian and affine) for equality in variable time. */
static int secp256k1_gej_eq_ge_var(const secp256k1_gej *a, const secp256k1_ge *b);
/** Compare the X coordinate of a group element (jacobian).
* The magnitude of the group element's X coordinate must not exceed 31. */
static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a);
@ -181,8 +187,10 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
/** Check invariants on an affine group element (no-op unless VERIFY is enabled). */
static void secp256k1_ge_verify(const secp256k1_ge *a);
#define SECP256K1_GE_VERIFY(a) secp256k1_ge_verify(a)
/** Check invariants on a Jacobian group element (no-op unless VERIFY is enabled). */
static void secp256k1_gej_verify(const secp256k1_gej *a);
#define SECP256K1_GEJ_VERIFY(a) secp256k1_gej_verify(a)
#endif /* SECP256K1_GROUP_H */

View File

@ -74,26 +74,22 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_G;
/* End of section generated by sage/gen_exhaustive_groups.sage. */
static void secp256k1_ge_verify(const secp256k1_ge *a) {
#ifdef VERIFY
secp256k1_fe_verify(&a->x);
secp256k1_fe_verify(&a->y);
secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
SECP256K1_FE_VERIFY(&a->x);
SECP256K1_FE_VERIFY(&a->y);
SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
#endif
(void)a;
}
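
(Context for the wholesale guard removal in this file: as the group.h hunk above shows,
SECP256K1_GE_VERIFY(a) is defined unconditionally as secp256k1_ge_verify(a), and without
VERIFY that function's body reduces to the (void)a; no-op. A bare

    SECP256K1_GE_VERIFY(r);  /* expands to secp256k1_ge_verify(r); empty without VERIFY */

therefore costs nothing in non-VERIFY builds, which is why the per-call-site #ifdef VERIFY
fences can be dropped throughout the functions below.)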
static void secp256k1_gej_verify(const secp256k1_gej *a) {
#ifdef VERIFY
secp256k1_fe_verify(&a->x);
secp256k1_fe_verify(&a->y);
secp256k1_fe_verify(&a->z);
secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
secp256k1_fe_verify_magnitude(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
SECP256K1_FE_VERIFY(&a->x);
SECP256K1_FE_VERIFY(&a->y);
SECP256K1_FE_VERIFY(&a->z);
SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
SECP256K1_FE_VERIFY_MAGNITUDE(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
#endif
(void)a;
}
@ -101,8 +97,8 @@ static void secp256k1_gej_verify(const secp256k1_gej *a) {
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
secp256k1_fe zi2;
secp256k1_fe zi3;
secp256k1_gej_verify(a);
secp256k1_fe_verify(zi);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_FE_VERIFY(zi);
VERIFY_CHECK(!a->infinity);
secp256k1_fe_sqr(&zi2, zi);
@ -111,15 +107,15 @@ static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, c
secp256k1_fe_mul(&r->y, &a->y, &zi3);
r->infinity = a->infinity;
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
/* Set r to the affine coordinates of Jacobian point (a.x, a.y, 1/zi). */
static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, const secp256k1_fe *zi) {
secp256k1_fe zi2;
secp256k1_fe zi3;
secp256k1_ge_verify(a);
secp256k1_fe_verify(zi);
SECP256K1_GE_VERIFY(a);
SECP256K1_FE_VERIFY(zi);
VERIFY_CHECK(!a->infinity);
secp256k1_fe_sqr(&zi2, zi);
@ -128,39 +124,39 @@ static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, con
secp256k1_fe_mul(&r->y, &a->y, &zi3);
r->infinity = a->infinity;
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
secp256k1_fe_verify(x);
secp256k1_fe_verify(y);
SECP256K1_FE_VERIFY(x);
SECP256K1_FE_VERIFY(y);
r->infinity = 0;
r->x = *x;
r->y = *y;
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
return a->infinity;
}
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
*r = *a;
secp256k1_fe_normalize_weak(&r->y);
secp256k1_fe_negate(&r->y, &r->y, 1);
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe z2, z3;
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
r->infinity = a->infinity;
secp256k1_fe_inv(&a->z, &a->z);
@ -172,13 +168,13 @@ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
r->x = a->x;
r->y = a->y;
secp256k1_gej_verify(a);
secp256k1_ge_verify(r);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(r);
}
static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe z2, z3;
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
if (secp256k1_gej_is_infinity(a)) {
secp256k1_ge_set_infinity(r);
@ -193,8 +189,8 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
secp256k1_fe_set_int(&a->z, 1);
secp256k1_ge_set_xy(r, &a->x, &a->y);
secp256k1_gej_verify(a);
secp256k1_ge_verify(r);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(r);
}
static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
@ -203,7 +199,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
size_t last_i = SIZE_MAX;
#ifdef VERIFY
for (i = 0; i < len; i++) {
secp256k1_gej_verify(&a[i]);
SECP256K1_GEJ_VERIFY(&a[i]);
}
#endif
@ -245,7 +241,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
#ifdef VERIFY
for (i = 0; i < len; i++) {
secp256k1_ge_verify(&r[i]);
SECP256K1_GE_VERIFY(&r[i]);
}
#endif
}
@ -255,8 +251,8 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
secp256k1_fe zs;
#ifdef VERIFY
for (i = 0; i < len; i++) {
secp256k1_ge_verify(&a[i]);
secp256k1_fe_verify(&zr[i]);
SECP256K1_GE_VERIFY(&a[i]);
SECP256K1_FE_VERIFY(&zr[i]);
}
#endif
@ -278,7 +274,7 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
#ifdef VERIFY
for (i = 0; i < len; i++) {
secp256k1_ge_verify(&a[i]);
SECP256K1_GE_VERIFY(&a[i]);
}
#endif
}
@ -289,7 +285,7 @@ static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
secp256k1_fe_clear(&r->y);
secp256k1_fe_clear(&r->z);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
@ -297,7 +293,7 @@ static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static void secp256k1_gej_clear(secp256k1_gej *r) {
@ -306,7 +302,7 @@ static void secp256k1_gej_clear(secp256k1_gej *r) {
secp256k1_fe_clear(&r->y);
secp256k1_fe_clear(&r->z);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_ge_clear(secp256k1_ge *r) {
@ -314,13 +310,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
secp256k1_fe_clear(&r->x);
secp256k1_fe_clear(&r->y);
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
secp256k1_fe x2, x3;
int ret;
secp256k1_fe_verify(x);
SECP256K1_FE_VERIFY(x);
r->x = *x;
secp256k1_fe_sqr(&x2, x);
@ -333,45 +329,72 @@ static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int o
secp256k1_fe_negate(&r->y, &r->y, 1);
}
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
return ret;
}
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
r->infinity = a->infinity;
r->x = a->x;
r->y = a->y;
secp256k1_fe_set_int(&r->z, 1);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static int secp256k1_gej_eq_var(const secp256k1_gej *a, const secp256k1_gej *b) {
secp256k1_gej tmp;
secp256k1_gej_verify(b);
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(b);
SECP256K1_GEJ_VERIFY(a);
secp256k1_gej_neg(&tmp, a);
secp256k1_gej_add_var(&tmp, &tmp, b, NULL);
return secp256k1_gej_is_infinity(&tmp);
}
static int secp256k1_gej_eq_ge_var(const secp256k1_gej *a, const secp256k1_ge *b) {
secp256k1_gej tmp;
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(b);
secp256k1_gej_neg(&tmp, a);
secp256k1_gej_add_ge_var(&tmp, &tmp, b, NULL);
return secp256k1_gej_is_infinity(&tmp);
}
static int secp256k1_ge_eq_var(const secp256k1_ge *a, const secp256k1_ge *b) {
secp256k1_fe tmp;
SECP256K1_GE_VERIFY(a);
SECP256K1_GE_VERIFY(b);
if (a->infinity != b->infinity) return 0;
if (a->infinity) return 1;
tmp = a->x;
secp256k1_fe_normalize_weak(&tmp);
if (!secp256k1_fe_equal(&tmp, &b->x)) return 0;
tmp = a->y;
secp256k1_fe_normalize_weak(&tmp);
if (!secp256k1_fe_equal(&tmp, &b->y)) return 0;
return 1;
}
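
(secp256k1_ge_eq_var is new in this update. The weak normalization brings a's coordinates
down to magnitude 1, matching the bound secp256k1_fe_equal asserts on its first argument in
the field_impl.h hunk earlier. A minimal usage sketch, with an illustrative value:

    secp256k1_ge p = secp256k1_ge_const_g;                        /* any valid point */
    VERIFY_CHECK(secp256k1_ge_eq_var(&p, &secp256k1_ge_const_g)); /* a point equals itself */

The updated ellswift tests near the end of this diff call it the same way via CHECK().)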
static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) {
secp256k1_fe r;
secp256k1_fe_verify(x);
secp256k1_gej_verify(a);
#ifdef VERIFY
SECP256K1_FE_VERIFY(x);
SECP256K1_GEJ_VERIFY(a);
VERIFY_CHECK(!a->infinity);
#endif
secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x);
return secp256k1_fe_equal(&r, &a->x);
}
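
(Reading aid: the comparison avoids a field inversion. In Jacobian coordinates the affine X
of a is a.x / a.z^2, so

    x == a.x / a.z^2   <=>   x * a.z^2 == a.x,

which is exactly what the sqr/mul pair computes before calling secp256k1_fe_equal.)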
static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
r->infinity = a->infinity;
r->x = a->x;
@ -380,18 +403,18 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
secp256k1_fe_normalize_weak(&r->y);
secp256k1_fe_negate(&r->y, &r->y, 1);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
return a->infinity;
}
static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
secp256k1_fe y2, x3;
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
if (a->infinity) {
return 0;
@ -406,7 +429,7 @@ static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp256k1_gej *a) {
/* Operations: 3 mul, 4 sqr, 8 add/half/mul_int/negate */
secp256k1_fe l, s, t;
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
r->infinity = a->infinity;
@ -435,11 +458,11 @@ static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp25
secp256k1_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */
secp256k1_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(a);
/** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
* Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
@ -466,14 +489,14 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
secp256k1_gej_double(r, a);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
/* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t;
secp256k1_gej_verify(a);
secp256k1_gej_verify(b);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GEJ_VERIFY(b);
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
@ -530,14 +553,14 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
/* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
secp256k1_gej_verify(a);
secp256k1_ge_verify(b);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(b);
if (a->infinity) {
VERIFY_CHECK(rzr == NULL);
@ -592,16 +615,16 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
secp256k1_gej_verify(r);
if (rzr != NULL) secp256k1_fe_verify(rzr);
SECP256K1_GEJ_VERIFY(r);
if (rzr != NULL) SECP256K1_FE_VERIFY(rzr);
}
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
/* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
secp256k1_gej_verify(a);
secp256k1_ge_verify(b);
secp256k1_fe_verify(bzinv);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(b);
SECP256K1_FE_VERIFY(bzinv);
if (a->infinity) {
secp256k1_fe bzinv2, bzinv3;
@ -611,7 +634,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
secp256k1_fe_set_int(&r->z, 1);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
return;
}
if (b->infinity) {
@ -663,7 +686,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
secp256k1_fe_mul(&h3, &h3, &s1);
secp256k1_fe_add(&r->y, &h3);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
@ -672,8 +695,8 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
secp256k1_fe m_alt, rr_alt;
int degenerate;
secp256k1_gej_verify(a);
secp256k1_ge_verify(b);
SECP256K1_GEJ_VERIFY(a);
SECP256K1_GE_VERIFY(b);
VERIFY_CHECK(!b->infinity);
/* In:
@ -801,17 +824,15 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
* Then r->infinity = ((y1 + y2)Z == 0) = (y1 == -y2) = false. */
r->infinity = secp256k1_fe_normalizes_to_zero(&r->z);
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
/* Operations: 4 mul, 1 sqr */
secp256k1_fe zz;
secp256k1_gej_verify(r);
secp256k1_fe_verify(s);
#ifdef VERIFY
SECP256K1_GEJ_VERIFY(r);
SECP256K1_FE_VERIFY(s);
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(s));
#endif
secp256k1_fe_sqr(&zz, s);
secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
@ -819,12 +840,12 @@ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
secp256k1_fe x, y;
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
VERIFY_CHECK(!a->infinity);
x = a->x;
@ -840,19 +861,19 @@ static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storag
secp256k1_fe_from_storage(&r->y, &a->y);
r->infinity = 0;
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static SECP256K1_INLINE void secp256k1_gej_cmov(secp256k1_gej *r, const secp256k1_gej *a, int flag) {
secp256k1_gej_verify(r);
secp256k1_gej_verify(a);
SECP256K1_GEJ_VERIFY(r);
SECP256K1_GEJ_VERIFY(a);
secp256k1_fe_cmov(&r->x, &a->x, flag);
secp256k1_fe_cmov(&r->y, &a->y, flag);
secp256k1_fe_cmov(&r->z, &a->z, flag);
r->infinity ^= (r->infinity ^ a->infinity) & flag;
secp256k1_gej_verify(r);
SECP256K1_GEJ_VERIFY(r);
}
static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
@ -861,19 +882,19 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
}
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
secp256k1_ge_verify(a);
SECP256K1_GE_VERIFY(a);
*r = *a;
secp256k1_fe_mul(&r->x, &r->x, &secp256k1_const_beta);
secp256k1_ge_verify(r);
SECP256K1_GE_VERIFY(r);
}
static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
#ifdef EXHAUSTIVE_TEST_ORDER
secp256k1_gej out;
int i;
secp256k1_ge_verify(ge);
SECP256K1_GE_VERIFY(ge);
/* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
secp256k1_gej_set_infinity(&out);
@ -885,7 +906,7 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
}
return secp256k1_gej_is_infinity(&out);
#else
secp256k1_ge_verify(ge);
SECP256K1_GE_VERIFY(ge);
(void)ge;
/* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
@ -907,9 +928,8 @@ static int secp256k1_ge_x_frac_on_curve_var(const secp256k1_fe *xn, const secp25
* (xn/xd)^3 + 7 is square <=> xd*xn^3 + 7*xd^4 is square (multiplying by xd^4, a square).
*/
secp256k1_fe r, t;
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(xd));
#endif
secp256k1_fe_mul(&r, xd, xn); /* r = xd*xn */
secp256k1_fe_sqr(&t, xn); /* t = xn^2 */
secp256k1_fe_mul(&r, &r, &t); /* r = xd*xn^3 */
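
(The squareness equivalence in the comment above, written out: (xn/xd)^3 + 7 =
(xn^3 + 7*xd^3)/xd^3, and multiplying through by the square xd^4 gives xd*xn^3 + 7*xd^4.
Scaling by a nonzero square preserves squareness, so one expression is a square exactly when
the other is.)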

View File

@ -144,7 +144,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
r->v[7] = r7;
r->v[8] = r8;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 30 == 0);
VERIFY_CHECK(r1 >> 30 == 0);
VERIFY_CHECK(r2 >> 30 == 0);
@ -156,7 +155,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
VERIFY_CHECK(r8 >> 30 == 0);
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
/* Data type for transition matrices (see section 3 of explanation).
@ -413,14 +411,13 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
int32_t di, ei, md, me, sd, se;
int64_t cd, ce;
int i;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */
VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d->v[8] >> 31;
se = e->v[8] >> 31;
@ -455,12 +452,11 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
/* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
d->v[8] = (int32_t)cd;
e->v[8] = (int32_t)ce;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
@ -550,25 +546,23 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
/* Update d,e using that transition matrix. */
secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -578,7 +572,6 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo);
@ -607,12 +600,12 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
/* Update d,e using that transition matrix. */
secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of g is 0, there is a chance g=0. */
if (g.v[0] == 0) {
@ -637,18 +630,17 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
g.v[len - 2] |= (uint32_t)gn << 30;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -658,7 +650,6 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
@ -697,12 +688,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
secp256k1_modinv32_trans2x2 t;
eta = secp256k1_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) {
@ -723,12 +713,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
cond |= gn;
/* If so, reduce length. */
if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */

View File

@ -144,7 +144,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
r->v[3] = r3;
r->v[4] = r4;
#ifdef VERIFY
VERIFY_CHECK(r0 >> 62 == 0);
VERIFY_CHECK(r1 >> 62 == 0);
VERIFY_CHECK(r2 >> 62 == 0);
@ -152,7 +151,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
VERIFY_CHECK(r4 >> 62 == 0);
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
#endif
}
/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
@ -216,7 +214,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
@ -224,7 +222,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
* 8*identity (which has determinant 2^6) means the overall output has determinant
* 2^65. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0));
#endif
return zeta;
}
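
(The 2^65 bound checked above is plain determinant arithmetic: determinants are
multiplicative, so 59 divstep matrices of determinant 2 compose to determinant 2^59, and the
initial 8*identity contributes det(8*I) = 8^2 = 2^6 for a 2x2 matrix, giving
2^59 * 2^6 = 2^65.)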
@ -301,13 +299,13 @@ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2, the
* aggregate of 62 of them will have determinant 2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0));
#endif
return eta;
}
@ -392,13 +390,13 @@ static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, u
t->v = (int64_t)v;
t->q = (int64_t)q;
t->r = (int64_t)r;
#ifdef VERIFY
/* The determinant of t must be a power of two. This guarantees that multiplication with t
* does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
* will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
* the aggregate of 62 of them will have determinant 2^62 or -2^62. */
VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1));
#endif
*jacp = jac;
return eta;
}
@ -417,14 +415,13 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
int64_t md, me, sd, se;
secp256k1_int128 cd, ce;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */
VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */
#endif
/* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
sd = d4 >> 63;
se = e4 >> 63;
@ -489,12 +486,11 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
/* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
d->v[4] = secp256k1_i128_to_i64(&cd);
e->v[4] = secp256k1_i128_to_i64(&ce);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
#endif
}
/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
@ -606,25 +602,23 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
/* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point sufficient iterations have been performed that g must have reached 0
* and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
* values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -634,7 +628,6 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
@ -663,12 +656,11 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
/* Update d,e using that transition matrix. */
secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of g is zero, there is a chance that g=0. */
if (g.v[0] == 0) {
@ -693,18 +685,17 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
g.v[len - 2] |= (uint64_t)gn << 62;
--len;
}
#ifdef VERIFY
VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
* the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
/* g == 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
/* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@ -714,7 +705,6 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif
/* Optionally negate d, normalize to [0,modulus), and return it. */
secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
@ -753,12 +743,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
secp256k1_modinv64_trans2x2 t;
eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac);
/* Update f,g using that transition matrix. */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
/* If the bottom limb of f is 1, there is a chance that f=1. */
if (f.v[0] == 1) {
@ -779,12 +768,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
cond |= gn;
/* If so, reduce length. */
if (cond == 0) --len;
#ifdef VERIFY
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
#endif
}
/* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. */

View File

@ -25,32 +25,19 @@ static int ecdh_hash_function_custom(unsigned char *output, const unsigned char
}
static void test_ecdh_api(void) {
/* Setup context that just counts errors */
secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
secp256k1_pubkey point;
unsigned char res[32];
unsigned char s_one[32] = { 0 };
int32_t ecount = 0;
s_one[31] = 1;
secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1);
CHECK(secp256k1_ec_pubkey_create(CTX, &point, s_one) == 1);
/* Check all NULLs are detected */
CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdh(tctx, res, NULL, s_one, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdh(tctx, res, &point, NULL, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdh(tctx, res, &point, s_one, NULL, NULL) == 1);
CHECK(ecount == 3);
/* Cleanup */
secp256k1_context_destroy(tctx);
CHECK(secp256k1_ecdh(CTX, res, &point, s_one, NULL, NULL) == 1);
CHECK_ILLEGAL(CTX, secp256k1_ecdh(CTX, NULL, &point, s_one, NULL, NULL));
CHECK_ILLEGAL(CTX, secp256k1_ecdh(CTX, res, NULL, s_one, NULL, NULL));
CHECK_ILLEGAL(CTX, secp256k1_ecdh(CTX, res, &point, NULL, NULL, NULL));
CHECK(secp256k1_ecdh(CTX, res, &point, s_one, NULL, NULL) == 1);
}
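
(CHECK_ILLEGAL bundles the callback bookkeeping that the deleted lines did by hand. A
hypothetical sketch of the idea -- not the test harness's actual definition:)

#define SKETCH_CHECK_ILLEGAL(ctx, expr) do { \
    int32_t calls = 0; \
    secp256k1_context_set_illegal_callback((ctx), counting_illegal_callback_fn, &calls); \
    CHECK((expr) == 0);  /* the call must fail... */ \
    CHECK(calls == 1);   /* ...after raising exactly one illegal-argument error */ \
    secp256k1_context_set_illegal_callback((ctx), NULL, NULL); /* restore the default */ \
} while(0)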
static void test_ecdh_generator_basepoint(void) {

View File

@ -126,9 +126,8 @@ static void secp256k1_ellswift_xswiftec_frac_var(secp256k1_fe *xn, secp256k1_fe
secp256k1_fe_mul(&l, &p, &u1); /* l = u*(g+s) */
secp256k1_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */
secp256k1_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */
#ifdef VERIFY
VERIFY_CHECK(secp256k1_ge_x_frac_on_curve_var(xn, &p));
#endif
/* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */
}
@ -193,10 +192,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_normalize_weak(&x);
secp256k1_fe_normalize_weak(&u);
#ifdef VERIFY
VERIFY_CHECK(c >= 0 && c < 8);
VERIFY_CHECK(secp256k1_ge_x_on_curve_var(&x));
#endif
if (!(c & 2)) {
/* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or
@ -230,9 +227,7 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
* that (-u-x)^3 + B is not square (the secp256k1_ge_x_on_curve_var(&m)
* test above would have failed). This is a contradiction, and thus the
* assumption s=0 is false. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&s));
#endif
/* If s is not square, fail. We have not fully computed s yet, but s is square iff
* -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is
@ -272,7 +267,11 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */
if (!secp256k1_fe_is_square_var(&q)) return 0;
ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */
#ifdef VERIFY
VERIFY_CHECK(ret);
#else
(void)ret;
#endif
/* If (c & 1) = 1 and r = 0, fail. */
if (EXPECT((c & 1) && secp256k1_fe_normalizes_to_zero_var(&r), 0)) return 0;
@ -320,10 +319,9 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
buf4[3] = cnt >> 24;
secp256k1_sha256_write(&hash, buf4, 4);
secp256k1_sha256_finalize(&hash, out32);
#ifdef VERIFY
/* Writing and finalizing together should trigger exactly one SHA256 compression. */
VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1));
#endif
}
/** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate.
@ -361,9 +359,8 @@ static void secp256k1_ellswift_xelligatorswift_var(unsigned char *u32, secp256k1
/* Since u is the output of a hash, it should practically never be 0. We could apply the
* u=0 to u=1 correction here too to deal with that case still, but it's such a low
* probability event that we do not bother. */
#ifdef VERIFY
VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&u));
#endif
/* Find a remainder t, and return it if found. */
if (EXPECT(secp256k1_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break;
}
@ -417,7 +414,11 @@ int secp256k1_ellswift_encode(const secp256k1_context *ctx, unsigned char *ell64
* BIP340 tagged hash with tag "secp256k1_ellswift_encode". */
secp256k1_ellswift_sha256_init_encode(&hash);
ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1);
#ifdef VERIFY
VERIFY_CHECK(ser_ret && ser_size == 33);
#else
(void)ser_ret;
#endif
secp256k1_sha256_write(&hash, p64, sizeof(p64));
secp256k1_sha256_write(&hash, rnd32, 32);

View File

@ -32,7 +32,7 @@ static void test_exhaustive_ellswift(const secp256k1_context *ctx, const secp256
/* Decode ellswift pubkey and check that it matches the precomputed group element. */
secp256k1_ellswift_decode(ctx, &pub_decoded, ell64);
secp256k1_pubkey_load(ctx, &ge_decoded, &pub_decoded);
ge_equals_ge(&ge_decoded, &group[i]);
CHECK(secp256k1_ge_eq_var(&ge_decoded, &group[i]));
}
}

View File

@ -237,7 +237,7 @@ void run_ellswift_tests(void) {
secp256k1_ellswift_decode(CTX, &pubkey2, ell64);
secp256k1_pubkey_load(CTX, &g2, &pubkey2);
/* Compare with original. */
ge_equals_ge(&g, &g2);
CHECK(secp256k1_ge_eq_var(&g, &g2));
}
/* Verify the behavior of secp256k1_ellswift_create */
for (i = 0; i < 400 * COUNT; i++) {
@ -259,7 +259,7 @@ void run_ellswift_tests(void) {
secp256k1_ellswift_decode(CTX, &pub, ell64);
secp256k1_pubkey_load(CTX, &dec, &pub);
secp256k1_ecmult(&res, NULL, &secp256k1_scalar_zero, &sec);
ge_equals_gej(&dec, &res);
CHECK(secp256k1_gej_eq_ge_var(&res, &dec));
}
/* Verify that secp256k1_ellswift_xdh computes the right shared X coordinate. */
for (i = 0; i < 800 * COUNT; i++) {
@ -285,7 +285,7 @@ void run_ellswift_tests(void) {
ret = secp256k1_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL);
CHECK(ret);
(void)secp256k1_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */
secp256k1_fe_verify(&share_x);
SECP256K1_FE_VERIFY(&share_x);
/* Compute seckey*pubkey directly. */
secp256k1_ecmult(&resj, &decj, &sec, NULL);
secp256k1_ge_set_gej(&res, &resj);

View File

@ -9,11 +9,6 @@
#include "../../../include/secp256k1_extrakeys.h"
static void set_counting_callbacks(secp256k1_context *ctx0, int *ecount) {
secp256k1_context_set_error_callback(ctx0, counting_illegal_callback_fn, ecount);
secp256k1_context_set_illegal_callback(ctx0, counting_illegal_callback_fn, ecount);
}
static void test_xonly_pubkey(void) {
secp256k1_pubkey pk;
secp256k1_xonly_pubkey xonly_pk, xonly_pk_tmp;
@ -28,10 +23,6 @@ static void test_xonly_pubkey(void) {
int pk_parity;
int i;
int ecount;
set_counting_callbacks(CTX, &ecount);
secp256k1_testrand256(sk);
memset(ones32, 0xFF, 32);
secp256k1_testrand256(xy_sk);
@ -39,16 +30,12 @@ static void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1);
/* Test xonly_pubkey_from_pubkey */
ecount = 0;
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, NULL, &pk_parity, &pk) == 0);
CHECK(ecount == 1);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_from_pubkey(CTX, NULL, &pk_parity, &pk));
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, NULL));
memset(&pk, 0, sizeof(pk));
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk));
/* Choose a secret key such that the resulting pubkey and xonly_pubkey match. */
memset(sk, 0, sizeof(sk));
@ -72,28 +59,21 @@ static void test_xonly_pubkey(void) {
CHECK(secp256k1_fe_equal(&pk1.y, &y) == 1);
/* Test xonly_pubkey_serialize and xonly_pubkey_parse */
ecount = 0;
CHECK(secp256k1_xonly_pubkey_serialize(CTX, NULL, &xonly_pk) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_serialize(CTX, buf32, NULL) == 0);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_serialize(CTX, NULL, &xonly_pk));
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_serialize(CTX, buf32, NULL));
CHECK(secp256k1_memcmp_var(buf32, zeros64, 32) == 0);
CHECK(ecount == 2);
{
/* A pubkey filled with 0s will fail to serialize due to pubkey_load
* special casing. */
secp256k1_xonly_pubkey pk_tmp;
memset(&pk_tmp, 0, sizeof(pk_tmp));
CHECK(secp256k1_xonly_pubkey_serialize(CTX, buf32, &pk_tmp) == 0);
/* pubkey_load calls illegal callback */
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_serialize(CTX, buf32, &pk_tmp));
}
/* pubkey_load called illegal callback */
CHECK(ecount == 3);
CHECK(secp256k1_xonly_pubkey_serialize(CTX, buf32, &xonly_pk) == 1);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_parse(CTX, NULL, buf32) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_parse(CTX, &xonly_pk, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_parse(CTX, NULL, buf32));
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_parse(CTX, &xonly_pk, NULL));
/* Serialization and parse roundtrip */
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, NULL, &pk) == 1);
@ -125,7 +105,6 @@ static void test_xonly_pubkey(void) {
CHECK(secp256k1_xonly_pubkey_parse(CTX, &xonly_pk, &rand33[1]) == 1);
}
}
CHECK(ecount == 2);
}
static void test_xonly_pubkey_comparison(void) {
@ -139,29 +118,26 @@ static void test_xonly_pubkey_comparison(void) {
};
secp256k1_xonly_pubkey pk1;
secp256k1_xonly_pubkey pk2;
int ecount = 0;
set_counting_callbacks(CTX, &ecount);
CHECK(secp256k1_xonly_pubkey_parse(CTX, &pk1, pk1_ser) == 1);
CHECK(secp256k1_xonly_pubkey_parse(CTX, &pk2, pk2_ser) == 1);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, NULL, &pk2) < 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, NULL) > 0);
CHECK(ecount == 2);
CHECK_ILLEGAL_VOID(CTX, CHECK(secp256k1_xonly_pubkey_cmp(CTX, NULL, &pk2) < 0));
CHECK_ILLEGAL_VOID(CTX, CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, NULL) > 0));
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk2, &pk2) == 0);
CHECK(ecount == 2);
memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0);
CHECK(ecount == 3);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0);
CHECK(ecount == 5);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0);
CHECK(ecount == 6);
CHECK_ILLEGAL_VOID(CTX, CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk2) < 0));
{
int32_t ecount = 0;
secp256k1_context_set_illegal_callback(CTX, counting_callback_fn, &ecount);
CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk1, &pk1) == 0);
CHECK(ecount == 2);
secp256k1_context_set_illegal_callback(CTX, NULL, NULL);
}
CHECK_ILLEGAL_VOID(CTX, CHECK(secp256k1_xonly_pubkey_cmp(CTX, &pk2, &pk1) > 0));
}
static void test_xonly_pubkey_tweak(void) {
@ -175,30 +151,20 @@ static void test_xonly_pubkey_tweak(void) {
unsigned char tweak[32];
int i;
int ecount;
set_counting_callbacks(CTX, &ecount);
memset(overflows, 0xff, sizeof(overflows));
secp256k1_testrand256(tweak);
secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, NULL, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, NULL, tweak) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add(CTX, NULL, &internal_xonly_pk, tweak));
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, NULL, tweak));
/* NULL internal_xonly_pk zeroes the output_pk */
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, NULL));
/* NULL tweak zeroes the output_pk */
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
@ -225,9 +191,7 @@ static void test_xonly_pubkey_tweak(void) {
/* Invalid pk with a valid tweak */
memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak));
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
}
@ -244,34 +208,23 @@ static void test_xonly_pubkey_tweak_check(void) {
int pk_parity;
unsigned char tweak[32];
int ecount;
set_counting_callbacks(CTX, &ecount);
memset(overflows, 0xff, sizeof(overflows));
secp256k1_testrand256(tweak);
secp256k1_testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
ecount = 0;
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &output_xonly_pk, &pk_parity, &output_pk) == 1);
CHECK(secp256k1_xonly_pubkey_serialize(CTX, buf32, &output_xonly_pk) == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, tweak) == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, NULL, pk_parity, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add_check(CTX, NULL, pk_parity, &internal_xonly_pk, tweak));
/* invalid pk_parity value */
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, 2, &internal_xonly_pk, tweak) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, NULL, tweak) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, NULL) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, NULL, tweak));
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add_check(CTX, buf32, pk_parity, &internal_xonly_pk, NULL));
memset(tweak, 1, sizeof(tweak));
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, NULL, &internal_pk) == 1);
@ -290,7 +243,6 @@ static void test_xonly_pubkey_tweak_check(void) {
CHECK(secp256k1_xonly_pubkey_tweak_add_check(CTX, output_pk32, pk_parity, &internal_xonly_pk, overflows) == 0);
CHECK(secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, overflows) == 0);
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
CHECK(ecount == 3);
}
/* Starts with an initial pubkey and recursively creates N_PUBKEYS - 1
@ -335,33 +287,22 @@ static void test_keypair(void) {
secp256k1_pubkey pk, pk_tmp;
secp256k1_xonly_pubkey xonly_pk, xonly_pk_tmp;
int pk_parity, pk_parity_tmp;
int ecount;
set_counting_callbacks(CTX, &ecount);
set_counting_callbacks(STATIC_CTX, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
memset(overflows, 0xFF, sizeof(overflows));
/* Test keypair_create */
ecount = 0;
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0);
CHECK(ecount == 0);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0);
CHECK(ecount == 0);
CHECK(secp256k1_keypair_create(CTX, NULL, sk) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_create(CTX, &keypair, NULL) == 0);
CHECK_ILLEGAL(CTX, secp256k1_keypair_create(CTX, NULL, sk));
CHECK_ILLEGAL(CTX, secp256k1_keypair_create(CTX, &keypair, NULL));
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(ecount == 2);
CHECK(secp256k1_keypair_create(STATIC_CTX, &keypair, sk) == 0);
CHECK_ILLEGAL(STATIC_CTX, secp256k1_keypair_create(STATIC_CTX, &keypair, sk));
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
CHECK(ecount == 3);
/* Invalid secret key */
CHECK(secp256k1_keypair_create(CTX, &keypair, zeros96) == 0);
@ -370,14 +311,11 @@ static void test_keypair(void) {
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
/* Test keypair_pub */
ecount = 0;
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(CTX, &pk, &keypair) == 1);
CHECK(secp256k1_keypair_pub(CTX, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_pub(CTX, &pk, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_keypair_pub(CTX, NULL, &keypair));
CHECK_ILLEGAL(CTX, secp256k1_keypair_pub(CTX, &pk, NULL));
CHECK(secp256k1_memcmp_var(zeros96, &pk, sizeof(pk)) == 0);
/* Using an invalid keypair is fine for keypair_pub */
@ -392,23 +330,19 @@ static void test_keypair(void) {
CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
/** Test keypair_xonly_pub **/
ecount = 0;
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, NULL, &pk_parity, &keypair) == 0);
CHECK(ecount == 1);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_pub(CTX, NULL, &pk_parity, &keypair));
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, NULL));
CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
/* Using an invalid keypair will set the xonly_pk to 0 (first reset
* xonly_pk). */
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1);
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 0);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair));
CHECK(secp256k1_memcmp_var(zeros96, &xonly_pk, sizeof(xonly_pk)) == 0);
CHECK(ecount == 3);
/** keypair holds the same xonly pubkey as pubkey_create **/
CHECK(secp256k1_ec_pubkey_create(CTX, &pk, sk) == 1);
@ -419,14 +353,11 @@ static void test_keypair(void) {
CHECK(pk_parity == pk_parity_tmp);
/* Test keypair_seckey */
ecount = 0;
secp256k1_testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_sec(CTX, sk_tmp, &keypair) == 1);
CHECK(secp256k1_keypair_sec(CTX, NULL, &keypair) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_sec(CTX, sk_tmp, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_keypair_sec(CTX, NULL, &keypair));
CHECK_ILLEGAL(CTX, secp256k1_keypair_sec(CTX, sk_tmp, NULL));
CHECK(secp256k1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0);
/* keypair returns the same seckey it got */
@ -439,9 +370,6 @@ static void test_keypair(void) {
memset(&keypair, 0, sizeof(keypair));
CHECK(secp256k1_keypair_sec(CTX, sk_tmp, &keypair) == 1);
CHECK(secp256k1_memcmp_var(zeros96, sk_tmp, sizeof(sk_tmp)) == 0);
secp256k1_context_set_error_callback(STATIC_CTX, NULL, NULL);
secp256k1_context_set_illegal_callback(STATIC_CTX, NULL, NULL);
}
static void test_keypair_add(void) {
@ -451,9 +379,6 @@ static void test_keypair_add(void) {
unsigned char zeros96[96] = { 0 };
unsigned char tweak[32];
int i;
int ecount = 0;
set_counting_callbacks(CTX, &ecount);
CHECK(sizeof(zeros96) == sizeof(keypair));
secp256k1_testrand256(sk);
@ -462,14 +387,10 @@ static void test_keypair_add(void) {
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, NULL, tweak) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, NULL, tweak));
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, &keypair, NULL));
/* This does not set the keypair to zeroes */
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) != 0);
@ -503,20 +424,16 @@ static void test_keypair_add(void) {
/* Invalid keypair with a valid tweak */
memset(&keypair, 0, sizeof(keypair));
secp256k1_testrand256(tweak);
ecount = 0;
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0);
CHECK(ecount == 1);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak));
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* Only seckey part of keypair invalid */
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
memset(&keypair, 0, 32);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak));
/* Only pubkey part of keypair invalid */
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
memset(&keypair.data[32], 0, 64);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak));
/* Check that the keypair_tweak_add implementation is correct */
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);

View File

@ -36,7 +36,6 @@ static void test_ecdsa_recovery_api(void) {
secp256k1_ecdsa_recoverable_signature recsig;
unsigned char privkey[32] = { 1 };
unsigned char message[32] = { 2 };
int32_t ecount = 0;
int recid = 0;
unsigned char sig[74];
unsigned char zero_privkey[32] = { 0 };
@ -45,86 +44,52 @@ static void test_ecdsa_recovery_api(void) {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
secp256k1_context_set_error_callback(CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount);
/* Construct and verify corresponding public key. */
CHECK(secp256k1_ec_seckey_verify(CTX, privkey) == 1);
CHECK(secp256k1_ec_pubkey_create(CTX, &pubkey, privkey) == 1);
/* Check bad contexts and NULLs for signing */
ecount = 0;
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, NULL, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, NULL, privkey, NULL, NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, NULL, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_ecdsa_sign_recoverable(STATIC_CTX, &recsig, message, privkey, NULL, NULL) == 0);
CHECK(ecount == 4);
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_sign_recoverable(CTX, NULL, message, privkey, NULL, NULL));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_sign_recoverable(CTX, &recsig, NULL, privkey, NULL, NULL));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, NULL, NULL, NULL));
CHECK_ILLEGAL(STATIC_CTX, secp256k1_ecdsa_sign_recoverable(STATIC_CTX, &recsig, message, privkey, NULL, NULL));
/* This will fail or succeed randomly, and in either case will not trigger an ARG_CHECK failure */
secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, recovery_test_nonce_function, NULL);
CHECK(ecount == 4);
/* These will all fail, but not in ARG_CHECK way */
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, zero_privkey, NULL, NULL) == 0);
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, over_privkey, NULL, NULL) == 0);
/* This one will succeed. */
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1);
CHECK(ecount == 4);
/* Check signing with a goofy nonce function */
/* Check bad contexts and NULLs for recovery */
ecount = 0;
CHECK(secp256k1_ecdsa_recover(CTX, &recpubkey, &recsig, message) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_ecdsa_recover(CTX, NULL, &recsig, message) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recover(CTX, &recpubkey, NULL, message) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recover(CTX, &recpubkey, &recsig, NULL) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recover(CTX, NULL, &recsig, message));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recover(CTX, &recpubkey, NULL, message));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recover(CTX, &recpubkey, &recsig, NULL));
/* Check NULLs for conversion */
CHECK(secp256k1_ecdsa_sign(CTX, &normal_sig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_convert(CTX, NULL, &recsig) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(CTX, &normal_sig, NULL) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_convert(CTX, NULL, &recsig));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_convert(CTX, &normal_sig, NULL));
CHECK(secp256k1_ecdsa_recoverable_signature_convert(CTX, &normal_sig, &recsig) == 1);
/* Check NULLs for de/serialization */
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &recsig, message, privkey, NULL, NULL) == 1);
ecount = 0;
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, NULL, &recid, &recsig) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, NULL, &recsig) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, NULL) == 0);
CHECK(ecount == 3);
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, NULL, &recid, &recsig));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, NULL, &recsig));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, NULL));
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &recsig) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, NULL, sig, recid) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, NULL, recid) == 0);
CHECK(ecount == 5);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, -1) == 0);
CHECK(ecount == 6);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, 5) == 0);
CHECK(ecount == 7);
/* overflow in signature will fail but not affect ecount */
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, NULL, sig, recid));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, NULL, recid));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, -1));
CHECK_ILLEGAL(CTX, secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, 5));
/* overflow in signature will not result in calling illegal_callback */
memcpy(sig, over_privkey, 32);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &recsig, sig, recid) == 0);
CHECK(ecount == 7);
/* cleanup */
secp256k1_context_set_error_callback(STATIC_CTX, NULL, NULL);
secp256k1_context_set_illegal_callback(STATIC_CTX, NULL, NULL);
}
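
For orientation, a hedged usage sketch of the recoverable-signature roundtrip these tests exercise (assumes CTX plus valid 32-byte seckey and msg32 arrays; not part of the diff):

secp256k1_ecdsa_recoverable_signature rs;
secp256k1_pubkey pk, rpk;
unsigned char sig64[64];
int recid;
CHECK(secp256k1_ec_pubkey_create(CTX, &pk, seckey) == 1);
CHECK(secp256k1_ecdsa_sign_recoverable(CTX, &rs, msg32, seckey, NULL, NULL) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig64, &recid, &rs) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &rs, sig64, recid) == 1);
CHECK(secp256k1_ecdsa_recover(CTX, &rpk, &rs, msg32) == 1);
CHECK(secp256k1_memcmp_var(&pk, &rpk, sizeof(pk)) == 0);   /* same key recovered */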
static void test_ecdsa_recovery_end_to_end(void) {

View File

@ -116,14 +116,6 @@ static void test_schnorrsig_api(void) {
secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
secp256k1_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL};
/** setup **/
int ecount = 0;
secp256k1_context_set_error_callback(CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_error_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount);
secp256k1_context_set_illegal_callback(STATIC_CTX, counting_illegal_callback_fn, &ecount);
secp256k1_testrand256(sk1);
secp256k1_testrand256(sk2);
secp256k1_testrand256(sk3);
@ -137,57 +129,30 @@ static void test_schnorrsig_api(void) {
memset(&zero_pk, 0, sizeof(zero_pk));
/** main test body **/
ecount = 0;
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_schnorrsig_sign32(CTX, NULL, msg, &keypairs[0], NULL) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, NULL, &keypairs[0], NULL) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, NULL, NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, &invalid_keypair, NULL) == 0);
CHECK(ecount == 4);
CHECK(secp256k1_schnorrsig_sign32(STATIC_CTX, sig, msg, &keypairs[0], NULL) == 0);
CHECK(ecount == 5);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign32(CTX, NULL, msg, &keypairs[0], NULL));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign32(CTX, sig, NULL, &keypairs[0], NULL));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign32(CTX, sig, msg, NULL, NULL));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign32(CTX, sig, msg, &invalid_keypair, NULL));
CHECK_ILLEGAL(STATIC_CTX, secp256k1_schnorrsig_sign32(STATIC_CTX, sig, msg, &keypairs[0], NULL));
ecount = 0;
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_schnorrsig_sign_custom(CTX, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign_custom(CTX, NULL, msg, sizeof(msg), &keypairs[0], &extraparams));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign_custom(CTX, sig, NULL, sizeof(msg), &keypairs[0], &extraparams));
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, NULL, 0, &keypairs[0], &extraparams) == 1);
CHECK(ecount == 2);
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), NULL, &extraparams) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0);
CHECK(ecount == 4);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), NULL, &extraparams));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &invalid_keypair, &extraparams));
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1);
CHECK(ecount == 4);
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0);
CHECK(ecount == 5);
CHECK(secp256k1_schnorrsig_sign_custom(STATIC_CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0);
CHECK(ecount == 6);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_sign_custom(CTX, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams));
CHECK_ILLEGAL(STATIC_CTX, secp256k1_schnorrsig_sign_custom(STATIC_CTX, sig, msg, sizeof(msg), &keypairs[0], &extraparams));
ecount = 0;
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, &keypairs[0], NULL) == 1);
CHECK(secp256k1_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &pk[0]) == 1);
CHECK(ecount == 0);
CHECK(secp256k1_schnorrsig_verify(CTX, NULL, msg, sizeof(msg), &pk[0]) == 0);
CHECK(ecount == 1);
CHECK(secp256k1_schnorrsig_verify(CTX, sig, NULL, sizeof(msg), &pk[0]) == 0);
CHECK(ecount == 2);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_verify(CTX, NULL, msg, sizeof(msg), &pk[0]));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_verify(CTX, sig, NULL, sizeof(msg), &pk[0]));
CHECK(secp256k1_schnorrsig_verify(CTX, sig, NULL, 0, &pk[0]) == 0);
CHECK(ecount == 2);
CHECK(secp256k1_schnorrsig_verify(CTX, sig, msg, sizeof(msg), NULL) == 0);
CHECK(ecount == 3);
CHECK(secp256k1_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &zero_pk) == 0);
CHECK(ecount == 4);
secp256k1_context_set_error_callback(STATIC_CTX, NULL, NULL);
secp256k1_context_set_illegal_callback(STATIC_CTX, NULL, NULL);
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_verify(CTX, sig, msg, sizeof(msg), NULL));
CHECK_ILLEGAL(CTX, secp256k1_schnorrsig_verify(CTX, sig, msg, sizeof(msg), &zero_pk));
}
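
One API nuance worth noting from the sign_custom tests: a zero-length message may be passed as NULL with msglen 0. A hedged sketch (assumes an initialized CTX, a keypair kp and its x-only pubkey xpk; not part of the diff):

unsigned char sig64[64];
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig64, NULL, 0, &kp, NULL) == 1);
CHECK(secp256k1_schnorrsig_verify(CTX, sig64, NULL, 0, &xpk) == 1);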
/* Checks that hash initialized by secp256k1_schnorrsig_sha256_tagged has the

View File

@ -25,7 +25,7 @@ static void secp256k1_scalar_clear(secp256k1_scalar *r);
/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
/** Access bits from a scalar. Not constant time. */
/** Access bits from a scalar. Not constant time in offset and count. */
static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
/** Set a scalar from a big endian byte array. The scalar will be reduced modulo group order `n`.
@ -54,10 +54,6 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
/** Multiply two scalars (modulo the group order). */
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
/** Shift a scalar right by some amount strictly between 0 and 16, returning
* the low bits that were shifted off */
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
/** Compute the inverse of a scalar (modulo the group order). */
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
@ -67,6 +63,9 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
/** Compute the complement of a scalar (modulo the group order). */
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Multiply a scalar with the multiplicative inverse of 2. */
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a);
/** Check whether a scalar equals zero. */
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a);
@ -101,5 +100,6 @@ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a
/** Check invariants on a scalar (no-op unless VERIFY is enabled). */
static void secp256k1_scalar_verify(const secp256k1_scalar *r);
#define SECP256K1_SCALAR_VERIFY(r) secp256k1_scalar_verify(r)
#endif /* SECP256K1_SCALAR_H */

View File

@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
r->d[2] = 0;
r->d[3] = 0;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
secp256k1_u128_accum_u64(&t, r->d[3]);
r->d[3] = secp256k1_u128_to_u64(&t);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
secp256k1_uint128 t;
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
secp256k1_u128_from_u64(&t, a->d[0]);
secp256k1_u128_accum_u64(&t, b->d[0]);
@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return overflow;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
secp256k1_uint128 t;
volatile int vflag = flag;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
@ -143,10 +143,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
r->d[3] = secp256k1_u128_to_u64(&t);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@ -160,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
*overflow = over;
}
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
secp256k1_write_be64(&bin[0], a->d[3]);
secp256k1_write_be64(&bin[8], a->d[2]);
@ -173,7 +171,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}
@ -181,7 +179,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
secp256k1_uint128 t;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
secp256k1_u128_from_u64(&t, ~a->d[0]);
secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@ -196,11 +194,52 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
/* Writing `/` for field division and `//` for integer division, we compute
*
* a/2 = (a - (a&1))/2 + (a&1)/2
* = (a >> 1) + (a&1 ? 1/2 : 0)
* = (a >> 1) + (a&1 ? n//2+1 : 0),
*
* where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n).
* For n//2, we have the constants SECP256K1_N_H_0, ...
*
* This sum does not overflow. The most extreme case is a = -2, the largest odd scalar. Here:
* - the left summand is: a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2
* - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2
* Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n.
*/
uint64_t mask = -(uint64_t)(a->d[0] & 1U);
secp256k1_uint128 t;
SECP256K1_SCALAR_VERIFY(a);
secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
secp256k1_u128_accum_u64(&t, (a->d[1] >> 1) | (a->d[2] << 63));
secp256k1_u128_accum_u64(&t, SECP256K1_N_H_1 & mask);
r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
secp256k1_u128_accum_u64(&t, (a->d[2] >> 1) | (a->d[3] << 63));
secp256k1_u128_accum_u64(&t, SECP256K1_N_H_2 & mask);
r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
r->d[3] = secp256k1_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask);
#ifdef VERIFY
/* The line above only computed the bottom 64 bits of r->d[3]; redo the computation
* in full 128 bits to make sure the top 64 bits are indeed zero. */
secp256k1_u128_accum_u64(&t, a->d[3] >> 1);
secp256k1_u128_accum_u64(&t, SECP256K1_N_H_3 & mask);
secp256k1_u128_rshift(&t, 64);
VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
SECP256K1_SCALAR_VERIFY(r);
#endif
}
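
To see the halving identity from the comment in action, here is a tiny standalone check (illustrative only; a small odd modulus stands in for the group order):

#include <assert.h>
#include <stdint.h>

int main(void) {
    const uint32_t n = 101;          /* small stand-in for the group order */
    const uint32_t half = n / 2 + 1; /* 1/2 mod n, since 2*(n/2+1) = n+1 = 1 (mod n) */
    uint32_t a;
    for (a = 0; a < n; a++) {
        uint32_t r = (a >> 1) + ((a & 1) ? half : 0);
        assert(r < n);               /* the sum never reaches n, as derived above */
        assert((2 * r) % n == a);    /* doubling recovers a, so r = a/2 (mod n) */
    }
    return 0;
}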
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
@ -208,7 +247,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
no |= (a->d[3] < SECP256K1_N_H_3);
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
@ -226,7 +265,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
uint64_t mask = -vflag;
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
secp256k1_uint128 t;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@ -241,7 +280,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return 2 * (mask == 0) - 1;
}
@ -800,33 +839,17 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8];
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);
secp256k1_scalar_verify(r);
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
secp256k1_scalar_verify(r);
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
ret = r->d[0] & ((1 << n) - 1);
r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
r->d[3] = (r->d[3] >> n);
secp256k1_scalar_verify(r);
return ret;
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar_verify(k);
SECP256K1_SCALAR_VERIFY(k);
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
@ -837,13 +860,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
r2->d[2] = 0;
r2->d[3] = 0;
secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
SECP256K1_SCALAR_VERIFY(r1);
SECP256K1_SCALAR_VERIFY(r2);
}
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
@ -853,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
VERIFY_CHECK(shift >= 256);
secp256k1_scalar_mul_512(l, a, b);
@ -867,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint64_t mask0, mask1;
volatile int vflag = flag;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
mask0 = vflag + ~((uint64_t)0);
@ -883,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@ -903,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
r->d[2] = a2 >> 4 | a3 << 58;
r->d[3] = a3 >> 6 | a4 << 56;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
const uint64_t M62 = UINT64_MAX >> 2;
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
r->v[0] = a0 & M62;
r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@ -928,16 +951,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@ -945,20 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return !(a->d[0] & 1);
}

View File

@ -59,18 +59,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
r->d[6] = 0;
r->d[7] = 0;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);
@ -121,15 +121,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_
t += (uint64_t)r->d[7];
r->d[7] = t & 0xFFFFFFFFUL;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
uint64_t t = (uint64_t)a->d[0] + b->d[0];
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
t += (uint64_t)a->d[1] + b->d[1];
@ -150,14 +150,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return overflow;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
uint64_t t;
volatile int vflag = flag;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(bit < 256);
bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
@ -178,10 +178,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
r->d[7] = t & 0xFFFFFFFFULL;
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK((t >> 32) == 0);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@ -199,11 +197,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
*overflow = over;
}
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
secp256k1_write_be32(&bin[0], a->d[7]);
secp256k1_write_be32(&bin[4], a->d[6]);
@ -216,7 +214,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
@ -224,7 +222,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
r->d[0] = t & nonzero; t >>= 32;
t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
@ -242,11 +240,59 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
r->d[7] = t & nonzero;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
/* Writing `/` for field division and `//` for integer division, we compute
*
* a/2 = (a - (a&1))/2 + (a&1)/2
* = (a >> 1) + (a&1 ? 1/2 : 0)
* = (a >> 1) + (a&1 ? n//2+1 : 0),
*
* where n is the group order and in the last equality we have used 1/2 = n//2+1 (mod n).
* For n//2, we have the constants SECP256K1_N_H_0, ...
*
* This sum does not overflow. The most extreme case is a = -2, the largest odd scalar. Here:
* - the left summand is: a >> 1 = (a - a&1)/2 = (n-2-1)//2 = (n-3)//2
* - the right summand is: a&1 ? n//2+1 : 0 = n//2+1 = (n-1)//2 + 2//2 = (n+1)//2
* Together they sum to (n-3)//2 + (n+1)//2 = (2n-2)//2 = n - 1, which is less than n.
*/
uint32_t mask = -(uint32_t)(a->d[0] & 1U);
uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31));
SECP256K1_SCALAR_VERIFY(a);
t += (SECP256K1_N_H_0 + 1U) & mask;
r->d[0] = t; t >>= 32;
t += (uint32_t)((a->d[1] >> 1) | (a->d[2] << 31));
t += SECP256K1_N_H_1 & mask;
r->d[1] = t; t >>= 32;
t += (uint32_t)((a->d[2] >> 1) | (a->d[3] << 31));
t += SECP256K1_N_H_2 & mask;
r->d[2] = t; t >>= 32;
t += (uint32_t)((a->d[3] >> 1) | (a->d[4] << 31));
t += SECP256K1_N_H_3 & mask;
r->d[3] = t; t >>= 32;
t += (uint32_t)((a->d[4] >> 1) | (a->d[5] << 31));
t += SECP256K1_N_H_4 & mask;
r->d[4] = t; t >>= 32;
t += (uint32_t)((a->d[5] >> 1) | (a->d[6] << 31));
t += SECP256K1_N_H_5 & mask;
r->d[5] = t; t >>= 32;
t += (uint32_t)((a->d[6] >> 1) | (a->d[7] << 31));
t += SECP256K1_N_H_6 & mask;
r->d[6] = t; t >>= 32;
r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask);
/* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation
* in full 64 bits to make sure the top 32 bits are indeed zero. */
VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0);
SECP256K1_SCALAR_VERIFY(r);
}
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
@ -254,7 +300,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
no |= (a->d[7] < SECP256K1_N_H_7);
yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
@ -278,7 +324,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
uint32_t mask = -vflag;
uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
r->d[0] = t & nonzero; t >>= 32;
t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@ -296,7 +342,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
r->d[7] = t & nonzero;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return 2 * (mask == 0) - 1;
}
@ -604,37 +650,17 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint32_t l[16];
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);
secp256k1_scalar_verify(r);
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
secp256k1_scalar_verify(r);
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
ret = r->d[0] & ((1 << n) - 1);
r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
r->d[7] = (r->d[7] >> n);
secp256k1_scalar_verify(r);
return ret;
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar_verify(k);
SECP256K1_SCALAR_VERIFY(k);
r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
@ -653,13 +679,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
r2->d[6] = 0;
r2->d[7] = 0;
secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
SECP256K1_SCALAR_VERIFY(r1);
SECP256K1_SCALAR_VERIFY(r2);
}
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}
@ -669,8 +695,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
VERIFY_CHECK(shift >= 256);
secp256k1_scalar_mul_512(l, a, b);
@ -687,13 +713,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint32_t mask0, mask1;
volatile int vflag = flag;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
mask0 = vflag + ~((uint32_t)0);
@ -707,7 +733,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
@ -736,14 +762,14 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
r->d[6] = a6 >> 12 | a7 << 18;
r->d[7] = a7 >> 14 | a8 << 16;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
const uint32_t M30 = UINT32_MAX >> 2;
const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
r->v[0] = a0 & M30;
r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@ -766,16 +792,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed30(&s, x);
secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed30(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@ -783,20 +807,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_to_signed30(&s, x);
secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed30(r, &s);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return !(a->d[0] & 1);
}

View File

@ -31,14 +31,12 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
int overflow;
secp256k1_scalar_set_b32(r, bin, &overflow);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return (!overflow) & (!secp256k1_scalar_is_zero(r));
}
static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
(void)r;
}
@ -63,7 +61,7 @@ static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
* (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
*/
static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
secp256k1_scalar_verify(k);
SECP256K1_SCALAR_VERIFY(k);
VERIFY_CHECK(r1 != k);
VERIFY_CHECK(r2 != k);
VERIFY_CHECK(r1 != r2);
@ -71,8 +69,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
*r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
*r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
SECP256K1_SCALAR_VERIFY(r1);
SECP256K1_SCALAR_VERIFY(r2);
}
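
A quick standalone check of the identity behind this split, r1 + lambda*r2 == k (mod n). The sketch assumes the order-13 exhaustive configuration, where lambda = 9 is a cube root of unity (9^3 = 729 ≡ 1 mod 13); other configurations use different constants:

    #include <assert.h>

    #define ORDER 13  /* assumed exhaustive test order */
    #define LAMBDA 9  /* assumed lambda for that order */

    int main(void) {
        unsigned k;
        for (k = 0; k < ORDER; k++) {
            unsigned r2 = (k + 5) % ORDER;
            unsigned r1 = (k + (ORDER - r2) * LAMBDA) % ORDER;
            assert((r1 + LAMBDA * r2) % ORDER == k); /* recombination recovers k */
        }
        return 0;
    }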
#else
/**
@ -155,7 +153,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
);
secp256k1_scalar_verify(k);
SECP256K1_SCALAR_VERIFY(k);
VERIFY_CHECK(r1 != k);
VERIFY_CHECK(r2 != k);
VERIFY_CHECK(r1 != r2);
@ -170,8 +168,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
secp256k1_scalar_negate(r1, r1);
secp256k1_scalar_add(r1, r1, k);
secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
SECP256K1_SCALAR_VERIFY(r1);
SECP256K1_SCALAR_VERIFY(r2);
#ifdef VERIFY
secp256k1_scalar_split_lambda_verify(r1, r2, k);
#endif


@ -1,5 +1,5 @@
/***********************************************************************
* Copyright (c) 2015 Andrew Poelstra *
* Copyright (c) 2015, 2022 Andrew Poelstra, Pieter Wuille *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
@ -12,6 +12,13 @@
/** A scalar modulo the group order of the secp256k1 curve. */
typedef uint32_t secp256k1_scalar;
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) (d0)
/* A compile-time constant equal to 2^32 (modulo order). */
#define SCALAR_2P32 ((0xffffffffUL % EXHAUSTIVE_TEST_ORDER) + 1U)
/* Compute a*2^32 + b (modulo order). */
#define SCALAR_HORNER(a, b) (((uint64_t)(a) * SCALAR_2P32 + (b)) % EXHAUSTIVE_TEST_ORDER)
/* Evaluates to the provided 256-bit constant reduced modulo order. */
#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER((d7), (d6)), (d5)), (d4)), (d3)), (d2)), (d1)), (d0))
#endif /* SECP256K1_SCALAR_REPR_H */
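
The Horner evaluation lets the low-order representation accept the same 8x32-bit constant syntax as the full-width backends. As a worked example, assume the order is 13: then 2^32 ≡ 9 (mod 13), so SCALAR_2P32 is 9 and a constant with d1 = 1 and all other limbs zero also reduces to 9. A standalone sketch with the macros copied from above (the order is an assumption for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define EXHAUSTIVE_TEST_ORDER 13  /* assumed; the real build chooses the order */
    #define SCALAR_2P32 ((0xffffffffUL % EXHAUSTIVE_TEST_ORDER) + 1U)
    #define SCALAR_HORNER(a, b) (((uint64_t)(a) * SCALAR_2P32 + (b)) % EXHAUSTIVE_TEST_ORDER)
    #define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) \
        SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER(SCALAR_HORNER((d7), (d6)), (d5)), (d4)), (d3)), (d2)), (d1)), (d0))

    int main(void) {
        assert(SCALAR_2P32 == 9);                                     /* 2^32 mod 13 */
        assert(SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 1, 0) == 9);  /* the value 2^32 */
        assert(SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 14) == 1); /* 14 mod 13 */
        return 0;
    }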


@ -14,7 +14,7 @@
#include <string.h>
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return !(*a & 1);
}
@ -24,11 +24,11 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r =
SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
*r = v % EXHAUSTIVE_TEST_ORDER;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
if (offset < 32)
return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
@ -37,7 +37,7 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_s
}
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return secp256k1_scalar_get_bits(a, offset, count);
}
@ -45,27 +45,25 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256
SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
*r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return *r < *b;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
if (flag && bit < 32)
*r += ((uint32_t)1 << bit);
secp256k1_scalar_verify(r);
#ifdef VERIFY
SECP256K1_SCALAR_VERIFY(r);
VERIFY_CHECK(bit < 32);
/* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
#endif
}
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@ -81,24 +79,24 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
}
if (overflow) *overflow = over;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
memset(bin, 0, 32);
bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
}
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return *a == 0;
}
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
if (*a == 0) {
*r = 0;
@ -106,65 +104,52 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
*r = EXHAUSTIVE_TEST_ORDER - *a;
}
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return *a == 1;
}
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
return *a > EXHAUSTIVE_TEST_ORDER / 2;
}
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
if (flag) secp256k1_scalar_negate(r, r);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
return flag ? -1 : 1;
}
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
*r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
secp256k1_scalar_verify(r);
}
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
secp256k1_scalar_verify(r);
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);
ret = *r & ((1 << n) - 1);
*r >>= n;
secp256k1_scalar_verify(r);
return ret;
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
*r1 = *a;
*r2 = 0;
secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
SECP256K1_SCALAR_VERIFY(r1);
SECP256K1_SCALAR_VERIFY(r2);
}
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_SCALAR_VERIFY(b);
return *a == *b;
}
@ -172,37 +157,45 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint32_t mask0, mask1;
volatile int vflag = flag;
secp256k1_scalar_verify(a);
SECP256K1_SCALAR_VERIFY(a);
SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));
mask0 = vflag + ~((uint32_t)0);
mask1 = ~mask0;
*r = (*r & mask0) | (*a & mask1);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
int i;
*r = 0;
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
*r = i;
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
/* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
* have a composite group order; fix it in exhaustive_tests.c). */
VERIFY_CHECK(*r != 0);
}
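
For example, with EXHAUSTIVE_TEST_ORDER = 13 the loop above finds the inverse of 2 to be 7, since 2 * 7 = 14 ≡ 1 (mod 13).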
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
secp256k1_scalar_verify(x);
SECP256K1_SCALAR_VERIFY(x);
secp256k1_scalar_inverse(r, x);
secp256k1_scalar_verify(r);
SECP256K1_SCALAR_VERIFY(r);
}
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
SECP256K1_SCALAR_VERIFY(a);
*r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1;
SECP256K1_SCALAR_VERIFY(r);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */
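
The halving above exploits the fact that the group order is odd: an even *a is shifted directly, while an odd *a first has the order added so the sum is even, and in both cases the result is a/2 modulo the order. For example, with order 13 and a = 5, (5 + 13) >> 1 = 9, and 2 * 9 = 18 ≡ 5 (mod 13). A standalone sketch of the same expression, with the order assumed for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define ORDER 13U /* assumed odd test order */

    /* a/2 mod ORDER: add ORDER first when a is odd so the right shift is exact. */
    static uint32_t scalar_half(uint32_t a) {
        return (a + ((-(uint32_t)(a & 1)) & ORDER)) >> 1;
    }

    int main(void) {
        uint32_t a;
        for (a = 0; a < ORDER; a++) {
            assert((2 * scalar_half(a)) % ORDER == a); /* doubling the half recovers a */
        }
        return 0;
    }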

File diff suppressed because it is too large.


@ -28,61 +28,11 @@
#include "testrand_impl.h"
#include "ecmult_compute_table_impl.h"
#include "ecmult_gen_compute_table_impl.h"
#include "testutil.h"
#include "util.h"
static int count = 2;
/** stolen from tests.c */
static void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
CHECK(a->infinity == b->infinity);
if (a->infinity) {
return;
}
CHECK(secp256k1_fe_equal(&a->x, &b->x));
CHECK(secp256k1_fe_equal(&a->y, &b->y));
}
static void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
secp256k1_fe z2s;
secp256k1_fe u1, u2, s1, s2;
CHECK(a->infinity == b->infinity);
if (a->infinity) {
return;
}
/* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
secp256k1_fe_sqr(&z2s, &b->z);
secp256k1_fe_mul(&u1, &a->x, &z2s);
u2 = b->x;
secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
s2 = b->y;
CHECK(secp256k1_fe_equal(&u1, &u2));
CHECK(secp256k1_fe_equal(&s1, &s2));
}
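
(The cross-multiplication works because the affine point (x, y) equals the Jacobian point (x*z^2, y*z^3, z) for any nonzero z, so equality can be decided without computing a field inverse.)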
static void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32_limit(x, bin)) {
return;
}
} while(1);
}
static void random_fe_non_zero(secp256k1_fe *nz) {
int tries = 10;
while (--tries >= 0) {
random_fe(nz);
secp256k1_fe_normalize(nz);
if (!secp256k1_fe_is_zero(nz)) {
break;
}
}
/* Infinitesimal probability of spurious failure here */
CHECK(tries >= 0);
}
/** END stolen from tests.c */
static uint32_t num_cores = 1;
static uint32_t this_core = 0;
@ -117,7 +67,7 @@ static void test_exhaustive_endomorphism(const secp256k1_ge *group) {
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_ge res;
secp256k1_ge_mul_lambda(&res, &group[i]);
ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
CHECK(secp256k1_ge_eq_var(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res));
}
}
@ -143,21 +93,21 @@ static void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_
secp256k1_gej tmp;
/* add_var */
secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
/* add_ge */
if (j > 0) {
secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
}
/* add_ge_var */
secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
/* add_zinv_var */
zless_gej.infinity = groupj[j].infinity;
zless_gej.x = groupj[j].x;
zless_gej.y = groupj[j].y;
secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
ge_equals_gej(&group[(i + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
}
}
@ -165,9 +115,9 @@ static void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_
for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
secp256k1_gej tmp;
secp256k1_gej_double(&tmp, &groupj[i]);
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER]));
secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
ge_equals_gej(&group[(2 * i) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER]));
}
/* Check negation */
@ -175,9 +125,9 @@ static void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_
secp256k1_ge tmp;
secp256k1_gej tmpj;
secp256k1_ge_neg(&tmp, &group[i]);
ge_equals_ge(&group[EXHAUSTIVE_TEST_ORDER - i], &tmp);
CHECK(secp256k1_ge_eq_var(&tmp, &group[EXHAUSTIVE_TEST_ORDER - i]));
secp256k1_gej_neg(&tmpj, &groupj[i]);
ge_equals_gej(&group[EXHAUSTIVE_TEST_ORDER - i], &tmpj);
CHECK(secp256k1_gej_eq_ge_var(&tmpj, &group[EXHAUSTIVE_TEST_ORDER - i]));
}
}
@ -194,8 +144,7 @@ static void test_exhaustive_ecmult(const secp256k1_ge *group, const secp256k1_ge
secp256k1_scalar_set_int(&ng, j);
secp256k1_ecmult(&tmp, &groupj[r_log], &na, &ng);
ge_equals_gej(&group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER]));
}
}
}
@ -213,7 +162,7 @@ static void test_exhaustive_ecmult(const secp256k1_ge *group, const secp256k1_ge
/* Test secp256k1_ecmult_const. */
secp256k1_ecmult_const(&tmp, &group[i], &ng);
ge_equals_gej(&group[(i * j) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * j) % EXHAUSTIVE_TEST_ORDER]));
if (i != 0 && j != 0) {
/* Test secp256k1_ecmult_const_xonly with all curve X coordinates, and xd=NULL. */
@ -265,7 +214,7 @@ static void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const sec
data.pt[1] = group[y];
secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
ge_equals_gej(&group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER], &tmp);
CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER]));
}
}
}


@ -0,0 +1,29 @@
/***********************************************************************
* Distributed under the MIT software license, see the accompanying *
* file COPYING or https://www.opensource.org/licenses/mit-license.php.*
***********************************************************************/
#ifndef SECP256K1_TESTUTIL_H
#define SECP256K1_TESTUTIL_H
#include "field.h"
#include "testrand.h"
#include "util.h"
static void random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
secp256k1_testrand256(bin);
if (secp256k1_fe_set_b32_limit(x, bin)) {
return;
}
} while(1);
}
static void random_fe_non_zero(secp256k1_fe *nz) {
do {
random_fe(nz);
} while (secp256k1_fe_is_zero(nz));
}
#endif /* SECP256K1_TESTUTIL_H */
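
A sketch of how a test might use these helpers, assuming the usual field API and the CHECK macro from the test harness (the function name below is illustrative):

    /* Draw a nonzero field element and check that z * (1/z) == 1. */
    static void test_random_fe_inverse(void) {
        secp256k1_fe z, zi, one;
        random_fe_non_zero(&z);
        secp256k1_fe_inv_var(&zi, &z);  /* zi = 1/z */
        secp256k1_fe_mul(&zi, &zi, &z); /* zi = z * (1/z) */
        secp256k1_fe_set_int(&one, 1);
        CHECK(secp256k1_fe_equal(&zi, &one));
    }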


@ -132,16 +132,11 @@ static const secp256k1_callback default_error_callback = {
} while(0)
#endif
/* Like assert(), but when VERIFY is defined, and side-effect safe. */
#if defined(COVERAGE)
#define VERIFY_CHECK(check)
#define VERIFY_SETUP(stmt)
#elif defined(VERIFY)
/* Like assert(), but when VERIFY is defined. */
#if defined(VERIFY)
#define VERIFY_CHECK CHECK
#define VERIFY_SETUP(stmt) do { stmt; } while(0)
#else
#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
#define VERIFY_SETUP(stmt)
#define VERIFY_CHECK(cond)
#endif
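
One consequence of the simplified non-VERIFY branch is that the checked expression is discarded entirely, so conditions passed to VERIFY_CHECK must be free of side effects. A standalone sketch of the build-dependent behavior (assert stands in for the library's CHECK):

    #include <assert.h>

    #ifdef VERIFY
    #define VERIFY_CHECK(cond) assert(cond)
    #else
    #define VERIFY_CHECK(cond)
    #endif

    static int calls = 0;
    static int bump(void) { return ++calls; }

    int main(void) {
        VERIFY_CHECK(bump() > 0);
    #ifdef VERIFY
        assert(calls == 1); /* condition evaluated */
    #else
        assert(calls == 0); /* condition discarded: bump() never ran */
    #endif
        return 0;
    }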
static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {


@ -0,0 +1,64 @@
#!/bin/sh
set -eu
default_base_version="$(git describe --match "v*.*.*" --abbrev=0)"
default_new_version="master"
display_help_and_exit() {
echo "Usage: $0 <base_ver> <new_ver>"
echo ""
echo "Description: This script uses the ABI Compliance Checker tool to determine if the ABI"
echo " of a new version of libsecp256k1 has changed in a backward-incompatible way."
echo ""
echo "Options:"
echo " base_ver Specify the base version (default: $default_base_version)"
echo " new_ver Specify the new version (default: $default_new_version)"
echo " -h, --help Display this help message"
exit 0
}
if [ "$#" -eq 0 ]; then
base_version="$default_base_version"
new_version="$default_new_version"
elif [ "$#" -eq 1 ] && { [ "$1" = "-h" ] || [ "$1" = "--help" ]; }; then
display_help_and_exit
elif [ "$#" -eq 2 ]; then
base_version="$1"
new_version="$2"
else
echo "Invalid usage. See help:"
echo ""
display_help_and_exit
fi
checkout_and_build() {
git worktree add -d "$1" "$2"
cd "$1"
mkdir build && cd build
cmake -S .. --preset dev-mode \
-DCMAKE_C_COMPILER=gcc -DCMAKE_BUILD_TYPE=None -DCMAKE_C_FLAGS="-g -Og -gdwarf-4" \
-DSECP256K1_BUILD_BENCHMARK=OFF \
-DSECP256K1_BUILD_TESTS=OFF \
-DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
-DSECP256K1_BUILD_CTIME_TESTS=OFF \
-DSECP256K1_BUILD_EXAMPLES=OFF
cmake --build . -j "$(nproc)"
abi-dumper src/libsecp256k1.so -o ABI.dump -lver "$2"
}
echo "Comparing $base_version (base version) to $new_version (new version)"
echo
original_dir="$(pwd)"
base_source_dir=$(mktemp -d)
checkout_and_build "$base_source_dir" "$base_version"
new_source_dir=$(mktemp -d)
checkout_and_build "$new_source_dir" "$new_version"
cd "$original_dir"
abi-compliance-checker -lib libsecp256k1 -old "${base_source_dir}/build/ABI.dump" -new "${new_source_dir}/build/ABI.dump"
git worktree remove "$base_source_dir"
git worktree remove "$new_source_dir"
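
In practice the script is run from the repository root with abi-dumper and abi-compliance-checker installed; an invocation such as ./check-abi.sh v0.4.0 master would compare the v0.4.0 tag against master (script name and versions here are illustrative), while running it with no arguments falls back to the defaults shown above.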