/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#include <stdio.h>
#include <stdlib.h>

#include "secp256k1.c"
#include "testrand_impl.h"

#ifdef ENABLE_OPENSSL_TESTS
#include "openssl/bn.h"
#include "openssl/ec.h"
#include "openssl/ecdsa.h"
#include "openssl/obj_mac.h"
#endif

static int count = 64;

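/* Create a random field element from the test RNG, retrying until the 32-byte candidate is a valid encoding (below the field modulus). */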
void random_field_element_test(secp256k1_fe_t *fe) {
    do {
        unsigned char b32[32];
        secp256k1_rand256_test(b32);
        if (secp256k1_fe_set_b32(fe, b32)) {
            break;
        }
    } while(1);
}

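/* Randomize the magnitude of a field element by negating it an even number of times; the value it represents is unchanged. */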
void random_field_element_magnitude(secp256k1_fe_t *fe) {
    secp256k1_fe_normalize(fe);
    int n = secp256k1_rand32() % 4;
    for (int i = 0; i < n; i++) {
        secp256k1_fe_negate(fe, fe, 1 + 2*i);
        secp256k1_fe_negate(fe, fe, 2 + 2*i);
    }
}

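/* Create a random group element: draw random x coordinates until one lies on the curve, with a random parity for y. */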
void random_group_element_test(secp256k1_ge_t *ge) {
    secp256k1_fe_t fe;
    do {
        random_field_element_test(&fe);
        if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand32() & 1))
            break;
    } while(1);
}

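/* Write an affine point into Jacobian form with a random nonzero z: x' = x*z^2, y' = y*z^3, so the result represents the same point. */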
void random_group_element_jacobian_test(secp256k1_gej_t *gej, const secp256k1_ge_t *ge) {
    do {
        random_field_element_test(&gej->z);
        if (!secp256k1_fe_is_zero(&gej->z)) {
            break;
        }
    } while(1);
    secp256k1_fe_t z2; secp256k1_fe_sqr(&z2, &gej->z);
    secp256k1_fe_t z3; secp256k1_fe_mul(&z3, &z2, &gej->z);
    secp256k1_fe_mul(&gej->x, &ge->x, &z2);
    secp256k1_fe_mul(&gej->y, &ge->y, &z3);
    gej->infinity = ge->infinity;
}

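/* Create a random nonzero scalar modulo the group order. The _test variant uses the test RNG (secp256k1_rand256_test); random_scalar_order uses the plain RNG. */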
void random_scalar_order_test(secp256k1_scalar_t *num) {
    do {
        unsigned char b32[32];
        secp256k1_rand256_test(b32);
        int overflow = 0;
        secp256k1_scalar_set_b32(num, b32, &overflow);
        if (overflow || secp256k1_scalar_is_zero(num))
            continue;
        break;
    } while(1);
}

void random_scalar_order(secp256k1_scalar_t *num) {
    do {
        unsigned char b32[32];
        secp256k1_rand256(b32);
        int overflow = 0;
        secp256k1_scalar_set_b32(num, b32, &overflow);
        if (overflow || secp256k1_scalar_is_zero(num))
            continue;
        break;
    } while(1);
}

/***** NUM TESTS *****/

#ifndef USE_NUM_NONE
void random_num_negate(secp256k1_num_t *num) {
    if (secp256k1_rand32() & 1)
        secp256k1_num_negate(num);
}

void random_num_order_test(secp256k1_num_t *num) {
    secp256k1_scalar_t sc;
    random_scalar_order_test(&sc);
    secp256k1_scalar_get_num(num, &sc);
}

void random_num_order(secp256k1_num_t *num) {
    secp256k1_scalar_t sc;
    random_scalar_order(&sc);
    secp256k1_scalar_get_num(num, &sc);
}

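/* Check num negation: R - R == 0, R + (-R) == 0, negation flips the sign of a nonzero value, and double negation is the identity. */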
void test_num_negate(void) {
    secp256k1_num_t n1;
    secp256k1_num_t n2;
    random_num_order_test(&n1); /* n1 = R */
    random_num_negate(&n1);
    secp256k1_num_copy(&n2, &n1); /* n2 = R */
    secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */
    CHECK(secp256k1_num_is_zero(&n1));
    secp256k1_num_copy(&n1, &n2); /* n1 = R */
    secp256k1_num_negate(&n1); /* n1 = -R */
    CHECK(!secp256k1_num_is_zero(&n1));
    secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */
    CHECK(secp256k1_num_is_zero(&n1));
    secp256k1_num_copy(&n1, &n2); /* n1 = R */
    secp256k1_num_negate(&n1); /* n1 = -R */
    CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2));
    secp256k1_num_negate(&n1); /* n1 = R */
    CHECK(secp256k1_num_eq(&n1, &n2));
}

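/* Check num addition and subtraction on random (possibly negated) values: addition is commutative, and subtraction is its inverse. */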
void test_num_add_sub(void) {
    int r = secp256k1_rand32();
    secp256k1_num_t n1;
    secp256k1_num_t n2;
    random_num_order_test(&n1); /* n1 = R1 */
    if (r & 1) {
        random_num_negate(&n1);
    }
    random_num_order_test(&n2); /* n2 = R2 */
    if (r & 2) {
        random_num_negate(&n2);
    }
    secp256k1_num_t n1p2, n2p1, n1m2, n2m1;
    secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
    secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */
    secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */
    secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */
    CHECK(secp256k1_num_eq(&n1p2, &n2p1));
    CHECK(!secp256k1_num_eq(&n1p2, &n1m2));
    secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */
    CHECK(secp256k1_num_eq(&n2m1, &n1m2));
    CHECK(!secp256k1_num_eq(&n2m1, &n1));
    secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */
    CHECK(secp256k1_num_eq(&n2m1, &n1));
    CHECK(!secp256k1_num_eq(&n2p1, &n1));
    secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */
    CHECK(secp256k1_num_eq(&n2p1, &n1));
}

void run_num_smalltests(void) {
    for (int i=0; i<100*count; i++) {
        test_num_negate();
        test_num_add_sub();
    }
}
#endif

/***** SCALAR TESTS *****/

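/* Exercise scalar arithmetic on random values: bit extraction, addition, multiplication, negation, inversion and squaring, cross-checked against the generic num implementation where available. */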
void scalar_test(void) {
    unsigned char c[32];

    /* Set 's' to a random scalar, with value 'snum'. */
    secp256k1_scalar_t s;
    random_scalar_order_test(&s);

    /* Set 's1' to a random scalar, with value 's1num'. */
    secp256k1_scalar_t s1;
    random_scalar_order_test(&s1);

    /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */
    secp256k1_scalar_t s2;
    random_scalar_order_test(&s2);
    secp256k1_scalar_get_b32(c, &s2);

#ifndef USE_NUM_NONE
    secp256k1_num_t snum, s1num, s2num;
    secp256k1_scalar_get_num(&snum, &s);
    secp256k1_scalar_get_num(&s1num, &s1);
    secp256k1_scalar_get_num(&s2num, &s2);

    secp256k1_num_t order;
    secp256k1_scalar_order_get_num(&order);
    secp256k1_num_t half_order = order;
    secp256k1_num_shift(&half_order, 1);
#endif

    {
        /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
        secp256k1_scalar_t n;
        secp256k1_scalar_set_int(&n, 0);
        for (int i = 0; i < 256; i += 4) {
            secp256k1_scalar_t t;
            secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
            for (int j = 0; j < 4; j++) {
                secp256k1_scalar_add(&n, &n, &n);
            }
            secp256k1_scalar_add(&n, &n, &t);
        }
        CHECK(secp256k1_scalar_eq(&n, &s));
    }

    {
        /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */
        secp256k1_scalar_t n;
        secp256k1_scalar_set_int(&n, 0);
        int i = 0;
        while (i < 256) {
            int now = (secp256k1_rand32() % 15) + 1;
            if (now + i > 256) {
                now = 256 - i;
            }
            secp256k1_scalar_t t;
            secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now));
            for (int j = 0; j < now; j++) {
                secp256k1_scalar_add(&n, &n, &n);
            }
            secp256k1_scalar_add(&n, &n, &t);
            i += now;
        }
        CHECK(secp256k1_scalar_eq(&n, &s));
    }

#ifndef USE_NUM_NONE
    {
        /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_add(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &order);
        secp256k1_scalar_t r;
        secp256k1_scalar_add(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
    }

    {
        /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
        secp256k1_num_t rnum;
        secp256k1_num_mul(&rnum, &snum, &s2num);
        secp256k1_num_mod(&rnum, &order);
        secp256k1_scalar_t r;
        secp256k1_scalar_mul(&r, &s, &s2);
        secp256k1_num_t r2num;
        secp256k1_scalar_get_num(&r2num, &r);
        CHECK(secp256k1_num_eq(&rnum, &r2num));
        /* The result can only be zero if at least one of the factors was zero. */
        CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
        /* The result can only be equal to one of the factors if that factor was zero, or the other factor was one. */
        CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
        CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
    }

    {
        /* Check that comparison with zero matches comparison with zero on the number. */
        CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
        /* Check that comparison with the half order is equal to testing for high scalar. */
        CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0));
        secp256k1_scalar_t neg;
        secp256k1_scalar_negate(&neg, &s);
        secp256k1_num_t negnum;
        secp256k1_num_sub(&negnum, &order, &snum);
        secp256k1_num_mod(&negnum, &order);
        /* Check that comparison with the half order is equal to testing for high scalar after negation. */
        CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0));
        /* Negating should change the high property, unless the value was already zero. */
        CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
        secp256k1_num_t negnum2;
        secp256k1_scalar_get_num(&negnum2, &neg);
        /* Negating a scalar should be equal to (order - n) mod order on the number. */
        CHECK(secp256k1_num_eq(&negnum, &negnum2));
        secp256k1_scalar_add(&neg, &neg, &s);
        /* Adding a number to its negation should result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
        secp256k1_scalar_negate(&neg, &neg);
        /* Negating zero should still result in zero. */
        CHECK(secp256k1_scalar_is_zero(&neg));
    }

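    /* secp256k1_scalar_mul_shift_var returns (s1*s2) >> shift, rounded to the nearest integer.
     * The num code below reproduces that rounding by shifting right by shift-1 bits, adding
     * one, and shifting right once more. */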
    {
        /* Test secp256k1_scalar_mul_shift_var. */
        secp256k1_scalar_t r;
        unsigned int shift = 256 + (secp256k1_rand32() % 257);
        secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
        secp256k1_num_t rnum;
        secp256k1_num_mul(&rnum, &s1num, &s2num);
        secp256k1_num_shift(&rnum, shift - 1);
        secp256k1_num_t one;
        unsigned char cone[1] = {0x01};
        secp256k1_num_set_bin(&one, cone, 1);
        secp256k1_num_add(&rnum, &rnum, &one);
        secp256k1_num_shift(&rnum, 1);
        secp256k1_num_t rnum2;
        secp256k1_scalar_get_num(&rnum2, &r);
        CHECK(secp256k1_num_eq(&rnum, &rnum2));
    }
#endif

    {
        /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
        if (!secp256k1_scalar_is_zero(&s)) {
            secp256k1_scalar_t inv;
            secp256k1_scalar_inverse(&inv, &s);
#ifndef USE_NUM_NONE
            secp256k1_num_t invnum;
            secp256k1_num_mod_inverse(&invnum, &snum, &order);
            secp256k1_num_t invnum2;
            secp256k1_scalar_get_num(&invnum2, &inv);
            CHECK(secp256k1_num_eq(&invnum, &invnum2));
#endif
            secp256k1_scalar_mul(&inv, &inv, &s);
            /* Multiplying a scalar with its inverse must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
            secp256k1_scalar_inverse(&inv, &inv);
            /* Inverting one must result in one. */
            CHECK(secp256k1_scalar_is_one(&inv));
        }
    }

    {
        /* Test commutativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test add_bit. */
        int bit = secp256k1_rand32() % 256;
        secp256k1_scalar_t b;
        secp256k1_scalar_set_int(&b, 1);
        CHECK(secp256k1_scalar_is_one(&b));
        for (int i = 0; i < bit; i++) {
            secp256k1_scalar_add(&b, &b, &b);
        }
        secp256k1_scalar_t r1 = s1, r2 = s1;
        if (!secp256k1_scalar_add(&r1, &r1, &b)) {
            /* No overflow happened. */
            secp256k1_scalar_add_bit(&r2, bit);
            CHECK(secp256k1_scalar_eq(&r1, &r2));
        }
    }

    {
        /* Test commutativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r2, &s2, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of add. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_add(&r1, &r1, &s);
        secp256k1_scalar_add(&r2, &s2, &s);
        secp256k1_scalar_add(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test associativity of mul. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_mul(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s2, &s);
        secp256k1_scalar_mul(&r2, &s1, &r2);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test distributivity of mul over add. */
        secp256k1_scalar_t r1, r2, t;
        secp256k1_scalar_add(&r1, &s1, &s2);
        secp256k1_scalar_mul(&r1, &r1, &s);
        secp256k1_scalar_mul(&r2, &s1, &s);
        secp256k1_scalar_mul(&t, &s2, &s);
        secp256k1_scalar_add(&r2, &r2, &t);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }

    {
        /* Test square. */
        secp256k1_scalar_t r1, r2;
        secp256k1_scalar_sqr(&r1, &s1);
        secp256k1_scalar_mul(&r2, &s1, &s1);
        CHECK(secp256k1_scalar_eq(&r1, &r2));
    }
}

void run_scalar_tests(void) {
    for (int i = 0; i < 128 * count; i++) {
        scalar_test();
    }

    {
        /* (-1)+1 should be zero. */
        secp256k1_scalar_t s, o;
        secp256k1_scalar_set_int(&s, 1);
        secp256k1_scalar_negate(&o, &s);
        secp256k1_scalar_add(&o, &o, &s);
        CHECK(secp256k1_scalar_is_zero(&o));
    }

#ifndef USE_NUM_NONE
    {
        /* A scalar with value of the curve order should be 0. */
        secp256k1_num_t order;
        secp256k1_scalar_order_get_num(&order);
        unsigned char bin[32];
        secp256k1_num_get_bin(bin, 32, &order);
        secp256k1_scalar_t zero;
        int overflow = 0;
        secp256k1_scalar_set_b32(&zero, bin, &overflow);
        CHECK(overflow == 1);
        CHECK(secp256k1_scalar_is_zero(&zero));
    }
#endif
}

/***** FIELD TESTS *****/

void random_fe(secp256k1_fe_t *x) {
    unsigned char bin[32];
    do {
        secp256k1_rand256(bin);
        if (secp256k1_fe_set_b32(x, bin)) {
            return;
        }
    } while(1);
}

void random_fe_non_zero(secp256k1_fe_t *nz) {
    int tries = 10;
    while (--tries >= 0) {
        random_fe(nz);
        secp256k1_fe_normalize(nz);
        if (!secp256k1_fe_is_zero(nz))
            break;
    }
    /* Infinitesimal probability of spurious failure here */
    CHECK(tries >= 0);
}

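/* Produce a quadratic non-residue: take a random nonzero field element and, if it has a square root, negate it (exactly one of x and -x is a square in this field). */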
void random_fe_non_square(secp256k1_fe_t *ns) {
    random_fe_non_zero(ns);
    secp256k1_fe_t r;
    if (secp256k1_fe_sqrt_var(&r, ns)) {
        secp256k1_fe_negate(ns, ns, 1);
    }
}

int check_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
    secp256k1_fe_t an = *a; secp256k1_fe_normalize(&an);
    secp256k1_fe_t bn = *b; secp256k1_fe_normalize(&bn);
    return secp256k1_fe_equal(&an, &bn);
}

int check_fe_inverse(const secp256k1_fe_t *a, const secp256k1_fe_t *ai) {
    secp256k1_fe_t x; secp256k1_fe_mul(&x, a, ai);
    secp256k1_fe_t one; secp256k1_fe_set_int(&one, 1);
    return check_fe_equal(&x, &one);
}

void run_field_inv(void) {
    secp256k1_fe_t x, xi, xii;
    for (int i=0; i<10*count; i++) {
        random_fe_non_zero(&x);
        secp256k1_fe_inv(&xi, &x);
        CHECK(check_fe_inverse(&x, &xi));
        secp256k1_fe_inv(&xii, &xi);
        CHECK(check_fe_equal(&x, &xii));
    }
}

void run_field_inv_var(void) {
    secp256k1_fe_t x, xi, xii;
    for (int i=0; i<10*count; i++) {
        random_fe_non_zero(&x);
        secp256k1_fe_inv_var(&xi, &x);
        CHECK(check_fe_inverse(&x, &xi));
        secp256k1_fe_inv_var(&xii, &xi);
        CHECK(check_fe_equal(&x, &xii));
    }
}

void run_field_inv_all_var(void) {
    secp256k1_fe_t x[16], xi[16], xii[16];
    /* Check it's safe to call for 0 elements */
    secp256k1_fe_inv_all_var(0, xi, x);
    for (int i=0; i<count; i++) {
        size_t len = (secp256k1_rand32() & 15) + 1;
        for (size_t j=0; j<len; j++)
            random_fe_non_zero(&x[j]);
        secp256k1_fe_inv_all_var(len, xi, x);
        for (size_t j=0; j<len; j++)
            CHECK(check_fe_inverse(&x[j], &xi[j]));
        secp256k1_fe_inv_all_var(len, xii, xi);
        for (size_t j=0; j<len; j++)
            CHECK(check_fe_equal(&x[j], &xii[j]));
    }
}

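/* Smoke test for secp256k1_fe_sqr: square -2, -4, ..., -2^512 (mod p); there are no explicit CHECKs here. */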
void run_sqr(void) {
    secp256k1_fe_t x, s;

    {
        secp256k1_fe_set_int(&x, 1);
        secp256k1_fe_negate(&x, &x, 1);

        for (int i=1; i<=512; ++i) {
            secp256k1_fe_mul_int(&x, 2);
            secp256k1_fe_normalize(&x);
            secp256k1_fe_sqr(&s, &x);
        }
    }
}

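/* Check secp256k1_fe_sqrt_var: 'k' is a known square root of 'a' (the returned root may be k or -k), or NULL if 'a' is expected to have no square root. */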
void test_sqrt(const secp256k1_fe_t *a, const secp256k1_fe_t *k) {
    secp256k1_fe_t r1, r2;
    int v = secp256k1_fe_sqrt_var(&r1, a);
    CHECK((v == 0) == (k == NULL));

    if (k != NULL) {
        /* Check that the returned root is +/- the given known answer */
        secp256k1_fe_negate(&r2, &r1, 1);
        secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k);
        secp256k1_fe_normalize(&r1); secp256k1_fe_normalize(&r2);
        CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2));
    }
}

void run_sqrt(void) {
    secp256k1_fe_t ns, x, s, t;

    /* Check sqrt(0) is 0 */
    secp256k1_fe_set_int(&x, 0);
    secp256k1_fe_sqr(&s, &x);
    test_sqrt(&s, &x);

    /* Check sqrt of small squares (and their negatives) */
    for (int i=1; i<=100; i++) {
        secp256k1_fe_set_int(&x, i);
        secp256k1_fe_sqr(&s, &x);
        test_sqrt(&s, &x);
        secp256k1_fe_negate(&t, &s, 1);
        test_sqrt(&t, NULL);
    }

    /* Consistency checks for large random values */
    for (int i=0; i<10; i++) {
        random_fe_non_square(&ns);
        for (int j=0; j<count; j++) {
            random_fe(&x);
            secp256k1_fe_sqr(&s, &x);
            test_sqrt(&s, &x);
            secp256k1_fe_negate(&t, &s, 1);
            test_sqrt(&t, NULL);
            secp256k1_fe_mul(&t, &s, &ns);
            test_sqrt(&t, NULL);
        }
    }
}

/***** GROUP TESTS *****/

int ge_equals_ge(const secp256k1_ge_t *a, const secp256k1_ge_t *b) {
    if (a->infinity && b->infinity)
        return 1;
    return check_fe_equal(&a->x, &b->x) && check_fe_equal(&a->y, &b->y);
}

void ge_equals_gej(const secp256k1_ge_t *a, const secp256k1_gej_t *b) {
    secp256k1_ge_t bb;
    secp256k1_gej_t bj = *b;
    secp256k1_ge_set_gej_var(&bb, &bj);
    CHECK(ge_equals_ge(a, &bb));
}

void gej_equals_gej(const secp256k1_gej_t *a, const secp256k1_gej_t *b) {
    secp256k1_ge_t aa, bb;
    secp256k1_gej_t aj = *a, bj = *b;
    secp256k1_ge_set_gej_var(&aa, &aj);
    secp256k1_ge_set_gej_var(&bb, &bj);
    CHECK(ge_equals_ge(&aa, &bb));
}

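/* Consistency checks for group addition: with random points a and b, n = -a and i = infinity, the gej+gej, gej+ge and const gej+ge adders must agree, and a + (-a) must give infinity. */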
void test_ge(void) {
    char ca[135];
    char cb[68];
    int rlen;
    secp256k1_ge_t a, b, i, n;
    random_group_element_test(&a);
    random_group_element_test(&b);
    rlen = sizeof(ca);
    secp256k1_ge_get_hex(ca, &rlen, &a);
    CHECK(rlen > 4 && rlen <= (int)sizeof(ca));
    rlen = sizeof(cb);
    secp256k1_ge_get_hex(cb, &rlen, &b); /* Intentionally undersized buffer. */
    n = a;
    secp256k1_fe_normalize(&a.y);
    secp256k1_fe_negate(&n.y, &a.y, 1);
    secp256k1_ge_set_infinity(&i);
    random_field_element_magnitude(&a.x);
    random_field_element_magnitude(&a.y);
    random_field_element_magnitude(&b.x);
    random_field_element_magnitude(&b.y);
    random_field_element_magnitude(&n.x);
    random_field_element_magnitude(&n.y);

    secp256k1_gej_t aj, bj, ij, nj;
    random_group_element_jacobian_test(&aj, &a);
    random_group_element_jacobian_test(&bj, &b);
    secp256k1_gej_set_infinity(&ij);
    random_group_element_jacobian_test(&nj, &n);
    random_field_element_magnitude(&aj.x);
    random_field_element_magnitude(&aj.y);
    random_field_element_magnitude(&aj.z);
    random_field_element_magnitude(&bj.x);
    random_field_element_magnitude(&bj.y);
    random_field_element_magnitude(&bj.z);
    random_field_element_magnitude(&nj.x);
    random_field_element_magnitude(&nj.y);
    random_field_element_magnitude(&nj.z);

    /* gej + gej adds */
    secp256k1_gej_t aaj; secp256k1_gej_add_var(&aaj, &aj, &aj);
    secp256k1_gej_t abj; secp256k1_gej_add_var(&abj, &aj, &bj);
    secp256k1_gej_t aij; secp256k1_gej_add_var(&aij, &aj, &ij);
    secp256k1_gej_t anj; secp256k1_gej_add_var(&anj, &aj, &nj);
    secp256k1_gej_t iaj; secp256k1_gej_add_var(&iaj, &ij, &aj);
    secp256k1_gej_t iij; secp256k1_gej_add_var(&iij, &ij, &ij);

    /* gej + ge adds */
    secp256k1_gej_t aa; secp256k1_gej_add_ge_var(&aa, &aj, &a);
    secp256k1_gej_t ab; secp256k1_gej_add_ge_var(&ab, &aj, &b);
    secp256k1_gej_t ai; secp256k1_gej_add_ge_var(&ai, &aj, &i);
    secp256k1_gej_t an; secp256k1_gej_add_ge_var(&an, &aj, &n);
    secp256k1_gej_t ia; secp256k1_gej_add_ge_var(&ia, &ij, &a);
    secp256k1_gej_t ii; secp256k1_gej_add_ge_var(&ii, &ij, &i);

    /* const gej + ge adds */
    secp256k1_gej_t aac; secp256k1_gej_add_ge(&aac, &aj, &a);
    secp256k1_gej_t abc; secp256k1_gej_add_ge(&abc, &aj, &b);
    secp256k1_gej_t anc; secp256k1_gej_add_ge(&anc, &aj, &n);
    secp256k1_gej_t iac; secp256k1_gej_add_ge(&iac, &ij, &a);

    CHECK(secp256k1_gej_is_infinity(&an));
    CHECK(secp256k1_gej_is_infinity(&anj));
    CHECK(secp256k1_gej_is_infinity(&anc));
    gej_equals_gej(&aa, &aaj);
    gej_equals_gej(&aa, &aac);
    gej_equals_gej(&ab, &abj);
    gej_equals_gej(&ab, &abc);
    gej_equals_gej(&an, &anj);
    gej_equals_gej(&an, &anc);
    gej_equals_gej(&ia, &iaj);
    gej_equals_gej(&ai, &aij);
    gej_equals_gej(&ii, &iij);
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &ai);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iaj);
    ge_equals_gej(&a, &iac);
}

void run_ge(void) {
    for (int i = 0; i < 2000*count; i++) {
        test_ge();
    }
}

/***** ECMULT TESTS *****/

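/* Chain test for secp256k1_ecmult: repeatedly compute X = xn*X + gn*G from a fixed starting point, while tracking the accumulated coefficients ae and ge with X = ae*A + ge*G. */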
void run_ecmult_chain(void) {
    /* random starting point A (on the curve) */
    secp256k1_fe_t ax; VERIFY_CHECK(secp256k1_fe_set_hex(&ax, "8b30bbe9ae2a990696b22f670709dff3727fd8bc04d3362c6c7bf458e2846004", 64));
    secp256k1_fe_t ay; VERIFY_CHECK(secp256k1_fe_set_hex(&ay, "a357ae915c4a65281309edf20504740f0eb3343990216b4f81063cb65f2f7e0f", 64));
    secp256k1_gej_t a; secp256k1_gej_set_xy(&a, &ax, &ay);
    /* two random initial factors xn and gn */
    static const unsigned char xni[32] = {
        0x84, 0xcc, 0x54, 0x52, 0xf7, 0xfd, 0xe1, 0xed,
        0xb4, 0xd3, 0x8a, 0x8c, 0xe9, 0xb1, 0xb8, 0x4c,
        0xce, 0xf3, 0x1f, 0x14, 0x6e, 0x56, 0x9b, 0xe9,
        0x70, 0x5d, 0x35, 0x7a, 0x42, 0x98, 0x54, 0x07
    };
    secp256k1_scalar_t xn;
    secp256k1_scalar_set_b32(&xn, xni, NULL);
    static const unsigned char gni[32] = {
        0xa1, 0xe5, 0x8d, 0x22, 0x55, 0x3d, 0xcd, 0x42,
        0xb2, 0x39, 0x80, 0x62, 0x5d, 0x4c, 0x57, 0xa9,
        0x6e, 0x93, 0x23, 0xd4, 0x2b, 0x31, 0x52, 0xe5,
        0xca, 0x2c, 0x39, 0x90, 0xed, 0xc7, 0xc9, 0xde
    };
    secp256k1_scalar_t gn;
    secp256k1_scalar_set_b32(&gn, gni, NULL);
    /* two small multipliers to be applied to xn and gn in every iteration: */
    static const unsigned char xfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x13,0x37};
    secp256k1_scalar_t xf;
    secp256k1_scalar_set_b32(&xf, xfi, NULL);
    static const unsigned char gfi[32] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x71,0x13};
    secp256k1_scalar_t gf;
    secp256k1_scalar_set_b32(&gf, gfi, NULL);
    /* accumulators with the resulting coefficients to A and G */
    secp256k1_scalar_t ae;
    secp256k1_scalar_set_int(&ae, 1);
    secp256k1_scalar_t ge;
    secp256k1_scalar_set_int(&ge, 0);
    /* the point being computed */
    secp256k1_gej_t x = a;
    for (int i=0; i<200*count; i++) {
        /* in each iteration, compute X = xn*X + gn*G; */
        secp256k1_ecmult(&x, &x, &xn, &gn);
        /* also compute ae and ge: the actual accumulated factors for A and G */
        /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */
        secp256k1_scalar_mul(&ae, &ae, &xn);
        secp256k1_scalar_mul(&ge, &ge, &xn);
        secp256k1_scalar_add(&ge, &ge, &gn);
        /* modify xn and gn */
        secp256k1_scalar_mul(&xn, &xn, &xf);
        secp256k1_scalar_mul(&gn, &gn, &gf);