diff --git a/src/scalar.h b/src/scalar.h
index ed67b6fdd..00a390bc5 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -54,10 +54,6 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
 /** Multiply two scalars (modulo the group order). */
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
 
-/** Shift a scalar right by some amount strictly between 0 and 16, returning
- * the low bits that were shifted off */
-static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
-
 /** Compute the inverse of a scalar (modulo the group order). */
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
 
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 2256bf1d6..b3cc7523f 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -850,22 +850,6 @@ static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a,
     secp256k1_scalar_verify(r);
 }
 
-static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
-    int ret;
-    secp256k1_scalar_verify(r);
-    VERIFY_CHECK(n > 0);
-    VERIFY_CHECK(n < 16);
-
-    ret = r->d[0] & ((1 << n) - 1);
-    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
-    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
-    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
-    r->d[3] = (r->d[3] >> n);
-
-    secp256k1_scalar_verify(r);
-    return ret;
-}
-
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
     secp256k1_scalar_verify(k);
 
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index 709db70e8..fed2e77b6 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -662,26 +662,6 @@ static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a,
     secp256k1_scalar_verify(r);
 }
 
-static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
-    int ret;
-    secp256k1_scalar_verify(r);
-    VERIFY_CHECK(n > 0);
-    VERIFY_CHECK(n < 16);
-
-    ret = r->d[0] & ((1 << n) - 1);
-    r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
-    r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
-    r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
-    r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
-    r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
-    r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
-    r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
-    r->d[7] = (r->d[7] >> n);
-
-    secp256k1_scalar_verify(r);
-    return ret;
-}
-
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
     secp256k1_scalar_verify(k);
 
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index 1ee3bf991..1929b175e 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -139,19 +139,6 @@ static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a,
     secp256k1_scalar_verify(r);
 }
 
-static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
-    int ret;
-    secp256k1_scalar_verify(r);
-    VERIFY_CHECK(n > 0);
-    VERIFY_CHECK(n < 16);
-
-    ret = *r & ((1 << n) - 1);
-    *r >>= n;
-
-    secp256k1_scalar_verify(r);
-    return ret;
-}
-
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
     secp256k1_scalar_verify(a);
 
diff --git a/src/tests.c b/src/tests.c
index 0589514ea..fbaafe5d3 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -2180,20 +2180,6 @@ static void scalar_test(void) {
         CHECK(secp256k1_scalar_eq(&n, &s));
     }
 
-    {
-        /* test secp256k1_scalar_shr_int */
-        secp256k1_scalar r;
-        int i;
-        random_scalar_order_test(&r);
-        for (i = 0; i < 100; ++i) {
-            int low;
-            int shift = 1 + secp256k1_testrand_int(15);
-            int expected = r.d[0] % (1ULL << shift);
-            low = secp256k1_scalar_shr_int(&r, shift);
-            CHECK(expected == low);
-        }
-    }
-
     {
         /* Test commutativity of add. */
         secp256k1_scalar r1, r2;
@@ -5280,13 +5266,12 @@ static void test_fixed_wnaf(const secp256k1_scalar *number, int w) {
     int wnaf[256] = {0};
     int i;
     int skew;
-    secp256k1_scalar num = *number;
+    secp256k1_scalar num, unused;
 
     secp256k1_scalar_set_int(&x, 0);
     secp256k1_scalar_set_int(&shift, 1 << w);
-    for (i = 0; i < 16; ++i) {
-        secp256k1_scalar_shr_int(&num, 8);
-    }
+    /* Make num a 128-bit scalar. */
+    secp256k1_scalar_split_128(&num, &unused, number);
 
     skew = secp256k1_wnaf_fixed(wnaf, &num, w);
 
     for (i = WNAF_SIZE(w)-1; i >= 0; --i) {
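
If out-of-tree code still depended on the removed helper, a minimal standalone sketch of the 4x64 variant could look like the following. This is an illustration only: scalar4_shr_int and its plain uint64_t[4] little-endian limb representation are hypothetical stand-ins, not part of the libsecp256k1 API.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone port of the removed 4x64 helper: shift a
     * 256-bit value, held as four little-endian uint64_t limbs, right by
     * n bits (0 < n < 16), returning the low n bits that were shifted off. */
    static int scalar4_shr_int(uint64_t d[4], int n) {
        int ret;
        assert(n > 0 && n < 16);

        ret = (int)(d[0] & (((uint64_t)1 << n) - 1));
        /* Each limb's vacated high bits are filled from the limb above it;
         * "+" matches the original code and is equivalent to "|" here,
         * since the two bit ranges are disjoint. */
        d[0] = (d[0] >> n) + (d[1] << (64 - n));
        d[1] = (d[1] >> n) + (d[2] << (64 - n));
        d[2] = (d[2] >> n) + (d[3] << (64 - n));
        d[3] = (d[3] >> n);
        return ret;
    }

    int main(void) {
        uint64_t d[4] = { 0xfedcba9876543210ULL, 0x1ULL, 0, 0 };
        int low = scalar4_shr_int(d, 4);
        /* 0x0 comes off the bottom; limb 1's low bit lands in limb 0's
         * top nibble, giving d[0] = 0x1fedcba987654321. */
        printf("low = 0x%x, d[0] = 0x%016llx\n", low, (unsigned long long)d[0]);
        return 0;
    }

Note that the test_fixed_wnaf change is not bit-for-bit equivalent: the old loop of sixteen 8-bit shifts left the high 128 bits of *number in num, whereas secp256k1_scalar_split_128 stores the low 128 bits in num (and the high 128 bits in unused). Either way num ends up as a 128-bit scalar, which is all the test requires.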