diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h
index b5f486228a..ac2ccf67bc 100644
--- a/src/ecmult_impl.h
+++ b/src/ecmult_impl.h
@@ -455,7 +455,7 @@ struct secp256k1_strauss_state {
     struct secp256k1_strauss_point_state* ps;
 };
 
-static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
+static int secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t cb_offset, const secp256k1_fe *cb_z, const secp256k1_scalar *ng) {
     secp256k1_ge tmpa;
     secp256k1_fe Z;
     /* Splitted G factors. */
@@ -471,13 +471,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
 
     secp256k1_fe_set_int(&Z, 1);
     for (np = 0; np < num; ++np) {
-        secp256k1_gej tmp;
-        secp256k1_scalar na_1, na_lam;
-        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
+        secp256k1_gej a;
+        secp256k1_fe az;
+        secp256k1_scalar na, na_1, na_lam;
+
+        if (!cb(&na, &tmpa, np + cb_offset, cbdata)) return 0;
+        if (secp256k1_scalar_is_zero(&na) || secp256k1_ge_is_infinity(&tmpa)) {
             continue;
         }
+
+        secp256k1_gej_set_ge(&a, &tmpa);
+        if (cb_z) a.z = *cb_z;
+
         /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
-        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na[np]);
+        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na);
 
         /* build wnaf representation for na_1 and na_lam. */
         state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
@@ -501,16 +508,16 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
          * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
          * isomorphism to efficiently add with a known Z inverse.
          */
-        tmp = a[np];
+        az = a.z;
         if (no) {
 #ifdef VERIFY
             secp256k1_fe_normalize_var(&Z);
 #endif
-            secp256k1_gej_rescale(&tmp, &Z);
+            secp256k1_gej_rescale(&a, &Z);
         }
-        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &tmp);
-        if (no) secp256k1_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));
-        Z = tmp.z;
+        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &a);
+        if (no) secp256k1_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &az);
+        Z = a.z;
 
         ++no;
     }
@@ -569,6 +576,20 @@ static void secp256k1_ecmult_strauss_wnaf(const secp256k1_ecmult_context *ctx, c
     if (!r->infinity) {
         secp256k1_fe_mul(&r->z, &r->z, &Z);
     }
+
+    return 1;
+}
+
+struct secp256k1_ecmult_cb_data {
+    const secp256k1_scalar *na;
+    const secp256k1_ge *a;
+};
+
+static int secp256k1_ecmult_array_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
+    struct secp256k1_ecmult_cb_data *array_data = data;
+    *sc = array_data->na[idx];
+    if (array_data->a) *pt = array_data->a[idx];
+    return 1;
 }
 
 static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
@@ -576,11 +597,20 @@ static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej
     secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
     struct secp256k1_strauss_point_state ps[1];
     struct secp256k1_strauss_state state;
+    struct secp256k1_ecmult_cb_data data;
+    secp256k1_ge axy;
 
     state.aux = aux;
     state.pre_a = pre_a;
     state.ps = ps;
-    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, a, na, ng);
+    if (a) {
+        axy.x = a->x;
+        axy.y = a->y;
+        axy.infinity = a->infinity;
+    }
+    data.na = na;
+    data.a = a ? &axy : NULL;
+    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 1, &secp256k1_ecmult_array_cb, &data, 0, a ? &a->z : NULL, ng);
 }
 
 static size_t secp256k1_strauss_scratch_size(size_t n_points) {
@@ -592,7 +622,6 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba
     secp256k1_gej* points;
     secp256k1_scalar* scalars;
     struct secp256k1_strauss_state state;
-    size_t i;
     const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
 
     secp256k1_gej_set_infinity(r);
@@ -611,15 +640,10 @@ static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callba
         return 0;
     }
 
-    for (i = 0; i < n_points; i++) {
-        secp256k1_ge point;
-        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
-            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
-            return 0;
-        }
-        secp256k1_gej_set_ge(&points[i], &point);
+    if (!secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, cb, cbdata, cb_offset, NULL, inp_g_sc)) {
+        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
+        return 0;
     }
-    secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc);
     secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
     return 1;
 }
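
The sketch below is not part of the patch; it is a minimal illustration of how the reworked callback interface could be driven for a small fixed batch, modelled on the rewritten secp256k1_ecmult() body above. The function name example_strauss_two_points is hypothetical, and it assumes the internal types and macros from ecmult_impl.h are in scope; the tables are sized for two points, mirroring how the batch path sizes its per-point allocations.

/* Hypothetical illustration (not part of this patch): multiply two affine
 * points by two scalars, plus an optional G term, through the array callback
 * introduced above. */
static int example_strauss_two_points(const secp256k1_ecmult_context *ctx,
                                      secp256k1_gej *r,
                                      const secp256k1_ge *pts,     /* two points */
                                      const secp256k1_scalar *scs, /* two scalars */
                                      const secp256k1_scalar *ng) {
    secp256k1_fe aux[2 * ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge pre_a[2 * ECMULT_TABLE_SIZE(WINDOW_A)];
    struct secp256k1_strauss_point_state ps[2];
    struct secp256k1_strauss_state state;
    struct secp256k1_ecmult_cb_data data;

    state.aux = aux;
    state.pre_a = pre_a;
    state.ps = ps;
    data.na = scs;
    data.a = pts;

    /* cb_z is NULL because the points are supplied in affine form, and
     * cb_offset 0 means the callback is queried for indices 0 and 1. */
    return secp256k1_ecmult_strauss_wnaf(ctx, &state, r, 2,
                                         &secp256k1_ecmult_array_cb, &data,
                                         0, NULL, ng);
}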