diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8376e85a2..92a469138 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -119,6 +119,40 @@ jobs: --all-features" if: matrix.rust == 'nightly' + test_assembly: + name: Test assembly + runs-on: ubuntu-latest + env: + RUSTFLAGS: -Dwarnings + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Install Rust nightly + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + override: true + + - uses: actions/cache@v2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Test assembly on nightly + env: + RUSTFLAGS: -C target-cpu=native + uses: actions-rs/cargo@v1 + with: + command: test + args: "--workspace \ + --package ark-test-curves \ + --all-features" + check_no_std: name: Check no_std runs-on: ubuntu-latest diff --git a/bench-templates/src/macros/ec.rs b/bench-templates/src/macros/ec.rs index 63a523006..6bbb8abf2 100644 --- a/bench-templates/src/macros/ec.rs +++ b/bench-templates/src/macros/ec.rs @@ -95,14 +95,14 @@ macro_rules! ec_bench { let mut count = 0; b.iter(|| { let mut tmp = v[count].0; - n_fold!(tmp, v, add_assign_mixed, count); + n_fold!(tmp, v, add_assign, count); count = (count + 1) % SAMPLES; tmp }); } fn deser(b: &mut $crate::bencher::Bencher) { - use ark_ec::ProjectiveCurve; + use ark_ec::CurveGroup; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; const SAMPLES: usize = 1000; @@ -128,7 +128,7 @@ macro_rules! ec_bench { } fn ser(b: &mut $crate::bencher::Bencher) { - use ark_ec::ProjectiveCurve; + use ark_ec::CurveGroup; use ark_serialize::CanonicalSerialize; const SAMPLES: usize = 1000; @@ -137,7 +137,7 @@ macro_rules! ec_bench { let mut v: Vec<_> = (0..SAMPLES) .map(|_| <$projective>::rand(&mut rng)) .collect(); - let v = <$projective>::batch_normalization_into_affine(v.as_mut_slice()); + let v = <$projective>::normalize_batch(v.as_mut_slice()); let mut bytes = Vec::with_capacity(1000); let mut count = 0; @@ -150,7 +150,7 @@ macro_rules! ec_bench { } fn deser_unchecked(b: &mut $crate::bencher::Bencher) { - use ark_ec::ProjectiveCurve; + use ark_ec::CurveGroup; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; const SAMPLES: usize = 1000; @@ -184,7 +184,7 @@ macro_rules! ec_bench { let mut v: Vec<_> = (0..SAMPLES) .map(|_| <$projective>::rand(&mut rng)) .collect(); - let v = <$projective>::batch_normalization_into_affine(v.as_mut_slice()); + let v = <$projective>::normalize_batch(v.as_mut_slice()); let mut bytes = Vec::with_capacity(1000); let mut count = 0; @@ -197,7 +197,7 @@ macro_rules! ec_bench { } fn deser_uncompressed(b: &mut $crate::bencher::Bencher) { - use ark_ec::ProjectiveCurve; + use ark_ec::CurveGroup; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; const SAMPLES: usize = 1000; @@ -223,7 +223,7 @@ macro_rules! ec_bench { } fn msm_131072(b: &mut $crate::bencher::Bencher) { - use ark_ec::msm::VariableBaseMSM; + use ark_ec::scalar_mul::variable_base::VariableBaseMSM; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; const SAMPLES: usize = 131072; diff --git a/bench-templates/src/macros/field.rs b/bench-templates/src/macros/field.rs index 23de5d715..4c0bb383f 100644 --- a/bench-templates/src/macros/field.rs +++ b/bench-templates/src/macros/field.rs @@ -27,6 +27,8 @@ macro_rules! 
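// The ec bench hunks above replace `ProjectiveCurve::batch_normalization_into_affine`
// and `add_assign_mixed` with the renamed `CurveGroup::normalize_batch` and a plain
// `+=` on an affine operand. A minimal sketch of that calling pattern, relying only on
// the `CurveGroup` trait introduced later in this diff (`demo_mixed_sum` is a
// hypothetical helper, not part of this PR):

use ark_ec::CurveGroup;
use ark_std::Zero;

fn demo_mixed_sum<G: CurveGroup>(points: &[G]) -> G {
    // One batched inversion converts the whole slice to affine form.
    let affine = G::normalize_batch(points);
    let mut acc = G::zero();
    for p in &affine {
        // `CurveGroup: AddAssign<Self::Affine>` provides the old "mixed" addition.
        acc += *p;
    }
    acc
}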
f_bench { repr_add_nocarry, repr_sub_noborrow, repr_num_bits, + repr_from_bits_le, + repr_from_bits_be, repr_mul2, repr_div2, into_repr, @@ -451,5 +453,33 @@ macro_rules! prime_field { let _ = $f::from(v[count]); }); } + + fn repr_from_bits_be(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + let mut rng = ark_std::test_rng(); + let v = (0..SAMPLES) + .map(|_| ark_ff::BitIteratorBE::new($f_repr::rand(&mut rng)).collect::>()) + .collect::>(); + let mut count = 0; + b.iter(|| { + let mut tmp = $f_repr::from_bits_be(&v[count]); + count = (count + 1) % SAMPLES; + tmp; + }); + } + + fn repr_from_bits_le(b: &mut $crate::bencher::Bencher) { + const SAMPLES: usize = 1000; + let mut rng = ark_std::test_rng(); + let v = (0..SAMPLES) + .map(|_| ark_ff::BitIteratorLE::new($f_repr::rand(&mut rng)).collect::>()) + .collect::>(); + let mut count = 0; + b.iter(|| { + let mut tmp = $f_repr::from_bits_le(&v[count]); + count = (count + 1) % SAMPLES; + tmp; + }); + } }; } diff --git a/bench-templates/src/macros/pairing.rs b/bench-templates/src/macros/pairing.rs index 5bd64e754..3121df6d0 100644 --- a/bench-templates/src/macros/pairing.rs +++ b/bench-templates/src/macros/pairing.rs @@ -2,32 +2,36 @@ macro_rules! pairing_bench { ($curve:ident, $pairing_field:ident) => { fn miller_loop(b: &mut $crate::bencher::Bencher) { + use ark_ec::pairing::Pairing; const SAMPLES: usize = 1000; let mut rng = ark_std::test_rng(); let g1s = (0..SAMPLES).map(|_| G1::rand(&mut rng)).collect::>(); let g2s = (0..SAMPLES).map(|_| G2::rand(&mut rng)).collect::>(); - let g1s = G1::batch_normalization_into_affine(&g1s); - let g2s = G2::batch_normalization_into_affine(&g2s); - let prepared = g1s + let g1s = G1::normalize_batch(&g1s); + let g2s = G2::normalize_batch(&g2s); + let (prepared_1, prepared_2): ( + Vec<<$curve as Pairing>::G1Prepared>, + Vec<<$curve as Pairing>::G2Prepared>, + ) = g1s .into_iter() .zip(g2s) .map(|(g1, g2)| (g1.into(), g2.into())) - .collect::::G1Prepared, - <$curve as PairingEngine>::G2Prepared, - )>>(); + .unzip(); let mut count = 0; b.iter(|| { - let tmp = - $curve::miller_loop(&[(prepared[count].0.clone(), prepared[count].1.clone())]); + let tmp = $curve::multi_miller_loop( + [prepared_1[count].clone()], + [prepared_2[count].clone()], + ); count = (count + 1) % SAMPLES; tmp }); } fn final_exponentiation(b: &mut $crate::bencher::Bencher) { + use ark_ec::pairing::Pairing; const SAMPLES: usize = 1000; let mut rng = ark_std::test_rng(); @@ -35,33 +39,34 @@ macro_rules! 
pairing_bench { let v: Vec<_> = (0..SAMPLES) .map(|_| { ( - G1Affine::from(G1::rand(&mut rng)).into(), - G2Affine::from(G2::rand(&mut rng)).into(), + G1Prepared::from(G1::rand(&mut rng)), + G2Prepared::from(G2::rand(&mut rng)), ) }) - .map(|(p, q)| $curve::miller_loop(&[(p, q)])) + .map(|(p, q)| $curve::multi_miller_loop([p], [q])) .collect(); let mut count = 0; b.iter(|| { - let tmp = $curve::final_exponentiation(&v[count]); + let tmp = $curve::final_exponentiation(v[count]); count = (count + 1) % SAMPLES; tmp }); } fn full_pairing(b: &mut $crate::bencher::Bencher) { + use ark_ec::pairing::Pairing; const SAMPLES: usize = 1000; let mut rng = ark_std::test_rng(); - let v: Vec<(G1, G2)> = (0..SAMPLES) + let (v1, v2): (Vec, Vec) = (0..SAMPLES) .map(|_| (G1::rand(&mut rng), G2::rand(&mut rng))) - .collect(); + .unzip(); let mut count = 0; b.iter(|| { - let tmp = $curve::pairing(v[count].0, v[count].1); + let tmp = $curve::pairing(v1[count], v2[count]); count = (count + 1) % SAMPLES; tmp }); diff --git a/ec/Cargo.toml b/ec/Cargo.toml index 6649359d4..95de26cc0 100644 --- a/ec/Cargo.toml +++ b/ec/Cargo.toml @@ -23,6 +23,7 @@ num-traits = { version = "0.2", default-features = false } rayon = { version = "1", optional = true } zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] } hashbrown = "0.12.1" +itertools = { version = "0.10", default-features = false } [dev-dependencies] ark-test-curves = { version = "^0.3.0", path = "../test-curves", default-features = false, features = ["bls12_381_curve"] } diff --git a/ec/README.md b/ec/README.md index 921fd0368..9ba280640 100644 --- a/ec/README.md +++ b/ec/README.md @@ -11,13 +11,13 @@ Implementations of particular curves using these curve models can be found in [` The available elliptic curve traits are: -* [`AffineCurve`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L223) - Interface for elliptic curve points in the 'canonical form' for serialization. -* [`ProjectiveCurve`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L118) - Interface for elliptic curve points in a representation that is more efficient for most computation. -* [`PairingEngine`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L41) - Pairing friendly elliptic curves (Contains the pairing function, and acts as a wrapper type on G1, G2, GT, and the relevant fields). +* [`AffineRepr`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L223) - Interface for elliptic curve points in the 'canonical form' for serialization. +* [`CurveGroup`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L118) - Interface for elliptic curve points in a representation that is more efficient for most computation. +* [`Pairing`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L41) - Pairing friendly elliptic curves (Contains the pairing function, and acts as a wrapper type on G1, G2, GT, and the relevant fields). * [`CurveCycle`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L319) - Trait representing a cycle of elliptic curves. * [`PairingFriendlyCycle`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs#L331) - Trait representing a cycle of pairing friendly elliptic curves. The elliptic curve models implemented are: -* [*Short Weierstrass*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/short_weierstrass.rs) curves. 
The `AffineCurve` in this case is in typical Short Weierstrass point representation, and the `ProjectiveCurve` is using points in [Jacobian Coordinates](https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates). -* [*Twisted Edwards*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/twisted_edwards.rs) curves. The `AffineCurve` in this case is in standard Twisted Edwards curve representation, whereas the `ProjectiveCurve` uses points in [Extended Twisted Edwards Coordinates](https://eprint.iacr.org/2008/522.pdf). +* [*Short Weierstrass*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/short_weierstrass.rs) curves. The `AffineRepr` in this case is in typical Short Weierstrass point representation, and the `CurveGroup` is using points in [Jacobian Coordinates](https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates). +* [*Twisted Edwards*](https://github.com/arkworks-rs/algebra/blob/master/ec/src/models/twisted_edwards.rs) curves. The `AffineRepr` in this case is in standard Twisted Edwards curve representation, whereas the `CurveGroup` uses points in [Extended Twisted Edwards Coordinates](https://eprint.iacr.org/2008/522.pdf). diff --git a/ec/src/hashing/curve_maps/swu/mod.rs b/ec/src/hashing/curve_maps/swu/mod.rs index 250321cb6..c3a100b15 100644 --- a/ec/src/hashing/curve_maps/swu/mod.rs +++ b/ec/src/hashing/curve_maps/swu/mod.rs @@ -5,7 +5,7 @@ use core::marker::PhantomData; use crate::{ hashing::{map_to_curve_hasher::MapToCurve, HashToCurveError}, - models::short_weierstrass::Affine, + models::short_weierstrass::{Affine, Projective}, }; /// Trait defining the necessary parameters for the SWU hash-to-curve method @@ -36,7 +36,7 @@ pub fn parity(element: &F) -> bool { .map_or(false, |x| x.into_bigint().is_odd()) } -impl MapToCurve> for SWUMap

<P> {
+impl<P: SWUParams> MapToCurve<Projective<P>> for SWUMap<P>
{ /// Constructs a new map if `P` represents a valid map. fn new() -> Result { // Verifying that ZETA is a non-square @@ -155,9 +155,10 @@ impl MapToCurve> for SWUMap

{ #[cfg(test)] mod test { - use crate::hashing::map_to_curve_hasher::MapToCurveBasedHasher; - use crate::hashing::HashToCurve; - use crate::CurveConfig; + use crate::{ + hashing::{map_to_curve_hasher::MapToCurveBasedHasher, HashToCurve}, + CurveConfig, + }; use ark_ff::field_hashers::DefaultFieldHasher; use ark_std::vec::Vec; @@ -240,7 +241,7 @@ mod test { #[test] fn hash_arbitary_string_to_curve_swu() { let test_swu_to_curve_hasher = MapToCurveBasedHasher::< - Affine, + Projective, DefaultFieldHasher, SWUMap, >::new(&[1]) diff --git a/ec/src/hashing/curve_maps/wb/mod.rs b/ec/src/hashing/curve_maps/wb/mod.rs index c6be86fde..f737f57b9 100644 --- a/ec/src/hashing/curve_maps/wb/mod.rs +++ b/ec/src/hashing/curve_maps/wb/mod.rs @@ -6,8 +6,8 @@ use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use crate::{ hashing::{map_to_curve_hasher::MapToCurve, HashToCurveError}, - models::short_weierstrass::Affine, - AffineCurve, + models::short_weierstrass::{Affine, Projective}, + AffineRepr, }; use super::swu::{SWUMap, SWUParams}; @@ -33,21 +33,22 @@ pub trait WBParams: SWCurveConfig + Sized { fn isogeny_map( domain_point: Affine, ) -> Result, HashToCurveError> { - let x_num = DensePolynomial::from_coefficients_slice(Self::PHI_X_NOM); - let x_den = DensePolynomial::from_coefficients_slice(Self::PHI_X_DEN); - - let y_num = DensePolynomial::from_coefficients_slice(Self::PHI_Y_NOM); - let y_den = DensePolynomial::from_coefficients_slice(Self::PHI_Y_DEN); - - let mut v: [BaseField; 2] = [ - x_den.evaluate(&domain_point.x), - y_den.evaluate(&domain_point.x), - ]; - batch_inversion(&mut v); - let img_x = x_num.evaluate(&domain_point.x) * v[0]; - let img_y = (y_num.evaluate(&domain_point.x) * domain_point.y) * v[1]; - - Ok(Affine::new_unchecked(img_x, img_y)) + match domain_point.xy() { + Some((x, y)) => { + let x_num = DensePolynomial::from_coefficients_slice(Self::PHI_X_NOM); + let x_den = DensePolynomial::from_coefficients_slice(Self::PHI_X_DEN); + + let y_num = DensePolynomial::from_coefficients_slice(Self::PHI_Y_NOM); + let y_den = DensePolynomial::from_coefficients_slice(Self::PHI_Y_DEN); + + let mut v: [BaseField; 2] = [x_den.evaluate(x), y_den.evaluate(x)]; + batch_inversion(&mut v); + let img_x = x_num.evaluate(x) * v[0]; + let img_y = (y_num.evaluate(x) * y) * v[1]; + Ok(Affine::new_unchecked(img_x, img_y)) + }, + None => Ok(Affine::identity()), + } } } @@ -56,7 +57,7 @@ pub struct WBMap { curve_params: PhantomData P>, } -impl MapToCurve> for WBMap

<P> {
+impl<P: WBParams> MapToCurve<Projective<P>> for WBMap<P>
{ /// Constructs a new map if `P` represents a valid map. fn new() -> Result { match P::isogeny_map(P::IsogenousCurve::GENERATOR) { @@ -79,7 +80,7 @@ impl MapToCurve> for WBMap

{ /// fn map_to_curve( &self, - element: as AffineCurve>::BaseField, + element: as AffineRepr>::BaseField, ) -> Result, HashToCurveError> { // first we need to map the field point to the isogenous curve let point_on_isogenious_curve = self.swu_field_curve_hasher.map_to_curve(element).unwrap(); @@ -89,7 +90,6 @@ impl MapToCurve> for WBMap

{ #[cfg(test)] mod test { - use crate::hashing::HashToCurve; use crate::{ hashing::{ curve_maps::{ @@ -97,13 +97,13 @@ mod test { wb::{WBMap, WBParams}, }, map_to_curve_hasher::MapToCurveBasedHasher, + HashToCurve, }, models::short_weierstrass::SWCurveConfig, - short_weierstrass::Affine, + short_weierstrass::{Affine, Projective}, CurveConfig, }; - use ark_ff::field_hashers::DefaultFieldHasher; - use ark_ff::{fields::Fp64, MontBackend, MontFp}; + use ark_ff::{field_hashers::DefaultFieldHasher, fields::Fp64, MontBackend, MontFp}; #[derive(ark_ff::MontConfig)] #[modulus = "127"] @@ -282,7 +282,7 @@ mod test { fn hash_arbitrary_string_to_curve_wb() { use sha2::Sha256; let test_wb_to_curve_hasher = MapToCurveBasedHasher::< - Affine, + Projective, DefaultFieldHasher, WBMap, >::new(&[1]) diff --git a/ec/src/hashing/map_to_curve_hasher.rs b/ec/src/hashing/map_to_curve_hasher.rs index 1afd17078..59fe4bbf5 100644 --- a/ec/src/hashing/map_to_curve_hasher.rs +++ b/ec/src/hashing/map_to_curve_hasher.rs @@ -1,15 +1,14 @@ -use crate::{hashing::*, AffineCurve}; +use crate::{hashing::*, AffineRepr, CurveGroup}; use ark_ff::field_hashers::HashToField; use ark_std::marker::PhantomData; /// Trait for mapping a random field element to a random curve point. -pub trait MapToCurve { +pub trait MapToCurve: Sized { /// Constructs a new mapping. - fn new() -> Result - where - Self: Sized; + fn new() -> Result; + /// Map an arbitary field element to a corresponding curve point. - fn map_to_curve(&self, point: T::BaseField) -> Result; + fn map_to_curve(&self, point: T::BaseField) -> Result; } /// Helper struct that can be used to construct elements on the elliptic curve @@ -17,7 +16,7 @@ pub trait MapToCurve { /// and then mapping it to the elliptic curve defined over that field. pub struct MapToCurveBasedHasher where - T: AffineCurve, + T: CurveGroup, H2F: HashToField, M2C: MapToCurve, { @@ -28,7 +27,7 @@ where impl HashToCurve for MapToCurveBasedHasher where - T: AffineCurve, + T: CurveGroup, H2F: HashToField, M2C: MapToCurve, { @@ -47,7 +46,7 @@ where // traits. This uses the IETF hash to curve's specification for Random // oracle encoding (hash_to_curve) defined by combining these components. // See https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-3 - fn hash(&self, msg: &[u8]) -> Result { + fn hash(&self, msg: &[u8]) -> Result { // IETF spec of hash_to_curve, from hash_to_field and map_to_curve // sub-components // 1. u = hash_to_field(msg, 2) @@ -62,7 +61,7 @@ where let rand_curve_elem_0 = self.curve_mapper.map_to_curve(rand_field_elems[0])?; let rand_curve_elem_1 = self.curve_mapper.map_to_curve(rand_field_elems[1])?; - let rand_curve_elem = rand_curve_elem_0 + rand_curve_elem_1; + let rand_curve_elem = (rand_curve_elem_0 + rand_curve_elem_1).into(); let rand_subgroup_elem = rand_curve_elem.clear_cofactor(); Ok(rand_subgroup_elem) diff --git a/ec/src/hashing/mod.rs b/ec/src/hashing/mod.rs index 23901ea39..e4034b44b 100644 --- a/ec/src/hashing/mod.rs +++ b/ec/src/hashing/mod.rs @@ -1,4 +1,4 @@ -use crate::AffineCurve; +use crate::CurveGroup; use ark_std::string::String; use core::fmt; @@ -6,14 +6,14 @@ pub mod curve_maps; pub mod map_to_curve_hasher; /// Trait for hashing arbitrary data to a group element on an elliptic curve -pub trait HashToCurve: Sized { +pub trait HashToCurve: Sized { /// Create a new hash to curve instance, with a given domain. fn new(domain: &[u8]) -> Result; /// Produce a hash of the message, which also depends on the domain. 
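// With this change the hashing traits are parameterized by a `CurveGroup` target
// rather than an affine type, and `hash` returns the curve's affine representation.
// A small sketch of driving any such hasher through the reworked trait
// (`demo_hash_to_group` is a hypothetical helper; the updated SWU/WB tests elsewhere
// in this diff instantiate the concrete `MapToCurveBasedHasher` version):

use ark_ec::{
    hashing::{HashToCurve, HashToCurveError},
    CurveGroup,
};

fn demo_hash_to_group<T, H>(domain: &[u8], msg: &[u8]) -> Result<T::Affine, HashToCurveError>
where
    T: CurveGroup,
    H: HashToCurve<T>,
{
    // `new` fixes the domain-separation tag; `hash` lands in the prime-order subgroup.
    let hasher = H::new(domain)?;
    hasher.hash(msg)
}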
/// The output of the hash is a curve point in the prime order subgroup /// of the given elliptic curve. - fn hash(&self, message: &[u8]) -> Result; + fn hash(&self, message: &[u8]) -> Result; } /// This is an error that could occur during the hash to curve process diff --git a/ec/src/hashing/tests/suites.rs b/ec/src/hashing/tests/suites.rs index ff2b79bca..75108ce20 100644 --- a/ec/src/hashing/tests/suites.rs +++ b/ec/src/hashing/tests/suites.rs @@ -1,5 +1,7 @@ -use std::fs::{read_dir, File}; -use std::io::BufReader; +use std::{ + fs::{read_dir, File}, + io::BufReader, +}; use super::json::SuiteVector; use ark_ff::field_hashers::{DefaultFieldHasher, HashToField}; @@ -7,7 +9,7 @@ use libtest_mimic::{run_tests, Arguments, Outcome, Test}; use ark_test_curves::{ hashing::{curve_maps::wb::WBMap, map_to_curve_hasher::MapToCurveBasedHasher, HashToCurve}, - short_weierstrass::Affine, + short_weierstrass::{Affine, Projective}, }; use ark_ff::{Field, PrimeField}; @@ -42,13 +44,13 @@ fn run_test_w(Test { data, .. }: &Test) -> Outcome { let hasher; let m; let g1_mapper = MapToCurveBasedHasher::< - Affine, + Projective, DefaultFieldHasher, WBMap, >::new(dst) .unwrap(); let g2_mapper = MapToCurveBasedHasher::< - Affine, + Projective, DefaultFieldHasher, WBMap, >::new(dst) diff --git a/ec/src/lib.rs b/ec/src/lib.rs index 79b07b870..234ee4abc 100644 --- a/ec/src/lib.rs +++ b/ec/src/lib.rs @@ -30,106 +30,23 @@ use ark_std::{ ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, vec::Vec, }; -use msm::VariableBaseMSM; use num_traits::Zero; +pub use scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}; use zeroize::Zeroize; pub mod models; pub use self::models::*; -pub mod glv; - -pub mod msm; +pub mod scalar_mul; /// Provides a `HashToCurve` trait and implementations of this trait via /// different hashing strategies. pub mod hashing; -pub mod wnaf; - -/// Collection of types (mainly fields and curves) that together describe -/// how to compute a pairing over a pairing-friendly curve. -pub trait PairingEngine: Sized + 'static + Copy + Debug + Sync + Send + Eq + PartialEq { - /// This is the scalar field of the G1/G2 groups. - type Fr: PrimeField; - - /// The projective representation of an element in G1. - type G1Projective: ProjectiveCurve - + From - + Into - // needed due to https://github.com/rust-lang/rust/issues/69640 - + MulAssign - + VariableBaseMSM; - - /// The affine representation of an element in G1. - type G1Affine: AffineCurve - + From - + Into - + Into; - - /// A G1 element that has been preprocessed for use in a pairing. - type G1Prepared: Default + Clone + Send + Sync + Debug + From; - - /// The projective representation of an element in G2. - type G2Projective: ProjectiveCurve - + From - + Into - // needed due to https://github.com/rust-lang/rust/issues/69640 - + MulAssign - + VariableBaseMSM; - - /// The affine representation of an element in G2. - type G2Affine: AffineCurve - + From - + Into - + Into; - - /// A G2 element that has been preprocessed for use in a pairing. - type G2Prepared: Default + Clone + Send + Sync + Debug + From; - - /// The base field that hosts G1. - type Fq: PrimeField; - - /// The extension field that hosts G2. - type Fqe: Field; - - /// The extension field that hosts the target group of the pairing. - type Fqk: Field; - - /// Computes the product of miller loops for some number of (G1, G2) pairs. - #[must_use] - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator; - - /// Performs final exponentiation of the result of a miller loop. 
- #[must_use] - fn final_exponentiation(_: &Self::Fqk) -> Option; - - /// Computes a product of pairings. - #[must_use] - fn product_of_pairings<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - Self::final_exponentiation(&Self::miller_loop(i)).unwrap() - } - /// Performs multiple pairing operations - #[must_use] - fn pairing(p: G1, q: G2) -> Self::Fqk - where - G1: Into, - G2: Into, - { - let g1_prep = Self::G1Prepared::from(p.into()); - let g2_prep = Self::G2Prepared::from(q.into()); - Self::product_of_pairings(core::iter::once(&(g1_prep, g2_prep))) - } -} +pub mod pairing; -/// Projective representation of an elliptic curve point guaranteed to be -/// in the correct prime order subgroup. -pub trait ProjectiveCurve: +/// Represents (elements of) a group of prime order `r`. +pub trait Group: Eq + 'static + Sized @@ -149,53 +66,27 @@ pub trait ProjectiveCurve: + Neg + Add + Sub - + Mul<::ScalarField, Output = Self> + + Mul<::ScalarField, Output = Self> + AddAssign + SubAssign - + MulAssign<::ScalarField> + + MulAssign<::ScalarField> + for<'a> Add<&'a Self, Output = Self> + for<'a> Sub<&'a Self, Output = Self> - + for<'a> Mul<&'a ::ScalarField, Output = Self> + + for<'a> Mul<&'a ::ScalarField, Output = Self> + for<'a> AddAssign<&'a Self> + for<'a> SubAssign<&'a Self> - + for<'a> MulAssign<&'a ::ScalarField> + + for<'a> MulAssign<&'a ::ScalarField> + core::iter::Sum + for<'a> core::iter::Sum<&'a Self> - + From<::Affine> { - type Config: CurveConfig; + /// The scalar field `F_r`, where `r` is the order of this group. type ScalarField: PrimeField; - type BaseField: Field; - type Affine: AffineCurve< - Config = Self::Config, - Projective = Self, - ScalarField = Self::ScalarField, - BaseField = Self::BaseField, - > + From - + Into; - - /// Returns a fixed generator of unknown exponent. - #[must_use] - fn prime_subgroup_generator() -> Self; - - /// Normalizes a slice of projective elements so that - /// conversion to affine is cheap. - fn batch_normalization(v: &mut [Self]); - - /// Normalizes a slice of projective elements and outputs a vector - /// containing the affine equivalents. - fn batch_normalization_into_affine(v: &[Self]) -> Vec { - let mut v = v.to_vec(); - Self::batch_normalization(&mut v); - v.into_iter().map(|v| v.into()).collect() - } - /// Checks if the point is already "normalized" so that - /// cheap affine conversion is possible. + /// Returns a fixed generator of this group. #[must_use] - fn is_normalized(&self) -> bool; + fn generator() -> Self; - /// Doubles this element. + /// Doubles `self`. #[must_use] fn double(&self) -> Self { let mut copy = *self; @@ -203,32 +94,75 @@ pub trait ProjectiveCurve: copy } - /// Doubles this element in place. + /// Double `self` in place. fn double_in_place(&mut self) -> &mut Self; - /// Converts self into the affine representation. - fn into_affine(self) -> Self::Affine { - self.into() + /// Performs scalar multiplication of this element. + fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self; + + /// Computes `other * self`, where `other` is a *big-endian* + /// bit representation of some integer. + fn mul_bits_be(&self, other: impl Iterator) -> Self { + let mut res = Self::zero(); + for b in other.skip_while(|b| !b) { + // skip leading zeros + res.double_in_place(); + if b { + res += self; + } + } + res } +} - /// Sets `self` to be `self + other`, where `other: Self::Affine`. - /// This is usually faster than adding `other` in projective form. 
- fn add_mixed(mut self, other: &Self::Affine) -> Self { - self.add_assign_mixed(other); - self - } +/// An opaque representation of an elliptic curve group element that is suitable +/// for efficient group arithmetic. +/// +/// The point is guaranteed to be in the correct prime order subgroup. +pub trait CurveGroup: + Group + + Add + + AddAssign + // + for<'a> Add<&'a Self::Affine, Output = Self> + // + for<'a> AddAssign<&'a Self::Affine> + + VariableBaseMSM + + ScalarMul + + From + + Into + + core::iter::Sum + + for<'a> core::iter::Sum<&'a Self::Affine> +{ + type Config: CurveConfig; + /// The field over which this curve is defined. + type BaseField: Field; + /// The affine representation of this element. + type Affine: AffineRepr< + Config = Self::Config, + Group = Self, + ScalarField = Self::ScalarField, + BaseField = Self::BaseField, + > + From + + Into; - /// Sets `self` to be `self + other`, where `other: Self::Affine`. - /// This is usually faster than adding `other` in projective form. - fn add_assign_mixed(&mut self, other: &Self::Affine); + /// Type representing an element of the full elliptic curve group, not just the + /// prime order subgroup. + type FullGroup; - /// Performs scalar multiplication of this element. - fn mul_bigint>(self, other: S) -> Self; + /// Normalizes a slice of group elements into affine. + fn normalize_batch(v: &[Self]) -> Vec; + + /// Converts `self` into the affine representation. + fn into_affine(self) -> Self::Affine { + self.into() + } } -/// Affine representation of an elliptic curve point guaranteed to be -/// in the correct prime order subgroup. -pub trait AffineCurve: +/// The canonical representation of an elliptic curve group element. +/// This should represent the affine coordinates of the point corresponding +/// to this group element. +/// +/// The point is guaranteed to be in the correct prime order subgroup. +pub trait AffineRepr: Eq + 'static + Sized @@ -243,26 +177,23 @@ pub trait AffineCurve: + Hash + Debug + Display - + Zero - + Neg + Zeroize - + core::iter::Sum - + Mul - + for<'a> Mul<&'a Self::ScalarField, Output = Self::Projective> - + for<'a> core::iter::Sum<&'a Self> - + From<::Projective> + + From<::Group> + + Into<::Group> + + Add + + for<'a> Add<&'a Self, Output = Self::Group> + + Add + + for<'a> Add<&'a Self::Group, Output = Self::Group> + + Mul + + for<'a> Mul<&'a Self::ScalarField, Output = Self::Group> { type Config: CurveConfig; - - /// The group defined by this curve has order `h * r` where `r` is a large - /// prime. `Self::ScalarField` is the prime field defined by `r` type ScalarField: PrimeField + Into<::BigInt>; - /// The finite field over which this curve is defined. type BaseField: Field; /// The projective representation of points on this curve. - type Projective: ProjectiveCurve< + type Group: CurveGroup< Config = Self::Config, Affine = Self, ScalarField = Self::ScalarField, @@ -271,15 +202,33 @@ pub trait AffineCurve: + Into + MulAssign; // needed due to https://github.com/rust-lang/rust/issues/69640 - /// Returns the x and y coordinates of this affine point + /// Returns the x and y coordinates of this affine point. fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)>; + /// Returns the x coordinate of this affine point. + fn x(&self) -> Option<&Self::BaseField> { + self.xy().map(|(x, _)| x) + } + + /// Returns the y coordinate of this affine point. + fn y(&self) -> Option<&Self::BaseField> { + self.xy().map(|(_, y)| y) + } + + /// Returns the point at infinity. 
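// The hunks above and below split the old `ProjectiveCurve`/`AffineCurve` pair into
// `Group`, `CurveGroup`, and `AffineRepr`: `prime_subgroup_generator` becomes
// `generator`, `mul_bigint` now takes `&self` and any `AsRef<[u64]>`, and the
// conversions are `into_affine`/`into_group`. A small sketch of the renamed surface,
// relying only on those traits (`demo_renamed_api` is a hypothetical helper, not part
// of this PR):

use ark_ec::{AffineRepr, CurveGroup, Group};
use ark_ff::PrimeField;

fn demo_renamed_api<G: CurveGroup>(scalar: G::ScalarField) -> G::Affine {
    // `generator` replaces `prime_subgroup_generator`.
    let g = G::generator();
    // Scalar multiplication by a big-integer representation of the scalar.
    let p = g.mul_bigint(scalar.into_bigint());
    // Projective -> affine -> projective round trip via the renamed conversions.
    p.into_affine().into_group().into_affine()
}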
+ fn identity() -> Self; + + /// Is `self` the point at infinity? + fn is_identity(&self) -> bool { + self.xy().is_none() + } + /// Returns a fixed generator of unknown exponent. #[must_use] - fn prime_subgroup_generator() -> Self; + fn generator() -> Self; /// Converts self into the projective representation. - fn into_projective(self) -> Self::Projective { + fn into_group(self) -> Self::Group { self.into() } @@ -290,7 +239,7 @@ pub trait AffineCurve: /// Performs scalar multiplication of this element with mixed addition. #[must_use] - fn mul_bigint>(&self, by: S) -> Self::Projective; + fn mul_bigint(&self, by: impl AsRef<[u64]>) -> Self::Group; /// Performs cofactor clearing. /// The default method is simply to multiply by the cofactor. @@ -301,12 +250,12 @@ pub trait AffineCurve: /// Multiplies this element by the cofactor and output the /// resulting projective element. #[must_use] - fn mul_by_cofactor_to_projective(&self) -> Self::Projective; + fn mul_by_cofactor_to_group(&self) -> Self::Group; /// Multiplies this element by the cofactor. #[must_use] fn mul_by_cofactor(&self) -> Self { - self.mul_by_cofactor_to_projective().into() + self.mul_by_cofactor_to_group().into() } /// Multiplies this element by the inverse of the cofactor in @@ -318,46 +267,32 @@ pub trait AffineCurve: } } -/// Preprocesses a G1 element for use in a pairing. -pub fn prepare_g1(g: impl Into) -> E::G1Prepared { - let g: E::G1Affine = g.into(); - E::G1Prepared::from(g) -} - -/// Preprocesses a G2 element for use in a pairing. -pub fn prepare_g2(g: impl Into) -> E::G2Prepared { - let g: E::G2Affine = g.into(); - E::G2Prepared::from(g) -} - /// Wrapper trait representing a cycle of elliptic curves (E1, E2) such that /// the base field of E1 is the scalar field of E2, and the scalar field of E1 /// is the base field of E2. pub trait CurveCycle where - ::Projective: MulAssign<::BaseField>, - ::Projective: MulAssign<::BaseField>, + Self::E1: MulAssign<::BaseField>, + Self::E2: MulAssign<::BaseField>, { - type E1: AffineCurve< - BaseField = ::ScalarField, - ScalarField = ::BaseField, + type E1: CurveGroup< + BaseField = ::ScalarField, + ScalarField = ::BaseField, >; - type E2: AffineCurve; + type E2: CurveGroup; } /// A cycle of curves where both curves are pairing-friendly. pub trait PairingFriendlyCycle: CurveCycle { - type Engine1: PairingEngine< - G1Affine = Self::E1, - G1Projective = ::Projective, - Fq = ::BaseField, - Fr = ::ScalarField, + type Engine1: pairing::Pairing< + G1 = Self::E1, + G1Affine = ::Affine, + ScalarField = ::ScalarField, >; - type Engine2: PairingEngine< - G1Affine = Self::E2, - G1Projective = ::Projective, - Fq = ::BaseField, - Fr = ::ScalarField, + type Engine2: pairing::Pairing< + G1 = Self::E2, + G1Affine = ::Affine, + ScalarField = ::ScalarField, >; } diff --git a/ec/src/models/bls12/g1.rs b/ec/src/models/bls12/g1.rs index 8375ca173..f66046dc9 100644 --- a/ec/src/models/bls12/g1.rs +++ b/ec/src/models/bls12/g1.rs @@ -1,9 +1,8 @@ use crate::{ bls12::Bls12Parameters, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; -use num_traits::Zero; pub type G1Affine

<P> = Affine<<P as Bls12Parameters>::G1Parameters>;
 pub type G1Projective<P> = Projective<<P as Bls12Parameters>::G1Parameters>;
@@ -23,14 +22,32 @@ impl<P: Bls12Parameters> From<G1Affine<P>> for G1Prepared<P> {
     }
 }
 
+impl<P: Bls12Parameters> From<G1Projective<P>> for G1Prepared<P> {
+    fn from(q: G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
+impl<'a, P: Bls12Parameters> From<&'a G1Affine<P>> for G1Prepared<P> {
+    fn from(other: &'a G1Affine<P>) -> Self {
+        G1Prepared(*other)
+    }
+}
+
+impl<'a, P: Bls12Parameters> From<&'a G1Projective<P>> for G1Prepared<P> {
+    fn from(q: &'a G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
 impl<P: Bls12Parameters> G1Prepared<P> {
     pub fn is_zero(&self) -> bool {
-        self.0.is_zero()
+        self.0.is_identity()
     }
 }
 
 impl<P: Bls12Parameters> Default for G1Prepared<P> {
     fn default() -> Self {
-        G1Prepared(G1Affine::<P>::prime_subgroup_generator())
+        G1Prepared(G1Affine::<P>
::generator()) } } diff --git a/ec/src/models/bls12/g2.rs b/ec/src/models/bls12/g2.rs index 336dfcfe6..1a76d996a 100644 --- a/ec/src/models/bls12/g2.rs +++ b/ec/src/models/bls12/g2.rs @@ -1,14 +1,11 @@ -use ark_ff::{ - fields::{BitIteratorBE, Field, Fp2}, - Zero, -}; +use ark_ff::fields::{BitIteratorBE, Field, Fp2}; use ark_std::{vec::Vec, One}; use crate::{ bls12::{Bls12Parameters, TwistType}, models::short_weierstrass::SWCurveConfig, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; pub type G2Affine

<P> = Affine<<P as Bls12Parameters>::G2Parameters>;
@@ -24,11 +21,15 @@ pub type G2Projective<P> = Projective<<P as Bls12Parameters>
::G2Parameters>; pub struct G2Prepared { // Stores the coefficients of the line evaluations as calculated in // https://eprint.iacr.org/2013/722.pdf - pub ell_coeffs: Vec>>, + pub ell_coeffs: Vec>, pub infinity: bool, } -pub(crate) type EllCoeff = (F, F, F); +pub(crate) type EllCoeff

= ( + Fp2<

::Fp2Config>, + Fp2<

::Fp2Config>, + Fp2<

::Fp2Config>, +); #[derive(Derivative)] #[derivative( @@ -44,40 +45,56 @@ struct G2HomProjective { impl Default for G2Prepared

{ fn default() -> Self { - Self::from(G2Affine::

::prime_subgroup_generator()) + Self::from(G2Affine::

::generator()) } } impl From> for G2Prepared

{ fn from(q: G2Affine

) -> Self { let two_inv = P::Fp::one().double().inverse().unwrap(); - match q.is_zero() { - true => G2Prepared { - ell_coeffs: vec![], - infinity: true, - }, - false => { - let mut ell_coeffs = vec![]; - let mut r = G2HomProjective { - x: q.x, - y: q.y, - z: Fp2::one(), - }; - - for i in BitIteratorBE::new(P::X).skip(1) { - ell_coeffs.push(doubling_step::

(&mut r, &two_inv)); - - if i { - ell_coeffs.push(addition_step::

(&mut r, &q)); - } + let zero = G2Prepared { + ell_coeffs: vec![], + infinity: true, + }; + q.xy().map_or(zero, |(&q_x, &q_y)| { + let mut ell_coeffs = vec![]; + let mut r = G2HomProjective::

{ + x: q_x, + y: q_y, + z: Fp2::one(), + }; + + for i in BitIteratorBE::new(P::X).skip(1) { + ell_coeffs.push(r.double_in_place(&two_inv)); + + if i { + ell_coeffs.push(r.add_in_place(&q)); } + } - Self { - ell_coeffs, - infinity: false, - } - }, - } + Self { + ell_coeffs, + infinity: false, + } + }) + } +} + +impl From> for G2Prepared

{ + fn from(q: G2Projective

) -> Self { + q.into_affine().into() + } +} + +impl<'a, P: Bls12Parameters> From<&'a G2Affine

> for G2Prepared

{ + fn from(other: &'a G2Affine

) -> Self { + (*other).into() + } +} + +impl<'a, P: Bls12Parameters> From<&'a G2Projective

> for G2Prepared

{ + fn from(q: &'a G2Projective

) -> Self { + q.into_affine().into() } } @@ -87,57 +104,53 @@ impl G2Prepared

{ } } -fn doubling_step( - r: &mut G2HomProjective, - two_inv: &B::Fp, -) -> EllCoeff> { - // Formula for line function when working with - // homogeneous projective coordinates. - - let mut a = r.x * &r.y; - a.mul_assign_by_fp(two_inv); - let b = r.y.square(); - let c = r.z.square(); - let e = B::G2Parameters::COEFF_B * &(c.double() + &c); - let f = e.double() + &e; - let mut g = b + &f; - g.mul_assign_by_fp(two_inv); - let h = (r.y + &r.z).square() - &(b + &c); - let i = e - &b; - let j = r.x.square(); - let e_square = e.square(); - - r.x = a * &(b - &f); - r.y = g.square() - &(e_square.double() + &e_square); - r.z = b * &h; - match B::TWIST_TYPE { - TwistType::M => (i, j.double() + &j, -h), - TwistType::D => (-h, j.double() + &j, i), +impl G2HomProjective

{ + fn double_in_place(&mut self, two_inv: &P::Fp) -> EllCoeff

{ + // Formula for line function when working with + // homogeneous projective coordinates. + + let mut a = self.x * &self.y; + a.mul_assign_by_fp(two_inv); + let b = self.y.square(); + let c = self.z.square(); + let e = P::G2Parameters::COEFF_B * &(c.double() + &c); + let f = e.double() + &e; + let mut g = b + &f; + g.mul_assign_by_fp(two_inv); + let h = (self.y + &self.z).square() - &(b + &c); + let i = e - &b; + let j = self.x.square(); + let e_square = e.square(); + + self.x = a * &(b - &f); + self.y = g.square() - &(e_square.double() + &e_square); + self.z = b * &h; + match P::TWIST_TYPE { + TwistType::M => (i, j.double() + &j, -h), + TwistType::D => (-h, j.double() + &j, i), + } } -} -fn addition_step( - r: &mut G2HomProjective, - q: &G2Affine, -) -> EllCoeff> { - let (&qx, &qy) = q.xy().unwrap(); - // Formula for line function when working with - // homogeneous projective coordinates. - let theta = r.y - &(qy * &r.z); - let lambda = r.x - &(qx * &r.z); - let c = theta.square(); - let d = lambda.square(); - let e = lambda * &d; - let f = r.z * &c; - let g = r.x * &d; - let h = e + &f - &g.double(); - r.x = lambda * &h; - r.y = theta * &(g - &h) - &(e * &r.y); - r.z *= &e; - let j = theta * &qx - &(lambda * &qy); - - match B::TWIST_TYPE { - TwistType::M => (j, -theta, lambda), - TwistType::D => (lambda, -theta, j), + fn add_in_place(&mut self, q: &G2Affine

) -> EllCoeff

{ + let (&qx, &qy) = q.xy().unwrap(); + // Formula for line function when working with + // homogeneous projective coordinates. + let theta = self.y - &(qy * &self.z); + let lambda = self.x - &(qx * &self.z); + let c = theta.square(); + let d = lambda.square(); + let e = lambda * &d; + let f = self.z * &c; + let g = self.x * &d; + let h = e + &f - &g.double(); + self.x = lambda * &h; + self.y = theta * &(g - &h) - &(e * &self.y); + self.z *= &e; + let j = theta * &qx - &(lambda * &qy); + + match P::TWIST_TYPE { + TwistType::M => (j, -theta, lambda), + TwistType::D => (lambda, -theta, j), + } } } diff --git a/ec/src/models/bls12/mod.rs b/ec/src/models/bls12/mod.rs index af390f145..d14d28d87 100644 --- a/ec/src/models/bls12/mod.rs +++ b/ec/src/models/bls12/mod.rs @@ -1,6 +1,7 @@ use crate::{ models::{short_weierstrass::SWCurveConfig, CurveConfig}, - AffineCurve, PairingEngine, + pairing::{MillerLoopOutput, Pairing, PairingOutput}, + AffineRepr, }; use ark_ff::{ fields::{ @@ -11,15 +12,11 @@ use ark_ff::{ }, CyclotomicMultSubgroup, }; -use core::marker::PhantomData; +use ark_std::{marker::PhantomData, vec::Vec}; use num_traits::{One, Zero}; #[cfg(feature = "parallel")] -use ark_std::cfg_iter; -#[cfg(feature = "parallel")] -use core::slice::Iter; -#[cfg(feature = "parallel")] -use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use rayon::prelude::*; /// A particular BLS12 group can have G2 being either a multiplicative or a /// divisive twist. @@ -61,7 +58,7 @@ pub struct Bls12(PhantomData P>); impl Bls12

{ // Evaluate the line function at point p. - fn ell(f: &mut Fp12, coeffs: &g2::EllCoeff>, p: &G1Affine

) { + fn ell(f: &mut Fp12, coeffs: &g2::EllCoeff

, p: &G1Affine

) { let mut c0 = coeffs.0; let mut c1 = coeffs.1; let mut c2 = coeffs.2; @@ -85,118 +82,72 @@ impl Bls12

{ fn exp_by_x(f: &Fp12, result: &mut Fp12) { *result = f.cyclotomic_exp(P::X); if P::X_IS_NEGATIVE { - result.conjugate(); + result.cyclotomic_inverse_in_place(); } } } -impl PairingEngine for Bls12

{ - type Fr = ::ScalarField; - type G1Projective = G1Projective

; +impl Pairing for Bls12

{ + type ScalarField = ::ScalarField; + type G1 = G1Projective

; type G1Affine = G1Affine

; type G1Prepared = G1Prepared

; - type G2Projective = G2Projective

; + type G2 = G2Projective

; type G2Affine = G2Affine

; type G2Prepared = G2Prepared

; - type Fq = P::Fp; - type Fqe = Fp2; - type Fqk = Fp12; - - #[cfg(not(feature = "parallel"))] - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - let mut pairs = vec![]; - for (p, q) in i { - if !p.is_zero() && !q.is_zero() { - pairs.push((p, q.ell_coeffs.iter())); - } - } - let mut f = Self::Fqk::one(); - for i in BitIteratorBE::without_leading_zeros(P::X).skip(1) { - f.square_in_place(); - for (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); - } - if i { - for &mut (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); + type TargetField = Fp12; + + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput { + use itertools::Itertools; + + let mut pairs = a + .into_iter() + .zip_eq(b) + .filter_map(|(p, q)| { + let (p, q) = (p.into(), q.into()); + match !p.is_zero() && !q.is_zero() { + true => Some((p, q.ell_coeffs.into_iter())), + false => None, } - } - } - if P::X_IS_NEGATIVE { - f.conjugate(); - } - f - } - - #[cfg(feature = "parallel")] - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - let mut pairs = vec![]; - for (p, q) in i { - if !p.is_zero() && !q.is_zero() { - pairs.push((p, q.ell_coeffs.iter())); - } - } - - let mut f_vec = vec![]; - for _ in 0..pairs.len() { - f_vec.push(Self::Fqk::one()); - } - - let a = |p: &&G1Prepared

, - coeffs: &Iter< - '_, - ( - Fp2<

::Fp2Config>, - Fp2<

::Fp2Config>, - Fp2<

::Fp2Config>, - ), - >, - mut f: Fp12<

::Fp12Config>| - -> Fp12<

::Fp12Config> { - let coeffs = coeffs.as_slice(); - let mut j = 0; - for i in BitIteratorBE::without_leading_zeros(P::X).skip(1) { - f.square_in_place(); - Self::ell(&mut f, &coeffs[j], &p.0); - j += 1; - if i { - Self::ell(&mut f, &coeffs[j], &p.0); - j += 1; + }) + .collect::>(); + + let mut f = cfg_chunks_mut!(pairs, 4) + .map(|pairs| { + let mut f = Self::TargetField::one(); + for i in BitIteratorBE::without_leading_zeros(P::X).skip(1) { + f.square_in_place(); + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } + if i { + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } + } } - } - f - }; - - let mut products = vec![]; - cfg_iter!(pairs) - .zip(f_vec) - .map(|(p, f)| a(&p.0, &p.1, f)) - .collect_into_vec(&mut products); + f + }) + .product::(); - let mut f = Self::Fqk::one(); - for ff in products { - f *= ff; - } if P::X_IS_NEGATIVE { - f.conjugate(); + f.cyclotomic_inverse_in_place(); } - f + MillerLoopOutput(f) } - fn final_exponentiation(f: &Self::Fqk) -> Option { + fn final_exponentiation(f: MillerLoopOutput) -> Option> { // Computing the final exponentation following // https://eprint.iacr.org/2020/875 // Adapted from the implementation in https://github.com/ConsenSys/gurvy/pull/29 - // f1 = r.conjugate() = f^(p^6) - let mut f1 = *f; - f1.conjugate(); + // f1 = r.cyclotomic_inverse_in_place() = f^(p^6) + let f = f.0; + let mut f1 = f; + f1.cyclotomic_inverse_in_place(); f.inverse().map(|mut f2| { // f2 = f^(-1); @@ -220,13 +171,13 @@ impl PairingEngine for Bls12

{ Self::exp_by_x(&r, &mut y1); // t[2].InverseUnitary(&result) let mut y2 = r; - y2.conjugate(); + y2.cyclotomic_inverse_in_place(); // t[1].Mul(&t[1], &t[2]) y1 *= &y2; // t[2].Expt(&t[1]) Self::exp_by_x(&y1, &mut y2); // t[1].InverseUnitary(&t[1]) - y1.conjugate(); + y1.cyclotomic_inverse_in_place(); // t[1].Mul(&t[1], &t[2]) y1 *= &y2; // t[2].Expt(&t[1]) @@ -245,14 +196,14 @@ impl PairingEngine for Bls12

{ y0 = y1; y0.frobenius_map(2); // t[1].InverseUnitary(&t[1]) - y1.conjugate(); + y1.cyclotomic_inverse_in_place(); // t[1].Mul(&t[1], &t[2]) y1 *= &y2; // t[1].Mul(&t[1], &t[0]) y1 *= &y0; // result.Mul(&result, &t[1]) r *= &y1; - r + PairingOutput(r) }) } } diff --git a/ec/src/models/bn/g1.rs b/ec/src/models/bn/g1.rs index e60f0d87d..051419acf 100644 --- a/ec/src/models/bn/g1.rs +++ b/ec/src/models/bn/g1.rs @@ -1,9 +1,8 @@ use crate::{ bn::BnParameters, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; -use num_traits::Zero; pub type G1Affine

<P> = Affine<<P as BnParameters>::G1Parameters>;
 pub type G1Projective<P> = Projective<<P as BnParameters>::G1Parameters>;
@@ -23,14 +22,32 @@ impl<P: BnParameters> From<G1Affine<P>> for G1Prepared<P> {
     }
 }
 
+impl<P: BnParameters> From<G1Projective<P>> for G1Prepared<P> {
+    fn from(q: G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
+impl<'a, P: BnParameters> From<&'a G1Affine<P>> for G1Prepared<P> {
+    fn from(other: &'a G1Affine<P>) -> Self {
+        G1Prepared(*other)
+    }
+}
+
+impl<'a, P: BnParameters> From<&'a G1Projective<P>> for G1Prepared<P> {
+    fn from(q: &'a G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
 impl<P: BnParameters> G1Prepared<P> {
     pub fn is_zero(&self) -> bool {
-        self.0.is_zero()
+        self.0.infinity
     }
 }
 
 impl<P: BnParameters> Default for G1Prepared<P> {
     fn default() -> Self {
-        G1Prepared(G1Affine::<P>::prime_subgroup_generator())
+        G1Prepared(G1Affine::<P>
::generator()) } } diff --git a/ec/src/models/bn/g2.rs b/ec/src/models/bn/g2.rs index 7c3a487fd..94d857a6a 100644 --- a/ec/src/models/bn/g2.rs +++ b/ec/src/models/bn/g2.rs @@ -2,13 +2,13 @@ use ark_std::vec::Vec; use ark_ff::fields::{Field, Fp2}; -use num_traits::{One, Zero}; +use num_traits::One; use crate::{ bn::{BnParameters, TwistType}, models::short_weierstrass::SWCurveConfig, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; pub type G2Affine

<P> = Affine<<P as BnParameters>::G2Parameters>;
@@ -24,11 +24,15 @@ pub type G2Projective<P> = Projective<<P as BnParameters>
::G2Parameters>; pub struct G2Prepared { // Stores the coefficients of the line evaluations as calculated in // https://eprint.iacr.org/2013/722.pdf - pub ell_coeffs: Vec>>, + pub ell_coeffs: Vec>, pub infinity: bool, } -pub(crate) type EllCoeff = (F, F, F); +pub(crate) type EllCoeff

= ( + Fp2<

::Fp2Config>, + Fp2<

::Fp2Config>, + Fp2<

::Fp2Config>, +); #[derive(Derivative)] #[derivative( @@ -42,67 +46,128 @@ struct G2HomProjective { z: Fp2, } +impl G2HomProjective

{ + fn double_in_place(&mut self, two_inv: &P::Fp) -> EllCoeff

{ + // Formula for line function when working with + // homogeneous projective coordinates. + + let mut a = self.x * &self.y; + a.mul_assign_by_fp(two_inv); + let b = self.y.square(); + let c = self.z.square(); + let e = P::G2Parameters::COEFF_B * &(c.double() + &c); + let f = e.double() + &e; + let mut g = b + &f; + g.mul_assign_by_fp(two_inv); + let h = (self.y + &self.z).square() - &(b + &c); + let i = e - &b; + let j = self.x.square(); + let e_square = e.square(); + + self.x = a * &(b - &f); + self.y = g.square() - &(e_square.double() + &e_square); + self.z = b * &h; + match P::TWIST_TYPE { + TwistType::M => (i, j.double() + &j, -h), + TwistType::D => (-h, j.double() + &j, i), + } + } + + fn add_in_place(&mut self, q: &G2Affine

) -> EllCoeff

{ + // Formula for line function when working with + // homogeneous projective coordinates. + let theta = self.y - &(q.y * &self.z); + let lambda = self.x - &(q.x * &self.z); + let c = theta.square(); + let d = lambda.square(); + let e = lambda * &d; + let f = self.z * &c; + let g = self.x * &d; + let h = e + &f - &g.double(); + self.x = lambda * &h; + self.y = theta * &(g - &h) - &(e * &self.y); + self.z *= &e; + let j = theta * &q.x - &(lambda * &q.y); + + match P::TWIST_TYPE { + TwistType::M => (j, -theta, lambda), + TwistType::D => (lambda, -theta, j), + } + } +} + impl Default for G2Prepared

{ fn default() -> Self { - Self::from(G2Affine::

::prime_subgroup_generator()) + Self::from(G2Affine::

::generator()) } } impl From> for G2Prepared

{ fn from(q: G2Affine

) -> Self { - let two_inv = P::Fp::one().double().inverse().unwrap(); - match q.is_zero() { - true => G2Prepared { + if q.infinity { + G2Prepared { ell_coeffs: vec![], infinity: true, - }, - false => { - let mut ell_coeffs = vec![]; - let mut r = G2HomProjective { - x: q.x, - y: q.y, - z: Fp2::one(), - }; - - let negq = -q; - - for i in (1..P::ATE_LOOP_COUNT.len()).rev() { - ell_coeffs.push(doubling_step::

(&mut r, &two_inv)); - - let bit = P::ATE_LOOP_COUNT[i - 1]; - - match bit { - 1 => { - ell_coeffs.push(addition_step::

(&mut r, &q)); - }, - -1 => { - ell_coeffs.push(addition_step::

(&mut r, &negq)); - }, - _ => continue, - } + } + } else { + let two_inv = P::Fp::one().double().inverse().unwrap(); + let mut ell_coeffs = vec![]; + let mut r = G2HomProjective::

{ + x: q.x, + y: q.y, + z: Fp2::one(), + }; + + let neg_q = -q; + + for bit in P::ATE_LOOP_COUNT.iter().rev().skip(1) { + ell_coeffs.push(r.double_in_place(&two_inv)); + + match bit { + 1 => ell_coeffs.push(r.add_in_place(&q)), + -1 => ell_coeffs.push(r.add_in_place(&neg_q)), + _ => continue, } + } - let q1 = mul_by_char::

(q); - let mut q2 = mul_by_char::

(q1); + let q1 = mul_by_char::

(q); + let mut q2 = mul_by_char::

(q1); - if P::X_IS_NEGATIVE { - r.y = -r.y; - } + if P::X_IS_NEGATIVE { + r.y = -r.y; + } - q2.y = -q2.y; + q2.y = -q2.y; - ell_coeffs.push(addition_step::

(&mut r, &q1)); - ell_coeffs.push(addition_step::

(&mut r, &q2)); + ell_coeffs.push(r.add_in_place(&q1)); + ell_coeffs.push(r.add_in_place(&q2)); - Self { - ell_coeffs, - infinity: false, - } - }, + Self { + ell_coeffs, + infinity: false, + } } } } +impl From> for G2Prepared

{ + fn from(q: G2Projective

) -> Self { + q.into_affine().into() + } +} + +impl<'a, P: BnParameters> From<&'a G2Affine

> for G2Prepared

{ + fn from(other: &'a G2Affine

) -> Self { + (*other).into() + } +} + +impl<'a, P: BnParameters> From<&'a G2Projective

> for G2Prepared

{ + fn from(q: &'a G2Projective

) -> Self { + q.into_affine().into() + } +} + impl G2Prepared

{ pub fn is_zero(&self) -> bool { self.infinity @@ -120,57 +185,3 @@ fn mul_by_char(r: G2Affine

) -> G2Affine

{ s } - -fn doubling_step( - r: &mut G2HomProjective, - two_inv: &B::Fp, -) -> EllCoeff> { - // Formula for line function when working with - // homogeneous projective coordinates. - - let mut a = r.x * &r.y; - a.mul_assign_by_fp(two_inv); - let b = r.y.square(); - let c = r.z.square(); - let e = B::G2Parameters::COEFF_B * &(c.double() + &c); - let f = e.double() + &e; - let mut g = b + &f; - g.mul_assign_by_fp(two_inv); - let h = (r.y + &r.z).square() - &(b + &c); - let i = e - &b; - let j = r.x.square(); - let e_square = e.square(); - - r.x = a * &(b - &f); - r.y = g.square() - &(e_square.double() + &e_square); - r.z = b * &h; - match B::TWIST_TYPE { - TwistType::M => (i, j.double() + &j, -h), - TwistType::D => (-h, j.double() + &j, i), - } -} - -fn addition_step( - r: &mut G2HomProjective, - q: &G2Affine, -) -> EllCoeff> { - // Formula for line function when working with - // homogeneous projective coordinates. - let theta = r.y - &(q.y * &r.z); - let lambda = r.x - &(q.x * &r.z); - let c = theta.square(); - let d = lambda.square(); - let e = lambda * &d; - let f = r.z * &c; - let g = r.x * &d; - let h = e + &f - &g.double(); - r.x = lambda * &h; - r.y = theta * &(g - &h) - &(e * &r.y); - r.z *= &e; - let j = theta * &q.x - &(lambda * &q.y); - - match B::TWIST_TYPE { - TwistType::M => (j, -theta, lambda), - TwistType::D => (lambda, -theta, j), - } -} diff --git a/ec/src/models/bn/mod.rs b/ec/src/models/bn/mod.rs index fca8d98f3..05c3a0f6a 100644 --- a/ec/src/models/bn/mod.rs +++ b/ec/src/models/bn/mod.rs @@ -1,6 +1,6 @@ use crate::{ models::{short_weierstrass::SWCurveConfig, CurveConfig}, - PairingEngine, + pairing::{MillerLoopOutput, Pairing, PairingOutput}, }; use ark_ff::{ fields::{ @@ -11,9 +11,12 @@ use ark_ff::{ }, CyclotomicMultSubgroup, }; +use ark_std::{marker::PhantomData, vec::Vec}; +use itertools::Itertools; use num_traits::One; -use core::marker::PhantomData; +#[cfg(feature = "parallel")] +use rayon::prelude::*; pub enum TwistType { M, @@ -59,7 +62,7 @@ pub struct Bn(PhantomData P>); impl Bn

{ /// Evaluates the line function at point p. - fn ell(f: &mut Fp12, coeffs: &g2::EllCoeff>, p: &G1Affine

) { + fn ell(f: &mut Fp12, coeffs: &g2::EllCoeff

, p: &G1Affine

) { let mut c0 = coeffs.0; let mut c1 = coeffs.1; let mut c2 = coeffs.2; @@ -81,86 +84,86 @@ impl Bn

{ fn exp_by_neg_x(mut f: Fp12) -> Fp12 { f = f.cyclotomic_exp(&P::X); if !P::X_IS_NEGATIVE { - f.conjugate(); + f.cyclotomic_inverse_in_place(); } f } } -impl PairingEngine for Bn

{ - type Fr = ::ScalarField; - type G1Projective = G1Projective

; +impl Pairing for Bn

{ + type ScalarField = ::ScalarField; + type G1 = G1Projective

; type G1Affine = G1Affine

; type G1Prepared = G1Prepared

; - type G2Projective = G2Projective

; + type G2 = G2Projective

; type G2Affine = G2Affine

; type G2Prepared = G2Prepared

; - type Fq = P::Fp; - type Fqe = Fp2; - type Fqk = Fp12; - - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - let mut pairs = vec![]; - for (p, q) in i { - if !p.is_zero() && !q.is_zero() { - pairs.push((p, q.ell_coeffs.iter())); - } - } - - let mut f = Self::Fqk::one(); - - for i in (1..P::ATE_LOOP_COUNT.len()).rev() { - if i != P::ATE_LOOP_COUNT.len() - 1 { - f.square_in_place(); - } - - for (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); - } + type TargetField = Fp12; + + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput { + let mut pairs = a + .into_iter() + .zip_eq(b) + .filter_map(|(p, q)| { + let (p, q) = (p.into(), q.into()); + match !p.is_zero() && !q.is_zero() { + true => Some((p, q.ell_coeffs.into_iter())), + false => None, + } + }) + .collect::>(); + + let mut f = cfg_chunks_mut!(pairs, 4) + .map(|pairs| { + let mut f = Self::TargetField::one(); + for i in (1..P::ATE_LOOP_COUNT.len()).rev() { + if i != P::ATE_LOOP_COUNT.len() - 1 { + f.square_in_place(); + } - let bit = P::ATE_LOOP_COUNT[i - 1]; - match bit { - 1 => { - for &mut (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); } - }, - -1 => { - for &mut (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); + + let bit = P::ATE_LOOP_COUNT[i - 1]; + if bit == 1 || bit == -1 { + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } } - }, - _ => continue, - } - } + } + f + }) + .product::(); if P::X_IS_NEGATIVE { - f.conjugate(); + f.cyclotomic_inverse_in_place(); } - for &mut (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); + for (p, coeffs) in &mut pairs { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); } - for &mut (p, ref mut coeffs) in &mut pairs { - Self::ell(&mut f, coeffs.next().unwrap(), &p.0); + for (p, coeffs) in &mut pairs { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); } - f + MillerLoopOutput(f) } #[allow(clippy::let_and_return)] - fn final_exponentiation(f: &Self::Fqk) -> Option { + fn final_exponentiation(f: MillerLoopOutput) -> Option> { // Easy part: result = elt^((q^6-1)*(q^2+1)). // Follows, e.g., Beuchat et al page 9, by computing result as follows: // elt^((q^6-1)*(q^2+1)) = (conj(elt) * elt^(-1))^(q^2+1) + let f = f.0; - // f1 = r.conjugate() = f^(p^6) - let mut f1 = *f; - f1.conjugate(); + // f1 = r.cyclotomic_inverse_in_place() = f^(p^6) + let mut f1 = f; + f1.cyclotomic_inverse_in_place(); f.inverse().map(|mut f2| { // f2 = f^(-1); @@ -194,8 +197,8 @@ impl PairingEngine for Bn

{ let y4 = Self::exp_by_neg_x(y3); let y5 = y4.cyclotomic_square(); let mut y6 = Self::exp_by_neg_x(y5); - y3.conjugate(); - y6.conjugate(); + y3.cyclotomic_inverse_in_place(); + y6.cyclotomic_inverse_in_place(); let y7 = y6 * &y4; let mut y8 = y7 * &y3; let y9 = y8 * &y1; @@ -206,12 +209,12 @@ impl PairingEngine for Bn

{ let y13 = y12 * &y11; y8.frobenius_map(2); let y14 = y8 * &y13; - r.conjugate(); + r.cyclotomic_inverse_in_place(); let mut y15 = r * &y9; y15.frobenius_map(3); let y16 = y15 * &y14; - y16 + PairingOutput(y16) }) } } diff --git a/ec/src/models/bw6/g1.rs b/ec/src/models/bw6/g1.rs index ff8306465..b71de209b 100644 --- a/ec/src/models/bw6/g1.rs +++ b/ec/src/models/bw6/g1.rs @@ -1,15 +1,15 @@ use crate::{ bw6::BW6Parameters, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; -use num_traits::Zero; pub type G1Affine

<P> = Affine<<P as BW6Parameters>::G1Parameters>;
 pub type G1Projective<P> = Projective<<P as BW6Parameters>::G1Parameters>;
 
 #[derive(Derivative)]
 #[derivative(
+    Copy(bound = "P: BW6Parameters"),
     Clone(bound = "P: BW6Parameters"),
     Debug(bound = "P: BW6Parameters"),
     PartialEq(bound = "P: BW6Parameters"),
@@ -23,14 +23,32 @@ impl<P: BW6Parameters> From<G1Affine<P>> for G1Prepared<P> {
     }
 }
 
+impl<P: BW6Parameters> From<G1Projective<P>> for G1Prepared<P> {
+    fn from(q: G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
+impl<'a, P: BW6Parameters> From<&'a G1Affine<P>> for G1Prepared<P> {
+    fn from(other: &'a G1Affine<P>) -> Self {
+        G1Prepared(*other)
+    }
+}
+
+impl<'a, P: BW6Parameters> From<&'a G1Projective<P>> for G1Prepared<P> {
+    fn from(q: &'a G1Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
 impl<P: BW6Parameters> G1Prepared<P> {
     pub fn is_zero(&self) -> bool {
-        self.0.is_zero()
+        self.0.infinity
     }
 }
 
 impl<P: BW6Parameters> Default for G1Prepared<P> {
     fn default() -> Self {
-        G1Prepared(G1Affine::<P>::prime_subgroup_generator())
+        G1Prepared(G1Affine::<P>
::generator()) } } diff --git a/ec/src/models/bw6/g2.rs b/ec/src/models/bw6/g2.rs index 1186c5f39..4db801c98 100644 --- a/ec/src/models/bw6/g2.rs +++ b/ec/src/models/bw6/g2.rs @@ -2,13 +2,13 @@ use ark_std::vec::Vec; use ark_ff::fields::{BitIteratorBE, Field}; -use num_traits::{One, Zero}; +use num_traits::One; use crate::{ bw6::{BW6Parameters, TwistType}, models::short_weierstrass::SWCurveConfig, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; pub type G2Affine

= Affine<

::G2Parameters>; @@ -43,13 +43,13 @@ struct G2HomProjective { impl Default for G2Prepared

{ fn default() -> Self { - Self::from(G2Affine::

::prime_subgroup_generator()) + Self::from(G2Affine::

::generator()) } } impl From> for G2Prepared

{ fn from(q: G2Affine

) -> Self { - if q.is_zero() { + if q.infinity { return Self { ell_coeffs_1: vec![], ell_coeffs_2: vec![], @@ -59,23 +59,23 @@ impl From> for G2Prepared

{ // f_{u+1,Q}(P) let mut ell_coeffs_1 = vec![]; - let mut r = G2HomProjective { + let mut r = G2HomProjective::

{ x: q.x, y: q.y, z: P::Fp::one(), }; for i in BitIteratorBE::new(P::ATE_LOOP_COUNT_1).skip(1) { - ell_coeffs_1.push(doubling_step::

(&mut r)); + ell_coeffs_1.push(r.double_in_place()); if i { - ell_coeffs_1.push(addition_step::

(&mut r, &q)); + ell_coeffs_1.push(r.add_in_place(&q)); } } // f_{u^3-u^2-u,Q}(P) let mut ell_coeffs_2 = vec![]; - let mut r = G2HomProjective { + let mut r = G2HomProjective::

{ x: q.x, y: q.y, z: P::Fp::one(), @@ -83,17 +83,12 @@ impl From> for G2Prepared

{ let negq = -q; - for i in (1..P::ATE_LOOP_COUNT_2.len()).rev() { - ell_coeffs_2.push(doubling_step::

(&mut r)); + for bit in P::ATE_LOOP_COUNT_2.iter().rev().skip(1) { + ell_coeffs_2.push(r.double_in_place()); - let bit = P::ATE_LOOP_COUNT_2[i - 1]; match bit { - 1 => { - ell_coeffs_2.push(addition_step::

(&mut r, &q)); - }, - -1 => { - ell_coeffs_2.push(addition_step::

(&mut r, &negq)); - }, + 1 => ell_coeffs_2.push(r.add_in_place(&q)), + -1 => ell_coeffs_2.push(r.add_in_place(&negq)), _ => continue, } } @@ -106,58 +101,75 @@ impl From> for G2Prepared

<P> {
     }
 }
 
+impl<'a, P: BW6Parameters> From<&'a G2Affine<P>> for G2Prepared<P> {
+    fn from(q: &'a G2Affine<P>) -> Self {
+        (*q).into()
+    }
+}
+
+impl<'a, P: BW6Parameters> From<&'a G2Projective<P>> for G2Prepared<P> {
+    fn from(q: &'a G2Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
+impl<P: BW6Parameters> From<G2Projective<P>> for G2Prepared<P> {
+    fn from(q: G2Projective<P>) -> Self {
+        q.into_affine().into()
+    }
+}
+
 impl<P: BW6Parameters> G2Prepared<P>
{ pub fn is_zero(&self) -> bool { self.infinity } } -fn doubling_step(r: &mut G2HomProjective) -> (B::Fp, B::Fp, B::Fp) { - // Formula for line function when working with - // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf. - - let a = r.x * &r.y; - let b = r.y.square(); - let b4 = b.double().double(); - let c = r.z.square(); - let e = B::G2Parameters::COEFF_B * &(c.double() + &c); - let f = e.double() + &e; - let g = b + &f; - let h = (r.y + &r.z).square() - &(b + &c); - let i = e - &b; - let j = r.x.square(); - let e2_square = e.double().square(); - - r.x = a.double() * &(b - &f); - r.y = g.square() - &(e2_square.double() + &e2_square); - r.z = b4 * &h; - match B::TWIST_TYPE { - TwistType::M => (i, j.double() + &j, -h), - TwistType::D => (-h, j.double() + &j, i), +impl G2HomProjective

{ + fn double_in_place(&mut self) -> (P::Fp, P::Fp, P::Fp) { + // Formula for line function when working with + // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf. + + let a = self.x * &self.y; + let b = self.y.square(); + let b4 = b.double().double(); + let c = self.z.square(); + let e = P::G2Parameters::COEFF_B * &(c.double() + &c); + let f = e.double() + &e; + let g = b + &f; + let h = (self.y + &self.z).square() - &(b + &c); + let i = e - &b; + let j = self.x.square(); + let e2_square = e.double().square(); + + self.x = a.double() * &(b - &f); + self.y = g.square() - &(e2_square.double() + &e2_square); + self.z = b4 * &h; + match P::TWIST_TYPE { + TwistType::M => (i, j.double() + &j, -h), + TwistType::D => (-h, j.double() + &j, i), + } } -} -fn addition_step( - r: &mut G2HomProjective, - q: &G2Affine, -) -> (B::Fp, B::Fp, B::Fp) { - // Formula for line function when working with - // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf. - let theta = r.y - &(q.y * &r.z); - let lambda = r.x - &(q.x * &r.z); - let c = theta.square(); - let d = lambda.square(); - let e = lambda * &d; - let f = r.z * &c; - let g = r.x * &d; - let h = e + &f - &g.double(); - r.x = lambda * &h; - r.y = theta * &(g - &h) - &(e * &r.y); - r.z *= &e; - let j = theta * &q.x - &(lambda * &q.y); - - match B::TWIST_TYPE { - TwistType::M => (j, -theta, lambda), - TwistType::D => (lambda, -theta, j), + fn add_in_place(&mut self, q: &G2Affine

) -> (P::Fp, P::Fp, P::Fp) { + // Formula for line function when working with + // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf. + let theta = self.y - &(q.y * &self.z); + let lambda = self.x - &(q.x * &self.z); + let c = theta.square(); + let d = lambda.square(); + let e = lambda * &d; + let f = self.z * &c; + let g = self.x * &d; + let h = e + &f - &g.double(); + self.x = lambda * &h; + self.y = theta * &(g - &h) - &(e * &self.y); + self.z *= &e; + let j = theta * &q.x - &(lambda * &q.y); + + match P::TWIST_TYPE { + TwistType::M => (j, -theta, lambda), + TwistType::D => (lambda, -theta, j), + } } } diff --git a/ec/src/models/bw6/mod.rs b/ec/src/models/bw6/mod.rs index 469829d50..ea00be9bc 100644 --- a/ec/src/models/bw6/mod.rs +++ b/ec/src/models/bw6/mod.rs @@ -1,6 +1,6 @@ use crate::{ models::{short_weierstrass::SWCurveConfig, CurveConfig}, - PairingEngine, + pairing::{MillerLoopOutput, Pairing, PairingOutput}, }; use ark_ff::{ fields::{ @@ -10,9 +10,13 @@ use ark_ff::{ }, CyclotomicMultSubgroup, }; +use itertools::Itertools; use num_traits::One; -use core::marker::PhantomData; +use ark_std::{marker::PhantomData, vec::Vec}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; pub enum TwistType { M, @@ -73,17 +77,11 @@ impl BW6
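Editorial note: the precomputation loops above push one set of line coefficients per doubling step and one extra set per addition step, and the Miller loop later consumes them in exactly that order with `coeffs.next().unwrap()`. A small hedged sanity-check helper for that invariant, mirroring the first loop's `BitIteratorBE::new(..).skip(1)` schedule (illustrative only):

    use ark_ff::BitIteratorBE;

    // Number of coefficient triples the first precomputation loop produces:
    // one per bit visited, plus one per bit that is set.
    fn expected_ell_coeffs_1(ate_loop_count_1: &[u64]) -> usize {
        let bits: Vec<bool> = BitIteratorBE::new(ate_loop_count_1).skip(1).collect();
        bits.len() + bits.iter().filter(|b| **b).count()
    }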

{ fn exp_by_x(mut f: Fp6) -> Fp6 { f = f.cyclotomic_exp(&P::X); if P::X_IS_NEGATIVE { - f.conjugate(); + f.cyclotomic_inverse_in_place(); } f } - pub fn final_exponentiation(value: &Fp6) -> Fp6 { - let value_inv = value.inverse().unwrap(); - let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv); - Self::final_exponentiation_last_chunk(&value_to_first_chunk) - } - fn final_exponentiation_first_chunk( elt: &Fp6, elt_inv: &Fp6, @@ -92,7 +90,7 @@ impl BW6

{ // elt_q3 = elt^(q^3) let mut elt_q3 = *elt; - elt_q3.conjugate(); + elt_q3.cyclotomic_inverse_in_place(); // elt_q3_over_elt = elt^(q^3-1) let elt_q3_over_elt = elt_q3 * elt_inv; // alpha = elt^((q^3-1) * q) @@ -144,20 +142,20 @@ impl BW6

{ // step 5 let mut f5p_p3 = f5p; - f5p_p3.conjugate(); + f5p_p3.cyclotomic_inverse_in_place(); let result1 = f3p * &f6p * &f5p_p3; // step 6 let result2 = result1.square(); let f4_2p = f4 * &f2p; let mut tmp1_p3 = f0 * &f1 * &f3 * &f4_2p * &f8p; - tmp1_p3.conjugate(); + tmp1_p3.cyclotomic_inverse_in_place(); let result3 = result2 * &f5 * &f0p * &tmp1_p3; // step 7 let result4 = result3.square(); let mut f7_p3 = f7; - f7_p3.conjugate(); + f7_p3.cyclotomic_inverse_in_place(); let result5 = result4 * &f9p * &f7_p3; // step 8 @@ -165,13 +163,13 @@ impl BW6
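Editorial note: every `conjugate()` in this final exponentiation is replaced by `cyclotomic_inverse_in_place()`; for an element of the cyclotomic subgroup (such as anything produced by the easy part) this really is the inverse. A hedged, test-style sketch of that property (the caller must guarantee the precondition; the helper is illustrative):

    use ark_ff::CyclotomicMultSubgroup;
    use ark_std::One;

    /// `f` must lie in the cyclotomic subgroup, e.g. the output of the
    /// easy part of the final exponentiation.
    fn cyclotomic_inverse_is_inverse<F: CyclotomicMultSubgroup>(f: F) -> bool {
        let mut inv = f;
        inv.cyclotomic_inverse_in_place();
        (inv * f).is_one()
    }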

{ let f2_4p = f2 * &f4p; let f4_2p_5p = f4_2p * &f5p; let mut tmp2_p3 = f2_4p * &f3 * &f3p; - tmp2_p3.conjugate(); + tmp2_p3.cyclotomic_inverse_in_place(); let result7 = result6 * &f4_2p_5p * &f6 * &f7p * &tmp2_p3; // step 9 let result8 = result7.square(); let mut tmp3_p3 = f0p * &f9p; - tmp3_p3.conjugate(); + tmp3_p3.cyclotomic_inverse_in_place(); let result9 = result8 * &f0 * &f7 * &f1p * &tmp3_p3; // step 10 @@ -179,7 +177,7 @@ impl BW6

{ let f6p_8p = f6p * &f8p; let f5_7p = f5 * &f7p; let mut tmp4_p3 = f6p_8p; - tmp4_p3.conjugate(); + tmp4_p3.cyclotomic_inverse_in_place(); let result11 = result10 * &f5_7p * &f2p * &tmp4_p3; // step 11 @@ -187,116 +185,119 @@ impl BW6

{ let f3_6 = f3 * &f6; let f1_7 = f1 * &f7; let mut tmp5_p3 = f1_7 * &f2; - tmp5_p3.conjugate(); + tmp5_p3.cyclotomic_inverse_in_place(); let result13 = result12 * &f3_6 * &f9p * &tmp5_p3; // step 12 let result14 = result13.square(); let mut tmp6_p3 = f4_2p * &f5_7p * &f6p_8p; - tmp6_p3.conjugate(); + tmp6_p3.cyclotomic_inverse_in_place(); let result15 = result14 * &f0 * &f0p * &f3p * &f5p * &tmp6_p3; // step 13 let result16 = result15.square(); let mut tmp7_p3 = f3_6; - tmp7_p3.conjugate(); + tmp7_p3.cyclotomic_inverse_in_place(); let result17 = result16 * &f1p * &tmp7_p3; // step 14 let result18 = result17.square(); let mut tmp8_p3 = f2_4p * &f4_2p_5p * &f9p; - tmp8_p3.conjugate(); + tmp8_p3.cyclotomic_inverse_in_place(); let result19 = result18 * &f1_7 * &f5_7p * &f0p * &tmp8_p3; result19 } } -impl PairingEngine for BW6

<P> {
-    type Fr = <P::G1Parameters as CurveConfig>::ScalarField;
-    type G1Projective = G1Projective<P>;
+impl<P: BW6Parameters> Pairing for BW6<P> {
+    type ScalarField = <P::G1Parameters as CurveConfig>::ScalarField;
+    type G1 = G1Projective<P>;
     type G1Affine = G1Affine<P>;
     type G1Prepared = G1Prepared<P>;
-    type G2Projective = G2Projective<P>;
+    type G2 = G2Projective<P>;
     type G2Affine = G2Affine<P>;
     type G2Prepared = G2Prepared<P>
; - type Fq = P::Fp; - type Fqe = P::Fp; - type Fqk = Fp6; - - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - // Alg.5 in https://eprint.iacr.org/2020/351.pdf + type TargetField = Fp6; - let mut pairs_1 = vec![]; - let mut pairs_2 = vec![]; - for (p, q) in i { - if !p.is_zero() && !q.is_zero() { - pairs_1.push((p, q.ell_coeffs_1.iter())); - pairs_2.push((p, q.ell_coeffs_2.iter())); - } - } - - // f_{u+1,Q}(P) - let mut f_1 = Self::Fqk::one(); - - for i in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT_1).skip(1) { - f_1.square_in_place(); + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput { + // Alg.5 in https://eprint.iacr.org/2020/351.pdf - for (p, ref mut coeffs) in &mut pairs_1 { - Self::ell(&mut f_1, coeffs.next().unwrap(), &p.0); - } - if i { - for &mut (p, ref mut coeffs) in &mut pairs_1 { - Self::ell(&mut f_1, coeffs.next().unwrap(), &p.0); + let (mut pairs_1, mut pairs_2) = a + .into_iter() + .zip_eq(b) + .filter_map(|(p, q)| { + let (p, q): (G1Prepared
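Editorial note: `multi_miller_loop` exists so that several pairs share one squaring schedule and a single final exponentiation; that is also why the implementation above splits the pairs into chunks of 4 with `cfg_chunks_mut!` and multiplies the per-chunk accumulators back together. A hedged sketch of the typical product-of-pairings call, generic over any `E: Pairing` and under the same `Into` assumptions as the earlier sketch (illustrative only):

    use ark_ec::pairing::{Pairing, PairingOutput};

    // e(a1, b1) * e(a2, b2) with a single final exponentiation.
    fn product_of_two_pairings<E: Pairing>(
        a1: E::G1Affine, b1: E::G2Affine,
        a2: E::G1Affine, b2: E::G2Affine,
    ) -> PairingOutput<E> {
        let ml = E::multi_miller_loop([a1, a2], [b1, b2]);
        E::final_exponentiation(ml).expect("pairing inputs should be non-degenerate")
    }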

, G2Prepared

) = (p.into(), q.into()); + match !p.is_zero() && !q.is_zero() { + true => Some(( + (p, q.ell_coeffs_1.into_iter()), + (p, q.ell_coeffs_2.into_iter()), + )), + false => None, } - } - } + }) + .unzip::<_, _, Vec<_>, Vec<_>>(); + + let mut f_1 = cfg_chunks_mut!(pairs_1, 4) + .map(|pairs| { + let mut f = Self::TargetField::one(); + for i in BitIteratorBE::without_leading_zeros(P::ATE_LOOP_COUNT_1).skip(1) { + f.square_in_place(); + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } + if i { + for (p, coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } + } + } + f + }) + .product::(); if P::ATE_LOOP_COUNT_1_IS_NEGATIVE { - f_1.conjugate(); + f_1.cyclotomic_inverse_in_place(); } + let mut f_2 = cfg_chunks_mut!(pairs_2, 4) + .map(|pairs| { + let mut f = Self::TargetField::one(); + for i in (1..P::ATE_LOOP_COUNT_2.len()).rev() { + if i != P::ATE_LOOP_COUNT_2.len() - 1 { + f.square_in_place(); + } - // f_{u^2-u^2-u,Q}(P) - let mut f_2 = Self::Fqk::one(); - - for i in (1..P::ATE_LOOP_COUNT_2.len()).rev() { - if i != P::ATE_LOOP_COUNT_2.len() - 1 { - f_2.square_in_place(); - } - - for (p, ref mut coeffs) in &mut pairs_2 { - Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0); - } - - let bit = P::ATE_LOOP_COUNT_2[i - 1]; - match bit { - 1 => { - for &mut (p, ref mut coeffs) in &mut pairs_2 { - Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0); + for (p, ref mut coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); } - }, - -1 => { - for &mut (p, ref mut coeffs) in &mut pairs_2 { - Self::ell(&mut f_2, coeffs.next().unwrap(), &p.0); + + let bit = P::ATE_LOOP_COUNT_2[i - 1]; + if bit == 1 || bit == -1 { + for &mut (p, ref mut coeffs) in pairs.iter_mut() { + Self::ell(&mut f, &coeffs.next().unwrap(), &p.0); + } } - }, - _ => continue, - } - } + } + f + }) + .product::(); if P::ATE_LOOP_COUNT_2_IS_NEGATIVE { - f_2.conjugate(); + f_2.cyclotomic_inverse_in_place(); } f_2.frobenius_map(1); - f_1 * &f_2 + MillerLoopOutput(f_1 * &f_2) } - fn final_exponentiation(f: &Self::Fqk) -> Option { - Some(Self::final_exponentiation(f)) + fn final_exponentiation(f: MillerLoopOutput) -> Option> { + let value = f.0; + let value_inv = value.inverse().unwrap(); + let value_to_first_chunk = Self::final_exponentiation_first_chunk(&value, &value_inv); + Some(Self::final_exponentiation_last_chunk(&value_to_first_chunk)).map(PairingOutput) } } diff --git a/ec/src/models/mnt4/g1.rs b/ec/src/models/mnt4/g1.rs index 55f0fc3ff..a33e75a0a 100644 --- a/ec/src/models/mnt4/g1.rs +++ b/ec/src/models/mnt4/g1.rs @@ -1,7 +1,7 @@ use crate::{ mnt4::MNT4Parameters, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; use ark_ff::Fp2; @@ -40,8 +40,25 @@ impl From> for G1Prepared
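Editorial note: `final_exponentiation` now returns `Option<PairingOutput<Self>>` rather than a bare target-field element; the wrapper (its field is read as `.0` above) keeps exponentiated results distinct from raw Miller-loop values in the type system. A hedged sketch of a verification-style comparison, assuming the wrapper derives `PartialEq` and the same `Into` bounds as in the earlier sketches (illustrative only):

    use ark_ec::pairing::Pairing;

    // Checks e(a, b) == e(c, d) by comparing the wrapped outputs.
    fn same_pairing<E: Pairing>(
        a: E::G1Affine, b: E::G2Affine,
        c: E::G1Affine, d: E::G2Affine,
    ) -> bool {
        let lhs = E::final_exponentiation(E::multi_miller_loop([a], [b]));
        let rhs = E::final_exponentiation(E::multi_miller_loop([c], [d]));
        lhs == rhs
    }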

{ } } +impl<'a, P: MNT4Parameters> From<&'a G1Affine

> for G1Prepared

{ + fn from(g1: &'a G1Affine

) -> Self { + (*g1).into() + } +} + +impl From> for G1Prepared

{ + fn from(g1: G1Projective

) -> Self { + g1.into_affine().into() + } +} +impl<'a, P: MNT4Parameters> From<&'a G1Projective

> for G1Prepared

{ + fn from(g1: &'a G1Projective

) -> Self { + (*g1).into() + } +} + impl Default for G1Prepared

{ fn default() -> Self { - Self::from(G1Affine::

::prime_subgroup_generator()) + Self::from(G1Affine::

::generator()) } } diff --git a/ec/src/models/mnt4/g2.rs b/ec/src/models/mnt4/g2.rs index 17898cbec..d270b7387 100644 --- a/ec/src/models/mnt4/g2.rs +++ b/ec/src/models/mnt4/g2.rs @@ -4,7 +4,7 @@ use crate::{ mnt4::MNT4Parameters, models::mnt4::MNT4, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; use ark_ff::fields::{Field, Fp2}; use ark_std::vec::Vec; @@ -31,7 +31,7 @@ pub struct G2Prepared { impl Default for G2Prepared

{ fn default() -> Self { - Self::from(G2Affine::

::prime_subgroup_generator()) + Self::from(G2Affine::

::generator()) } } @@ -91,6 +91,23 @@ impl From> for G2Prepared

{ } } +impl<'a, P: MNT4Parameters> From<&'a G2Affine

> for G2Prepared

{ + fn from(g2: &'a G2Affine

) -> Self { + (*g2).into() + } +} + +impl From> for G2Prepared

{ + fn from(g2: G2Projective

) -> Self { + g2.into_affine().into() + } +} +impl<'a, P: MNT4Parameters> From<&'a G2Projective

> for G2Prepared

{ + fn from(g2: &'a G2Projective

) -> Self { + (*g2).into() + } +} + pub(super) struct G2ProjectiveExtended { pub(crate) x: Fp2, pub(crate) y: Fp2, diff --git a/ec/src/models/mnt4/mod.rs b/ec/src/models/mnt4/mod.rs index 296f9f265..6c78a840f 100644 --- a/ec/src/models/mnt4/mod.rs +++ b/ec/src/models/mnt4/mod.rs @@ -1,15 +1,19 @@ use crate::{ models::{short_weierstrass::SWCurveConfig, CurveConfig}, - PairingEngine, + pairing::{MillerLoopOutput, Pairing, PairingOutput}, }; use ark_ff::{ fp2::{Fp2, Fp2Config}, fp4::{Fp4, Fp4Config}, CyclotomicMultSubgroup, Field, PrimeField, }; +use itertools::Itertools; use num_traits::{One, Zero}; -use core::marker::PhantomData; +use ark_std::{marker::PhantomData, vec::Vec}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; pub mod g1; pub mod g2; @@ -158,13 +162,6 @@ impl MNT4

{ f } - pub fn final_exponentiation(value: &Fp4) -> GT { - let value_inv = value.inverse().unwrap(); - let value_to_first_chunk = Self::final_exponentiation_first_chunk(value, &value_inv); - let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, value); - Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk) - } - fn final_exponentiation_first_chunk( elt: &Fp4, elt_inv: &Fp4, @@ -173,7 +170,7 @@ impl MNT4

{ // elt_q2 = elt^(q^2) let mut elt_q2 = *elt; - elt_q2.conjugate(); + elt_q2.cyclotomic_inverse_in_place(); // elt_q2_over_elt = elt^(q^2-1) elt_q2 * elt_inv } @@ -199,30 +196,38 @@ impl MNT4

{ } } -impl PairingEngine for MNT4

<P> {
-    type Fr = <P::G1Parameters as CurveConfig>::ScalarField;
-    type G1Projective = G1Projective<P>;
+impl<P: MNT4Parameters> Pairing for MNT4<P> {
+    type ScalarField = <P::G1Parameters as CurveConfig>::ScalarField;
+    type G1 = G1Projective<P>;
     type G1Affine = G1Affine<P>;
     type G1Prepared = G1Prepared<P>;
-    type G2Projective = G2Projective<P>;
+    type G2 = G2Projective<P>;
     type G2Affine = G2Affine<P>;
     type G2Prepared = G2Prepared<P>
; - type Fq = P::Fp; - type Fqe = Fp2; - type Fqk = Fp4; - - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - let mut result = Self::Fqk::one(); - for (p, q) in i { - result *= &Self::ate_miller_loop(p, q); - } - result + type TargetField = Fp4; + + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput { + let pairs = a + .into_iter() + .zip_eq(b) + .map(|(a, b)| (a.into(), b.into())) + .collect::>(); + let result = cfg_into_iter!(pairs) + .map(|(a, b)| Self::ate_miller_loop(&a, &b)) + .product(); + MillerLoopOutput(result) } - fn final_exponentiation(r: &Self::Fqk) -> Option { - Some(Self::final_exponentiation(r)) + fn final_exponentiation(f: MillerLoopOutput) -> Option> { + let value = f.0; + let value_inv = value.inverse().unwrap(); + let value_to_first_chunk = Self::final_exponentiation_first_chunk(&value, &value_inv); + let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, &value); + let result = + Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk); + Some(PairingOutput(result)) } } diff --git a/ec/src/models/mnt6/g1.rs b/ec/src/models/mnt6/g1.rs index bd1aef20c..b3eb20928 100644 --- a/ec/src/models/mnt6/g1.rs +++ b/ec/src/models/mnt6/g1.rs @@ -1,7 +1,7 @@ use crate::{ mnt6::MNT6Parameters, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; use ark_ff::Fp3; @@ -40,8 +40,25 @@ impl From> for G1Prepared
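Editorial note: for MNT4 the Miller loop is computed per pair and the results are multiplied, so the patch parallelizes it with `cfg_into_iter!`, which degrades to a plain iterator when the `parallel` feature (and the new `rayon` import) is disabled. A hedged sketch of that feature-gated pattern written out by hand (`cfg_into_iter!` itself lives in `ark-std`; the helper is illustrative):

    use ark_ff::Field;
    #[cfg(feature = "parallel")]
    use rayon::prelude::*;

    // Product of per-pair Miller-loop values; parallel when the feature is on.
    fn product_of<F: Field>(values: Vec<F>) -> F {
        #[cfg(feature = "parallel")]
        {
            values.into_par_iter().product()
        }
        #[cfg(not(feature = "parallel"))]
        {
            values.into_iter().product()
        }
    }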

{ } } +impl<'a, P: MNT6Parameters> From<&'a G1Affine

> for G1Prepared

{ + fn from(g1: &'a G1Affine

) -> Self { + (*g1).into() + } +} + +impl From> for G1Prepared

{ + fn from(g1: G1Projective

) -> Self { + g1.into_affine().into() + } +} +impl<'a, P: MNT6Parameters> From<&'a G1Projective

> for G1Prepared

{ + fn from(g1: &'a G1Projective

) -> Self { + (*g1).into() + } +} + impl Default for G1Prepared

{ fn default() -> Self { - Self::from(G1Affine::

::prime_subgroup_generator()) + Self::from(G1Affine::

::generator()) } } diff --git a/ec/src/models/mnt6/g2.rs b/ec/src/models/mnt6/g2.rs index d4bc50eac..b7a586a75 100644 --- a/ec/src/models/mnt6/g2.rs +++ b/ec/src/models/mnt6/g2.rs @@ -4,7 +4,7 @@ use crate::{ mnt6::MNT6Parameters, models::mnt6::MNT6, short_weierstrass::{Affine, Projective}, - AffineCurve, + AffineRepr, CurveGroup, }; use ark_ff::fields::{Field, Fp3}; use ark_std::vec::Vec; @@ -31,7 +31,7 @@ pub struct G2Prepared { impl Default for G2Prepared

{ fn default() -> Self { - Self::from(G2Affine::

::prime_subgroup_generator()) + Self::from(G2Affine::

::generator()) } } @@ -88,6 +88,23 @@ impl From> for G2Prepared

{ } } +impl<'a, P: MNT6Parameters> From<&'a G2Affine

> for G2Prepared

{ + fn from(g2: &'a G2Affine

) -> Self { + (*g2).into() + } +} + +impl From> for G2Prepared

{ + fn from(g2: G2Projective

) -> Self { + g2.into_affine().into() + } +} +impl<'a, P: MNT6Parameters> From<&'a G2Projective

> for G2Prepared

{ + fn from(g2: &'a G2Projective

) -> Self { + (*g2).into() + } +} + pub(super) struct G2ProjectiveExtended { pub(crate) x: Fp3, pub(crate) y: Fp3, diff --git a/ec/src/models/mnt6/mod.rs b/ec/src/models/mnt6/mod.rs index a79cd0ee3..aec8ca89a 100644 --- a/ec/src/models/mnt6/mod.rs +++ b/ec/src/models/mnt6/mod.rs @@ -1,15 +1,19 @@ use crate::{ models::{short_weierstrass::SWCurveConfig, CurveConfig}, - PairingEngine, + pairing::{MillerLoopOutput, Pairing, PairingOutput}, }; use ark_ff::{ fp3::{Fp3, Fp3Config}, fp6_2over3::{Fp6, Fp6Config}, CyclotomicMultSubgroup, Field, PrimeField, }; +use itertools::Itertools; use num_traits::{One, Zero}; -use core::marker::PhantomData; +use ark_std::{marker::PhantomData, vec::Vec}; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; pub mod g1; pub mod g2; @@ -174,7 +178,7 @@ impl MNT6

{ // elt_q3 = elt^(q^3) let mut elt_q3 = *elt; - elt_q3.conjugate(); + elt_q3.cyclotomic_inverse_in_place(); // elt_q3_over_elt = elt^(q^3-1) let elt_q3_over_elt = elt_q3 * elt_inv; // alpha = elt^((q^3-1) * q) @@ -205,30 +209,38 @@ impl MNT6

{ } } -impl PairingEngine for MNT6

<P> {
-    type Fr = <P::G1Parameters as CurveConfig>::ScalarField;
-    type G1Projective = G1Projective<P>;
+impl<P: MNT6Parameters> Pairing for MNT6<P> {
+    type ScalarField = <P::G1Parameters as CurveConfig>::ScalarField;
+    type G1 = G1Projective<P>;
     type G1Affine = G1Affine<P>;
     type G1Prepared = G1Prepared<P>;
-    type G2Projective = G2Projective<P>;
+    type G2 = G2Projective<P>;
     type G2Affine = G2Affine<P>;
     type G2Prepared = G2Prepared<P>
; - type Fq = P::Fp; - type Fqe = Fp3; - type Fqk = Fp6; - - fn miller_loop<'a, I>(i: I) -> Self::Fqk - where - I: IntoIterator, - { - let mut result = Self::Fqk::one(); - for (p, q) in i { - result *= &Self::ate_miller_loop(p, q); - } - result + type TargetField = Fp6; + + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput { + let pairs = a + .into_iter() + .zip_eq(b) + .map(|(a, b)| (a.into(), b.into())) + .collect::>(); + let result = cfg_into_iter!(pairs) + .map(|(a, b)| Self::ate_miller_loop(&a, &b)) + .product(); + MillerLoopOutput(result) } - fn final_exponentiation(r: &Self::Fqk) -> Option { - Some(Self::final_exponentiation(r)) + fn final_exponentiation(f: MillerLoopOutput) -> Option> { + let value = f.0; + let value_inv = value.inverse().unwrap(); + let value_to_first_chunk = Self::final_exponentiation_first_chunk(&value, &value_inv); + let value_inv_to_first_chunk = Self::final_exponentiation_first_chunk(&value_inv, &value); + let result = + Self::final_exponentiation_last_chunk(&value_to_first_chunk, &value_inv_to_first_chunk); + Some(PairingOutput(result)) } } diff --git a/ec/src/models/mod.rs b/ec/src/models/mod.rs index c44ce75a5..1ec3c2d61 100644 --- a/ec/src/models/mod.rs +++ b/ec/src/models/mod.rs @@ -5,6 +5,7 @@ pub mod bn; pub mod bw6; pub mod mnt4; pub mod mnt6; + pub mod short_weierstrass; pub mod twisted_edwards; @@ -21,6 +22,11 @@ pub trait CurveConfig: Send + Sync + Sized + 'static { /// of the curve group. type ScalarField: PrimeField + Into<::BigInt>; + /// The cofactor of this curve, represented as a sequence of little-endian limbs. const COFACTOR: &'static [u64]; const COFACTOR_INV: Self::ScalarField; + + fn cofactor_is_one() -> bool { + Self::COFACTOR[0] == 1 && Self::COFACTOR.iter().skip(1).all(|&e| e == 0) + } } diff --git a/ec/src/models/short_weierstrass.rs b/ec/src/models/short_weierstrass.rs deleted file mode 100644 index 11af2e28b..000000000 --- a/ec/src/models/short_weierstrass.rs +++ /dev/null @@ -1,1029 +0,0 @@ -use ark_serialize::{ - CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, - CanonicalSerializeWithFlags, SWFlags, SerializationError, -}; -use ark_std::{ - borrow::Borrow, - fmt::{Display, Formatter, Result as FmtResult}, - hash::{Hash, Hasher}, - io::{Read, Write}, - ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - vec::Vec, -}; - -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; - -use crate::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; - -use num_traits::{One, Zero}; -use zeroize::Zeroize; - -use ark_std::rand::{ - distributions::{Distribution, Standard}, - Rng, -}; - -#[cfg(feature = "parallel")] -use rayon::prelude::*; - -/// Constants and convenience functions that collectively define the [Short Weierstrass model](https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html) -/// of the curve. In this model, the curve equation is `y² = x³ + a * x + b`, -/// for constants `a` and `b`. -pub trait SWCurveConfig: super::CurveConfig { - /// Coefficient `a` of the curve equation. - const COEFF_A: Self::BaseField; - /// Coefficient `b` of the curve equation. - const COEFF_B: Self::BaseField; - /// Generator of the prime-order subgroup. - const GENERATOR: Affine; - - /// Helper method for computing `elem * Self::COEFF_A`. - /// - /// The default implementation should be overridden only if - /// the product can be computed faster than standard field multiplication - /// (eg: via doubling if `COEFF_A == 2`, or if `COEFF_A.is_zero()`). 
- #[inline(always)] - fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { - let mut copy = *elem; - copy *= &Self::COEFF_A; - copy - } - - /// Helper method for computing `elem + Self::COEFF_B`. - /// - /// The default implementation should be overridden only if - /// the sum can be computed faster than standard field addition (eg: via - /// doubling). - #[inline(always)] - fn add_b(elem: &Self::BaseField) -> Self::BaseField { - if !Self::COEFF_B.is_zero() { - let mut copy = *elem; - copy += &Self::COEFF_B; - return copy; - } - *elem - } - - /// Check if the provided curve point is in the prime-order subgroup. - /// - /// The default implementation multiplies `item` by the order `r` of the - /// prime-order subgroup, and checks if the result is one. - /// Implementors can choose to override this default impl - /// if the given curve has faster methods - /// for performing this check (for example, via leveraging curve - /// isomorphisms). - fn is_in_correct_subgroup_assuming_on_curve(item: &Affine) -> bool { - Self::mul_affine(item, Self::ScalarField::characteristic()).is_zero() - } - - /// Performs cofactor clearing. - /// The default method is simply to multiply by the cofactor. - /// Some curves can implement a more efficient algorithm. - fn clear_cofactor(item: &Affine) -> Affine { - item.mul_by_cofactor() - } - - /// Default implementation of group multiplication for projective - /// coordinates - fn mul_projective(base: &Projective, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res += base; - } - } - - res - } - - /// Default implementation of group multiplication for affine - /// coordinates. - fn mul_affine(base: &Affine, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res.add_assign_mixed(base) - } - } - - res - } -} - -/// Affine coordinates for a point on an elliptic curve in short Weierstrass -/// form, over the base field `P::BaseField`. -#[derive(Derivative)] -#[derivative( - Copy(bound = "P: SWCurveConfig"), - Clone(bound = "P: SWCurveConfig"), - PartialEq(bound = "P: SWCurveConfig"), - Eq(bound = "P: SWCurveConfig"), - Debug(bound = "P: SWCurveConfig"), - Hash(bound = "P: SWCurveConfig") -)] -#[must_use] -pub struct Affine { - #[doc(hidden)] - pub x: P::BaseField, - #[doc(hidden)] - pub y: P::BaseField, - #[doc(hidden)] - pub infinity: bool, -} - -impl PartialEq> for Affine
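Editorial note: the `cofactor_is_one` default added to `CurveConfig` in the `ec/src/models/mod.rs` hunk above treats `COFACTOR` as little-endian `u64` limbs: the cofactor equals one exactly when the first limb is 1 and every remaining limb is 0. A hedged standalone sketch of the same check (free function and test values are illustrative, not part of the patch):

    /// Mirrors the limb check in `CurveConfig::cofactor_is_one`.
    fn cofactor_is_one(limbs: &[u64]) -> bool {
        limbs.first() == Some(&1) && limbs.iter().skip(1).all(|&e| e == 0)
    }

    #[test]
    fn cofactor_is_one_examples() {
        assert!(cofactor_is_one(&[1]));
        assert!(cofactor_is_one(&[1, 0, 0]));
        assert!(!cofactor_is_one(&[4]));    // cofactor 4
        assert!(!cofactor_is_one(&[0, 1])); // cofactor 2^64
    }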

{ - fn eq(&self, other: &Projective

) -> bool { - self.into_projective() == *other - } -} - -impl PartialEq> for Projective

{ - fn eq(&self, other: &Affine

) -> bool { - *self == other.into_projective() - } -} - -impl Display for Affine

{ - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - match self.infinity { - true => write!(f, "infinity"), - false => write!(f, "({}, {})", self.x, self.y), - } - } -} - -impl Affine

{ - /// Constructs a group element from x and y coordinates. - /// Performs checks to ensure that the point is on the curve and is in the right subgroup. - pub fn new(x: P::BaseField, y: P::BaseField) -> Self { - let point = Self { - x, - y, - infinity: false, - }; - assert!(point.is_on_curve()); - assert!(point.is_in_correct_subgroup_assuming_on_curve()); - point - } - - /// Constructs a group element from x and y coordinates. - /// - /// # Warning - /// - /// Does *not* perform any checks to ensure the point is in the curve or - /// is in the right subgroup. - pub const fn new_unchecked(x: P::BaseField, y: P::BaseField) -> Self { - Self { - x, - y, - infinity: false, - } - } - - pub const fn identity() -> Self { - Self { - x: P::BaseField::ZERO, - y: P::BaseField::ZERO, - infinity: true, - } - } - - /// Attempts to construct an affine point given an x-coordinate. The - /// point is not guaranteed to be in the prime order subgroup. - /// - /// If and only if `greatest` is set will the lexicographically - /// largest y-coordinate be selected. - #[allow(dead_code)] - pub fn get_point_from_x(x: P::BaseField, greatest: bool) -> Option { - // Compute x^3 + ax + b - // Rust does not optimise away addition with zero - let x3b = if P::COEFF_A.is_zero() { - P::add_b(&(x.square() * &x)) - } else { - P::add_b(&((x.square() * &x) + &P::mul_by_a(&x))) - }; - - x3b.sqrt().map(|y| { - let negy = -y; - - let y = if (y < negy) ^ greatest { y } else { negy }; - Self::new_unchecked(x, y) - }) - } - - /// Checks if `self` is a valid point on the curve. - pub fn is_on_curve(&self) -> bool { - if !self.infinity { - // Rust does not optimise away addition with zero - let mut x3b = P::add_b(&(self.x.square() * self.x)); - if !P::COEFF_A.is_zero() { - x3b += &P::mul_by_a(&self.x); - }; - self.y.square() == x3b - } else { - true - } - } -} - -impl Affine

{ - /// Checks if `self` is in the subgroup having order that equaling that of - /// `P::ScalarField`. - // DISCUSS Maybe these function names are too verbose? - pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool { - P::is_in_correct_subgroup_assuming_on_curve(self) - } -} - -impl Zeroize for Affine

{ - // The phantom data does not contain element-specific data - // and thus does not need to be zeroized. - fn zeroize(&mut self) { - self.x.zeroize(); - self.y.zeroize(); - self.infinity.zeroize(); - } -} - -impl Zero for Affine

{ - /// Returns the point at infinity. Note that in affine coordinates, - /// the point at infinity does not lie on the curve, and this is indicated - /// by setting the `infinity` flag to true. - #[inline] - fn zero() -> Self { - Self::identity() - } - - /// Checks if `self` is the point at infinity. - #[inline] - fn is_zero(&self) -> bool { - self == &Self::zero() - } -} - -impl Add for Affine

{ - type Output = Self; - fn add(self, other: Self) -> Self { - let mut copy = self; - copy += &other; - copy - } -} - -impl<'a, P: SWCurveConfig> AddAssign<&'a Self> for Affine

{ - fn add_assign(&mut self, other: &'a Self) { - let mut s_proj = Projective::from(*self); - s_proj.add_assign_mixed(other); - *self = s_proj.into(); - } -} - -impl Distribution> for Standard { - #[inline] - fn sample(&self, rng: &mut R) -> Affine

{ - loop { - let x = P::BaseField::rand(rng); - let greatest = rng.gen(); - - if let Some(p) = Affine::get_point_from_x(x, greatest) { - return p.mul_by_cofactor(); - } - } - } -} - -impl AffineCurve for Affine

{ - type Config = P; - type BaseField = P::BaseField; - type ScalarField = P::ScalarField; - type Projective = Projective

; - - fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { - (!self.infinity).then(|| (&self.x, &self.y)) - } - - #[inline] - fn prime_subgroup_generator() -> Self { - P::GENERATOR - } - - fn from_random_bytes(bytes: &[u8]) -> Option { - P::BaseField::from_random_bytes_with_flags::(bytes).and_then(|(x, flags)| { - // if x is valid and is zero and only the infinity flag is set, then parse this - // point as infinity. For all other choices, get the original point. - if x.is_zero() && flags.is_infinity() { - Some(Self::zero()) - } else if let Some(y_is_positive) = flags.is_positive() { - Self::get_point_from_x(x, y_is_positive) - // Unwrap is safe because it's not zero. - } else { - None - } - }) - } - - fn mul_bigint>(&self, by: S) -> Self::Projective { - P::mul_affine(self, by.as_ref()) - } - - /// Multiplies this element by the cofactor and output the - /// resulting projective element. - #[must_use] - fn mul_by_cofactor_to_projective(&self) -> Self::Projective { - P::mul_affine(self, Self::Config::COFACTOR) - } - - /// Performs cofactor clearing. - /// The default method is simply to multiply by the cofactor. - /// Some curves can implement a more efficient algorithm. - fn clear_cofactor(&self) -> Self { - P::clear_cofactor(self) - } -} - -impl Neg for Affine

{ - type Output = Self; - - /// If `self.is_zero()`, returns `self` (`== Self::zero()`). - /// Else, returns `(x, -y)`, where `self = (x, y)`. - #[inline] - fn neg(mut self) -> Self { - self.y = -self.y; - self - } -} - -impl Default for Affine

{ - #[inline] - fn default() -> Self { - Self::zero() - } -} - -impl core::iter::Sum for Affine

{ - fn sum>(iter: I) -> Self { - iter.fold(Projective::

::zero(), |sum, x| sum.add_mixed(&x)) - .into() - } -} - -impl<'a, P: SWCurveConfig> core::iter::Sum<&'a Self> for Affine

{ - fn sum>(iter: I) -> Self { - iter.fold(Projective::

::zero(), |sum, x| sum.add_mixed(x)) - .into() - } -} - -impl<'a, P: SWCurveConfig, T: Borrow> Mul for Affine

{ - type Output = Projective

; - - #[inline] - fn mul(self, other: T) -> Self::Output { - self.mul_bigint(other.borrow().into_bigint()) - } -} - -/// Jacobian coordinates for a point on an elliptic curve in short Weierstrass -/// form, over the base field `P::BaseField`. This struct implements arithmetic -/// via the Jacobian formulae -#[derive(Derivative)] -#[derivative( - Copy(bound = "P: SWCurveConfig"), - Clone(bound = "P: SWCurveConfig"), - Debug(bound = "P: SWCurveConfig") -)] -#[must_use] -pub struct Projective { - /// `X / Z` projection of the affine `X` - pub x: P::BaseField, - /// `Y / Z` projection of the affine `Y` - pub y: P::BaseField, - /// Projective multiplicative inverse. Will be `0` only at infinity. - pub z: P::BaseField, -} - -impl Display for Projective

{ - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - write!(f, "{}", Affine::from(*self)) - } -} - -impl Eq for Projective

{} -impl PartialEq for Projective

{ - fn eq(&self, other: &Self) -> bool { - if self.is_zero() { - return other.is_zero(); - } - - if other.is_zero() { - return false; - } - - // The points (X, Y, Z) and (X', Y', Z') - // are equal when (X * Z^2) = (X' * Z'^2) - // and (Y * Z^3) = (Y' * Z'^3). - let z1z1 = self.z.square(); - let z2z2 = other.z.square(); - - if self.x * &z2z2 != other.x * &z1z1 { - false - } else { - self.y * &(z2z2 * &other.z) == other.y * &(z1z1 * &self.z) - } - } -} - -impl Hash for Projective

{ - fn hash(&self, state: &mut H) { - self.into_affine().hash(state) - } -} - -impl Distribution> for Standard { - #[inline] - fn sample(&self, rng: &mut R) -> Projective

{ - loop { - let x = P::BaseField::rand(rng); - let greatest = rng.gen(); - - if let Some(p) = Affine::get_point_from_x(x, greatest) { - return p.mul_by_cofactor_to_projective(); - } - } - } -} - -impl Default for Projective

{ - #[inline] - fn default() -> Self { - Self::zero() - } -} - -impl Projective

{ - /// Construct a new group element without checking whether the coordinates - /// specify a point in the subgroup. - pub const fn new_unchecked(x: P::BaseField, y: P::BaseField, z: P::BaseField) -> Self { - Self { x, y, z } - } - - /// Construct a new group element in a way while enforcing that points are in - /// the prime-order subgroup. - pub fn new(x: P::BaseField, y: P::BaseField, z: P::BaseField) -> Self { - let p = Self::new_unchecked(x, y, z).into_affine(); - assert!(p.is_on_curve()); - assert!(p.is_in_correct_subgroup_assuming_on_curve()); - p.into() - } -} - -impl Zeroize for Projective

{ - fn zeroize(&mut self) { - self.x.zeroize(); - self.y.zeroize(); - self.z.zeroize(); - } -} - -impl Zero for Projective

{ - /// Returns the point at infinity, which always has Z = 0. - #[inline] - fn zero() -> Self { - Self::new_unchecked( - P::BaseField::one(), - P::BaseField::one(), - P::BaseField::zero(), - ) - } - - /// Checks whether `self.z.is_zero()`. - #[inline] - fn is_zero(&self) -> bool { - self.z.is_zero() - } -} - -impl ProjectiveCurve for Projective

{ - type Config = P; - type BaseField = P::BaseField; - type ScalarField = P::ScalarField; - type Affine = Affine

; - - #[inline] - fn prime_subgroup_generator() -> Self { - Affine::prime_subgroup_generator().into() - } - - #[inline] - fn is_normalized(&self) -> bool { - self.is_zero() || self.z.is_one() - } - - /// Normalizes a slice of projective elements so that - /// conversion to affine is cheap. - /// - /// In more detail, this method converts a curve point in Jacobian - /// coordinates (x, y, z) into an equivalent representation (x/z^2, - /// y/z^3, 1). - /// - /// For `N = v.len()`, this costs 1 inversion + 6N field multiplications + N - /// field squarings. - /// - /// (Where batch inversion comprises 3N field multiplications + 1 inversion - /// of these operations) - #[inline] - fn batch_normalization(v: &mut [Self]) { - let mut z_s = v.iter().map(|g| g.z).collect::>(); - ark_ff::batch_inversion(&mut z_s); - - // Perform affine transformations - ark_std::cfg_iter_mut!(v) - .zip(z_s) - .filter(|(g, _)| !g.is_normalized()) - .for_each(|(g, z)| { - let z2 = z.square(); // 1/z - g.x *= &z2; // x/z^2 - g.y *= &(z2 * &z); // y/z^3 - g.z = P::BaseField::one(); // z = 1 - }); - } - - /// Sets `self = 2 * self`. Note that Jacobian formulae are incomplete, and - /// so doubling cannot be computed as `self + self`. Instead, this - /// implementation uses the following specialized doubling formulae: - /// * [`P::A` is zero](http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l) - /// * [`P::A` is not zero](https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl) - fn double_in_place(&mut self) -> &mut Self { - if self.is_zero() { - return self; - } - - if P::COEFF_A.is_zero() { - // A = X1^2 - let mut a = self.x.square(); - - // B = Y1^2 - let b = self.y.square(); - - // C = B^2 - let mut c = b.square(); - - // D = 2*((X1+B)2-A-C) - let d = ((self.x + &b).square() - &a - &c).double(); - - // E = 3*A - let e = a + &*a.double_in_place(); - - // F = E^2 - let f = e.square(); - - // Z3 = 2*Y1*Z1 - self.z *= &self.y; - self.z.double_in_place(); - - // X3 = F-2*D - self.x = f - &d.double(); - - // Y3 = E*(D-X3)-8*C - self.y = (d - &self.x) * &e - &*c.double_in_place().double_in_place().double_in_place(); - self - } else { - // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - // XX = X1^2 - let xx = self.x.square(); - - // YY = Y1^2 - let yy = self.y.square(); - - // YYYY = YY^2 - let mut yyyy = yy.square(); - - // ZZ = Z1^2 - let zz = self.z.square(); - - // S = 2*((X1+YY)^2-XX-YYYY) - let s = ((self.x + &yy).square() - &xx - &yyyy).double(); - - // M = 3*XX+a*ZZ^2 - let m = xx + xx.double() + P::mul_by_a(&zz.square()); - - // T = M^2-2*S - let t = m.square() - &s.double(); - - // X3 = T - self.x = t; - // Y3 = M*(S-T)-8*YYYY - let old_y = self.y; - self.y = m * &(s - &t) - &*yyyy.double_in_place().double_in_place().double_in_place(); - // Z3 = (Y1+Z1)^2-YY-ZZ - self.z = (old_y + &self.z).square() - &yy - &zz; - self - } - } - - /// When `other.is_normalized()` (i.e., `other.z == 1`), we can use a more - /// efficient [formula](http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl) - /// to compute `self + other`. - fn add_assign_mixed(&mut self, other: &Affine
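Editorial note: `batch_normalization` above pays for a single field inversion across the whole slice by calling `ark_ff::batch_inversion`, i.e. the Montgomery batch-inversion trick: accumulate prefix products, invert the running total once, then walk backwards peeling off the individual inverses. A hedged sketch of that trick in isolation (illustrative; the real implementation lives in `ark-ff`):

    use ark_ff::Field;

    /// Inverts every nonzero element of `v` using one field inversion.
    fn batch_inverse<F: Field>(v: &mut [F]) {
        // prefix[i] = product of the first i+1 nonzero elements.
        let mut prefix = Vec::with_capacity(v.len());
        let mut acc = F::one();
        for x in v.iter().filter(|x| !x.is_zero()) {
            acc *= x;
            prefix.push(acc);
        }
        // One inversion of the running product.
        let mut inv = acc.inverse().expect("product of nonzero elements is nonzero");
        // Walk backwards, recovering each individual inverse.
        for x in v.iter_mut().rev().filter(|x| !x.is_zero()) {
            prefix.pop();
            let before = prefix.last().copied().unwrap_or_else(F::one);
            let new_x = inv * before; // = 1 / x
            inv *= *x;                // drop x from the running inverse
            *x = new_x;
        }
    }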

) { - match other.is_zero() { - true => {}, - false => { - if self.is_zero() { - self.x = other.x; - self.y = other.y; - self.z = P::BaseField::one(); - return; - } - - // Z1Z1 = Z1^2 - let z1z1 = self.z.square(); - - // U2 = X2*Z1Z1 - let u2 = z1z1 * other.x; - - // S2 = Y2*Z1*Z1Z1 - let s2 = (self.z * other.y) * &z1z1; - - if self.x == u2 && self.y == s2 { - // The two points are equal, so we double. - self.double_in_place(); - } else { - // If we're adding -a and a together, self.z becomes zero as H becomes zero. - - // H = U2-X1 - let h = u2 - &self.x; - - // HH = H^2 - let hh = h.square(); - - // I = 4*HH - let mut i = hh; - i.double_in_place().double_in_place(); - - // J = H*I - let j = h * &i; - - // r = 2*(S2-Y1) - let r = (s2 - &self.y).double(); - - // V = X1*I - let v = self.x * &i; - - // X3 = r^2 - J - 2*V - self.x = r.square(); - self.x -= &j; - self.x -= &v.double(); - - // Y3 = r*(V-X3)-2*Y1*J - self.y = - P::BaseField::sum_of_products(&[r, -self.y.double()], &[(v - &self.x), j]); - - // Z3 = (Z1+H)^2-Z1Z1-HH - self.z += &h; - self.z.square_in_place(); - self.z -= &z1z1; - self.z -= &hh; - } - }, - } - } - - #[inline] - fn mul_bigint>(self, other: S) -> Self { - P::mul_projective(&self, other.as_ref()) - } -} - -impl Neg for Projective

{ - type Output = Self; - - #[inline] - fn neg(mut self) -> Self { - self.y = -self.y; - self - } -} - -ark_ff::impl_additive_ops_from_ref!(Projective, SWCurveConfig); - -impl<'a, P: SWCurveConfig> Add<&'a Self> for Projective

{ - type Output = Self; - - #[inline] - fn add(mut self, other: &'a Self) -> Self { - self += other; - self - } -} - -impl<'a, P: SWCurveConfig> AddAssign<&'a Self> for Projective

{ - fn add_assign(&mut self, other: &'a Self) { - if self.is_zero() { - *self = *other; - return; - } - - if other.is_zero() { - return; - } - - // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl - // Works for all curves. - - // Z1Z1 = Z1^2 - let z1z1 = self.z.square(); - - // Z2Z2 = Z2^2 - let z2z2 = other.z.square(); - - // U1 = X1*Z2Z2 - let u1 = self.x * &z2z2; - - // U2 = X2*Z1Z1 - let u2 = other.x * &z1z1; - - // S1 = Y1*Z2*Z2Z2 - let s1 = self.y * &other.z * &z2z2; - - // S2 = Y2*Z1*Z1Z1 - let s2 = other.y * &self.z * &z1z1; - - if u1 == u2 && s1 == s2 { - // The two points are equal, so we double. - self.double_in_place(); - } else { - // If we're adding -a and a together, self.z becomes zero as H becomes zero. - - // H = U2-U1 - let h = u2 - &u1; - - // I = (2*H)^2 - let i = (h.double()).square(); - - // J = H*I - let j = h * &i; - - // r = 2*(S2-S1) - let r = (s2 - &s1).double(); - - // V = U1*I - let v = u1 * &i; - - // X3 = r^2 - J - 2*V - self.x = r.square() - &j - &(v.double()); - - // Y3 = r*(V - X3) - 2*S1*J - self.y = P::BaseField::sum_of_products(&[r, -s1.double()], &[(v - &self.x), j]); - - // Z3 = ((Z1+Z2)^2 - Z1Z1 - Z2Z2)*H - self.z = ((self.z + &other.z).square() - &z1z1 - &z2z2) * &h; - } - } -} - -impl<'a, P: SWCurveConfig> Sub<&'a Self> for Projective

{ - type Output = Self; - - #[inline] - fn sub(mut self, other: &'a Self) -> Self { - self -= other; - self - } -} - -impl<'a, P: SWCurveConfig> SubAssign<&'a Self> for Projective

{ - fn sub_assign(&mut self, other: &'a Self) { - *self += &(-(*other)); - } -} - -impl> MulAssign for Projective

{ - fn mul_assign(&mut self, other: T) { - *self = self.mul_bigint(other.borrow().into_bigint()) - } -} - -impl<'a, P: SWCurveConfig, T: Borrow> Mul for Projective

{ - type Output = Self; - - #[inline] - fn mul(mut self, other: T) -> Self { - self *= other; - self - } -} - -// The affine point X, Y is represented in the Jacobian -// coordinates with Z = 1. -impl From> for Projective

{ - #[inline] - fn from(p: Affine

) -> Projective

{ - match p.infinity { - true => Self::zero(), - false => Self { - x: p.x, - y: p.y, - z: P::BaseField::one(), - }, - } - } -} - -// The projective point X, Y, Z is represented in the affine -// coordinates as X/Z^2, Y/Z^3. -impl From> for Affine

{ - #[inline] - fn from(p: Projective

) -> Affine

{ - if p.is_zero() { - Affine::zero() - } else if p.z.is_one() { - // If Z is one, the point is already normalized. - Affine::new_unchecked(p.x, p.y) - } else { - // Z is nonzero, so it must have an inverse in a field. - let zinv = p.z.inverse().unwrap(); - let zinv_squared = zinv.square(); - - // X/Z^2 - let x = p.x * &zinv_squared; - - // Y/Z^3 - let y = p.y * &(zinv_squared * &zinv); - - Affine::new_unchecked(x, y) - } - } -} - -impl CanonicalSerialize for Affine

{ - #[allow(unused_qualifications)] - #[inline] - fn serialize(&self, writer: W) -> Result<(), SerializationError> { - let (x, flags) = match self.infinity { - true => (P::BaseField::zero(), SWFlags::infinity()), - false => (self.x, SWFlags::from_y_sign(self.y > -self.y)), - }; - x.serialize_with_flags(writer, flags) - } - - #[inline] - fn serialized_size(&self) -> usize { - P::BaseField::zero().serialized_size_with_flags::() - } - - #[allow(unused_qualifications)] - #[inline] - fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { - let (x, y, flags) = match self.infinity { - true => ( - P::BaseField::zero(), - P::BaseField::zero(), - SWFlags::infinity(), - ), - false => (self.x, self.y, SWFlags::from_y_sign(self.y > -self.y)), - }; - x.serialize(&mut writer)?; - y.serialize_with_flags(&mut writer, flags)?; - Ok(()) - } - - #[inline] - fn uncompressed_size(&self) -> usize { - // The size of the serialization is independent of the values - // of `x` and `y`, and depends only on the size of the modulus. - P::BaseField::zero().serialized_size() - + P::BaseField::zero().serialized_size_with_flags::() - } -} - -impl CanonicalSerialize for Projective

{ - #[allow(unused_qualifications)] - #[inline] - fn serialize(&self, writer: W) -> Result<(), SerializationError> { - let aff = Affine::

::from(*self); - aff.serialize(writer) - } - - #[inline] - fn serialized_size(&self) -> usize { - let aff = Affine::

::from(*self); - aff.serialized_size() - } - - #[allow(unused_qualifications)] - #[inline] - fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { - let aff = Affine::

::from(*self); - aff.serialize_uncompressed(writer) - } - - #[inline] - fn uncompressed_size(&self) -> usize { - let aff = Affine::

::from(*self); - aff.uncompressed_size() - } -} - -impl CanonicalDeserialize for Affine

{ - #[allow(unused_qualifications)] - fn deserialize(reader: R) -> Result { - let (x, flags): (P::BaseField, SWFlags) = - CanonicalDeserializeWithFlags::deserialize_with_flags(reader)?; - if flags.is_infinity() { - Ok(Self::zero()) - } else { - let p = Affine::

::get_point_from_x(x, flags.is_positive().unwrap()) - .ok_or(SerializationError::InvalidData)?; - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - } - - #[allow(unused_qualifications)] - fn deserialize_uncompressed( - reader: R, - ) -> Result { - let p = Self::deserialize_unchecked(reader)?; - - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - - #[allow(unused_qualifications)] - fn deserialize_unchecked(mut reader: R) -> Result { - let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; - let (y, flags): (P::BaseField, SWFlags) = - CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; - match flags.is_infinity() { - true => Ok(Self::zero()), - false => Ok(Self::new_unchecked(x, y)), - } - } -} - -impl CanonicalDeserialize for Projective

{ - #[allow(unused_qualifications)] - fn deserialize(reader: R) -> Result { - let aff = Affine::

::deserialize(reader)?; - Ok(aff.into()) - } - - #[allow(unused_qualifications)] - fn deserialize_uncompressed(reader: R) -> Result { - let aff = Affine::

::deserialize_uncompressed(reader)?; - Ok(aff.into()) - } - - #[allow(unused_qualifications)] - fn deserialize_unchecked(reader: R) -> Result { - let aff = Affine::

::deserialize_unchecked(reader)?; - Ok(aff.into()) - } -} - -impl ToConstraintField for Affine -where - M::BaseField: ToConstraintField, -{ - #[inline] - fn to_field_elements(&self) -> Option> { - let mut x = self.x.to_field_elements()?; - let y = self.y.to_field_elements()?; - let infinity = self.infinity.to_field_elements()?; - x.extend_from_slice(&y); - x.extend_from_slice(&infinity); - Some(x) - } -} - -impl ToConstraintField for Projective -where - M::BaseField: ToConstraintField, -{ - #[inline] - fn to_field_elements(&self) -> Option> { - Affine::from(*self).to_field_elements() - } -} - -impl VariableBaseMSM for Projective

{ - type MSMBase = Affine

; - - type Scalar = ::ScalarField; - - #[inline] - fn _double_in_place(&mut self) -> &mut Self { - self.double_in_place() - } - - #[inline] - fn _add_assign_mixed(&mut self, other: &Self::MSMBase) { - self.add_assign_mixed(other) - } -} diff --git a/ec/src/models/short_weierstrass/affine.rs b/ec/src/models/short_weierstrass/affine.rs new file mode 100644 index 000000000..ca1ed994d --- /dev/null +++ b/ec/src/models/short_weierstrass/affine.rs @@ -0,0 +1,415 @@ +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, SWFlags, SerializationError, +}; +use ark_std::{ + borrow::Borrow, + fmt::{Debug, Display, Formatter, Result as FmtResult}, + io::{Read, Write}, + ops::{Add, Mul, Neg, Sub}, + rand::{ + distributions::{Distribution, Standard}, + Rng, + }, + vec::Vec, + One, Zero, +}; + +use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; + +use zeroize::Zeroize; + +use super::{Projective, SWCurveConfig}; +use crate::AffineRepr; + +/// Affine coordinates for a point on an elliptic curve in short Weierstrass +/// form, over the base field `P::BaseField`. +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: SWCurveConfig"), + Clone(bound = "P: SWCurveConfig"), + PartialEq(bound = "P: SWCurveConfig"), + Eq(bound = "P: SWCurveConfig"), + Hash(bound = "P: SWCurveConfig") +)] +#[must_use] +pub struct Affine { + #[doc(hidden)] + pub x: P::BaseField, + #[doc(hidden)] + pub y: P::BaseField, + #[doc(hidden)] + pub infinity: bool, +} + +impl PartialEq> for Affine

{ + fn eq(&self, other: &Projective

) -> bool { + self.into_group() == *other + } +} + +impl Display for Affine

{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + match self.infinity { + true => write!(f, "infinity"), + false => write!(f, "({}, {})", self.x, self.y), + } + } +} + +impl Debug for Affine

{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + match self.infinity { + true => write!(f, "infinity"), + false => write!(f, "({}, {})", self.x, self.y), + } + } +} + +impl Affine

{ + /// Constructs a group element from x and y coordinates. + /// Performs checks to ensure that the point is on the curve and is in the right subgroup. + pub fn new(x: P::BaseField, y: P::BaseField) -> Self { + let point = Self { + x, + y, + infinity: false, + }; + assert!(point.is_on_curve()); + assert!(point.is_in_correct_subgroup_assuming_on_curve()); + point + } + + /// Constructs a group element from x and y coordinates. + /// + /// # Warning + /// + /// Does *not* perform any checks to ensure the point is in the curve or + /// is in the right subgroup. + pub const fn new_unchecked(x: P::BaseField, y: P::BaseField) -> Self { + Self { + x, + y, + infinity: false, + } + } + + pub const fn identity() -> Self { + Self { + x: P::BaseField::ZERO, + y: P::BaseField::ZERO, + infinity: true, + } + } + + /// Attempts to construct an affine point given an x-coordinate. The + /// point is not guaranteed to be in the prime order subgroup. + /// + /// If and only if `greatest` is set will the lexicographically + /// largest y-coordinate be selected. + #[allow(dead_code)] + pub fn get_point_from_x(x: P::BaseField, greatest: bool) -> Option { + // Compute x^3 + ax + b + // Rust does not optimise away addition with zero + let x3b = if P::COEFF_A.is_zero() { + P::add_b(&(x.square() * &x)) + } else { + P::add_b(&((x.square() * &x) + &P::mul_by_a(&x))) + }; + + x3b.sqrt().map(|y| { + let negy = -y; + + let y = if (y < negy) ^ greatest { y } else { negy }; + Self::new_unchecked(x, y) + }) + } + + /// Checks if `self` is a valid point on the curve. + pub fn is_on_curve(&self) -> bool { + if !self.infinity { + // Rust does not optimise away addition with zero + let mut x3b = P::add_b(&(self.x.square() * self.x)); + if !P::COEFF_A.is_zero() { + x3b += &P::mul_by_a(&self.x); + }; + self.y.square() == x3b + } else { + true + } + } +} + +impl Affine

<P: SWCurveConfig> {
+    /// Checks if `self` is in the subgroup having order equal to that of
+    /// `P::ScalarField`.
+    // DISCUSS Maybe these function names are too verbose?
+    pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool {
+        P::is_in_correct_subgroup_assuming_on_curve(self)
+    }
+}
+
+impl<P: SWCurveConfig> Zeroize for Affine

{ + // The phantom data does not contain element-specific data + // and thus does not need to be zeroized. + fn zeroize(&mut self) { + self.x.zeroize(); + self.y.zeroize(); + self.infinity.zeroize(); + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> Affine

{ + loop { + let x = P::BaseField::rand(rng); + let greatest = rng.gen(); + + if let Some(p) = Affine::get_point_from_x(x, greatest) { + return p.mul_by_cofactor(); + } + } + } +} + +impl AffineRepr for Affine
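Editorial note: the `Distribution` impl above samples a point by lifting a random x-coordinate with `get_point_from_x` and then multiplying by the cofactor, while `new` asserts both `is_on_curve` and subgroup membership and `new_unchecked` skips every check. A hedged helper in the same spirit, combining the x-lift with cofactor clearing (illustrative; paths and bounds follow the new module layout):

    use ark_ec::{
        short_weierstrass::{Affine, SWCurveConfig},
        AffineRepr,
    };

    /// Lifts an x-coordinate to a point of the prime-order subgroup, if possible.
    fn subgroup_point_from_x<P: SWCurveConfig>(
        x: P::BaseField,
        greatest: bool,
    ) -> Option<Affine<P>> {
        // On the curve, but possibly outside the prime-order subgroup.
        let p = Affine::<P>::get_point_from_x(x, greatest)?;
        // Clear the cofactor, then double-check membership.
        let p = p.clear_cofactor();
        p.is_in_correct_subgroup_assuming_on_curve().then(|| p)
    }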

{ + type Config = P; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Group = Projective

; + + fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { + (!self.infinity).then(|| (&self.x, &self.y)) + } + + #[inline] + fn generator() -> Self { + P::GENERATOR + } + + fn identity() -> Self { + Self { + x: P::BaseField::ZERO, + y: P::BaseField::ZERO, + infinity: true, + } + } + + fn from_random_bytes(bytes: &[u8]) -> Option { + P::BaseField::from_random_bytes_with_flags::(bytes).and_then(|(x, flags)| { + // if x is valid and is zero and only the infinity flag is set, then parse this + // point as infinity. For all other choices, get the original point. + if x.is_zero() && flags.is_infinity() { + Some(Self::identity()) + } else if let Some(y_is_positive) = flags.is_positive() { + Self::get_point_from_x(x, y_is_positive) + // Unwrap is safe because it's not zero. + } else { + None + } + }) + } + + fn mul_bigint(&self, by: impl AsRef<[u64]>) -> Self::Group { + P::mul_affine(self, by.as_ref()) + } + + /// Multiplies this element by the cofactor and output the + /// resulting projective element. + #[must_use] + fn mul_by_cofactor_to_group(&self) -> Self::Group { + P::mul_affine(self, Self::Config::COFACTOR) + } + + /// Performs cofactor clearing. + /// The default method is simply to multiply by the cofactor. + /// Some curves can implement a more efficient algorithm. + fn clear_cofactor(&self) -> Self { + P::clear_cofactor(self) + } +} + +impl Neg for Affine

{ + type Output = Self; + + /// If `self.is_zero()`, returns `self` (`== Self::zero()`). + /// Else, returns `(x, -y)`, where `self = (x, y)`. + #[inline] + fn neg(mut self) -> Self { + self.y = -self.y; + self + } +} + +impl> Add for Affine

{ + type Output = Projective

; + fn add(self, other: T) -> Projective

{ + // TODO implement more efficient formulae when z1 = z2 = 1. + let mut copy = self.into_group(); + copy += other.borrow(); + copy + } +} + +impl Add> for Affine

{ + type Output = Projective

; + fn add(self, other: Projective

) -> Projective

{ + other + self + } +} + +impl<'a, P: SWCurveConfig> Add<&'a Projective

> for Affine

{ + type Output = Projective

; + fn add(self, other: &'a Projective

) -> Projective

{ + *other + self + } +} + +impl> Sub for Affine

{ + type Output = Projective

; + fn sub(self, other: T) -> Projective

{ + let mut copy = self.into_group(); + copy -= other.borrow(); + copy + } +} + +impl Default for Affine

{ + #[inline] + fn default() -> Self { + Self::identity() + } +} + +impl<'a, P: SWCurveConfig, T: Borrow> Mul for Affine

{ + type Output = Projective

; + + #[inline] + fn mul(self, other: T) -> Self::Output { + self.mul_bigint(other.borrow().into_bigint()) + } +} + +// The projective point X, Y, Z is represented in the affine +// coordinates as X/Z^2, Y/Z^3. +impl From> for Affine
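Editorial note: with the operator impls above, `Affine + Affine`, `Affine + Projective`, `Affine - ...` and `Affine * scalar` all return a `Projective` result; callers normalize back explicitly when affine coordinates are needed. A hedged usage sketch, assuming `Projective<P>` implements `CurveGroup` as in the companion `group.rs` below (illustrative only):

    use ark_ec::{
        short_weierstrass::{Affine, Projective, SWCurveConfig},
        CurveGroup,
    };

    fn double_and_normalize<P: SWCurveConfig>(a: Affine<P>) -> Affine<P> {
        // Affine + Affine -> Projective under the new `Add` impl.
        let sum: Projective<P> = a + a;
        // One inversion to get back to affine coordinates.
        sum.into_affine()
    }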

{ + #[inline] + fn from(p: Projective

) -> Affine

{ + if p.is_zero() { + Affine::identity() + } else if p.z.is_one() { + // If Z is one, the point is already normalized. + Affine::new_unchecked(p.x, p.y) + } else { + // Z is nonzero, so it must have an inverse in a field. + let zinv = p.z.inverse().unwrap(); + let zinv_squared = zinv.square(); + + // X/Z^2 + let x = p.x * &zinv_squared; + + // Y/Z^3 + let y = p.y * &(zinv_squared * &zinv); + + Affine::new_unchecked(x, y) + } + } +} + +impl CanonicalSerialize for Affine

{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + let (x, flags) = match self.infinity { + true => (P::BaseField::zero(), SWFlags::infinity()), + false => (self.x, SWFlags::from_y_sign(self.y > -self.y)), + }; + x.serialize_with_flags(writer, flags) + } + + #[inline] + fn serialized_size(&self) -> usize { + P::BaseField::zero().serialized_size_with_flags::() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + let (x, y, flags) = match self.infinity { + true => ( + P::BaseField::zero(), + P::BaseField::zero(), + SWFlags::infinity(), + ), + false => (self.x, self.y, SWFlags::from_y_sign(self.y > -self.y)), + }; + x.serialize(&mut writer)?; + y.serialize_with_flags(&mut writer, flags)?; + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + // The size of the serialization is independent of the values + // of `x` and `y`, and depends only on the size of the modulus. + P::BaseField::zero().serialized_size() + + P::BaseField::zero().serialized_size_with_flags::() + } +} + +impl CanonicalDeserialize for Affine
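Editorial note: compressed serialization above stores only the x-coordinate plus `SWFlags` (infinity and the sign of y); the matching deserialization just below recomputes y via `get_point_from_x` and re-checks subgroup membership. A hedged round-trip sketch against exactly this API (illustrative; error handling elided):

    use ark_ec::short_weierstrass::{Affine, SWCurveConfig};
    use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};

    fn roundtrip_compressed<P: SWCurveConfig>(p: Affine<P>) -> bool {
        let mut bytes = Vec::new();
        p.serialize(&mut bytes).expect("serialization should not fail");
        let q = Affine::<P>::deserialize(&bytes[..]).expect("bytes were just written");
        p == q
    }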
<P>
{ + #[allow(unused_qualifications)] + fn deserialize(reader: R) -> Result { + let (x, flags): (P::BaseField, SWFlags) = + CanonicalDeserializeWithFlags::deserialize_with_flags(reader)?; + if flags.is_infinity() { + Ok(Self::identity()) + } else { + let p = Affine::
<P>
::get_point_from_x(x, flags.is_positive().unwrap()) + .ok_or(SerializationError::InvalidData)?; + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed( + reader: R, + ) -> Result { + let p = Self::deserialize_unchecked(reader)?; + + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(mut reader: R) -> Result { + let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let (y, flags): (P::BaseField, SWFlags) = + CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; + match flags.is_infinity() { + true => Ok(Self::identity()), + false => Ok(Self::new_unchecked(x, y)), + } + } +} + +impl ToConstraintField for Affine +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + let mut x = self.x.to_field_elements()?; + let y = self.y.to_field_elements()?; + let infinity = self.infinity.to_field_elements()?; + x.extend_from_slice(&y); + x.extend_from_slice(&infinity); + Some(x) + } +} diff --git a/ec/src/models/short_weierstrass/group.rs b/ec/src/models/short_weierstrass/group.rs new file mode 100644 index 000000000..c4dbf80c1 --- /dev/null +++ b/ec/src/models/short_weierstrass/group.rs @@ -0,0 +1,576 @@ +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + borrow::Borrow, + fmt::{Debug, Display, Formatter, Result as FmtResult}, + hash::{Hash, Hasher}, + io::{Read, Write}, + ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + rand::{ + distributions::{Distribution, Standard}, + Rng, + }, + vec::Vec, + One, Zero, +}; + +use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; + +use zeroize::Zeroize; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +use super::{Affine, SWCurveConfig}; +use crate::{ + scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}, + AffineRepr, CurveGroup, Group, +}; + +/// Jacobian coordinates for a point on an elliptic curve in short Weierstrass +/// form, over the base field `P::BaseField`. This struct implements arithmetic +/// via the Jacobian formulae +#[derive(Derivative)] +#[derivative(Copy(bound = "P: SWCurveConfig"), Clone(bound = "P: SWCurveConfig"))] +#[must_use] +pub struct Projective { + /// `X / Z` projection of the affine `X` + pub x: P::BaseField, + /// `Y / Z` projection of the affine `Y` + pub y: P::BaseField, + /// Projective multiplicative inverse. Will be `0` only at infinity. + pub z: P::BaseField, +} + +impl Display for Projective
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "{}", Affine::from(*self)) + } +} + +impl Debug for Projective
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + match self.is_zero() { + true => write!(f, "infinity"), + false => write!(f, "({}, {}, {})", self.x, self.y, self.z), + } + } +} + +impl Eq for Projective
<P>
{} +impl PartialEq for Projective
<P>
{ + fn eq(&self, other: &Self) -> bool { + if self.is_zero() { + return other.is_zero(); + } + + if other.is_zero() { + return false; + } + + // The points (X, Y, Z) and (X', Y', Z') + // are equal when (X * Z^2) = (X' * Z'^2) + // and (Y * Z^3) = (Y' * Z'^3). + let z1z1 = self.z.square(); + let z2z2 = other.z.square(); + + if self.x * &z2z2 != other.x * &z1z1 { + false + } else { + self.y * &(z2z2 * &other.z) == other.y * &(z1z1 * &self.z) + } + } +} + +impl PartialEq> for Projective
<P>
{ + fn eq(&self, other: &Affine
<P>
) -> bool { + *self == other.into_group() + } +} + +impl Hash for Projective
<P>
{ + fn hash(&self, state: &mut H) { + self.into_affine().hash(state) + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> Projective
<P>
{ + loop { + let x = P::BaseField::rand(rng); + let greatest = rng.gen(); + + if let Some(p) = Affine::get_point_from_x(x, greatest) { + return p.mul_by_cofactor_to_group(); + } + } + } +} + +impl Default for Projective
<P>
{ + #[inline] + fn default() -> Self { + Self::zero() + } +} + +impl Projective
<P>
{ + /// Constructs a new group element without checking whether the coordinates + /// specify a point in the subgroup. + pub const fn new_unchecked(x: P::BaseField, y: P::BaseField, z: P::BaseField) -> Self { + Self { x, y, z } + } + + /// Constructs a new group element in a way while enforcing that points are in + /// the prime-order subgroup. + pub fn new(x: P::BaseField, y: P::BaseField, z: P::BaseField) -> Self { + let p = Self::new_unchecked(x, y, z).into_affine(); + assert!(p.is_on_curve()); + assert!(p.is_in_correct_subgroup_assuming_on_curve()); + p.into() + } +} + +impl Zeroize for Projective
<P>
{ + fn zeroize(&mut self) { + self.x.zeroize(); + self.y.zeroize(); + self.z.zeroize(); + } +} + +impl Zero for Projective
<P>
{ + /// Returns the point at infinity, which always has Z = 0. + #[inline] + fn zero() -> Self { + Self::new_unchecked( + P::BaseField::one(), + P::BaseField::one(), + P::BaseField::zero(), + ) + } + + /// Checks whether `self.z.is_zero()`. + #[inline] + fn is_zero(&self) -> bool { + self.z.is_zero() + } +} + +impl Group for Projective
<P>
{ + type ScalarField = P::ScalarField; + + #[inline] + fn generator() -> Self { + Affine::generator().into() + } + + /// Sets `self = 2 * self`. Note that Jacobian formulae are incomplete, and + /// so doubling cannot be computed as `self + self`. Instead, this + /// implementation uses the following specialized doubling formulae: + /// * [`P::A` is zero](http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l) + /// * [`P::A` is not zero](https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl) + fn double_in_place(&mut self) -> &mut Self { + if self.is_zero() { + return self; + } + + if P::COEFF_A.is_zero() { + // A = X1^2 + let mut a = self.x.square(); + + // B = Y1^2 + let b = self.y.square(); + + // C = B^2 + let mut c = b.square(); + + // D = 2*((X1+B)2-A-C) + let d = ((self.x + &b).square() - &a - &c).double(); + + // E = 3*A + let e = a + &*a.double_in_place(); + + // F = E^2 + let f = e.square(); + + // Z3 = 2*Y1*Z1 + self.z *= &self.y; + self.z.double_in_place(); + + // X3 = F-2*D + self.x = f - &d.double(); + + // Y3 = E*(D-X3)-8*C + self.y = (d - &self.x) * &e - &*c.double_in_place().double_in_place().double_in_place(); + self + } else { + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + // XX = X1^2 + let xx = self.x.square(); + + // YY = Y1^2 + let yy = self.y.square(); + + // YYYY = YY^2 + let mut yyyy = yy.square(); + + // ZZ = Z1^2 + let zz = self.z.square(); + + // S = 2*((X1+YY)^2-XX-YYYY) + let s = ((self.x + &yy).square() - &xx - &yyyy).double(); + + // M = 3*XX+a*ZZ^2 + let m = xx + xx.double() + P::mul_by_a(&zz.square()); + + // T = M^2-2*S + let t = m.square() - &s.double(); + + // X3 = T + self.x = t; + // Y3 = M*(S-T)-8*YYYY + let old_y = self.y; + self.y = m * &(s - &t) - &*yyyy.double_in_place().double_in_place().double_in_place(); + // Z3 = (Y1+Z1)^2-YY-ZZ + self.z = (old_y + &self.z).square() - &yy - &zz; + self + } + } + + #[inline] + fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { + P::mul_projective(&self, other.as_ref()) + } +} + +impl CurveGroup for Projective
<P>
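// Editor's sketch, not part of this patch: the specialized doubling above
// (dbl-2009-l when `COEFF_A` is zero, dbl-2007-bl otherwise) must agree with
// the generic addition below, whose equal-operand branch falls back to
// `double_in_place`. The function name is illustrative only.
fn doubling_matches_addition<P: SWCurveConfig>(mut p: Projective<P>) {
    let q = p;            // keep a copy; `Projective` is `Copy`
    p.double_in_place();  // dedicated doubling formulae
    assert_eq!(p, q + q); // `+` detects equal operands and doubles internally
}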
{ + type Config = P; + type BaseField = P::BaseField; + type Affine = Affine
<P>
; + type FullGroup = Affine
<P>
; + + /// Normalizes a slice of projective elements so that + /// conversion to affine is cheap. + /// + /// In more detail, this method converts a curve point in Jacobian + /// coordinates (x, y, z) into an equivalent representation (x/z^2, + /// y/z^3, 1). + /// + /// For `N = v.len()`, this costs 1 inversion + 6N field multiplications + N + /// field squarings. + /// + /// (Where batch inversion comprises 3N field multiplications + 1 inversion + /// of these operations) + #[inline] + fn normalize_batch(v: &[Self]) -> Vec { + let mut z_s = v.iter().map(|g| g.z).collect::>(); + ark_ff::batch_inversion(&mut z_s); + + // Perform affine transformations + ark_std::cfg_iter!(v) + .zip(z_s) + .map(|(g, z)| match g.is_zero() { + true => Affine::identity(), + false => { + let z2 = z.square(); + let x = g.x * z2; + let y = g.y * z2 * z; + Affine::new_unchecked(x, y) + }, + }) + .collect() + } +} + +impl Neg for Projective
<P>
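// Editor's sketch, not part of this patch: `normalize_batch` above amortizes a
// single field inversion over the whole slice via batch inversion, so it must
// agree with (and is much cheaper than) converting each point on its own.
// The function name is illustrative only.
fn batch_agrees_with_single<P: SWCurveConfig>(points: &[Projective<P>]) {
    let batched = Projective::<P>::normalize_batch(points); // one inversion in total
    for (p, a) in points.iter().zip(&batched) {
        assert_eq!(p.into_affine(), *a); // `into_affine` inverts z for a single point
    }
}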
{ + type Output = Self; + + #[inline] + fn neg(mut self) -> Self { + self.y = -self.y; + self + } +} + +impl>> AddAssign for Projective
<P>
{ + fn add_assign(&mut self, other: T) { + let other = other.borrow(); + if let Some((&other_x, &other_y)) = other.xy() { + if self.is_zero() { + self.x = other_x; + self.y = other_y; + self.z = P::BaseField::one(); + return; + } + + // Z1Z1 = Z1^2 + let z1z1 = self.z.square(); + + // U2 = X2*Z1Z1 + let u2 = z1z1 * other_x; + + // S2 = Y2*Z1*Z1Z1 + let s2 = (self.z * other_y) * &z1z1; + + if self.x == u2 && self.y == s2 { + // The two points are equal, so we double. + self.double_in_place(); + } else { + // If we're adding -a and a together, self.z becomes zero as H becomes zero. + + // H = U2-X1 + let h = u2 - &self.x; + + // HH = H^2 + let hh = h.square(); + + // I = 4*HH + let mut i = hh; + i.double_in_place().double_in_place(); + + // J = H*I + let j = h * &i; + + // r = 2*(S2-Y1) + let r = (s2 - &self.y).double(); + + // V = X1*I + let v = self.x * &i; + + // X3 = r^2 - J - 2*V + self.x = r.square(); + self.x -= &j; + self.x -= &v.double(); + + // Y3 = r*(V-X3)-2*Y1*J + self.y = P::BaseField::sum_of_products(&[r, -self.y.double()], &[(v - &self.x), j]); + + // Z3 = (Z1+H)^2-Z1Z1-HH + self.z += &h; + self.z.square_in_place(); + self.z -= &z1z1; + self.z -= &hh; + } + } + } +} + +impl>> Add for Projective
<P>
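// Editor's sketch, not part of this patch: the `AddAssign` impl above accepts
// anything that borrows as `Affine<P>`, so affine points can be folded straight
// into a Jacobian accumulator through the cheap mixed-addition path (the second
// operand's z coordinate is implicitly 1). The function name is illustrative only.
fn sum_affine_points<P: SWCurveConfig>(points: &[Affine<P>]) -> Projective<P> {
    let mut acc = Projective::<P>::zero();
    for point in points {
        acc += point; // mixed addition; doubles instead when `acc` equals `point`
    }
    acc
}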
{ + type Output = Self; + fn add(mut self, other: T) -> Self { + let other = other.borrow(); + self += other; + self + } +} + +impl>> SubAssign for Projective
<P>
{ + fn sub_assign(&mut self, other: T) { + *self += -(*other.borrow()); + } +} + +impl>> Sub for Projective
<P>
{ + type Output = Self; + fn sub(mut self, other: T) -> Self { + self -= other.borrow(); + self + } +} + +ark_ff::impl_additive_ops_from_ref!(Projective, SWCurveConfig); + +impl<'a, P: SWCurveConfig> Add<&'a Self> for Projective
<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, P: SWCurveConfig> AddAssign<&'a Self> for Projective
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + if self.is_zero() { + *self = *other; + return; + } + + if other.is_zero() { + return; + } + + // http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + // Works for all curves. + + // Z1Z1 = Z1^2 + let z1z1 = self.z.square(); + + // Z2Z2 = Z2^2 + let z2z2 = other.z.square(); + + // U1 = X1*Z2Z2 + let u1 = self.x * &z2z2; + + // U2 = X2*Z1Z1 + let u2 = other.x * &z1z1; + + // S1 = Y1*Z2*Z2Z2 + let s1 = self.y * &other.z * &z2z2; + + // S2 = Y2*Z1*Z1Z1 + let s2 = other.y * &self.z * &z1z1; + + if u1 == u2 && s1 == s2 { + // The two points are equal, so we double. + self.double_in_place(); + } else { + // If we're adding -a and a together, self.z becomes zero as H becomes zero. + + // H = U2-U1 + let h = u2 - &u1; + + // I = (2*H)^2 + let i = (h.double()).square(); + + // J = H*I + let j = h * &i; + + // r = 2*(S2-S1) + let r = (s2 - &s1).double(); + + // V = U1*I + let v = u1 * &i; + + // X3 = r^2 - J - 2*V + self.x = r.square() - &j - &(v.double()); + + // Y3 = r*(V - X3) - 2*S1*J + self.y = P::BaseField::sum_of_products(&[r, -s1.double()], &[(v - &self.x), j]); + + // Z3 = ((Z1+Z2)^2 - Z1Z1 - Z2Z2)*H + self.z = ((self.z + &other.z).square() - &z1z1 - &z2z2) * &h; + } + } +} + +impl<'a, P: SWCurveConfig> Sub<&'a Self> for Projective
<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &'a Self) -> Self { + self -= other; + self + } +} + +impl<'a, P: SWCurveConfig> SubAssign<&'a Self> for Projective
<P>
{ + fn sub_assign(&mut self, other: &'a Self) { + *self += &(-(*other)); + } +} + +impl> MulAssign for Projective
<P>
{ + fn mul_assign(&mut self, other: T) { + *self = self.mul_bigint(other.borrow().into_bigint()) + } +} + +impl<'a, P: SWCurveConfig, T: Borrow> Mul for Projective
<P>
{ + type Output = Self; + + #[inline] + fn mul(mut self, other: T) -> Self { + self *= other; + self + } +} + +// The affine point X, Y is represented in the Jacobian +// coordinates with Z = 1. +impl From> for Projective
<P>
{ + #[inline] + fn from(p: Affine
<P>
) -> Projective
<P>
{ + p.xy().map_or(Projective::zero(), |(&x, &y)| Self { + x, + y, + z: P::BaseField::one(), + }) + } +} + +impl CanonicalSerialize for Projective
<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + let aff = Affine::
<P>
::from(*self); + aff.serialized_size() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + let aff = Affine::
<P>
::from(*self); + aff.uncompressed_size() + } +} + +impl CanonicalDeserialize for Projective
<P>
{ + #[allow(unused_qualifications)] + fn deserialize(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize(reader)?; + Ok(aff.into()) + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_uncompressed(reader)?; + Ok(aff.into()) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_unchecked(reader)?; + Ok(aff.into()) + } +} + +impl ToConstraintField for Projective +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + Affine::from(*self).to_field_elements() + } +} + +impl ScalarMul for Projective
<P>
{ + type MulBase = Affine
<P>
; + + fn batch_convert_to_mul_base(bases: &[Self]) -> Vec { + Self::normalize_batch(bases) + } +} + +impl VariableBaseMSM for Projective
<P>
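// Editor's sketch, not part of this patch: `ScalarMul` fixes `Affine<P>` as the
// cheap-to-store base type, and the empty `VariableBaseMSM` impl below inherits
// the trait's default MSM routine. Whatever that routine does internally, it
// must agree with this naive reference sum (names are illustrative only).
fn naive_msm<P: SWCurveConfig>(
    bases: &[Affine<P>],
    scalars: &[P::ScalarField],
) -> Projective<P> {
    bases
        .iter()
        .zip(scalars)
        .map(|(base, scalar)| *base * scalar) // Mul<ScalarField> for Affine<P> (affine.rs)
        .sum() // the Sum impl for Projective<P> below folds the partial products
}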
{} + +impl>> core::iter::Sum for Projective
<P>
{ + fn sum>(iter: I) -> Self { + iter.fold(Projective::zero(), |sum, x| sum + x.borrow()) + } +} diff --git a/ec/src/models/short_weierstrass/mod.rs b/ec/src/models/short_weierstrass/mod.rs new file mode 100644 index 000000000..679a71e55 --- /dev/null +++ b/ec/src/models/short_weierstrass/mod.rs @@ -0,0 +1,97 @@ +use ark_ff::fields::Field; + +use crate::{AffineRepr, Group}; + +use num_traits::Zero; + +mod affine; +pub use affine::*; + +mod group; +pub use group::*; + +/// Constants and convenience functions that collectively define the [Short Weierstrass model](https://www.hyperelliptic.org/EFD/g1p/auto-shortw.html) +/// of the curve. In this model, the curve equation is `y² = x³ + a * x + b`, +/// for constants `a` and `b`. +pub trait SWCurveConfig: super::CurveConfig { + /// Coefficient `a` of the curve equation. + const COEFF_A: Self::BaseField; + /// Coefficient `b` of the curve equation. + const COEFF_B: Self::BaseField; + /// Generator of the prime-order subgroup. + const GENERATOR: Affine; + + /// Helper method for computing `elem * Self::COEFF_A`. + /// + /// The default implementation should be overridden only if + /// the product can be computed faster than standard field multiplication + /// (eg: via doubling if `COEFF_A == 2`, or if `COEFF_A.is_zero()`). + #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + let mut copy = *elem; + copy *= &Self::COEFF_A; + copy + } + + /// Helper method for computing `elem + Self::COEFF_B`. + /// + /// The default implementation should be overridden only if + /// the sum can be computed faster than standard field addition (eg: via + /// doubling). + #[inline(always)] + fn add_b(elem: &Self::BaseField) -> Self::BaseField { + if !Self::COEFF_B.is_zero() { + let mut copy = *elem; + copy += &Self::COEFF_B; + return copy; + } + *elem + } + + /// Check if the provided curve point is in the prime-order subgroup. + /// + /// The default implementation multiplies `item` by the order `r` of the + /// prime-order subgroup, and checks if the result is one. + /// Implementors can choose to override this default impl + /// if the given curve has faster methods + /// for performing this check (for example, via leveraging curve + /// isomorphisms). + fn is_in_correct_subgroup_assuming_on_curve(item: &Affine) -> bool { + Self::mul_affine(item, Self::ScalarField::characteristic()).is_zero() + } + + /// Performs cofactor clearing. + /// The default method is simply to multiply by the cofactor. + /// Some curves can implement a more efficient algorithm. + fn clear_cofactor(item: &Affine) -> Affine { + item.mul_by_cofactor() + } + + /// Default implementation of group multiplication for projective + /// coordinates + fn mul_projective(base: &Projective, scalar: &[u64]) -> Projective { + let mut res = Projective::::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base; + } + } + + res + } + + /// Default implementation of group multiplication for affine + /// coordinates. 
+ fn mul_affine(base: &Affine, scalar: &[u64]) -> Projective { + let mut res = Projective::::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base + } + } + + res + } +} diff --git a/ec/src/models/twisted_edwards.rs b/ec/src/models/twisted_edwards.rs deleted file mode 100644 index ae2762c9d..000000000 --- a/ec/src/models/twisted_edwards.rs +++ /dev/null @@ -1,1052 +0,0 @@ -use crate::{msm::VariableBaseMSM, AffineCurve, ProjectiveCurve}; -use ark_serialize::{ - CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, - CanonicalSerializeWithFlags, EdwardsFlags, SerializationError, -}; -use ark_std::{ - borrow::Borrow, - fmt::{Display, Formatter, Result as FmtResult}, - hash::{Hash, Hasher}, - io::{Read, Write}, - ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, - rand::{ - distributions::{Distribution, Standard}, - Rng, - }, - vec::Vec, -}; -use num_traits::{One, Zero}; -use zeroize::Zeroize; - -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; - -#[cfg(feature = "parallel")] -use rayon::prelude::*; - -/// Constants and convenience functions that collectively define the [Twisted Edwards model](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html) -/// of the curve. In this model, the curve equation is -/// `a * x² + y² = 1 + d * x² * y²`, for constants `a` and `d`. -pub trait TECurveConfig: super::CurveConfig { - /// Coefficient `a` of the curve equation. - const COEFF_A: Self::BaseField; - /// Coefficient `d` of the curve equation. - const COEFF_D: Self::BaseField; - /// Generator of the prime-order subgroup. - const GENERATOR: Affine; - - /// Model parameters for the Montgomery curve that is birationally - /// equivalent to this curve. - type MontCurveConfig: MontCurveConfig; - - /// Helper method for computing `elem * Self::COEFF_A`. - /// - /// The default implementation should be overridden only if - /// the product can be computed faster than standard field multiplication - /// (eg: via doubling if `COEFF_A == 2`, or if `COEFF_A.is_zero()`). - #[inline(always)] - fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { - let mut copy = *elem; - copy *= &Self::COEFF_A; - copy - } - - /// Checks that the current point is in the prime order subgroup given - /// the point on the curve. - fn is_in_correct_subgroup_assuming_on_curve(item: &Affine) -> bool { - Self::mul_affine(item, Self::ScalarField::characteristic()).is_zero() - } - - /// Performs cofactor clearing. - /// The default method is simply to multiply by the cofactor. - /// For some curve families though, it is sufficient to multiply - /// by a smaller scalar. 
- fn clear_cofactor(item: &Affine) -> Affine { - item.mul_by_cofactor() - } - - /// Default implementation of group multiplication for projective - /// coordinates - fn mul_projective(base: &Projective, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res += base; - } - } - - res - } - - /// Default implementation of group multiplication for affine - /// coordinates - fn mul_affine(base: &Affine, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res.add_assign_mixed(base) - } - } - - res - } -} - -/// Constants and convenience functions that collectively define the [Montgomery model](https://www.hyperelliptic.org/EFD/g1p/auto-montgom.html) -/// of the curve. In this model, the curve equation is -/// `b * y² = x³ + a * x² + x`, for constants `a` and `b`. -pub trait MontCurveConfig: super::CurveConfig { - /// Coefficient `a` of the curve equation. - const COEFF_A: Self::BaseField; - /// Coefficient `b` of the curve equation. - const COEFF_B: Self::BaseField; - - /// Model parameters for the Twisted Edwards curve that is birationally - /// equivalent to this curve. - type TECurveConfig: TECurveConfig; -} - -/// Affine coordinates for a point on a twisted Edwards curve, over the -/// base field `P::BaseField`. -#[derive(Derivative)] -#[derivative( - Copy(bound = "P: TECurveConfig"), - Clone(bound = "P: TECurveConfig"), - PartialEq(bound = "P: TECurveConfig"), - Eq(bound = "P: TECurveConfig"), - Debug(bound = "P: TECurveConfig"), - Hash(bound = "P: TECurveConfig") -)] -#[must_use] -pub struct Affine { - /// X coordinate of the point represented as a field element - pub x: P::BaseField, - /// Y coordinate of the point represented as a field element - pub y: P::BaseField, -} - -impl Display for Affine
<P>
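// Editor's sketch, not part of this patch: both the new `SWCurveConfig`
// defaults above and the deleted twisted-Edwards defaults use the same
// left-to-right double-and-add loop over the big-endian bits of the scalar,
// sketched here for the short-Weierstrass group (names are illustrative only).
fn double_and_add<P: SWCurveConfig>(base: &Projective<P>, scalar: &[u64]) -> Projective<P> {
    let mut res = Projective::<P>::zero();
    for bit in ark_ff::BitIteratorBE::without_leading_zeros(scalar) {
        res.double_in_place(); // shift the partial result up by one bit
        if bit {
            res += base; // add the base whenever the scalar bit is set
        }
    }
    res
}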
{ - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - write!(f, "Affine(x={}, y={})", self.x, self.y) - } -} - -impl Affine
<P>
{ - /// Construct a new group element without checking whether the coordinates - /// specify a point in the subgroup. - pub const fn new_unchecked(x: P::BaseField, y: P::BaseField) -> Self { - Self { x, y } - } - - /// Construct a new group element in a way while enforcing that points are in - /// the prime-order subgroup. - pub fn new(x: P::BaseField, y: P::BaseField) -> Self { - let p = Self::new_unchecked(x, y); - assert!(p.is_on_curve()); - assert!(p.is_in_correct_subgroup_assuming_on_curve()); - p - } - - /// Attempts to construct an affine point given an y-coordinate. The - /// point is not guaranteed to be in the prime order subgroup. - /// - /// If and only if `greatest` is set will the lexicographically - /// largest x-coordinate be selected. - /// - /// a * X^2 + Y^2 = 1 + d * X^2 * Y^2 - /// a * X^2 - d * X^2 * Y^2 = 1 - Y^2 - /// X^2 * (a - d * Y^2) = 1 - Y^2 - /// X^2 = (1 - Y^2) / (a - d * Y^2) - #[allow(dead_code)] - pub fn get_point_from_y(y: P::BaseField, greatest: bool) -> Option { - let y2 = y.square(); - - let numerator = P::BaseField::one() - y2; - let denominator = P::COEFF_A - (y2 * P::COEFF_D); - - denominator - .inverse() - .map(|denom| denom * &numerator) - .and_then(|x2| x2.sqrt()) - .map(|x| { - let negx = -x; - let x = if (x < negx) ^ greatest { x } else { negx }; - Self::new_unchecked(x, y) - }) - } - - /// Checks that the current point is on the elliptic curve. - pub fn is_on_curve(&self) -> bool { - let x2 = self.x.square(); - let y2 = self.y.square(); - - let lhs = y2 + &P::mul_by_a(&x2); - let rhs = P::BaseField::one() + &(P::COEFF_D * &(x2 * &y2)); - - lhs == rhs - } -} - -impl Affine
<P>
{ - /// Checks if `self` is in the subgroup having order equaling that of - /// `P::ScalarField` given it is on the curve. - pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool { - P::is_in_correct_subgroup_assuming_on_curve(self) - } -} - -impl Zero for Affine
<P>
{ - fn zero() -> Self { - Self::new_unchecked(P::BaseField::zero(), P::BaseField::one()) - } - - fn is_zero(&self) -> bool { - self.x.is_zero() & self.y.is_one() - } -} - -impl AffineCurve for Affine
<P>
{ - type Config = P; - type BaseField = P::BaseField; - type ScalarField = P::ScalarField; - type Projective = Projective
<P>
; - - fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { - (!self.is_zero()).then(|| (&self.x, &self.y)) - } - - fn prime_subgroup_generator() -> Self { - P::GENERATOR - } - - fn from_random_bytes(bytes: &[u8]) -> Option { - P::BaseField::from_random_bytes_with_flags::(bytes).and_then(|(y, flags)| { - // if y is valid and is zero, then parse this - // point as infinity. - if y.is_zero() { - Some(Self::zero()) - } else { - Self::get_point_from_y(y, flags.is_positive()) - } - }) - } - - fn mul_bigint>(&self, by: S) -> Self::Projective { - P::mul_affine(self, by.as_ref()) - } - - /// Multiplies this element by the cofactor and output the - /// resulting projective element. - #[must_use] - fn mul_by_cofactor_to_projective(&self) -> Self::Projective { - P::mul_affine(self, Self::Config::COFACTOR) - } - - /// Performs cofactor clearing. - /// The default method is simply to multiply by the cofactor. - /// Some curves can implement a more efficient algorithm. - fn clear_cofactor(&self) -> Self { - P::clear_cofactor(self) - } -} - -impl Zeroize for Affine
<P>
{ - // The phantom data does not contain element-specific data - // and thus does not need to be zeroized. - fn zeroize(&mut self) { - self.x.zeroize(); - self.y.zeroize(); - } -} - -impl Neg for Affine
<P>
{ - type Output = Self; - - fn neg(self) -> Self { - Self::new_unchecked(-self.x, self.y) - } -} - -ark_ff::impl_additive_ops_from_ref!(Affine, TECurveConfig); - -impl<'a, P: TECurveConfig> Add<&'a Self> for Affine
<P>
{ - type Output = Self; - fn add(self, other: &'a Self) -> Self { - let mut copy = self; - copy += other; - copy - } -} - -impl<'a, P: TECurveConfig> AddAssign<&'a Self> for Affine
<P>
{ - fn add_assign(&mut self, other: &'a Self) { - let y1y2 = self.y * &other.y; - let x1x2 = self.x * &other.x; - let dx1x2y1y2 = P::COEFF_D * &y1y2 * &x1x2; - - let d1 = P::BaseField::one() + &dx1x2y1y2; - let d2 = P::BaseField::one() - &dx1x2y1y2; - - let x1y2 = self.x * &other.y; - let y1x2 = self.y * &other.x; - - self.x = (x1y2 + &y1x2) / &d1; - self.y = (y1y2 - &P::mul_by_a(&x1x2)) / &d2; - } -} - -impl<'a, P: TECurveConfig> Sub<&'a Self> for Affine
<P>
{ - type Output = Self; - fn sub(self, other: &'a Self) -> Self { - let mut copy = self; - copy -= other; - copy - } -} - -impl<'a, P: TECurveConfig> SubAssign<&'a Self> for Affine
<P>
{ - fn sub_assign(&mut self, other: &'a Self) { - *self += &(-(*other)); - } -} - -impl MulAssign for Affine
<P>
{ - fn mul_assign(&mut self, other: P::ScalarField) { - *self = self.mul_bigint(&other.into_bigint()).into() - } -} - -impl Default for Affine
<P>
{ - #[inline] - fn default() -> Self { - Self::zero() - } -} - -impl Distribution> for Standard { - #[inline] - fn sample(&self, rng: &mut R) -> Affine
<P>
{ - loop { - let y = P::BaseField::rand(rng); - let greatest = rng.gen(); - - if let Some(p) = Affine::get_point_from_y(y, greatest) { - return p.mul_by_cofactor(); - } - } - } -} - -impl<'a, P: TECurveConfig, T: Borrow> Mul for Affine
<P>
{ - type Output = Projective
<P>
; - - #[inline] - fn mul(self, other: T) -> Self::Output { - self.mul_bigint(other.borrow().into_bigint()) - } -} - -////////////////////////////////////////////////////////////////////////////// - -/// `Projective` implements Extended Twisted Edwards Coordinates -/// as described in [\[HKCD08\]](https://eprint.iacr.org/2008/522.pdf). -/// -/// This implementation uses the unified addition formulae from that paper (see -/// Section 3.1). -#[derive(Derivative)] -#[derivative( - Copy(bound = "P: TECurveConfig"), - Clone(bound = "P: TECurveConfig"), - Eq(bound = "P: TECurveConfig"), - Debug(bound = "P: TECurveConfig") -)] -#[must_use] -pub struct Projective { - pub x: P::BaseField, - pub y: P::BaseField, - pub t: P::BaseField, - pub z: P::BaseField, -} - -impl PartialEq> for Affine
<P>
{ - fn eq(&self, other: &Projective
<P>
) -> bool { - self.into_projective() == *other - } -} - -impl PartialEq> for Projective
<P>
{ - fn eq(&self, other: &Affine
<P>
) -> bool { - *self == other.into_projective() - } -} - -impl Display for Projective
<P>
{ - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - write!(f, "{}", Affine::from(*self)) - } -} - -impl PartialEq for Projective
<P>
{ - fn eq(&self, other: &Self) -> bool { - if self.is_zero() { - return other.is_zero(); - } - - if other.is_zero() { - return false; - } - - // x1/z1 == x2/z2 <==> x1 * z2 == x2 * z1 - (self.x * &other.z) == (other.x * &self.z) && (self.y * &other.z) == (other.y * &self.z) - } -} - -impl Hash for Projective
<P>
{ - fn hash(&self, state: &mut H) { - self.into_affine().hash(state) - } -} - -impl Distribution> for Standard { - #[inline] - fn sample(&self, rng: &mut R) -> Projective
<P>
{ - loop { - let y = P::BaseField::rand(rng); - let greatest = rng.gen(); - - if let Some(p) = Affine::get_point_from_y(y, greatest) { - return p.mul_by_cofactor_to_projective(); - } - } - } -} - -impl Default for Projective
<P>
{ - #[inline] - fn default() -> Self { - Self::zero() - } -} - -impl Projective
<P>
{ - /// Construct a new group element without checking whether the coordinates - /// specify a point in the subgroup. - pub const fn new_unchecked( - x: P::BaseField, - y: P::BaseField, - t: P::BaseField, - z: P::BaseField, - ) -> Self { - Self { x, y, t, z } - } - - /// Construct a new group element in a way while enforcing that points are in - /// the prime-order subgroup. - pub fn new(x: P::BaseField, y: P::BaseField, t: P::BaseField, z: P::BaseField) -> Self { - let p = Self::new_unchecked(x, y, t, z).into_affine(); - assert!(p.is_on_curve()); - assert!(p.is_in_correct_subgroup_assuming_on_curve()); - p.into() - } -} -impl Zeroize for Projective
<P>
{ - // The phantom data does not contain element-specific data - // and thus does not need to be zeroized. - fn zeroize(&mut self) { - self.x.zeroize(); - self.y.zeroize(); - self.t.zeroize(); - self.z.zeroize(); - } -} - -impl Zero for Projective
<P>
{ - fn zero() -> Self { - Self::new_unchecked( - P::BaseField::zero(), - P::BaseField::one(), - P::BaseField::zero(), - P::BaseField::one(), - ) - } - - fn is_zero(&self) -> bool { - self.x.is_zero() && self.y == self.z && !self.y.is_zero() && self.t.is_zero() - } -} - -impl ProjectiveCurve for Projective
<P>
{ - type Config = P; - type BaseField = P::BaseField; - type ScalarField = P::ScalarField; - type Affine = Affine
<P>
; - - fn prime_subgroup_generator() -> Self { - Affine::prime_subgroup_generator().into() - } - - fn is_normalized(&self) -> bool { - self.z.is_one() - } - - fn batch_normalization(v: &mut [Self]) { - // A projective curve element (x, y, t, z) is normalized - // to its affine representation, by the conversion - // (x, y, t, z) -> (x/z, y/z, t/z, 1) - // Batch normalizing N twisted edwards curve elements costs: - // 1 inversion + 6N field multiplications - // (batch inversion requires 3N multiplications + 1 inversion) - let mut z_s = v.iter().map(|g| g.z).collect::>(); - ark_ff::batch_inversion(&mut z_s); - - // Perform affine transformations - ark_std::cfg_iter_mut!(v) - .zip(z_s) - .filter(|(g, _)| !g.is_normalized()) - .for_each(|(g, z)| { - g.x *= &z; // x/z - g.y *= &z; - g.t *= &z; - g.z = P::BaseField::one(); // z = 1 - }); - } - - fn double_in_place(&mut self) -> &mut Self { - // See "Twisted Edwards Curves Revisited" - // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson - // 3.3 Doubling in E^e - // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/doubling/dbl-2008-hwcd - - // A = X1^2 - let a = self.x.square(); - // B = Y1^2 - let b = self.y.square(); - // C = 2 * Z1^2 - let c = self.z.square().double(); - // D = a * A - let d = P::mul_by_a(&a); - // E = (X1 + Y1)^2 - A - B - let e = (self.x + &self.y).square() - &a - &b; - // G = D + B - let g = d + &b; - // F = G - C - let f = g - &c; - // H = D - B - let h = d - &b; - // X3 = E * F - self.x = e * &f; - // Y3 = G * H - self.y = g * &h; - // T3 = E * H - self.t = e * &h; - // Z3 = F * G - self.z = f * &g; - - self - } - - fn add_assign_mixed(&mut self, other: &Affine
<P>
) { - // See "Twisted Edwards Curves Revisited" - // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson - // 3.1 Unified Addition in E^e - // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/addition/madd-2008-hwcd - - // A = X1*X2 - let a = self.x * &other.x; - // B = Y1*Y2 - let b = self.y * &other.y; - // C = T1*d*T2 - let c = P::COEFF_D * &self.t * &other.x * &other.y; - - // D = Z1 - let d = self.z; - // E = (X1+Y1)*(X2+Y2)-A-B - let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; - // F = D-C - let f = d - &c; - // G = D+C - let g = d + &c; - // H = B-a*A - let h = b - &P::mul_by_a(&a); - // X3 = E*F - self.x = e * &f; - // Y3 = G*H - self.y = g * &h; - // T3 = E*H - self.t = e * &h; - // Z3 = F*G - self.z = f * &g; - } - - #[inline] - fn mul_bigint>(self, other: S) -> Self { - P::mul_projective(&self, other.as_ref()) - } -} - -impl Neg for Projective
<P>
{ - type Output = Self; - fn neg(mut self) -> Self { - self.x = -self.x; - self.t = -self.t; - self - } -} - -ark_ff::impl_additive_ops_from_ref!(Projective, TECurveConfig); - -impl<'a, P: TECurveConfig> Add<&'a Self> for Projective
<P>
{ - type Output = Self; - fn add(mut self, other: &'a Self) -> Self { - self += other; - self - } -} - -impl<'a, P: TECurveConfig> AddAssign<&'a Self> for Projective
<P>
{ - fn add_assign(&mut self, other: &'a Self) { - // See "Twisted Edwards Curves Revisited" (https://eprint.iacr.org/2008/522.pdf) - // by Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson - // 3.1 Unified Addition in E^e - - // A = x1 * x2 - let a = self.x * &other.x; - - // B = y1 * y2 - let b = self.y * &other.y; - - // C = d * t1 * t2 - let c = P::COEFF_D * &self.t * &other.t; - - // D = z1 * z2 - let d = self.z * &other.z; - - // H = B - aA - let h = b - &P::mul_by_a(&a); - - // E = (x1 + y1) * (x2 + y2) - A - B - let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; - - // F = D - C - let f = d - &c; - - // G = D + C - let g = d + &c; - - // x3 = E * F - self.x = e * &f; - - // y3 = G * H - self.y = g * &h; - - // t3 = E * H - self.t = e * &h; - - // z3 = F * G - self.z = f * &g; - } -} - -impl<'a, P: TECurveConfig> Sub<&'a Self> for Projective
<P>
{ - type Output = Self; - fn sub(mut self, other: &'a Self) -> Self { - self -= other; - self - } -} - -impl<'a, P: TECurveConfig> SubAssign<&'a Self> for Projective
<P>
{ - fn sub_assign(&mut self, other: &'a Self) { - *self += &(-(*other)); - } -} - -impl> MulAssign for Projective
<P>
{ - fn mul_assign(&mut self, other: T) { - *self = self.mul_bigint(other.borrow().into_bigint()) - } -} - -impl> Mul for Projective
<P>
{ - type Output = Self; - - #[inline] - fn mul(mut self, other: T) -> Self { - self *= other; - self - } -} - -// The affine point (X, Y) is represented in the Extended Projective coordinates -// with Z = 1. -impl From> for Projective
<P>
{ - fn from(p: Affine
<P>
) -> Projective
<P>
{ - Self::new_unchecked(p.x, p.y, p.x * &p.y, P::BaseField::one()) - } -} - -// The projective point X, Y, T, Z is represented in the affine -// coordinates as X/Z, Y/Z. -impl From> for Affine
<P>
{ - fn from(p: Projective
<P>
) -> Affine
<P>
{ - if p.is_zero() { - Affine::zero() - } else if p.z.is_one() { - // If Z is one, the point is already normalized. - Affine::new_unchecked(p.x, p.y) - } else { - // Z is nonzero, so it must have an inverse in a field. - let z_inv = p.z.inverse().unwrap(); - let x = p.x * &z_inv; - let y = p.y * &z_inv; - Affine::new_unchecked(x, y) - } - } -} - -impl core::str::FromStr for Affine
<P>
-where - P::BaseField: core::str::FromStr, -{ - type Err = (); - - fn from_str(mut s: &str) -> Result { - s = s.trim(); - if s.is_empty() { - return Err(()); - } - if s.len() < 3 { - return Err(()); - } - if !(s.starts_with('(') && s.ends_with(')')) { - return Err(()); - } - let mut point = Vec::new(); - for substr in s.split(|c| c == '(' || c == ')' || c == ',' || c == ' ') { - if !substr.is_empty() { - point.push(P::BaseField::from_str(substr)?); - } - } - if point.len() != 2 { - return Err(()); - } - let point = Self::new_unchecked(point[0], point[1]); - - if !point.is_on_curve() { - Err(()) - } else { - Ok(point) - } - } -} - -#[derive(Derivative)] -#[derivative( - Copy(bound = "P: MontCurveConfig"), - Clone(bound = "P: MontCurveConfig"), - PartialEq(bound = "P: MontCurveConfig"), - Eq(bound = "P: MontCurveConfig"), - Debug(bound = "P: MontCurveConfig"), - Hash(bound = "P: MontCurveConfig") -)] -pub struct MontgomeryAffine { - pub x: P::BaseField, - pub y: P::BaseField, -} - -impl Display for MontgomeryAffine
<P>
{ - fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { - write!(f, "MontgomeryAffine(x={}, y={})", self.x, self.y) - } -} - -impl MontgomeryAffine
<P>
{ - pub fn new(x: P::BaseField, y: P::BaseField) -> Self { - Self { x, y } - } -} - -impl CanonicalSerialize for Affine
<P>
{ - #[allow(unused_qualifications)] - #[inline] - fn serialize(&self, writer: W) -> Result<(), SerializationError> { - if self.is_zero() { - let flags = EdwardsFlags::default(); - // Serialize 0. - P::BaseField::zero().serialize_with_flags(writer, flags) - } else { - let flags = EdwardsFlags::from_x_sign(self.x > -self.x); - self.y.serialize_with_flags(writer, flags) - } - } - - #[inline] - fn serialized_size(&self) -> usize { - P::BaseField::zero().serialized_size_with_flags::() - } - - #[allow(unused_qualifications)] - #[inline] - fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { - self.x.serialize_uncompressed(&mut writer)?; - self.y.serialize_uncompressed(&mut writer)?; - Ok(()) - } - - #[inline] - fn uncompressed_size(&self) -> usize { - // x + y - self.x.serialized_size() + self.y.serialized_size() - } -} - -impl CanonicalSerialize for Projective
<P>
{ - #[allow(unused_qualifications)] - #[inline] - fn serialize(&self, writer: W) -> Result<(), SerializationError> { - let aff = Affine::
<P>
::from(*self); - aff.serialize(writer) - } - - #[inline] - fn serialized_size(&self) -> usize { - let aff = Affine::
<P>
::from(*self); - aff.serialized_size() - } - - #[allow(unused_qualifications)] - #[inline] - fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { - let aff = Affine::
<P>
::from(*self); - aff.serialize_uncompressed(writer) - } - - #[inline] - fn uncompressed_size(&self) -> usize { - let aff = Affine::
<P>
::from(*self); - aff.uncompressed_size() - } -} - -impl CanonicalDeserialize for Affine
<P>
{ - #[allow(unused_qualifications)] - fn deserialize(mut reader: R) -> Result { - let (y, flags): (P::BaseField, EdwardsFlags) = - CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; - if y == P::BaseField::zero() { - Ok(Self::zero()) - } else { - let p = Affine::
<P>
::get_point_from_y(y, flags.is_positive()) - .ok_or(SerializationError::InvalidData)?; - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - } - - #[allow(unused_qualifications)] - fn deserialize_uncompressed(reader: R) -> Result { - let p = Self::deserialize_unchecked(reader)?; - - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - - #[allow(unused_qualifications)] - fn deserialize_unchecked(mut reader: R) -> Result { - let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; - let y: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; - - let p = Affine::
<P>
::new_unchecked(x, y); - Ok(p) - } -} - -impl CanonicalDeserialize for Projective
<P>
{ - #[allow(unused_qualifications)] - fn deserialize(reader: R) -> Result { - let aff = Affine::
<P>
::deserialize(reader)?; - Ok(aff.into()) - } - - #[allow(unused_qualifications)] - fn deserialize_uncompressed(reader: R) -> Result { - let aff = Affine::
<P>
::deserialize_uncompressed(reader)?; - Ok(aff.into()) - } - - #[allow(unused_qualifications)] - fn deserialize_unchecked(reader: R) -> Result { - let aff = Affine::
<P>
::deserialize_unchecked(reader)?; - Ok(aff.into()) - } -} - -impl ToConstraintField for Affine -where - M::BaseField: ToConstraintField, -{ - #[inline] - fn to_field_elements(&self) -> Option> { - let mut x_fe = self.x.to_field_elements()?; - let y_fe = self.y.to_field_elements()?; - x_fe.extend_from_slice(&y_fe); - Some(x_fe) - } -} - -impl ToConstraintField for Projective -where - M::BaseField: ToConstraintField, -{ - #[inline] - fn to_field_elements(&self) -> Option> { - Affine::from(*self).to_field_elements() - } -} - -// This impl block and the one following are being used to encapsulate all of -// the methods that are needed for backwards compatibility with the old -// serialization format -// See Issue #330 -impl Affine
<P>
{ - /// Attempts to construct an affine point given an x-coordinate. The - /// point is not guaranteed to be in the prime order subgroup. - /// - /// If and only if `greatest` is set will the lexicographically - /// largest y-coordinate be selected. - /// - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - #[allow(dead_code)] - pub fn get_point_from_x_old(x: P::BaseField, greatest: bool) -> Option { - let x2 = x.square(); - let one = P::BaseField::one(); - let numerator = P::mul_by_a(&x2) - &one; - let denominator = P::COEFF_D * &x2 - &one; - let y2 = denominator.inverse().map(|denom| denom * &numerator); - y2.and_then(|y2| y2.sqrt()).map(|y| { - let negy = -y; - let y = if (y < negy) ^ greatest { y } else { negy }; - Self::new_unchecked(x, y) - }) - } - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn serialize_old(&self, writer: W) -> Result<(), SerializationError> { - if self.is_zero() { - let flags = EdwardsFlags::default(); - // Serialize 0. - P::BaseField::zero().serialize_with_flags(writer, flags) - } else { - // Note: although this says `from_x_sign` and we are - // using the sign of `y`. The logic works the same. - let flags = EdwardsFlags::from_x_sign(self.y > -self.y); - self.x.serialize_with_flags(writer, flags) - } - } - - #[allow(unused_qualifications)] - #[inline] - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn serialize_uncompressed_old( - &self, - mut writer: W, - ) -> Result<(), SerializationError> { - self.x.serialize_uncompressed(&mut writer)?; - self.y.serialize_uncompressed(&mut writer)?; - Ok(()) - } - - #[allow(unused_qualifications)] - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn deserialize_uncompressed_old(reader: R) -> Result { - let p = Self::deserialize_unchecked(reader)?; - - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn deserialize_old(mut reader: R) -> Result { - let (x, flags): (P::BaseField, EdwardsFlags) = - CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; - if x == P::BaseField::zero() { - Ok(Self::zero()) - } else { - let p = Affine::
<P>
::get_point_from_x_old(x, flags.is_positive()) - .ok_or(SerializationError::InvalidData)?; - if !p.is_in_correct_subgroup_assuming_on_curve() { - return Err(SerializationError::InvalidData); - } - Ok(p) - } - } -} -impl Projective
<P>
{ - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn serialize_old(&self, writer: W) -> Result<(), SerializationError> { - let aff = Affine::
<P>
::from(*self); - aff.serialize_old(writer) - } - - #[allow(unused_qualifications)] - #[inline] - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn serialize_uncompressed_old( - &self, - writer: W, - ) -> Result<(), SerializationError> { - let aff = Affine::
<P>
::from(*self); - aff.serialize_uncompressed(writer) - } - - #[allow(unused_qualifications)] - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn deserialize_uncompressed_old(reader: R) -> Result { - let aff = Affine::
<P>
::deserialize_uncompressed(reader)?; - Ok(aff.into()) - } - /// This method is implemented for backwards compatibility with the old - /// serialization format and will be deprecated and then removed in a - /// future version. - pub fn deserialize_old(reader: R) -> Result { - let aff = Affine::
<P>
::deserialize_old(reader)?; - Ok(aff.into()) - } -} - -impl VariableBaseMSM for Projective
<P>
{ - type MSMBase = Affine
<P>
; - - type Scalar = ::ScalarField; - - #[inline] - fn _double_in_place(&mut self) -> &mut Self { - self.double_in_place() - } - - #[inline] - fn _add_assign_mixed(&mut self, other: &Self::MSMBase) { - self.add_assign_mixed(other) - } -} diff --git a/ec/src/models/twisted_edwards/affine.rs b/ec/src/models/twisted_edwards/affine.rs new file mode 100644 index 000000000..fa72536d7 --- /dev/null +++ b/ec/src/models/twisted_edwards/affine.rs @@ -0,0 +1,457 @@ +use ark_serialize::{ + CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, + CanonicalSerializeWithFlags, EdwardsFlags, SerializationError, +}; +use ark_std::{ + borrow::Borrow, + fmt::{Debug, Display, Formatter, Result as FmtResult}, + io::{Read, Write}, + ops::{Add, Mul, Neg, Sub}, + rand::{ + distributions::{Distribution, Standard}, + Rng, + }, + vec::Vec, +}; +use num_traits::{One, Zero}; +use zeroize::Zeroize; + +use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; + +use super::{Projective, TECurveConfig}; +use crate::AffineRepr; + +/// Affine coordinates for a point on a twisted Edwards curve, over the +/// base field `P::BaseField`. +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: TECurveConfig"), + Clone(bound = "P: TECurveConfig"), + PartialEq(bound = "P: TECurveConfig"), + Eq(bound = "P: TECurveConfig"), + Hash(bound = "P: TECurveConfig") +)] +#[must_use] +pub struct Affine { + /// X coordinate of the point represented as a field element + pub x: P::BaseField, + /// Y coordinate of the point represented as a field element + pub y: P::BaseField, +} + +impl Display for Affine
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + match self.is_identity() { + true => write!(f, "infinity"), + false => write!(f, "({}, {})", self.x, self.y), + } + } +} + +impl Debug for Affine
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + match self.is_identity() { + true => write!(f, "infinity"), + false => write!(f, "({}, {})", self.x, self.y), + } + } +} + +impl PartialEq> for Affine
<P>
{ + fn eq(&self, other: &Projective
<P>
) -> bool { + self.into_group() == *other + } +} + +impl Affine
<P>
{ + /// Construct a new group element without checking whether the coordinates + /// specify a point in the subgroup. + pub const fn new_unchecked(x: P::BaseField, y: P::BaseField) -> Self { + Self { x, y } + } + + /// Construct a new group element in a way while enforcing that points are in + /// the prime-order subgroup. + pub fn new(x: P::BaseField, y: P::BaseField) -> Self { + let p = Self::new_unchecked(x, y); + assert!(p.is_on_curve()); + assert!(p.is_in_correct_subgroup_assuming_on_curve()); + p + } + + /// Construct the identity of the group + pub const fn identity() -> Self { + Self::new_unchecked(P::BaseField::ZERO, P::BaseField::ONE) + } + + /// Is this point the identity? + pub fn is_identity(&self) -> bool { + self.x.is_zero() && self.y.is_one() + } + + /// Attempts to construct an affine point given an y-coordinate. The + /// point is not guaranteed to be in the prime order subgroup. + /// + /// If and only if `greatest` is set will the lexicographically + /// largest x-coordinate be selected. + /// + /// a * X^2 + Y^2 = 1 + d * X^2 * Y^2 + /// a * X^2 - d * X^2 * Y^2 = 1 - Y^2 + /// X^2 * (a - d * Y^2) = 1 - Y^2 + /// X^2 = (1 - Y^2) / (a - d * Y^2) + #[allow(dead_code)] + pub fn get_point_from_y(y: P::BaseField, greatest: bool) -> Option { + let y2 = y.square(); + + let numerator = P::BaseField::one() - y2; + let denominator = P::COEFF_A - (y2 * P::COEFF_D); + + denominator + .inverse() + .map(|denom| denom * &numerator) + .and_then(|x2| x2.sqrt()) + .map(|x| { + let negx = -x; + let x = if (x < negx) ^ greatest { x } else { negx }; + Self::new_unchecked(x, y) + }) + } + + /// Checks that the current point is on the elliptic curve. + pub fn is_on_curve(&self) -> bool { + let x2 = self.x.square(); + let y2 = self.y.square(); + + let lhs = y2 + &P::mul_by_a(&x2); + let rhs = P::BaseField::one() + &(P::COEFF_D * &(x2 * &y2)); + + lhs == rhs + } +} + +impl Affine
<P>
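// Editor's sketch, not part of this patch: `get_point_from_y` above solves
// x^2 = (1 - y^2) / (a - d * y^2); whenever a square root exists, either sign
// choice yields a point on the curve (though not necessarily in the prime-order
// subgroup). The function name is illustrative only.
fn recover_from_y<P: TECurveConfig>(y: P::BaseField) {
    for greatest in [false, true] {
        if let Some(p) = Affine::<P>::get_point_from_y(y, greatest) {
            assert!(p.is_on_curve()); // curve membership only; subgroup is not checked
        }
    }
}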
{ + /// Checks if `self` is in the subgroup having order equaling that of + /// `P::ScalarField` given it is on the curve. + pub fn is_in_correct_subgroup_assuming_on_curve(&self) -> bool { + P::is_in_correct_subgroup_assuming_on_curve(self) + } +} + +impl AffineRepr for Affine
<P>
{ + type Config = P; + type BaseField = P::BaseField; + type ScalarField = P::ScalarField; + type Group = Projective
<P>
; + + fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { + (!self.is_identity()).then(|| (&self.x, &self.y)) + } + + fn generator() -> Self { + P::GENERATOR + } + + fn identity() -> Self { + Self::new_unchecked(P::BaseField::ZERO, P::BaseField::ONE) + } + + fn from_random_bytes(bytes: &[u8]) -> Option { + P::BaseField::from_random_bytes_with_flags::(bytes).and_then(|(y, flags)| { + // if y is valid and is zero, then parse this + // point as infinity. + if y.is_zero() { + Some(Self::identity()) + } else { + Self::get_point_from_y(y, flags.is_positive()) + } + }) + } + + fn mul_bigint(&self, by: impl AsRef<[u64]>) -> Self::Group { + P::mul_affine(self, by.as_ref()) + } + + /// Multiplies this element by the cofactor and output the + /// resulting projective element. + #[must_use] + fn mul_by_cofactor_to_group(&self) -> Self::Group { + P::mul_affine(self, Self::Config::COFACTOR) + } + + /// Performs cofactor clearing. + /// The default method is simply to multiply by the cofactor. + /// Some curves can implement a more efficient algorithm. + fn clear_cofactor(&self) -> Self { + P::clear_cofactor(self) + } +} + +impl Zeroize for Affine
<P>
{ + // The phantom data does not contain element-specific data + // and thus does not need to be zeroized. + fn zeroize(&mut self) { + self.x.zeroize(); + self.y.zeroize(); + } +} + +impl Neg for Affine
<P>
{ + type Output = Self; + + fn neg(self) -> Self { + Self::new_unchecked(-self.x, self.y) + } +} + +impl> Add for Affine
<P>
{ + type Output = Projective
<P>
; + fn add(self, other: T) -> Self::Output { + let mut copy = self.into_group(); + copy += other.borrow(); + copy + } +} + +impl Add> for Affine
<P>
{ + type Output = Projective
<P>
; + fn add(self, other: Projective
<P>
) -> Projective
<P>
{ + other + self + } +} + +impl<'a, P: TECurveConfig> Add<&'a Projective
<P>
> for Affine
<P>
{ + type Output = Projective
<P>
; + fn add(self, other: &'a Projective
<P>
) -> Projective
<P>
{ + *other + self + } +} + +impl> Sub for Affine
<P>
{ + type Output = Projective
<P>
; + fn sub(self, other: T) -> Self::Output { + let mut copy = self.into_group(); + copy -= other.borrow(); + copy + } +} + +impl Default for Affine
<P>
{ + #[inline] + fn default() -> Self { + Self::identity() + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> Affine
<P>
{ + loop { + let y = P::BaseField::rand(rng); + let greatest = rng.gen(); + + if let Some(p) = Affine::get_point_from_y(y, greatest) { + return p.mul_by_cofactor(); + } + } + } +} + +impl<'a, P: TECurveConfig, T: Borrow> Mul for Affine
<P>
{ + type Output = Projective
<P>
; + + #[inline] + fn mul(self, other: T) -> Self::Output { + self.mul_bigint(other.borrow().into_bigint()) + } +} + +// The projective point X, Y, T, Z is represented in the affine +// coordinates as X/Z, Y/Z. +impl From> for Affine
<P>
{ + fn from(p: Projective
<P>
) -> Affine
<P>
{ + if p.is_zero() { + Affine::identity() + } else if p.z.is_one() { + // If Z is one, the point is already normalized. + Affine::new_unchecked(p.x, p.y) + } else { + // Z is nonzero, so it must have an inverse in a field. + let z_inv = p.z.inverse().unwrap(); + let x = p.x * &z_inv; + let y = p.y * &z_inv; + Affine::new_unchecked(x, y) + } + } +} + +impl CanonicalSerialize for Affine
<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + if self.is_identity() { + let flags = EdwardsFlags::default(); + // Serialize 0. + P::BaseField::zero().serialize_with_flags(writer, flags) + } else { + let flags = EdwardsFlags::from_x_sign(self.x > -self.x); + self.y.serialize_with_flags(writer, flags) + } + } + + #[inline] + fn serialized_size(&self) -> usize { + P::BaseField::zero().serialized_size_with_flags::() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, mut writer: W) -> Result<(), SerializationError> { + self.x.serialize_uncompressed(&mut writer)?; + self.y.serialize_uncompressed(&mut writer)?; + Ok(()) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + // x + y + self.x.serialized_size() + self.y.serialized_size() + } +} + +impl CanonicalDeserialize for Affine
<P>
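// Editor's sketch, not part of this patch: the compressed encoding above stores
// only `y` together with an `EdwardsFlags` sign bit for `x`, so a round-trip
// through serialize/deserialize must recover the same point (deserialization
// also re-checks subgroup membership). The function name is illustrative only.
fn serialization_roundtrip<P: TECurveConfig>(p: Affine<P>) -> Result<(), SerializationError> {
    let mut bytes = Vec::new();
    p.serialize(&mut bytes)?;                      // y plus the x-sign flag
    let q = Affine::<P>::deserialize(&bytes[..])?; // x is recovered via `get_point_from_y`
    assert_eq!(p, q);
    Ok(())
}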
{ + #[allow(unused_qualifications)] + fn deserialize(mut reader: R) -> Result { + let (y, flags): (P::BaseField, EdwardsFlags) = + CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; + if y == P::BaseField::zero() { + Ok(Self::identity()) + } else { + let p = Affine::
<P>
::get_point_from_y(y, flags.is_positive()) + .ok_or(SerializationError::InvalidData)?; + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed(reader: R) -> Result { + let p = Self::deserialize_unchecked(reader)?; + + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(mut reader: R) -> Result { + let x: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + let y: P::BaseField = CanonicalDeserialize::deserialize(&mut reader)?; + + let p = Affine::
<P>
::new_unchecked(x, y); + Ok(p) + } +} + +impl ToConstraintField for Affine +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + let mut x_fe = self.x.to_field_elements()?; + let y_fe = self.y.to_field_elements()?; + x_fe.extend_from_slice(&y_fe); + Some(x_fe) + } +} + +// This impl block and the one following are being used to encapsulate all of +// the methods that are needed for backwards compatibility with the old +// serialization format +// See Issue #330 +impl Affine
<P>
{ + /// Attempts to construct an affine point given an x-coordinate. The + /// point is not guaranteed to be in the prime order subgroup. + /// + /// If and only if `greatest` is set will the lexicographically + /// largest y-coordinate be selected. + /// + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + #[allow(dead_code)] + pub fn get_point_from_x_old(x: P::BaseField, greatest: bool) -> Option { + let x2 = x.square(); + let one = P::BaseField::one(); + let numerator = P::mul_by_a(&x2) - &one; + let denominator = P::COEFF_D * &x2 - &one; + let y2 = denominator.inverse().map(|denom| denom * &numerator); + y2.and_then(|y2| y2.sqrt()).map(|y| { + let negy = -y; + let y = if (y < negy) ^ greatest { y } else { negy }; + Self::new_unchecked(x, y) + }) + } + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn serialize_old(&self, writer: W) -> Result<(), SerializationError> { + if self.is_identity() { + let flags = EdwardsFlags::default(); + // Serialize 0. + P::BaseField::zero().serialize_with_flags(writer, flags) + } else { + // Note: although this says `from_x_sign` and we are + // using the sign of `y`. The logic works the same. + let flags = EdwardsFlags::from_x_sign(self.y > -self.y); + self.x.serialize_with_flags(writer, flags) + } + } + + #[allow(unused_qualifications)] + #[inline] + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn serialize_uncompressed_old( + &self, + mut writer: W, + ) -> Result<(), SerializationError> { + self.x.serialize_uncompressed(&mut writer)?; + self.y.serialize_uncompressed(&mut writer)?; + Ok(()) + } + + #[allow(unused_qualifications)] + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn deserialize_uncompressed_old(reader: R) -> Result { + let p = Self::deserialize_unchecked(reader)?; + + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn deserialize_old(mut reader: R) -> Result { + let (x, flags): (P::BaseField, EdwardsFlags) = + CanonicalDeserializeWithFlags::deserialize_with_flags(&mut reader)?; + if x == P::BaseField::zero() { + Ok(Self::identity()) + } else { + let p = Affine::
<P>
::get_point_from_x_old(x, flags.is_positive()) + .ok_or(SerializationError::InvalidData)?; + if !p.is_in_correct_subgroup_assuming_on_curve() { + return Err(SerializationError::InvalidData); + } + Ok(p) + } + } +} diff --git a/ec/src/models/twisted_edwards/group.rs b/ec/src/models/twisted_edwards/group.rs new file mode 100644 index 000000000..f904f3cba --- /dev/null +++ b/ec/src/models/twisted_edwards/group.rs @@ -0,0 +1,527 @@ +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + borrow::Borrow, + fmt::{Display, Formatter, Result as FmtResult}, + hash::{Hash, Hasher}, + io::{Read, Write}, + ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + rand::{ + distributions::{Distribution, Standard}, + Rng, + }, + vec::Vec, + One, Zero, +}; + +use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; + +use zeroize::Zeroize; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +use super::{Affine, MontCurveConfig, TECurveConfig}; +use crate::{ + scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}, + AffineRepr, CurveGroup, Group, +}; + +/// `Projective` implements Extended Twisted Edwards Coordinates +/// as described in [\[HKCD08\]](https://eprint.iacr.org/2008/522.pdf). +/// +/// This implementation uses the unified addition formulae from that paper (see +/// Section 3.1). +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: TECurveConfig"), + Clone(bound = "P: TECurveConfig"), + Eq(bound = "P: TECurveConfig"), + Debug(bound = "P: TECurveConfig") +)] +#[must_use] +pub struct Projective { + pub x: P::BaseField, + pub y: P::BaseField, + pub t: P::BaseField, + pub z: P::BaseField, +} + +impl PartialEq> for Projective
<P>
{ + fn eq(&self, other: &Affine
<P>
) -> bool { + *self == other.into_group() + } +} + +impl Display for Projective
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "{}", Affine::from(*self)) + } +} + +impl PartialEq for Projective
<P>
{ + fn eq(&self, other: &Self) -> bool { + if self.is_zero() { + return other.is_zero(); + } + + if other.is_zero() { + return false; + } + + // x1/z1 == x2/z2 <==> x1 * z2 == x2 * z1 + (self.x * &other.z) == (other.x * &self.z) && (self.y * &other.z) == (other.y * &self.z) + } +} + +impl Hash for Projective
<P>
{ + fn hash(&self, state: &mut H) { + self.into_affine().hash(state) + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> Projective
<P>
{ + loop { + let y = P::BaseField::rand(rng); + let greatest = rng.gen(); + + if let Some(p) = Affine::get_point_from_y(y, greatest) { + return p.mul_by_cofactor_to_group(); + } + } + } +} + +impl Default for Projective
<P>
{ + #[inline] + fn default() -> Self { + Self::zero() + } +} + +impl Projective
<P>
{ + /// Construct a new group element without checking whether the coordinates + /// specify a point in the subgroup. + pub const fn new_unchecked( + x: P::BaseField, + y: P::BaseField, + t: P::BaseField, + z: P::BaseField, + ) -> Self { + Self { x, y, t, z } + } + + /// Construct a new group element in a way while enforcing that points are in + /// the prime-order subgroup. + pub fn new(x: P::BaseField, y: P::BaseField, t: P::BaseField, z: P::BaseField) -> Self { + let p = Self::new_unchecked(x, y, t, z).into_affine(); + assert!(p.is_on_curve()); + assert!(p.is_in_correct_subgroup_assuming_on_curve()); + p.into() + } +} +impl Zeroize for Projective
<P>
{ + // The phantom data does not contain element-specific data + // and thus does not need to be zeroized. + fn zeroize(&mut self) { + self.x.zeroize(); + self.y.zeroize(); + self.t.zeroize(); + self.z.zeroize(); + } +} + +impl Zero for Projective
<P>
{ + fn zero() -> Self { + Self::new_unchecked( + P::BaseField::zero(), + P::BaseField::one(), + P::BaseField::zero(), + P::BaseField::one(), + ) + } + + fn is_zero(&self) -> bool { + self.x.is_zero() && self.y == self.z && !self.y.is_zero() && self.t.is_zero() + } +} + +impl Group for Projective
<P>
{ + type ScalarField = P::ScalarField; + + fn generator() -> Self { + Affine::generator().into() + } + + fn double_in_place(&mut self) -> &mut Self { + // See "Twisted Edwards Curves Revisited" + // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.3 Doubling in E^e + // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/doubling/dbl-2008-hwcd + + // A = X1^2 + let a = self.x.square(); + // B = Y1^2 + let b = self.y.square(); + // C = 2 * Z1^2 + let c = self.z.square().double(); + // D = a * A + let d = P::mul_by_a(&a); + // E = (X1 + Y1)^2 - A - B + let e = (self.x + &self.y).square() - &a - &b; + // G = D + B + let g = d + &b; + // F = G - C + let f = g - &c; + // H = D - B + let h = d - &b; + // X3 = E * F + self.x = e * &f; + // Y3 = G * H + self.y = g * &h; + // T3 = E * H + self.t = e * &h; + // Z3 = F * G + self.z = f * &g; + + self + } + + #[inline] + fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { + P::mul_projective(&self, other.as_ref()) + } +} + +impl CurveGroup for Projective
<P>
{ + type Config = P; + type BaseField = P::BaseField; + type Affine = Affine
<P>
; + type FullGroup = Affine
<P>
; + + fn normalize_batch(v: &[Self]) -> Vec { + // A projective curve element (x, y, t, z) is normalized + // to its affine representation, by the conversion + // (x, y, t, z) -> (x/z, y/z, t/z, 1) + // Batch normalizing N twisted edwards curve elements costs: + // 1 inversion + 6N field multiplications + // (batch inversion requires 3N multiplications + 1 inversion) + let mut z_s = v.iter().map(|g| g.z).collect::>(); + ark_ff::batch_inversion(&mut z_s); + + // Perform affine transformations + ark_std::cfg_iter!(v) + .zip(z_s) + .map(|(g, z)| match g.is_zero() { + true => Affine::identity(), + false => { + let x = g.x * &z; + let y = g.y * &z; + Affine::new_unchecked(x, y) + }, + }) + .collect() + } +} + +impl Neg for Projective
<P>
{ + type Output = Self; + fn neg(mut self) -> Self { + self.x = -self.x; + self.t = -self.t; + self + } +} + +impl>> AddAssign for Projective
<P>
{ + fn add_assign(&mut self, other: T) { + let other = other.borrow(); + // See "Twisted Edwards Curves Revisited" + // Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.1 Unified Addition in E^e + // Source: https://www.hyperelliptic.org/EFD/g1p/data/twisted/extended/addition/madd-2008-hwcd + + // A = X1*X2 + let a = self.x * &other.x; + // B = Y1*Y2 + let b = self.y * &other.y; + // C = T1*d*T2 + let c = P::COEFF_D * &self.t * &other.x * &other.y; + + // D = Z1 + let d = self.z; + // E = (X1+Y1)*(X2+Y2)-A-B + let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; + // F = D-C + let f = d - &c; + // G = D+C + let g = d + &c; + // H = B-a*A + let h = b - &P::mul_by_a(&a); + // X3 = E*F + self.x = e * &f; + // Y3 = G*H + self.y = g * &h; + // T3 = E*H + self.t = e * &h; + // Z3 = F*G + self.z = f * &g; + } +} + +impl>> Add for Projective
<P>
{ + type Output = Self; + fn add(mut self, other: T) -> Self { + let other = other.borrow(); + self += other; + self + } +} + +impl>> SubAssign for Projective
<P>
{ + fn sub_assign(&mut self, other: T) { + *self += -(*other.borrow()); + } +} + +impl>> Sub for Projective
<P>
{ + type Output = Self; + fn sub(mut self, other: T) -> Self { + self -= other.borrow(); + self + } +} +ark_ff::impl_additive_ops_from_ref!(Projective, TECurveConfig); + +impl<'a, P: TECurveConfig> Add<&'a Self> for Projective
<P>
{ + type Output = Self; + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, P: TECurveConfig> Sub<&'a Self> for Projective
<P>
{ + type Output = Self; + fn sub(mut self, other: &'a Self) -> Self { + self -= other; + self + } +} + +impl<'a, P: TECurveConfig> AddAssign<&'a Self> for Projective
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + // See "Twisted Edwards Curves Revisited" (https://eprint.iacr.org/2008/522.pdf) + // by Huseyin Hisil, Kenneth Koon-Ho Wong, Gary Carter, and Ed Dawson + // 3.1 Unified Addition in E^e + + // A = x1 * x2 + let a = self.x * &other.x; + + // B = y1 * y2 + let b = self.y * &other.y; + + // C = d * t1 * t2 + let c = P::COEFF_D * &self.t * &other.t; + + // D = z1 * z2 + let d = self.z * &other.z; + + // H = B - aA + let h = b - &P::mul_by_a(&a); + + // E = (x1 + y1) * (x2 + y2) - A - B + let e = (self.x + &self.y) * &(other.x + &other.y) - &a - &b; + + // F = D - C + let f = d - &c; + + // G = D + C + let g = d + &c; + + // x3 = E * F + self.x = e * &f; + + // y3 = G * H + self.y = g * &h; + + // t3 = E * H + self.t = e * &h; + + // z3 = F * G + self.z = f * &g; + } +} + +impl<'a, P: TECurveConfig> SubAssign<&'a Self> for Projective
<P>
{ + fn sub_assign(&mut self, other: &'a Self) { + *self += -(*other); + } +} + +impl> MulAssign for Projective
<P>
{ + fn mul_assign(&mut self, other: T) { + *self = self.mul_bigint(other.borrow().into_bigint()) + } +} + +impl> Mul for Projective
<P>
{ + type Output = Self; + + #[inline] + fn mul(mut self, other: T) -> Self { + self *= other; + self + } +} + +impl>> ark_std::iter::Sum for Projective
<P>
{ + fn sum(iter: I) -> Self + where + I: Iterator, + { + iter.fold(Self::zero(), |acc, x| acc + x.borrow()) + } +} + +// The affine point (X, Y) is represented in the Extended Projective coordinates +// with Z = 1. +impl From> for Projective
<P>
{ + fn from(p: Affine
<P>
) -> Projective
<P>
{ + Self::new_unchecked(p.x, p.y, p.x * &p.y, P::BaseField::one()) + } +} + +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: MontCurveConfig"), + Clone(bound = "P: MontCurveConfig"), + PartialEq(bound = "P: MontCurveConfig"), + Eq(bound = "P: MontCurveConfig"), + Debug(bound = "P: MontCurveConfig"), + Hash(bound = "P: MontCurveConfig") +)] +pub struct MontgomeryAffine { + pub x: P::BaseField, + pub y: P::BaseField, +} + +impl Display for MontgomeryAffine
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "MontgomeryAffine(x={}, y={})", self.x, self.y) + } +} + +impl MontgomeryAffine
<P>
{ + pub fn new(x: P::BaseField, y: P::BaseField) -> Self { + Self { x, y } + } +} + +impl CanonicalSerialize for Projective
<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + let aff = Affine::
<P>
::from(*self); + aff.serialized_size() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + let aff = Affine::
<P>
::from(*self); + aff.uncompressed_size() + } +} + +impl CanonicalDeserialize for Projective
<P>
{ + #[allow(unused_qualifications)] + fn deserialize(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize(reader)?; + Ok(aff.into()) + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_uncompressed(reader)?; + Ok(aff.into()) + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_unchecked(reader)?; + Ok(aff.into()) + } +} + +impl ToConstraintField for Projective +where + M::BaseField: ToConstraintField, +{ + #[inline] + fn to_field_elements(&self) -> Option> { + Affine::from(*self).to_field_elements() + } +} + +impl Projective
<P>
{ + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn serialize_old(&self, writer: W) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize_old(writer) + } + + #[allow(unused_qualifications)] + #[inline] + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn serialize_uncompressed_old( + &self, + writer: W, + ) -> Result<(), SerializationError> { + let aff = Affine::
<P>
::from(*self); + aff.serialize_uncompressed(writer) + } + + #[allow(unused_qualifications)] + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn deserialize_uncompressed_old(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_uncompressed(reader)?; + Ok(aff.into()) + } + /// This method is implemented for backwards compatibility with the old + /// serialization format and will be deprecated and then removed in a + /// future version. + pub fn deserialize_old(reader: R) -> Result { + let aff = Affine::
<P>
::deserialize_old(reader)?; + Ok(aff.into()) + } +} + +impl ScalarMul for Projective
<P>
{ + type MulBase = Affine
<P>
; + + fn batch_convert_to_mul_base(bases: &[Self]) -> Vec { + Self::normalize_batch(bases) + } +} + +impl VariableBaseMSM for Projective
<P>
{} diff --git a/ec/src/models/twisted_edwards/mod.rs b/ec/src/models/twisted_edwards/mod.rs new file mode 100644 index 000000000..d5b206a9d --- /dev/null +++ b/ec/src/models/twisted_edwards/mod.rs @@ -0,0 +1,96 @@ +use crate::{AffineRepr, Group}; +use num_traits::Zero; + +use ark_ff::fields::Field; + +mod affine; +pub use affine::*; + +mod group; +pub use group::*; + +/// Constants and convenience functions that collectively define the [Twisted Edwards model](https://www.hyperelliptic.org/EFD/g1p/auto-twisted.html) +/// of the curve. In this model, the curve equation is +/// `a * x² + y² = 1 + d * x² * y²`, for constants `a` and `d`. +pub trait TECurveConfig: super::CurveConfig { + /// Coefficient `a` of the curve equation. + const COEFF_A: Self::BaseField; + /// Coefficient `d` of the curve equation. + const COEFF_D: Self::BaseField; + /// Generator of the prime-order subgroup. + const GENERATOR: Affine; + + /// Model parameters for the Montgomery curve that is birationally + /// equivalent to this curve. + type MontCurveConfig: MontCurveConfig; + + /// Helper method for computing `elem * Self::COEFF_A`. + /// + /// The default implementation should be overridden only if + /// the product can be computed faster than standard field multiplication + /// (eg: via doubling if `COEFF_A == 2`, or if `COEFF_A.is_zero()`). + #[inline(always)] + fn mul_by_a(elem: &Self::BaseField) -> Self::BaseField { + let mut copy = *elem; + copy *= &Self::COEFF_A; + copy + } + + /// Checks that the current point is in the prime order subgroup given + /// the point on the curve. + fn is_in_correct_subgroup_assuming_on_curve(item: &Affine) -> bool { + Self::mul_affine(item, Self::ScalarField::characteristic()).is_zero() + } + + /// Performs cofactor clearing. + /// The default method is simply to multiply by the cofactor. + /// For some curve families though, it is sufficient to multiply + /// by a smaller scalar. + fn clear_cofactor(item: &Affine) -> Affine { + item.mul_by_cofactor() + } + + /// Default implementation of group multiplication for projective + /// coordinates + fn mul_projective(base: &Projective, scalar: &[u64]) -> Projective { + let mut res = Projective::::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base; + } + } + + res + } + + /// Default implementation of group multiplication for affine + /// coordinates + fn mul_affine(base: &Affine, scalar: &[u64]) -> Projective { + let mut res = Projective::::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base + } + } + + res + } +} + +/// Constants and convenience functions that collectively define the [Montgomery model](https://www.hyperelliptic.org/EFD/g1p/auto-montgom.html) +/// of the curve. In this model, the curve equation is +/// `b * y² = x³ + a * x² + x`, for constants `a` and `b`. +pub trait MontCurveConfig: super::CurveConfig { + /// Coefficient `a` of the curve equation. + const COEFF_A: Self::BaseField; + /// Coefficient `b` of the curve equation. + const COEFF_B: Self::BaseField; + + /// Model parameters for the Twisted Edwards curve that is birationally + /// equivalent to this curve. 
+ type TECurveConfig: TECurveConfig; +} + +////////////////////////////////////////////////////////////////////////////// diff --git a/ec/src/msm/mod.rs b/ec/src/msm/mod.rs deleted file mode 100644 index 941dcbb99..000000000 --- a/ec/src/msm/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod fixed_base; -mod variable_base; -pub use fixed_base::*; -pub use variable_base::*; - -/// The result of this function is only approximately `ln(a)` -/// [`Explanation of usage`] -/// -/// [`Explanation of usage`]: https://github.com/scipr-lab/zexe/issues/79#issue-556220473 -fn ln_without_floats(a: usize) -> usize { - // log2(a) * ln(2) - (ark_std::log2(a) * 69 / 100) as usize -} diff --git a/ec/src/pairing.rs b/ec/src/pairing.rs new file mode 100644 index 000000000..4d6b79bfd --- /dev/null +++ b/ec/src/pairing.rs @@ -0,0 +1,350 @@ +use ark_ff::{CyclotomicMultSubgroup, Field, One, PrimeField}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError}; +use ark_std::{ + borrow::Borrow, + fmt::{Debug, Display, Formatter, Result as FmtResult}, + io::{Read, Write}, + ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + rand::{ + distributions::{Distribution, Standard}, + Rng, + }, + vec::Vec, + UniformRand, Zero, +}; +use zeroize::Zeroize; + +use crate::{AffineRepr, CurveGroup, Group, VariableBaseMSM}; + +/// Collection of types (mainly fields and curves) that together describe +/// how to compute a pairing over a pairing-friendly curve. +pub trait Pairing: Sized + 'static + Copy + Debug + Sync + Send + Eq { + /// This is the scalar field of the G1/G2 groups. + type ScalarField: PrimeField; + + /// An element in G1. + type G1: CurveGroup + + From + + Into + // needed due to https://github.com/rust-lang/rust/issues/69640 + + MulAssign; + + type G1Affine: AffineRepr + + From + + Into + + Into; + + /// A G1 element that has been preprocessed for use in a pairing. + type G1Prepared: Default + + Clone + + Send + + Sync + + Debug + + for<'a> From<&'a Self::G1> + + for<'a> From<&'a Self::G1Affine> + + From + + From; + + /// An element of G2. + type G2: CurveGroup + + From + + Into + // needed due to https://github.com/rust-lang/rust/issues/69640 + + MulAssign; + + /// The affine representation of an element in G2. + type G2Affine: AffineRepr + + From + + Into + + Into; + + /// A G2 element that has been preprocessed for use in a pairing. + type G2Prepared: Default + + Clone + + Send + + Sync + + Debug + + for<'a> From<&'a Self::G2> + + for<'a> From<&'a Self::G2Affine> + + From + + From; + + /// The extension field that hosts the target group of the pairing. + type TargetField: CyclotomicMultSubgroup; + + /// Computes the product of Miller loops for some number of (G1, G2) pairs. + #[must_use] + fn multi_miller_loop( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> MillerLoopOutput; + + /// Computes the Miller loop over `a` and `b`. + #[must_use] + fn miller_loop( + a: impl Into, + b: impl Into, + ) -> MillerLoopOutput { + Self::multi_miller_loop([a], [b]) + } + + /// Performs final exponentiation of the result of a `Self::multi_miller_loop`. + #[must_use] + fn final_exponentiation(mlo: MillerLoopOutput) -> Option>; + + /// Computes a "product" of pairings. 
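// A usage sketch (illustrative only, not part of this diff): the typical way the
// `multi_pairing` method that follows is consumed is to check a product-of-pairings
// relation with one batched Miller loop and a single final exponentiation.
// `pairing_product_is_one` is a hypothetical helper written only against the
// `Pairing` trait defined here.
use ark_ec::pairing::Pairing;
use ark_std::Zero;

fn pairing_product_is_one<E: Pairing>(g1s: &[E::G1Affine], g2s: &[E::G2Affine]) -> bool {
    // `PairingOutput` is written additively, so the group identity is `zero()`.
    E::multi_pairing(g1s.iter().copied(), g2s.iter().copied()).is_zero()
}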
+ #[must_use] + fn multi_pairing( + a: impl IntoIterator>, + b: impl IntoIterator>, + ) -> PairingOutput { + Self::final_exponentiation(Self::multi_miller_loop(a, b)).unwrap() + } + + /// Performs multiple pairing operations + #[must_use] + fn pairing( + p: impl Into, + q: impl Into, + ) -> PairingOutput { + Self::multi_pairing([p], [q]) + } +} + +/// Represents the target group of a pairing. This struct is a +/// wrapper around the field that the target group is embedded in. +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: Pairing"), + Clone(bound = "P: Pairing"), + Debug(bound = "P: Pairing"), + PartialEq(bound = "P: Pairing"), + Eq(bound = "P: Pairing"), + PartialOrd(bound = "P: Pairing"), + Ord(bound = "P: Pairing"), + Default(bound = "P: Pairing"), + Hash(bound = "P: Pairing") +)] +#[must_use] +pub struct PairingOutput(pub P::TargetField); + +impl CanonicalSerialize for PairingOutput
<P>
{ + #[allow(unused_qualifications)] + #[inline] + fn serialize(&self, writer: W) -> Result<(), SerializationError> { + self.0.serialize(writer) + } + + #[inline] + fn serialized_size(&self) -> usize { + self.0.serialized_size() + } + + #[allow(unused_qualifications)] + #[inline] + fn serialize_uncompressed(&self, writer: W) -> Result<(), SerializationError> { + self.0.serialize_uncompressed(writer) + } + + #[inline] + fn uncompressed_size(&self) -> usize { + self.0.uncompressed_size() + } +} + +impl CanonicalDeserialize for PairingOutput
<P>
{ + #[allow(unused_qualifications)] + fn deserialize(reader: R) -> Result { + Self::deserialize_uncompressed(reader) + } + + #[allow(unused_qualifications)] + fn deserialize_uncompressed( + reader: R, + ) -> Result { + let f = Self::deserialize_unchecked(reader)?; + // Check that the output is within the field. + if f.0.pow(&P::ScalarField::characteristic()).is_one() { + Ok(f) + } else { + Err(SerializationError::InvalidData) + } + } + + #[allow(unused_qualifications)] + fn deserialize_unchecked(reader: R) -> Result { + P::TargetField::deserialize_unchecked(reader).map(Self) + } +} + +impl Display for PairingOutput
<P>
{ + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { + write!(f, "{}", self.0) + } +} + +impl Zero for PairingOutput
<P>
{ + /// The identity element, or "zero", of the group is the identity element of the multiplicative group of the underlying field, i.e., `P::TargetField::one()`. + fn zero() -> Self { + Self(P::TargetField::one()) + } + + fn is_zero(&self) -> bool { + self.0.is_one() + } +} + +impl<'a, P: Pairing> Add<&'a Self> for PairingOutput
<P>
{ + type Output = Self; + + #[inline] + fn add(mut self, other: &'a Self) -> Self { + self += other; + self + } +} + +impl<'a, P: Pairing> AddAssign<&'a Self> for PairingOutput
<P>
{ + fn add_assign(&mut self, other: &'a Self) { + self.0 *= other.0; + } +} + +impl<'a, P: Pairing> SubAssign<&'a Self> for PairingOutput
<P>
{ + fn sub_assign(&mut self, other: &'a Self) { + self.0 *= other.0.cyclotomic_inverse().unwrap(); + } +} + +impl<'a, P: Pairing> Sub<&'a Self> for PairingOutput
<P>
{ + type Output = Self; + + #[inline] + fn sub(mut self, other: &'a Self) -> Self { + self -= other; + self + } +} + +ark_ff::impl_additive_ops_from_ref!(PairingOutput, Pairing); + +impl> MulAssign for PairingOutput
<P>
{ + fn mul_assign(&mut self, other: T) { + *self = self.mul_bigint(other.borrow().into_bigint()); + } +} + +impl> Mul for PairingOutput
<P>
{ + type Output = Self; + + fn mul(self, other: T) -> Self { + self.mul_bigint(other.borrow().into_bigint()) + } +} + +impl Zeroize for PairingOutput
<P>
{ + fn zeroize(&mut self) { + self.0.zeroize() + } +} + +impl Neg for PairingOutput
<P>
{ + type Output = Self; + + #[inline] + fn neg(self) -> Self { + Self(self.0.cyclotomic_inverse().unwrap()) + } +} + +impl Distribution> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> PairingOutput
<P>
{ + // Sample a random G1 element + let g1 = P::G1::rand(rng); + // Sample a random G2 element + let g2 = P::G2::rand(rng); + P::pairing(g1, g2) + } +} + +impl Group for PairingOutput
<P>
{ + type ScalarField = P::ScalarField; + + fn generator() -> Self { + // TODO: hardcode these values. + // Sample a random G1 element + let g1 = P::G1::generator(); + // Sample a random G2 element + let g2 = P::G2::generator(); + P::pairing(g1.into(), g2.into()) + } + + fn double_in_place(&mut self) -> &mut Self { + self.0.cyclotomic_square_in_place(); + self + } + + fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { + Self(self.0.cyclotomic_exp(other.as_ref())) + } + + fn mul_bits_be(&self, other: impl Iterator) -> Self { + // Convert back from bits to [u64] limbs + let other = other + .collect::>() + .chunks(64) + .map(|chunk| { + chunk + .iter() + .enumerate() + .fold(0, |r, (i, bit)| r | u64::from(*bit) << i) + }) + .collect::>(); + Self(self.0.cyclotomic_exp(&other)) + } +} + +impl crate::ScalarMul for PairingOutput
<P>
{ + type MulBase = Self; + + fn batch_convert_to_mul_base(bases: &[Self]) -> Vec { + bases.to_vec() + } +} + +impl VariableBaseMSM for PairingOutput
<P>
{} + +/// Represents the output of the Miller loop of the pairing. +#[derive(Derivative)] +#[derivative( + Copy(bound = "P: Pairing"), + Clone(bound = "P: Pairing"), + Debug(bound = "P: Pairing"), + PartialEq(bound = "P: Pairing"), + Eq(bound = "P: Pairing"), + PartialOrd(bound = "P: Pairing"), + Ord(bound = "P: Pairing") +)] +#[must_use] +pub struct MillerLoopOutput(pub P::TargetField); + +impl Mul for MillerLoopOutput
<P>
{ + type Output = Self; + + fn mul(self, other: P::ScalarField) -> Self { + Self(self.0.pow(other.into_bigint())) + } +} + +/// Preprocesses a G1 element for use in a pairing. +pub fn prepare_g1(g: impl Into) -> E::G1Prepared { + let g: E::G1Affine = g.into(); + E::G1Prepared::from(g) +} + +/// Preprocesses a G2 element for use in a pairing. +pub fn prepare_g2(g: impl Into) -> E::G2Prepared { + let g: E::G2Affine = g.into(); + E::G2Prepared::from(g) +} diff --git a/ec/src/msm/fixed_base.rs b/ec/src/scalar_mul/fixed_base.rs similarity index 84% rename from ec/src/msm/fixed_base.rs rename to ec/src/scalar_mul/fixed_base.rs index d9b6c0e35..ce8001ccd 100644 --- a/ec/src/msm/fixed_base.rs +++ b/ec/src/scalar_mul/fixed_base.rs @@ -1,10 +1,11 @@ -use crate::{AffineCurve, ProjectiveCurve}; use ark_ff::{BigInteger, PrimeField}; use ark_std::{cfg_iter, cfg_iter_mut, vec::Vec}; #[cfg(feature = "parallel")] use rayon::prelude::*; +use super::ScalarMul; + pub struct FixedBase; impl FixedBase { @@ -16,11 +17,11 @@ impl FixedBase { } } - pub fn get_window_table( + pub fn get_window_table( scalar_size: usize, window: usize, g: T, - ) -> Vec> { + ) -> Vec> { let in_window = 1 << window; let outerc = (scalar_size + window - 1) / window; let last_in_window = 1 << (scalar_size - (outerc - 1) * window); @@ -53,20 +54,20 @@ impl FixedBase { } }); cfg_iter!(multiples_of_g) - .map(|s| T::batch_normalization_into_affine(s)) + .map(|s| T::batch_convert_to_mul_base(s)) .collect() } - pub fn windowed_mul( + pub fn windowed_mul( outerc: usize, window: usize, - multiples_of_g: &[Vec], + multiples_of_g: &[Vec<::MulBase>], scalar: &T::ScalarField, ) -> T { let modulus_size = T::ScalarField::MODULUS_BIT_SIZE as usize; let scalar_val = scalar.into_bigint().to_bits_le(); - let mut res = multiples_of_g[0][0].into_projective(); + let mut res = T::from(multiples_of_g[0][0]); for outer in 0..outerc { let mut inner = 0usize; for i in 0..window { @@ -74,17 +75,17 @@ impl FixedBase { inner |= 1 << i; } } - res.add_assign_mixed(&multiples_of_g[outer][inner]); + res += &multiples_of_g[outer][inner]; } res } // TODO use const-generics for the scalar size and window // TODO use iterators of iterators of T::Affine instead of taking owned Vec - pub fn msm( + pub fn msm( scalar_size: usize, window: usize, - table: &[Vec], + table: &[Vec<::MulBase>], v: &[T::ScalarField], ) -> Vec { let outerc = (scalar_size + window - 1) / window; diff --git a/ec/src/glv.rs b/ec/src/scalar_mul/glv.rs similarity index 87% rename from ec/src/glv.rs rename to ec/src/scalar_mul/glv.rs index 7232890a8..7feb60693 100644 --- a/ec/src/glv.rs +++ b/ec/src/scalar_mul/glv.rs @@ -1,12 +1,10 @@ -use crate::CurveConfig; +use crate::{CurveConfig, CurveGroup}; /// The GLV parameters for computing the endomorphism and scalar decomposition. pub trait GLVParameters: Send + Sync + 'static + CurveConfig { - /// Affine representation of curve points. - type CurveAffine; /// A representation of curve points that enables efficient arithmetic by /// avoiding inversions. - type CurveProjective; + type Curve: CurveGroup; // Constants that are used to calculate `phi(G) := lambda*G`. @@ -49,11 +47,16 @@ pub trait GLVParameters: Send + Sync + 'static + CurveConfig { // return (x',y') where // x' = x * f(y) / y // y' = g(y) / h(y) - fn endomorphism(base: &Self::CurveAffine) -> Self::CurveAffine; + fn endomorphism( + base: &::Affine, + ) -> ::Affine; /// Decomposes a scalar s into k1, k2, s.t. 
s = k1 + lambda k2, fn scalar_decomposition(k: &Self::ScalarField) -> (Self::ScalarField, Self::ScalarField); /// Performs GLV multiplication. - fn glv_mul(base: &Self::CurveAffine, scalar: &Self::ScalarField) -> Self::CurveProjective; + fn glv_mul( + base: &::Affine, + scalar: &Self::ScalarField, + ) -> Self::Curve; } diff --git a/ec/src/scalar_mul/mod.rs b/ec/src/scalar_mul/mod.rs new file mode 100644 index 000000000..eb4e63e44 --- /dev/null +++ b/ec/src/scalar_mul/mod.rs @@ -0,0 +1,39 @@ +pub mod glv; +pub mod wnaf; + +pub mod fixed_base; +pub mod variable_base; + +use crate::Group; +use ark_std::{ + ops::{Add, AddAssign, Mul}, + vec::Vec, +}; + +/// The result of this function is only approximately `ln(a)` +/// [`Explanation of usage`] +/// +/// [`Explanation of usage`]: https://github.com/scipr-lab/zexe/issues/79#issue-556220473 +fn ln_without_floats(a: usize) -> usize { + // log2(a) * ln(2) + (ark_std::log2(a) * 69 / 100) as usize +} + +pub trait ScalarMul: + Group + + Add + + AddAssign + + for<'a> Add<&'a Self::MulBase, Output = Self> + + for<'a> AddAssign<&'a Self::MulBase> + + From +{ + type MulBase: Send + + Sync + + Copy + + Eq + + core::hash::Hash + + Mul + + for<'a> Mul<&'a Self::ScalarField, Output = Self>; + + fn batch_convert_to_mul_base(bases: &[Self]) -> Vec; +} diff --git a/ec/src/msm/variable_base/mod.rs b/ec/src/scalar_mul/variable_base/mod.rs similarity index 74% rename from ec/src/msm/variable_base/mod.rs rename to ec/src/scalar_mul/variable_base/mod.rs index a0ce91133..f8840763c 100644 --- a/ec/src/msm/variable_base/mod.rs +++ b/ec/src/scalar_mul/variable_base/mod.rs @@ -1,10 +1,5 @@ use ark_ff::{prelude::*, PrimeField}; -use ark_std::{ - borrow::Borrow, - iterable::Iterable, - ops::{Add, AddAssign}, - vec::Vec, -}; +use ark_std::{borrow::Borrow, iterable::Iterable, vec::Vec}; #[cfg(feature = "parallel")] use rayon::prelude::*; @@ -12,53 +7,34 @@ use rayon::prelude::*; pub mod stream_pippenger; pub use stream_pippenger::*; -pub trait VariableBaseMSM: - Eq - + Sized - + Sync - + Zero - + Clone - + Copy - + Send - + AddAssign - + for<'a> AddAssign<&'a Self> - + for<'a> Add<&'a Self, Output = Self> -{ - type MSMBase: Sync + Copy; +use super::ScalarMul; - type Scalar: PrimeField; - - #[doc(hidden)] - fn _double_in_place(&mut self) -> &mut Self; - - #[doc(hidden)] - fn _add_assign_mixed(&mut self, other: &Self::MSMBase); - - /// Optimized implementation of multi-scalar multiplication. +pub trait VariableBaseMSM: ScalarMul { + /// Computes an inner product between the [`PrimeField`] elements in `scalars` + /// and the corresponding group elements in `bases`. /// - /// Multiply the [`PrimeField`] elements in `scalars` with the - /// respective group elements in `bases` and sum the resulting set. + /// This method checks that `bases` and `scalars` have the same length. + /// If they are unequal, it returns an error containing + /// the shortest length over which the MSM can be performed. /// - ///

- /// - /// If the elements have different length, it will chop the slices to the - /// shortest length between `scalars.len()` and `bases.len()`. - /// - ///
- fn msm(bases: &[Self::MSMBase], scalars: &[Self::Scalar]) -> Self { + /// Reference: [`VariableBaseMSM::msm`] + fn msm(bases: &[Self::MulBase], scalars: &[Self::ScalarField]) -> Self { let bigints = cfg_into_iter!(scalars) .map(|s| s.into_bigint()) .collect::>(); Self::msm_bigint(bases, &bigints) } - /// Optimized implementation of multi-scalar multiplication, that checks bounds. + /// Performs multi-scalar multiplication, without checking that `bases.len() == scalars.len()`. /// - /// Performs `Self::msm`, checking that `bases` and `scalars` have the same length. - /// If the length are not equal, returns an error containing the shortest legth over which msm can be performed. + /// # Warning /// - /// Reference: [`VariableBaseMSM::msm`] - fn msm_checked(bases: &[Self::MSMBase], scalars: &[Self::Scalar]) -> Result { + /// If the elements have different length, it will chop the slices to the + /// shortest length between `scalars.len()` and `bases.len()`. + fn msm_unchecked( + bases: &[Self::MulBase], + scalars: &[Self::ScalarField], + ) -> Result { (bases.len() == scalars.len()) .then(|| Self::msm(bases, scalars)) .ok_or(usize::min(bases.len(), scalars.len())) @@ -66,8 +42,8 @@ pub trait VariableBaseMSM: /// Optimized implementation of multi-scalar multiplication. fn msm_bigint( - bases: &[Self::MSMBase], - bigints: &[::BigInt], + bases: &[Self::MulBase], + bigints: &[::BigInt], ) -> Self { let size = ark_std::cmp::min(bases.len(), bigints.len()); let scalars = &bigints[..size]; @@ -80,8 +56,8 @@ pub trait VariableBaseMSM: super::ln_without_floats(size) + 2 }; - let num_bits = Self::Scalar::MODULUS_BIT_SIZE as usize; - let fr_one = Self::Scalar::one().into_bigint(); + let num_bits = Self::ScalarField::MODULUS_BIT_SIZE as usize; + let fr_one = Self::ScalarField::one().into_bigint(); let zero = Self::zero(); let window_starts: Vec<_> = (0..num_bits).step_by(c).collect(); @@ -100,7 +76,7 @@ pub trait VariableBaseMSM: if scalar == fr_one { // We only process unit scalars once in the first window. if w_start == 0 { - res._add_assign_mixed(base); + res += base; } } else { let mut scalar = scalar; @@ -116,7 +92,7 @@ pub trait VariableBaseMSM: // bucket. // (Recall that `buckets` doesn't have a zero bucket.) if scalar != 0 { - buckets[(scalar - 1) as usize]._add_assign_mixed(base); + buckets[(scalar - 1) as usize] += base; } } }); @@ -155,19 +131,20 @@ pub trait VariableBaseMSM: .fold(zero, |mut total, sum_i| { total += sum_i; for _ in 0..c { - total._double_in_place(); + total.double_in_place(); } total }) } + /// Streaming multi-scalar multiplication algorithm with hard-coded chunk /// size. 
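// An illustrative sketch of the same chunk-and-accumulate strategy that the
// `msm_chunks` method below applies to `Iterable` streams, written here against
// plain slices; `msm_in_chunks` is a hypothetical helper and the chunk size is
// an arbitrary choice, not the crate's hard-coded value.
use ark_ec::scalar_mul::variable_base::VariableBaseMSM;
use ark_ff::PrimeField;
use ark_std::Zero;

fn msm_in_chunks<V: VariableBaseMSM>(
    bases: &[V::MulBase],
    scalars: &[<V::ScalarField as PrimeField>::BigInt],
) -> V {
    let chunk_size = 1 << 20;
    bases
        .chunks(chunk_size)
        .zip(scalars.chunks(chunk_size))
        // each chunk is reduced with the bucketed `msm_bigint`, then summed
        .fold(V::zero(), |acc, (b, s)| acc + V::msm_bigint(b, s))
}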
fn msm_chunks(bases_stream: &J, scalars_stream: &I) -> Self where I: Iterable, - I::Item: Borrow, + I::Item: Borrow, J: Iterable, - J::Item: Borrow, + J::Item: Borrow, { assert!(scalars_stream.len() <= bases_stream.len()); @@ -190,10 +167,7 @@ pub trait VariableBaseMSM: .take(step) .map(|s| s.borrow().into_bigint()) .collect::>(); - result.add_assign(Self::msm_bigint( - bases_step.as_slice(), - scalars_step.as_slice(), - )); + result += Self::msm_bigint(bases_step.as_slice(), scalars_step.as_slice()); } result } diff --git a/ec/src/msm/variable_base/stream_pippenger.rs b/ec/src/scalar_mul/variable_base/stream_pippenger.rs similarity index 67% rename from ec/src/msm/variable_base/stream_pippenger.rs rename to ec/src/scalar_mul/variable_base/stream_pippenger.rs index 149cfb211..dc62ba4cb 100644 --- a/ec/src/msm/variable_base/stream_pippenger.rs +++ b/ec/src/scalar_mul/variable_base/stream_pippenger.rs @@ -1,31 +1,26 @@ //! A space-efficient implementation of Pippenger's algorithm. -use crate::AffineCurve; use ark_ff::{PrimeField, Zero}; -use ark_std::{borrow::Borrow, ops::AddAssign, vec::Vec}; +use ark_std::{borrow::Borrow, vec::Vec}; use hashbrown::HashMap; use super::VariableBaseMSM; /// Struct for the chunked Pippenger algorithm. -pub struct ChunkedPippenger { +pub struct ChunkedPippenger { scalars_buffer: Vec<::BigInt>, - bases_buffer: Vec, - result: G::Projective, + bases_buffer: Vec, + result: G, buf_size: usize, } -impl ChunkedPippenger -where - G: AffineCurve, - G::Projective: VariableBaseMSM, -{ +impl ChunkedPippenger { /// Initialize a chunked Pippenger instance with default parameters. pub fn new(max_msm_buffer: usize) -> Self { Self { scalars_buffer: Vec::with_capacity(max_msm_buffer), bases_buffer: Vec::with_capacity(max_msm_buffer), - result: G::Projective::zero(), + result: G::zero(), buf_size: max_msm_buffer, } } @@ -35,7 +30,7 @@ where Self { scalars_buffer: Vec::with_capacity(buf_size), bases_buffer: Vec::with_capacity(buf_size), - result: G::Projective::zero(), + result: G::zero(), buf_size, } } @@ -44,17 +39,16 @@ where #[inline(always)] pub fn add(&mut self, base: B, scalar: S) where - B: Borrow, + B: Borrow, S: Borrow<::BigInt>, { self.scalars_buffer.push(*scalar.borrow()); self.bases_buffer.push(*base.borrow()); if self.scalars_buffer.len() == self.buf_size { - self.result - .add_assign(::msm_bigint( - self.bases_buffer.as_slice(), - self.scalars_buffer.as_slice(), - )); + self.result.add_assign(G::msm_bigint( + self.bases_buffer.as_slice(), + self.scalars_buffer.as_slice(), + )); self.scalars_buffer.clear(); self.bases_buffer.clear(); } @@ -62,35 +56,28 @@ where /// Output the final Pippenger algorithm result. #[inline(always)] - pub fn finalize(mut self) -> G::Projective { + pub fn finalize(mut self) -> G { if !self.scalars_buffer.is_empty() { - self.result - .add_assign(::msm_bigint( - self.bases_buffer.as_slice(), - self.scalars_buffer.as_slice(), - )); + self.result += + G::msm_bigint(self.bases_buffer.as_slice(), self.scalars_buffer.as_slice()); } self.result } } /// Hash map struct for Pippenger algorithm. -pub struct HashMapPippenger { - buffer: HashMap, - result: G::Projective, +pub struct HashMapPippenger { + buffer: HashMap, + result: G, buf_size: usize, } -impl HashMapPippenger -where - G: AffineCurve, - G::Projective: VariableBaseMSM, -{ +impl HashMapPippenger { /// Produce a new hash map with the maximum msm buffer size. 
pub fn new(max_msm_buffer: usize) -> Self { Self { buffer: HashMap::with_capacity(max_msm_buffer), - result: G::Projective::zero(), + result: G::zero(), buf_size: max_msm_buffer, } } @@ -99,7 +86,7 @@ where #[inline(always)] pub fn add(&mut self, base: B, scalar: S) where - B: Borrow, + B: Borrow, S: Borrow, { // update the entry, guarding the possibility that it has been already set. @@ -115,15 +102,14 @@ where .values() .map(|s| s.into_bigint()) .collect::>(); - self.result - .add_assign(G::Projective::msm_bigint(&bases, &scalars)); + self.result += G::msm_bigint(&bases, &scalars); self.buffer.clear(); } } /// Update the final result with (base, scalar) pairs in the hash map. #[inline(always)] - pub fn finalize(mut self) -> G::Projective { + pub fn finalize(mut self) -> G { if !self.buffer.is_empty() { let bases = self.buffer.keys().cloned().collect::>(); let scalars = self @@ -132,8 +118,7 @@ where .map(|s| s.into_bigint()) .collect::>(); - self.result - .add_assign(G::Projective::msm_bigint(&bases, &scalars)); + self.result += G::msm_bigint(&bases, &scalars); } self.result } diff --git a/ec/src/wnaf.rs b/ec/src/scalar_mul/wnaf.rs similarity index 87% rename from ec/src/wnaf.rs rename to ec/src/scalar_mul/wnaf.rs index 3c7cb57f5..d3e0b5437 100644 --- a/ec/src/wnaf.rs +++ b/ec/src/scalar_mul/wnaf.rs @@ -1,4 +1,4 @@ -use crate::ProjectiveCurve; +use crate::Group; use ark_ff::{BigInteger, PrimeField}; use ark_std::vec::Vec; @@ -20,7 +20,7 @@ impl WnafContext { Self { window_size } } - pub fn table(&self, mut base: G) -> Vec { + pub fn table(&self, mut base: G) -> Vec { let mut table = Vec::with_capacity(1 << (self.window_size - 1)); let dbl = base.double(); @@ -37,7 +37,7 @@ impl WnafContext { /// multiplication; first, it uses `Self::table` to calculate an /// appropriate table of multiples of `g`, and then uses the wNAF /// algorithm to compute the scalar multiple. - pub fn mul(&self, g: G, scalar: &G::ScalarField) -> G { + pub fn mul(&self, g: G, scalar: &G::ScalarField) -> G { let table = self.table(g); self.mul_with_table(&table, scalar).unwrap() } @@ -48,11 +48,7 @@ impl WnafContext { /// `G::ScalarField`. /// /// Returns `None` if the table is too small. 
- pub fn mul_with_table( - &self, - base_table: &[G], - scalar: &G::ScalarField, - ) -> Option { + pub fn mul_with_table(&self, base_table: &[G], scalar: &G::ScalarField) -> Option { if 1 << (self.window_size - 1) > base_table.len() { return None; } diff --git a/ff/Cargo.toml b/ff/Cargo.toml index f4d3b8d8e..33def4568 100644 --- a/ff/Cargo.toml +++ b/ff/Cargo.toml @@ -11,8 +11,7 @@ categories = ["cryptography"] include = ["Cargo.toml", "build.rs", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] license = "MIT/Apache-2.0" edition = "2021" -build = "build.rs" -rust-version = "1.57" +rust-version = "1.59" [dependencies] ark-ff-asm = { version = "^0.3.0", path = "../ff-asm" } diff --git a/ff/build.rs b/ff/build.rs deleted file mode 100644 index 01d7a1b21..000000000 --- a/ff/build.rs +++ /dev/null @@ -1,10 +0,0 @@ -use rustc_version::Version; - -fn main() { - println!("cargo:rerun-if-changed=build.rs"); - - let rust_version = rustc_version::version().unwrap(); - if rust_version >= Version::new(1, 59, 0) { - println!("cargo:rustc-cfg=inline_asm_stable"); - } -} diff --git a/ff/src/biginteger/arithmetic.rs b/ff/src/biginteger/arithmetic.rs index 4735a3158..9b25e0eb0 100644 --- a/ff/src/biginteger/arithmetic.rs +++ b/ff/src/biginteger/arithmetic.rs @@ -148,7 +148,6 @@ pub fn find_naf(num: &[u64]) -> Vec { /// exception of non-adjacence for the most significant bit. /// /// Since this representation is no longer a strict NAF, we call it "relaxed NAF". -/// pub fn find_relaxed_naf(num: &[u64]) -> Vec { let mut res = find_naf(num); diff --git a/ff/src/biginteger/mod.rs b/ff/src/biginteger/mod.rs index 46b404cb9..5d7cc4fd2 100644 --- a/ff/src/biginteger/mod.rs +++ b/ff/src/biginteger/mod.rs @@ -465,17 +465,11 @@ impl BigInteger for BigInt { } fn from_bits_le(bits: &[bool]) -> Self { - let mut res = Self::default(); - let mut acc: u64 = 0; - - let bits = bits.to_vec(); - for (i, bits64) in bits.chunks(64).enumerate() { - for bit in bits64.iter().rev() { - acc <<= 1; - acc += *bit as u64; + let mut res = Self::zero(); + for (bits64, res_i) in bits.chunks(64).zip(&mut res.0) { + for (i, bit) in bits64.iter().enumerate() { + *res_i |= (*bit as u64) << i; } - res.0[i] = acc; - acc = 0; } res } diff --git a/ff/src/fields/field_hashers/expander/tests.rs b/ff/src/fields/field_hashers/expander/tests.rs index c8274f377..94ddc1c62 100644 --- a/ff/src/fields/field_hashers/expander/tests.rs +++ b/ff/src/fields/field_hashers/expander/tests.rs @@ -2,8 +2,10 @@ use libtest_mimic::{run_tests, Arguments, Outcome, Test}; use sha2::{Sha256, Sha384, Sha512}; use sha3::{Shake128, Shake256}; -use std::fs::{read_dir, File}; -use std::io::BufReader; +use std::{ + fs::{read_dir, File}, + io::BufReader, +}; use super::{Expander, ExpanderXmd, ExpanderXof}; diff --git a/ff/src/fields/models/fp/montgomery_backend.rs b/ff/src/fields/models/fp/montgomery_backend.rs index 1c14a3e18..a12473bee 100644 --- a/ff/src/fields/models/fp/montgomery_backend.rs +++ b/ff/src/fields/models/fp/montgomery_backend.rs @@ -23,7 +23,7 @@ pub trait MontConfig: 'static + Sync + Send + Sized { const R2: BigInt = Self::MODULUS.montgomery_r2(); /// INV = -MODULUS^{-1} mod 2^64 - const INV: u64 = inv(&Self::MODULUS); + const INV: u64 = inv::(); /// A multiplicative generator of the field. /// `Self::GENERATOR` is an element having multiplicative order @@ -37,7 +37,7 @@ pub trait MontConfig: 'static + Sync + Send + Sized { /// `Self::MODULUS` has (a) a non-zero MSB, and (b) at least one /// zero bit in the rest of the modulus. 
#[doc(hidden)] - const CAN_USE_NO_CARRY_OPT: bool = can_use_no_carry_optimization(&Self::MODULUS); + const CAN_USE_NO_CARRY_OPT: bool = can_use_no_carry_optimization::(); /// 2^s root of unity computed by GENERATOR^t const TWO_ADIC_ROOT_OF_UNITY: Fp, N>; @@ -115,16 +115,23 @@ pub trait MontConfig: 'static + Sync + Send + Sized { && N > 1 && cfg!(all( feature = "asm", - inline_asm_stable, target_feature = "bmi2", target_feature = "adx", target_arch = "x86_64" )) { - #[cfg(all(feature = "asm", inline_asm_stable, target_feature = "bmi2", target_feature = "adx", target_arch = "x86_64"))] + #[cfg( + all( + feature = "asm", + target_feature = "bmi2", + target_feature = "adx", + target_arch = "x86_64" + ) + )] #[allow(unsafe_code, unused_mut)] - // Tentatively avoid using assembly for `N == 1`. #[rustfmt::skip] + + // Tentatively avoid using assembly for `N == 1`. match N { 2 => { ark_ff_asm::x86_64_asm_mul!(2, (a.0).0, (b.0).0); }, 3 => { ark_ff_asm::x86_64_asm_mul!(3, (a.0).0, (b.0).0); }, @@ -152,7 +159,7 @@ pub trait MontConfig: 'static + Sync + Send + Sized { } } else { // Alternative implementation - *a = a.mul_without_reduce(b); + *a = a.mul_without_cond_subtract(b); } a.subtract_modulus(); } @@ -392,23 +399,34 @@ pub trait MontConfig: 'static + Sync + Send + Sized { } /// Compute -M^{-1} mod 2^64. -pub const fn inv(m: &BigInt) -> u64 { +pub const fn inv, const N: usize>() -> u64 { + // We compute this as follows. + // First, MODULUS mod 2^64 is just the lower 64 bits of MODULUS. + // Hence MODULUS mod 2^64 = MODULUS.0[0] mod 2^64. + // + // Next, computing the inverse mod 2^64 involves exponentiating by + // the multiplicative group order, which is euler_totient(2^64) - 1. + // Now, euler_totient(2^64) = 1 << 63, and so + // euler_totient(2^64) - 1 = (1 << 63) - 1 = 1111111... (63 digits). + // We compute this powering via standard square and multiply. let mut inv = 1u64; crate::const_for!((_i in 0..63) { + // Square inv = inv.wrapping_mul(inv); - inv = inv.wrapping_mul(m.0[0]); + // Multiply + inv = inv.wrapping_mul(T::MODULUS.0[0]); }); inv.wrapping_neg() } #[inline] -pub const fn can_use_no_carry_optimization(modulus: &BigInt) -> bool { +pub const fn can_use_no_carry_optimization, const N: usize>() -> bool { // Checking the modulus at compile time - let first_bit_set = modulus.0[N - 1] >> 63 != 0; + let first_bit_set = T::MODULUS.0[N - 1] >> 63 != 0; // N can be 1, hence we can run into a case with an unused mut. 
- let mut all_bits_set = modulus.0[N - 1] == !0 - (1 << 63); + let mut all_bits_set = T::MODULUS.0[N - 1] == !0 - (1 << 63); crate::const_for!((i in 1..N) { - all_bits_set &= modulus.0[N - i - 1] == !0u64; + all_bits_set &= T::MODULUS.0[N - i - 1] == !0u64; }); !(first_bit_set || all_bits_set) } @@ -546,6 +564,8 @@ impl, const N: usize> FpConfig for MontBackend { } impl, const N: usize> Fp, N> { + #[doc(hidden)] + pub const R: BigInt = T::R; #[doc(hidden)] pub const R2: BigInt = T::R2; #[doc(hidden)] @@ -607,7 +627,7 @@ impl, const N: usize> Fp, N> { } } - const fn mul_without_reduce(mut self, other: &Self) -> Self { + const fn mul_without_cond_subtract(mut self, other: &Self) -> Self { let (mut lo, mut hi) = ([0u64; N], [0u64; N]); crate::const_for!((i in 0..N) { let mut carry = 0; @@ -645,8 +665,8 @@ impl, const N: usize> Fp, N> { } const fn mul(mut self, other: &Self) -> Self { - self = self.mul_without_reduce(other); - self.const_reduce() + self = self.mul_without_cond_subtract(other); + self.const_subtract_modulus() } const fn const_is_valid(&self) -> bool { @@ -661,7 +681,7 @@ impl, const N: usize> Fp, N> { } #[inline] - const fn const_reduce(mut self) -> Self { + const fn const_subtract_modulus(mut self) -> Self { if !self.const_is_valid() { self.0 = Self::sub_with_borrow(&self.0, &T::MODULUS); } diff --git a/ff/src/fields/models/fp12_2over3over2.rs b/ff/src/fields/models/fp12_2over3over2.rs index 38e5760b7..de060c71b 100644 --- a/ff/src/fields/models/fp12_2over3over2.rs +++ b/ff/src/fields/models/fp12_2over3over2.rs @@ -107,19 +107,18 @@ impl Fp12
<P>
{ } } -// TODO: make `const fn` in 1.46. -pub fn characteristic_square_mod_6_is_one(characteristic: &[u64]) -> bool { - // characteristic mod 6 = (a_0 + 2**64 * a_1 + ...) mod 6 - // = a_0 mod 6 + (2**64 * a_1 mod 6) + (...) mod 6 - // = a_0 mod 6 + (4 * a_1 mod 6) + (4 * ...) mod 6 +pub const fn characteristic_square_mod_6_is_one(characteristic: &[u64]) -> bool { + // char mod 6 = (a_0 + 2**64 * a_1 + ...) mod 6 + // = a_0 mod 6 + (2**64 * a_1 mod 6) + (...) mod 6 + // = a_0 mod 6 + (4 * a_1 mod 6) + (4 * ...) mod 6 let mut char_mod_6 = 0u64; - for (i, limb) in characteristic.iter().enumerate() { + crate::const_for!((i in 0..(characteristic.len())) { char_mod_6 += if i == 0 { - limb % 6 + characteristic[i] % 6 } else { - (4 * (limb % 6)) % 6 + (4 * (characteristic[i] % 6)) % 6 }; - } + }); (char_mod_6 * char_mod_6) % 6 == 1 } @@ -127,10 +126,7 @@ impl CyclotomicMultSubgroup for Fp12
<P>
{ const INVERSE_IS_FAST: bool = true; fn cyclotomic_inverse_in_place(&mut self) -> Option<&mut Self> { - self.is_zero().not().then(|| { - self.conjugate(); - self - }) + self.is_zero().not().then(|| self.conjugate_in_place()) } fn cyclotomic_square_in_place(&mut self) -> &mut Self { diff --git a/ff/src/fields/models/fp2.rs b/ff/src/fields/models/fp2.rs index 350bd21cb..7eed61c35 100644 --- a/ff/src/fields/models/fp2.rs +++ b/ff/src/fields/models/fp2.rs @@ -141,7 +141,7 @@ impl CyclotomicMultSubgroup for Fp2
<P>
{ // for this subgroup, x.inverse() = x.conjugate() self.is_zero().not().then(|| { - self.conjugate(); + self.conjugate_in_place(); self }) } diff --git a/ff/src/fields/models/fp4.rs b/ff/src/fields/models/fp4.rs index ad85e40da..9be5f6b3a 100644 --- a/ff/src/fields/models/fp4.rs +++ b/ff/src/fields/models/fp4.rs @@ -67,7 +67,7 @@ impl CyclotomicMultSubgroup for Fp4
<P>
{ const INVERSE_IS_FAST: bool = true; fn cyclotomic_inverse_in_place(&mut self) -> Option<&mut Self> { self.is_zero().not().then(|| { - self.conjugate(); + self.conjugate_in_place(); self }) } diff --git a/ff/src/fields/models/fp6_2over3.rs b/ff/src/fields/models/fp6_2over3.rs index 5a8ddbbab..349d13506 100644 --- a/ff/src/fields/models/fp6_2over3.rs +++ b/ff/src/fields/models/fp6_2over3.rs @@ -121,7 +121,7 @@ impl CyclotomicMultSubgroup for Fp6
<P>
{ const INVERSE_IS_FAST: bool = true; fn cyclotomic_inverse_in_place(&mut self) -> Option<&mut Self> { self.is_zero().not().then(|| { - self.conjugate(); + self.conjugate_in_place(); self }) } diff --git a/ff/src/fields/models/quadratic_extension.rs b/ff/src/fields/models/quadratic_extension.rs index 3edea3ed0..d53f3cedf 100644 --- a/ff/src/fields/models/quadratic_extension.rs +++ b/ff/src/fields/models/quadratic_extension.rs @@ -135,8 +135,9 @@ impl QuadExtField
<P>
{ /// This is only to be used when the element is *known* to be in the /// cyclotomic subgroup. - pub fn conjugate(&mut self) { + pub fn conjugate_in_place(&mut self) -> &mut Self { self.c1 = -self.c1; + self } /// Norm of QuadExtField over `P::BaseField`:`Norm(a) = a * a.conjugate()`. diff --git a/ff/src/lib.rs b/ff/src/lib.rs index 344245bc2..76676aa0d 100644 --- a/ff/src/lib.rs +++ b/ff/src/lib.rs @@ -1,6 +1,5 @@ //! Utilities for a field $\FF$. //! -//! #![cfg_attr(not(feature = "std"), no_std)] #![warn( diff --git a/rustfmt.toml b/rustfmt.toml index 967def746..c04603447 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,9 +1,14 @@ -condense_wildcard_suffixes = true edition = "2021" -imports_granularity = "Crate" + +condense_wildcard_suffixes = true match_block_trailing_comma = true -normalize_comments = true + reorder_imports = true +imports_granularity = "Crate" + use_field_init_shorthand = true use_try_shorthand = true -wrap_comments = true + +normalize_comments = true + +format_macro_bodies = true diff --git a/test-curves/benches/bls12_381.rs b/test-curves/benches/bls12_381.rs index 596044d87..beac48624 100644 --- a/test-curves/benches/bls12_381.rs +++ b/test-curves/benches/bls12_381.rs @@ -1,14 +1,14 @@ use ark_algebra_bench_templates::*; use ark_std::ops::{AddAssign, MulAssign, SubAssign}; -use ark_ec::{PairingEngine, ProjectiveCurve}; +use ark_ec::{CurveGroup, Group}; use ark_ff::{ biginteger::{BigInteger256 as FrRepr, BigInteger384 as FqRepr}, BigInteger, Field, PrimeField, UniformRand, }; use ark_test_curves::bls12_381::{ - fq::Fq, fq2::Fq2, fr::Fr, Bls12_381, Fq12, G1Affine, G1Projective as G1, G2Affine, - G2Projective as G2, + fq::Fq, fq2::Fq2, fr::Fr, Bls12_381, Fq12, G1Affine, G1Prepared, G1Projective as G1, G2Affine, + G2Prepared, G2Projective as G2, }; mod g1 { diff --git a/test-curves/benches/bn384_small_two_adicity.rs b/test-curves/benches/bn384_small_two_adicity.rs index 127ce98c1..8afaed701 100644 --- a/test-curves/benches/bn384_small_two_adicity.rs +++ b/test-curves/benches/bn384_small_two_adicity.rs @@ -1,7 +1,7 @@ use ark_algebra_bench_templates::*; use ark_std::ops::{AddAssign, MulAssign, SubAssign}; -use ark_ec::ProjectiveCurve; +use ark_ec::{CurveGroup, Group}; use ark_ff::{biginteger::BigInteger384 as Repr, BigInteger, Field, PrimeField, UniformRand}; use ark_test_curves::bn384_small_two_adicity::{fq::Fq, fr::Fr, G1Affine, G1Projective as G1}; diff --git a/test-curves/benches/mnt4_753.rs b/test-curves/benches/mnt4_753.rs index 9d32892f5..1cb09f56f 100644 --- a/test-curves/benches/mnt4_753.rs +++ b/test-curves/benches/mnt4_753.rs @@ -1,7 +1,7 @@ use ark_algebra_bench_templates::*; use ark_std::ops::{AddAssign, MulAssign, SubAssign}; -use ark_ec::ProjectiveCurve; +use ark_ec::{CurveGroup, Group}; use ark_ff::{biginteger::BigInteger768 as FqRepr, BigInteger, Field, PrimeField, UniformRand}; use ark_test_curves::mnt4_753::{fq::Fq, fr::Fr, G1Affine, G1Projective as G1}; diff --git a/test-curves/src/bls12_381/g1.rs b/test-curves/src/bls12_381/g1.rs index 2a66ac1cd..e68cf8e87 100644 --- a/test-curves/src/bls12_381/g1.rs +++ b/test-curves/src/bls12_381/g1.rs @@ -138,7 +138,7 @@ pub const G1_GENERATOR_Y: Fq = MontFp!("1339506544944476473020471379941921221584 #[cfg(test)] mod test { use super::*; - use ark_ec::ProjectiveCurve; + use ark_ec::CurveGroup; use ark_std::UniformRand; #[test] @@ -150,12 +150,12 @@ mod test { g_s[i] = G1Projective::rand(&mut rng); } - let mut g_s_affine_naive = [G1Affine::zero(); 100]; + let mut g_s_affine_naive = [G1Affine::identity(); 100]; 
for (i, g) in g_s.iter().enumerate() { g_s_affine_naive[i] = g.into_affine(); } - let g_s_affine_fast = G1Projective::batch_normalization_into_affine(&g_s); + let g_s_affine_fast = G1Projective::normalize_batch(&g_s); assert_eq!(g_s_affine_naive.as_ref(), g_s_affine_fast.as_slice()); } } diff --git a/test-curves/src/bls12_381/g1_swu_iso.rs b/test-curves/src/bls12_381/g1_swu_iso.rs index 5be970f37..9d04eb071 100644 --- a/test-curves/src/bls12_381/g1_swu_iso.rs +++ b/test-curves/src/bls12_381/g1_swu_iso.rs @@ -1,8 +1,10 @@ use crate::bls12_381::*; -use ark_ec::hashing::curve_maps::swu::SWUParams; -use ark_ec::models::{ - short_weierstrass::{Affine, SWCurveConfig}, - CurveConfig, +use ark_ec::{ + hashing::curve_maps::swu::SWUParams, + models::{ + short_weierstrass::{Affine, SWCurveConfig}, + CurveConfig, + }, }; use ark_ff::MontFp; diff --git a/test-curves/src/bls12_381/g2.rs b/test-curves/src/bls12_381/g2.rs index af04a3fad..30d27ec09 100644 --- a/test-curves/src/bls12_381/g2.rs +++ b/test-curves/src/bls12_381/g2.rs @@ -6,7 +6,7 @@ use ark_ec::{ hashing::curve_maps::wb::WBParams, models::CurveConfig, short_weierstrass::{self, *}, - AffineCurve, ProjectiveCurve, + AffineRepr, CurveGroup, Group, }; use ark_ff::{BigInt, Field, MontFp, Zero}; @@ -84,7 +84,7 @@ impl short_weierstrass::SWCurveConfig for Parameters { // more efficient, since the scalar -c1 has less limbs and a much lower Hamming // weight. let x: &'static [u64] = crate::bls12_381::Parameters::X; - let p_projective = p.into_projective(); + let p_projective = p.into_group(); // [x]P let x_p = Parameters::mul_affine(p, &x).neg(); @@ -93,18 +93,13 @@ impl short_weierstrass::SWCurveConfig for Parameters { // (ψ^2)(2P) let mut psi2_p2 = double_p_power_endomorphism(&p_projective.double()); - // tmp = [x]P + ψ(P) - let mut tmp = x_p.clone(); - tmp.add_assign_mixed(&psi_p); - - // tmp2 = [x^2]P + [x]ψ(P) - let mut tmp2: Projective = tmp; - tmp2 = tmp2.mul_bigint(x).neg(); + // tmp = [x^2]P + [x]ψ(P) + let tmp = (x_p.clone() + psi_p).mul_bigint(x).neg(); // add up all the terms - psi2_p2 += tmp2; + psi2_p2 += tmp; psi2_p2 -= x_p; - psi2_p2.add_assign_mixed(&-psi_p); + psi2_p2 -= psi_p; (psi2_p2 - p_projective).into_affine() } } diff --git a/test-curves/src/bls12_381/mod.rs b/test-curves/src/bls12_381/mod.rs index 6239a19df..52bb4838c 100644 --- a/test-curves/src/bls12_381/mod.rs +++ b/test-curves/src/bls12_381/mod.rs @@ -39,3 +39,6 @@ impl Bls12Parameters for Parameters { type G1Parameters = self::g1::Parameters; type G2Parameters = self::g2::Parameters; } + +pub type G1Prepared = ark_ec::bls12::G1Prepared; +pub type G2Prepared = ark_ec::bls12::G2Prepared; diff --git a/test-curves/src/bls12_381/tests.rs b/test-curves/src/bls12_381/tests.rs index ba7e2c9bb..d05526965 100644 --- a/test-curves/src/bls12_381/tests.rs +++ b/test-curves/src/bls12_381/tests.rs @@ -1,18 +1,12 @@ -#![allow(unused_imports)] -use ark_ec::{ - models::short_weierstrass::SWCurveConfig, AffineCurve, PairingEngine, ProjectiveCurve, -}; -use ark_ff::{Field, One, UniformRand, Zero}; +use crate::bls12_381::*; +use ark_algebra_test_templates::*; -use crate::bls12_381::{g1, Fq, Fq2, Fq6, FqConfig, Fr, FrConfig, G1Affine, G1Projective}; -use ark_algebra_test_templates::{ - curves::*, fields::*, generate_field_test, generate_g1_test, msm::*, -}; -use ark_std::{ - ops::{AddAssign, MulAssign, SubAssign}, - rand::Rng, - test_rng, -}; - -generate_field_test!(bls12_381; fq2; fq6; mont(6, 4); ); -generate_g1_test!(bls12_381; curve_tests; sw_tests;); +test_field!(fr; Fr; 
mont_prime_field); +test_field!(fq; Fq; mont_prime_field); +test_field!(fq2; Fq2); +test_field!(fq6; Fq6); +test_field!(fq12; Fq12); +test_group!(g1; G1Projective; sw); +test_group!(g2; G2Projective; sw); +test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); +test_pairing!(pairing; crate::bls12_381::Bls12_381); diff --git a/test-curves/src/bn384_small_two_adicity/tests.rs b/test-curves/src/bn384_small_two_adicity/tests.rs index 2c34fe99e..1313324c4 100644 --- a/test-curves/src/bn384_small_two_adicity/tests.rs +++ b/test-curves/src/bn384_small_two_adicity/tests.rs @@ -1,16 +1,12 @@ #![allow(unused_imports)] -use ark_ec::{ - models::short_weierstrass::SWCurveConfig, AffineCurve, PairingEngine, ProjectiveCurve, -}; +use ark_ec::{models::short_weierstrass::SWCurveConfig, pairing::Pairing, AffineRepr, CurveGroup}; use ark_ff::{Field, One, UniformRand, Zero}; use ark_std::{rand::Rng, test_rng}; -use crate::bn384_small_two_adicity::{g1, Fq, FqConfig, Fr, FrConfig, G1Affine, G1Projective}; -use ark_algebra_test_templates::{ - curves::*, fields::*, generate_field_test, generate_g1_test, msm::*, -}; - +use crate::bn384_small_two_adicity::{Fq, FqConfig, Fr, FrConfig, G1Affine, G1Projective}; +use ark_algebra_test_templates::*; use ark_std::ops::{AddAssign, MulAssign, SubAssign}; -generate_field_test!(bn384_small_two_adicity; mont(6, 6);); -generate_g1_test!(bn384_small_two_adicity; curve_tests; sw_tests;); +test_field!(fr; Fr; mont_prime_field); +test_field!(fq; Fq; mont_prime_field); +test_group!(g1; G1Projective; sw); diff --git a/test-curves/src/mnt4_753/mod.rs b/test-curves/src/mnt4_753/mod.rs index e3d286cbd..5d899901d 100644 --- a/test-curves/src/mnt4_753/mod.rs +++ b/test-curves/src/mnt4_753/mod.rs @@ -12,3 +12,6 @@ pub use fr::*; pub mod g1; #[cfg(feature = "mnt4_753_curve")] pub use g1::*; + +#[cfg(test)] +mod tests; diff --git a/test-curves/src/mnt4_753/tests.rs b/test-curves/src/mnt4_753/tests.rs new file mode 100644 index 000000000..c96e052fc --- /dev/null +++ b/test-curves/src/mnt4_753/tests.rs @@ -0,0 +1,6 @@ +use crate::mnt4_753::{Fq, Fr, G1Projective}; +use ark_algebra_test_templates::{test_field, test_group}; + +test_field!(fq; Fq; mont_prime_field); +test_field!(fr; Fr; mont_prime_field); +test_group!(g1; G1Projective); diff --git a/test-curves/src/mnt6_753/tests.rs b/test-curves/src/mnt6_753/tests.rs index ff1e87f02..06aa0d0aa 100644 --- a/test-curves/src/mnt6_753/tests.rs +++ b/test-curves/src/mnt6_753/tests.rs @@ -1,17 +1,4 @@ -#![allow(unused_imports)] -use ark_ec::{ - models::short_weierstrass::SWCurveConfig, AffineCurve, PairingEngine, ProjectiveCurve, -}; -use ark_ff::{Field, One, UniformRand, Zero}; +use crate::mnt6_753::Fq3; +use ark_algebra_test_templates::test_field; -use crate::mnt6_753::{Fq, Fq3, FqConfig, Fr, FrConfig}; -use ark_algebra_test_templates::{ - curves::*, fields::*, generate_field_test, generate_g1_test, msm::*, -}; -use ark_std::{ - ops::{AddAssign, MulAssign, SubAssign}, - rand::Rng, - test_rng, -}; - -generate_field_test!(mnt6_753; fq3; ); +test_field!(fq3; Fq3); diff --git a/test-templates/Cargo.toml b/test-templates/Cargo.toml index 58577765e..0f76126e1 100644 --- a/test-templates/Cargo.toml +++ b/test-templates/Cargo.toml @@ -20,6 +20,7 @@ ark-ff = { version = "^0.3.0", path = "../ff", default-features = false } ark-ec = { version = "^0.3.0", path = "../ec", default-features = false } num-bigint = { version = "0.4", default-features = false } num-integer = { version = "0.1", default-features = false } +num-traits = { version = "0.2", 
default-features = false } [features] default = [] diff --git a/test-templates/src/curves.rs b/test-templates/src/curves.rs deleted file mode 100644 index 07965bfa3..000000000 --- a/test-templates/src/curves.rs +++ /dev/null @@ -1,666 +0,0 @@ -#![allow(unused)] -use ark_ec::{ - short_weierstrass::{Affine, SWCurveConfig}, - twisted_edwards::{MontCurveConfig, Projective, TECurveConfig}, - wnaf::WnafContext, - AffineCurve, ProjectiveCurve, -}; -use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SWFlags, SerializationError}; -use ark_std::{io::Cursor, vec::Vec}; - -pub const ITERATIONS: usize = 10; - -fn random_addition_test() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let a = G::rand(&mut rng); - let b = G::rand(&mut rng); - let c = G::rand(&mut rng); - let a_affine = a.into_affine(); - let b_affine = b.into_affine(); - let c_affine = c.into_affine(); - - // a + a should equal the doubling - { - let mut aplusa = a; - aplusa.add_assign(&a); - - let mut aplusamixed = a; - aplusamixed.add_assign_mixed(&a.into_affine()); - - let mut adouble = a; - adouble.double_in_place(); - - assert_eq!(aplusa, adouble); - assert_eq!(aplusa, aplusamixed); - } - - let mut tmp = vec![G::zero(); 6]; - - // (a + b) + c - tmp[0] = (a + &b) + &c; - - // a + (b + c) - tmp[1] = a + &(b + &c); - - // (a + c) + b - tmp[2] = (a + &c) + &b; - - // Mixed addition - - // (a + b) + c - tmp[3] = a_affine.into_projective(); - tmp[3].add_assign_mixed(&b_affine); - tmp[3].add_assign_mixed(&c_affine); - - // a + (b + c) - tmp[4] = b_affine.into_projective(); - tmp[4].add_assign_mixed(&c_affine); - tmp[4].add_assign_mixed(&a_affine); - - // (a + c) + b - tmp[5] = a_affine.into_projective(); - tmp[5].add_assign_mixed(&c_affine); - tmp[5].add_assign_mixed(&b_affine); - - // Comparisons - for i in 0..6 { - for j in 0..6 { - if tmp[i] != tmp[j] { - println!("{} \n{}", tmp[i], tmp[j]); - } - assert_eq!(tmp[i], tmp[j], "Associativity failed {} {}", i, j); - assert_eq!( - tmp[i].into_affine(), - tmp[j].into_affine(), - "Associativity failed" - ); - } - - assert!(tmp[i] != a); - assert!(tmp[i] != b); - assert!(tmp[i] != c); - - assert!(a != tmp[i]); - assert!(b != tmp[i]); - assert!(c != tmp[i]); - } - } -} - -fn random_multiplication_test() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let mut a = G::rand(&mut rng); - let mut b = G::rand(&mut rng); - let a_affine = a.into_affine(); - let b_affine = b.into_affine(); - - let s = G::ScalarField::rand(&mut rng); - - // s ( a + b ) - let mut tmp1 = a; - tmp1.add_assign(&b); - tmp1.mul_assign(s); - - // s ( a + b) using wNAF for several window values in [2,5] - for w in 2..=5 { - let mut tmp4 = a + &b; - let context = WnafContext::new(w); - assert_eq!(tmp1, context.mul(tmp4, &s)); - - if w > 2 { - let bad_context = WnafContext::new(w - 1); - let bad_table = bad_context.table(tmp4); - assert_eq!(context.mul_with_table(&bad_table, &s), None); - } - } - - // sa + sb - a.mul_assign(s); - b.mul_assign(s); - - let mut tmp2 = a; - tmp2.add_assign(&b); - - // Affine multiplication - let mut tmp3 = a_affine.mul_bigint(&s.into_bigint()); - tmp3.add_assign(&b_affine.mul_bigint(&s.into_bigint())); - assert_eq!(tmp1, tmp2); - assert_eq!(tmp1, tmp3); - - let expected = a_affine.mul_bigint(s.into_bigint()); - let got = a_affine.mul_bigint(&s.into_bigint()); - assert_eq!(expected, got); - } -} - -fn random_doubling_test() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - 
let mut a = G::rand(&mut rng); - let mut b = G::rand(&mut rng); - - // 2(a + b) - let mut tmp1 = a; - tmp1.add_assign(&b); - tmp1.double_in_place(); - - // 2a + 2b - a.double_in_place(); - b.double_in_place(); - - let mut tmp2 = a; - tmp2.add_assign(&b); - - let mut tmp3 = a; - tmp3.add_assign_mixed(&b.into_affine()); - - assert_eq!(tmp1, tmp2); - assert_eq!(tmp1, tmp3); - } -} - -fn random_negation_test() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let r = G::rand(&mut rng); - - let s = G::ScalarField::rand(&mut rng); - let sneg = -s; - assert!((s + &sneg).is_zero()); - - let mut t1 = r; - t1.mul_assign(s); - - let mut t2 = r; - t2.mul_assign(sneg); - - let mut t3 = t1; - t3.add_assign(&t2); - assert!(t3.is_zero()); - - let mut t4 = t1; - t4.add_assign_mixed(&t2.into_affine()); - assert!(t4.is_zero()); - - t1 = -t1; - assert_eq!(t1, t2); - } -} - -fn random_transformation_test() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let g = G::rand(&mut rng); - let g_affine = g.into_affine(); - let g_projective = g_affine.into_projective(); - assert_eq!(g, g_projective); - } - - // Batch normalization - for _ in 0..10 { - let mut v = (0..ITERATIONS) - .map(|_| G::rand(&mut rng).double()) - .collect::>(); - - use ark_std::rand::distributions::{Distribution, Uniform}; - let between = Uniform::from(0..ITERATIONS); - // Sprinkle in some normalized points - for _ in 0..5 { - v[between.sample(&mut rng)] = G::zero(); - } - for _ in 0..5 { - let s = between.sample(&mut rng); - v[s] = v[s].into_affine().into_projective(); - } - - let expected_v = v - .iter() - .map(|v| v.into_affine().into_projective()) - .collect::>(); - G::batch_normalization(&mut v); - - for i in &v { - assert!(i.is_normalized()); - } - - assert_eq!(v, expected_v); - } -} - -pub fn curve_tests() { - let mut rng = ark_std::test_rng(); - - // Negation edge case with zero. - { - let z = -G::zero(); - assert!(z.is_zero()); - } - - // Doubling edge case with zero. 
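The `random_transformation_test` deleted in this hunk checked that batch normalization agrees with pointwise affine conversion. Under the renamed API used throughout this PR the same invariant reads roughly as follows; this is a sketch against the BLS12-381 test curve, not the replacement test itself (the macro-generated `test_affine_conversion` later in this diff plays that role).

// Sketch only: per-point conversion must match CurveGroup::normalize_batch,
// which batches the field inversions across the whole slice.
use ark_ec::CurveGroup;
use ark_std::UniformRand;
use ark_test_curves::bls12_381::G1Projective;

fn batch_vs_naive_normalization() {
    let mut rng = ark_std::test_rng();
    let v: Vec<G1Projective> = (0..32).map(|_| G1Projective::rand(&mut rng)).collect();
    let naive: Vec<_> = v.iter().map(|p| p.into_affine()).collect();
    let batched = G1Projective::normalize_batch(&v);
    assert_eq!(naive, batched);
}
// end of sketch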
- { - let mut z = -G::zero(); - z.double_in_place(); - assert!(z.is_zero()); - } - - // Addition edge cases with zero - { - let mut r = G::rand(&mut rng); - let rcopy = r; - r.add_assign(&G::zero()); - assert_eq!(r, rcopy); - r.add_assign_mixed(&G::Affine::zero()); - assert_eq!(r, rcopy); - - let mut z = G::zero(); - z.add_assign(&G::zero()); - assert!(z.is_zero()); - z.add_assign_mixed(&G::Affine::zero()); - assert!(z.is_zero()); - - let mut z2 = z; - z2.add_assign(&r); - - z.add_assign_mixed(&r.into_affine()); - - assert_eq!(z, z2); - assert_eq!(z, r); - } - - // Transformations - { - let a = G::rand(&mut rng); - let b = a.into_affine().into_projective(); - let c = a - .into_affine() - .into_projective() - .into_affine() - .into_projective(); - assert_eq!(a, b); - assert_eq!(b, c); - } - - // Test COFACTOR and COFACTOR_INV - { - let a = G::rand(&mut rng); - let b = a.into_affine(); - let c = b.mul_by_cofactor_inv().mul_by_cofactor(); - assert_eq!(b, c); - } - - { - let mut rng = ark_std::test_rng(); - let a = G::rand(&mut rng); - let mut b = G::rand(&mut rng); - let zero = G::zero(); - let fr_zero = G::ScalarField::zero(); - let fr_one = G::ScalarField::one(); - let fr_two = fr_one + &fr_one; - - assert_eq!(zero, zero); - assert_eq!(zero.is_zero(), true); - assert_eq!(a.mul(&fr_one), a); - assert_eq!(a.mul(&fr_two), a + &a); - assert_eq!(a.mul(&fr_zero), zero); - assert_eq!(a.mul(&fr_zero) - &a, -a); - assert_eq!(a.mul(&fr_one) - &a, zero); - assert_eq!(a.mul(&fr_two) - &a, a); - - // a == a - assert_eq!(a, a); - // a + 0 = a - assert_eq!(a + &zero, a); - // a - 0 = a - assert_eq!(a - &zero, a); - // a - a = 0 - assert_eq!(a - &a, zero); - // 0 - a = -a - assert_eq!(zero - &a, -a); - // a.double() = a + a - assert_eq!(a.double(), a + &a); - // b.double() = b + b - assert_eq!(b.double(), b + &b); - // a + b = b + a - assert_eq!(a + &b, b + &a); - // a - b = -(b - a) - assert_eq!(a - &b, -(b - &a)); - // (a + b) + a = a + (b + a) - assert_eq!((a + &b) + &a, a + &(b + &a)); - // (a + b).double() = (a + b) + (b + a) - assert_eq!((a + &b).double(), (a + &b) + &(b + &a)); - - // Check that double_in_place and double give the same result - let original_b = b; - b.double_in_place(); - assert_eq!(original_b.double(), b); - - let fr_rand1 = G::ScalarField::rand(&mut rng); - let fr_rand2 = G::ScalarField::rand(&mut rng); - let a_rand1 = a.mul(&fr_rand1); - let a_rand2 = a.mul(&fr_rand2); - let fr_three = fr_two + &fr_rand1; - let a_two = a.mul(&fr_two); - assert_eq!(a_two, a.double(), "(a * 2) != a.double()"); - let a_six = a.mul(&(fr_three * &fr_two)); - assert_eq!(a_two.mul(&fr_three), a_six, "(a * 2) * 3 != a * (2 * 3)"); - - assert_eq!( - a_rand1.mul(&fr_rand2), - a_rand2.mul(&fr_rand1), - "(a * r1) * r2 != (a * r2) * r1" - ); - assert_eq!( - a_rand2.mul(&fr_rand1), - a.mul(&(fr_rand1 * &fr_rand2)), - "(a * r2) * r1 != a * (r1 * r2)" - ); - assert_eq!( - a_rand1.mul(&fr_rand2), - a.mul(&(fr_rand1 * &fr_rand2)), - "(a * r1) * r2 != a * (r1 * r2)" - ); - } - - random_addition_test::(); - random_multiplication_test::(); - random_doubling_test::(); - random_negation_test::(); - random_transformation_test::(); -} - -pub fn sw_tests() { - sw_curve_serialization_test::

<P>();
- sw_from_random_bytes::<P>();
- sw_affine_sum_test::<P>();
- sw_cofactor_clearing_test::<P>();
-}
-
-pub fn sw_from_random_bytes<P: SWCurveConfig>() {
- use ark_ec::models::short_weierstrass::{Affine, Projective};
-
- let buf_size = Affine::<P>::zero().serialized_size();
-
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Projective::<P>::rand(&mut rng);
- let mut a = a.into_affine();
- {
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let p1 = Affine::<P>::deserialize(&mut cursor).unwrap();
- let p2 = Affine::<P>::from_random_bytes(&serialized).unwrap();
- assert_eq!(p1, p2);
- }
- }
-}
-
-pub fn sw_curve_serialization_test<P: SWCurveConfig>() {
- use ark_ec::models::short_weierstrass::{Affine, Projective};
-
- let buf_size = Affine::<P>::zero().serialized_size();
-
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Projective::<P>::rand(&mut rng);
- let mut a = a.into_affine();
- {
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- a.y = -a.y;
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; buf_size - 1];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap_err();
- }
-
- {
- let serialized = vec![0; buf_size - 1];
- let mut cursor = Cursor::new(&serialized[..]);
- Affine::<P>::deserialize(&mut cursor).unwrap_err();
- }
-
- {
- let mut serialized = vec![0; a.uncompressed_size()];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize_uncompressed(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- a.y = -a.y;
- let mut serialized = vec![0; a.uncompressed_size()];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize_uncompressed(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; a.uncompressed_size()];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize_uncompressed(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
- }
-}
-
-pub fn sw_affine_sum_test<P: SWCurveConfig>() {
- use ark_ec::models::short_weierstrass::{Affine, Projective};
-
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let mut test_vec = Vec::new();
- for _ in 0..10 {
- test_vec.push(Projective::<P>::rand(&mut rng).into_affine());
- }
-
- let sum_computed: Affine<P> = test_vec.iter().sum();
- let mut sum_expected = Affine::zero();
- for p in test_vec.iter() {
- sum_expected += p;
- }
-
- assert_eq!(sum_computed, sum_expected);
- }
-}
-
-fn sw_cofactor_clearing_test<P: SWCurveConfig>() {
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Affine::<P>::rand(&mut rng);
- let b = a.clear_cofactor();
- assert!(b.is_in_correct_subgroup_assuming_on_curve());
- }
-}
-
-pub fn montgomery_conversion_test<P>()
-where
- P: TECurveConfig,
-{
- // A = 2 * (a + d) / (a - d)
- let a = P::BaseField::one().double()
- * &(P::COEFF_A + &P::COEFF_D)
- * &(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
- // B = 4 / (a - d)
- let b = P::BaseField::one().double().double() * &(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
-
- assert_eq!(a, P::MontCurveConfig::COEFF_A);
- assert_eq!(b, P::MontCurveConfig::COEFF_B);
-}
-
-pub fn edwards_tests<P: TECurveConfig>()
-where
- P::BaseField: PrimeField,
-{
- edwards_curve_serialization_test::<P>();
- edwards_from_random_bytes::<P>();
- edwards_cofactor_clearing_test::<P>();
-}
-
-pub fn edwards_from_random_bytes<P: TECurveConfig>()
-where
- P::BaseField: PrimeField,
-{
- use ark_ec::models::twisted_edwards::{Affine, Projective};
-
- let buf_size = Affine::<P>::zero().serialized_size();
-
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Projective::<P>::rand(&mut rng);
- let mut a = a.into_affine();
- {
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let p1 = Affine::<P>::deserialize(&mut cursor).unwrap();
- let p2 = Affine::<P>::from_random_bytes(&serialized).unwrap();
- assert_eq!(p1, p2);
- }
- }
-
- for _ in 0..ITERATIONS {
- let mut biginteger =
- <<Affine<P> as AffineCurve>::BaseField as PrimeField>::BigInt::rand(&mut rng);
- let mut bytes = {
- let mut result = vec![0u8; biginteger.serialized_size()];
- biginteger
- .serialize(&mut Cursor::new(&mut result[..]))
- .unwrap();
- result
- };
- let mut g = Affine::<P>::from_random_bytes(&bytes);
- while g.is_none() {
- bytes.iter_mut().for_each(|i| *i = i.wrapping_sub(1));
- g = Affine::<P>::from_random_bytes(&bytes);
- }
- let _g = g.unwrap();
- }
-}
-
-pub fn edwards_curve_serialization_test<P: TECurveConfig>() {
- use ark_ec::models::twisted_edwards::{Affine, Projective};
-
- let buf_size = Affine::<P>::zero().serialized_size();
-
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Projective::<P>::rand(&mut rng);
- let a = a.into_affine();
- {
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; buf_size];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; buf_size - 1];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize(&mut cursor).unwrap_err();
- }
-
- {
- let serialized = vec![0; buf_size - 1];
- let mut cursor = Cursor::new(&serialized[..]);
- Affine::<P>::deserialize(&mut cursor).unwrap_err();
- }
-
- {
- let mut serialized = vec![0; a.uncompressed_size()];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize_uncompressed(&mut cursor).unwrap();
-
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
-
- {
- let a = Affine::<P>::zero();
- let mut serialized = vec![0; a.uncompressed_size()];
- let mut cursor = Cursor::new(&mut serialized[..]);
- a.serialize_uncompressed(&mut cursor).unwrap();
- let mut cursor = Cursor::new(&serialized[..]);
- let b = Affine::<P>::deserialize_uncompressed(&mut cursor).unwrap();
- assert_eq!(a, b);
- }
- }
-}
-
-fn edwards_cofactor_clearing_test<P: TECurveConfig>() {
- let mut rng = ark_std::test_rng();
-
- for _ in 0..ITERATIONS {
- let a = Projective::<P>
::rand(&mut rng).into_affine(); - let b = a.clear_cofactor(); - assert!(b.is_in_correct_subgroup_assuming_on_curve()); - } -} diff --git a/test-templates/src/fields.rs b/test-templates/src/fields.rs index cb2725ff0..4d07e0d8e 100644 --- a/test-templates/src/fields.rs +++ b/test-templates/src/fields.rs @@ -1,571 +1,517 @@ #![allow(unused)] #![allow(clippy::eq_op)] -use ark_ff::{ - fields::{FftField, Field, LegendreSymbol, PrimeField}, - Fp, MontBackend, MontConfig, -}; -use ark_serialize::{buffer_bit_byte_size, Flags, SWFlags}; -use ark_std::{io::Cursor, rand::Rng}; - -pub const ITERATIONS: u32 = 40; - -fn random_negation_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let a = F::rand(rng); - let mut b = -a; - b += &a; - - assert!(b.is_zero()); - } -} - -fn random_addition_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let a = F::rand(rng); - let b = F::rand(rng); - let c = F::rand(rng); - - let t0 = (a + &b) + &c; // (a + b) + c - - let t1 = (a + &c) + &b; // (a + c) + b - - let t2 = (b + &c) + &a; // (b + c) + a - - assert_eq!(t0, t1); - assert_eq!(t1, t2); - } -} - -fn random_subtraction_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let a = F::rand(rng); - let b = F::rand(rng); - - let t0 = a - &b; // (a - b) +#[derive(Default, Clone, Copy, Debug)] +pub struct DummyFlags; - let mut t1 = b; // (b - a) - t1 -= &a; +impl ark_serialize::Flags for DummyFlags { + const BIT_SIZE: usize = 200; - let mut t2 = t0; // (a - b) + (b - a) = 0 - t2 += &t1; - - assert!(t2.is_zero()); + fn u8_bitmask(&self) -> u8 { + 0 } -} - -fn random_multiplication_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let a = F::rand(rng); - let b = F::rand(rng); - let c = F::rand(rng); - let mut t0 = a; // (a * b) * c - t0 *= &b; - t0 *= &c; - - let mut t1 = a; // (a * c) * b - t1 *= &c; - t1 *= &b; - - let mut t2 = b; // (b * c) * a - t2 *= &c; - t2 *= &a; - - assert_eq!(t0, t1); - assert_eq!(t1, t2); + fn from_u8(_value: u8) -> Option { + Some(DummyFlags) } } -fn random_sum_of_products_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - for length in 1..100 { - let a = (0..length).map(|_| F::rand(rng)).collect::>(); - let b = (0..length).map(|_| F::rand(rng)).collect::>(); - let result_1 = F::sum_of_products(&a, &b); - let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::(); - assert_eq!(result_1, result_2, "length: {length}"); +#[macro_export] +#[doc(hidden)] +macro_rules! 
__test_field { + ($field: ty) => { + #[test] + pub fn test_frobenius() { + use ark_ff::Field; + use ark_std::UniformRand; + let mut rng = ark_std::test_rng(); + let characteristic = <$field>::characteristic(); + let max_power = (<$field>::extension_degree() + 1) as usize; + + for _ in 0..ITERATIONS { + let a = <$field>::rand(&mut rng); + + let mut a_0 = a; + a_0.frobenius_map(0); + assert_eq!(a, a_0); + + let mut a_q = a.pow(&characteristic); + for power in 1..max_power { + let mut a_qi = a; + a_qi.frobenius_map(power); + assert_eq!(a_qi, a_q, "failed on power {}", power); + + a_q = a_q.pow(&characteristic); + } + } } - } -} -fn edge_case_sum_of_products_tests() { - use ark_ff::BigInteger; - let mut a_max = F::ZERO.into_bigint(); - for (i, limb) in a_max.as_mut().iter_mut().enumerate() { - if i == F::BigInt::NUM_LIMBS - 1 { - *limb = u64::MAX >> (64 - ((F::MODULUS_BIT_SIZE - 1) % 64)); - } else { - *limb = u64::MAX; + #[test] + fn test_serialization() { + use ark_serialize::*; + use ark_std::UniformRand; + let buf_size = <$field>::zero().serialized_size(); + + let buffer_size = + buffer_bit_byte_size(<$field as Field>::BasePrimeField::MODULUS_BIT_SIZE as usize).1 * + (<$field>::extension_degree() as usize); + assert_eq!(buffer_size, buf_size); + + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = <$field>::rand(&mut rng); + { + let mut serialized = vec![0u8; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = <$field>::deserialize(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let mut serialized = vec![0u8; a.uncompressed_size()]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_uncompressed(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = <$field>::deserialize_uncompressed(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let mut serialized = vec![0u8; buf_size + 1]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_with_flags(&mut cursor, SWFlags::from_y_sign(true)) + .unwrap(); + let mut cursor = Cursor::new(&serialized[..]); + let (b, flags) = <$field>::deserialize_with_flags::<_, SWFlags>(&mut cursor).unwrap(); + assert_eq!(flags.is_positive(), Some(true)); + assert!(!flags.is_infinity()); + assert_eq!(a, b); + } + + + + { + let mut serialized = vec![0; buf_size]; + let result = matches!( + a.serialize_with_flags(&mut &mut serialized[..], $crate::fields::DummyFlags).unwrap_err(), + SerializationError::NotEnoughSpace + ); + assert!(result); + + let result = matches!( + <$field>::deserialize_with_flags::<_, $crate::fields::DummyFlags>(&mut &serialized[..]).unwrap_err(), + SerializationError::NotEnoughSpace, + ); + assert!(result); + + { + let mut serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap_err(); + + let mut cursor = Cursor::new(&serialized[..]); + <$field>::deserialize(&mut cursor).unwrap_err(); + } + } + } } - } - let a_max = F::from_bigint(a_max).unwrap(); - let b_max = -F::one(); // p - 1. 
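Two notes on the new `__test_field!` macro above. First, `test_frobenius` checks that `frobenius_map(i)` agrees with exponentiation by the field characteristic, i.e. phi^i(a) = a^(q^i) for i up to the extension degree. Second, the `DummyFlags` helper added to this file exists only to make flagged (de)serialization fail: its `BIT_SIZE` of 200 cannot fit in the spare bits next to a compressed field element, so both directions must report `SerializationError::NotEnoughSpace`. A self-contained sketch of that behaviour, with a locally defined stand-in for `DummyFlags` and BLS12-381's `Fq` as an arbitrary field (illustrative only, not part of the patch):

// Illustrative sketch: oversized flags must be rejected cleanly.
use ark_serialize::{
    CanonicalSerialize, CanonicalSerializeWithFlags, Flags, SerializationError,
};
use ark_std::UniformRand;
use ark_test_curves::bls12_381::Fq;

#[derive(Default, Clone, Copy, Debug)]
struct WideFlags; // local stand-in for the crate's DummyFlags

impl Flags for WideFlags {
    // Far wider than the headroom left next to a compressed field element.
    const BIT_SIZE: usize = 200;
    fn u8_bitmask(&self) -> u8 {
        0
    }
    fn from_u8(_: u8) -> Option<Self> {
        Some(WideFlags)
    }
}

fn oversized_flags_are_rejected() {
    let a = Fq::rand(&mut ark_std::test_rng());
    let mut buf = vec![0u8; a.serialized_size()];
    let err = a.serialize_with_flags(&mut buf[..], WideFlags).unwrap_err();
    assert!(matches!(err, SerializationError::NotEnoughSpace));
}
// end of sketch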
- for length in 1..100 { - let a = vec![a_max; length]; - let b = vec![b_max; length]; - let result_1 = F::sum_of_products(&a, &b); - let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::(); - assert_eq!(result_1, result_2, "length: {length}"); - } - let two_inv = F::from(2u64).inverse().unwrap(); - let neg_one = -F::one(); - let a_max = neg_one * two_inv - F::one(); - let b_max = neg_one * two_inv - F::one(); - for length in 1..100 { - let a = vec![a_max; length]; - let b = vec![b_max; length]; - let result_1 = F::sum_of_products(&a, &b); - let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::(); - assert_eq!(result_1, result_2, "length: {length}"); - } -} - -fn random_inversion_tests(rng: &mut R) { - assert!(F::zero().inverse().is_none()); - - for _ in 0..ITERATIONS { - let mut a = F::rand(rng); - let b = a.inverse().map(|b| { - a *= &b; - assert_eq!(a, F::one()); - }); - } -} - -fn random_doubling_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let mut a = F::rand(rng); - let mut b = a; - a += &b; - b.double_in_place(); - - assert_eq!(a, b); - } -} - -fn random_squaring_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - let mut a = F::rand(rng); - let mut b = a; - a *= &b; - b.square_in_place(); - - assert_eq!(a, b); - } -} - -fn random_expansion_tests(rng: &mut R) { - for _ in 0..ITERATIONS { - // Compare (a + b)(c + d) and (a*c + b*c + a*d + b*d) - - let a = F::rand(rng); - let b = F::rand(rng); - let c = F::rand(rng); - let d = F::rand(rng); - - let mut t0 = a; - t0 += &b; - let mut t1 = c; - t1 += &d; - t0 *= &t1; - - let mut t2 = a; - t2 *= &c; - let mut t3 = b; - t3 *= &c; - let mut t4 = a; - t4 *= &d; - let mut t5 = b; - t5 *= &d; - - t2 += &t3; - t2 += &t4; - t2 += &t5; - - assert_eq!(t0, t2); - } - - for _ in 0..ITERATIONS { - // Compare (a + b)c and (a*c + b*c) - - let a = F::rand(rng); - let b = F::rand(rng); - let c = F::rand(rng); - - let t0 = (a + &b) * &c; - let t2 = a * &c + &(b * &c); - - assert_eq!(t0, t2); - } -} - -fn random_field_tests() { - let mut rng = ark_std::test_rng(); - - random_negation_tests::(&mut rng); - random_addition_tests::(&mut rng); - random_subtraction_tests::(&mut rng); - random_multiplication_tests::(&mut rng); - random_sum_of_products_tests::(&mut rng); - random_inversion_tests::(&mut rng); - random_doubling_tests::(&mut rng); - random_squaring_tests::(&mut rng); - random_expansion_tests::(&mut rng); - - assert!(F::zero().is_zero()); - { - let z = -F::zero(); - assert!(z.is_zero()); - } - - assert!(F::zero().inverse().is_none()); - - // Multiplication by zero - { - let a = F::rand(&mut rng) * &F::zero(); - assert!(a.is_zero()); - } - - // Addition by zero - { - let mut a = F::rand(&mut rng); - let copy = a; - a += &F::zero(); - assert_eq!(a, copy); - } -} - -fn random_sqrt_tests() { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let a = F::rand(&mut rng); - let b = a.square(); - assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue); - - let b = b.sqrt().unwrap(); - assert!(a == b || a == -b); - } - - let mut c = F::one(); - for _ in 0..ITERATIONS { - let mut b = c.square(); - assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue); - b = b.sqrt().unwrap(); - - if b != c { - b = -b; + #[test] + fn test_add_properties() { + use ark_std::UniformRand; + let mut rng = test_rng(); + let zero = <$field>::zero(); + assert_eq!(-zero, zero); + assert!(zero.is_zero()); + assert!(<$field>::ZERO.is_zero()); + assert_eq!(<$field>::ZERO, zero); + + for _ in 0..(ITERATIONS * ITERATIONS) { + // Associativity + let a 
= <$field>::rand(&mut rng); + let b = <$field>::rand(&mut rng); + let c = <$field>::rand(&mut rng); + + assert_eq!((a + b) + c, a + (b + c)); + + // Commutativity + assert_eq!(a + b, b + a); + + // Identity + assert_eq!(zero + a, a); + assert_eq!(zero + b, b); + assert_eq!(zero + c, c); + + // Negation + assert_eq!(-a + a, zero); + assert_eq!(-b + b, zero); + assert_eq!(-c + c, zero); + assert_eq!(-zero, zero); + + // Associativity and commutativity simultaneously + let t0 = (a + &b) + &c; // (a + b) + c + let t1 = (a + &c) + &b; // (a + c) + b + let t2 = (b + &c) + &a; // (b + c) + a + + assert_eq!(t0, t1); + assert_eq!(t1, t2); + + // Doubling + assert_eq!(a.double(), a + a); + assert_eq!(b.double(), b + b); + assert_eq!(c.double(), c + c); + } } - assert_eq!(b, c); - - c += &F::one(); - } -} - -pub fn from_str_test() { - { - let mut rng = ark_std::test_rng(); + #[test] + fn test_sub_properties() { + use ark_std::UniformRand; + let mut rng = test_rng(); + let zero = <$field>::zero(); - for _ in 0..ITERATIONS { - let n: u64 = rng.gen(); + for _ in 0..(ITERATIONS * ITERATIONS){ + // Anti-commutativity + let a = <$field>::rand(&mut rng); + let b = <$field>::rand(&mut rng); + assert!(((a - b) + (b - a)).is_zero()); - let a = F::from_str(&ark_std::format!("{}", n)) - .map_err(|_| ()) - .unwrap(); - let b = F::from(n); + // Identity + assert_eq!(zero - a, -a); + assert_eq!(zero - b, -b); - assert_eq!(a, b); - let c = F::from_str(&ark_std::format!("{}", a)) - .map_err(|_| ()) - .unwrap(); - assert_eq!(a, c); + assert_eq!(a - zero, a); + assert_eq!(b - zero, b); + } } - } - - assert!(F::from_str("").is_err()); - assert!(F::from_str("0").map_err(|_| ()).unwrap().is_zero()); - assert!(F::from_str("00").is_err()); - assert!(F::from_str("00000000000").is_err()); - assert!(F::from_str("000000000007").is_err()); -} - -pub fn field_test(a: F, b: F) { - let zero = F::zero(); - assert_eq!(zero, zero); - assert_eq!(zero.is_zero(), true); - assert_eq!(zero.is_one(), false); - - let one = F::one(); - assert_eq!(one, one); - assert_eq!(one.is_zero(), false); - assert_eq!(one.is_one(), true); - assert_eq!(zero + &one, one); - - let two = one + &one; - assert_eq!(two, two); - assert_ne!(zero, two); - assert_ne!(one, two); - - // a == a - assert_eq!(a, a); - // a + 0 = a - assert_eq!(a + &zero, a); - // a - 0 = a - assert_eq!(a - &zero, a); - // a - a = 0 - assert_eq!(a - &a, zero); - // 0 - a = -a - assert_eq!(zero - &a, -a); - // a.double() = a + a - assert_eq!(a.double(), a + &a); - // b.double() = b + b - assert_eq!(b.double(), b + &b); - // a + b = b + a - assert_eq!(a + &b, b + &a); - // a - b = -(b - a) - assert_eq!(a - &b, -(b - &a)); - // (a + b) + a = a + (b + a) - assert_eq!((a + &b) + &a, a + &(b + &a)); - // (a + b).double() = (a + b) + (b + a) - assert_eq!((a + &b).double(), (a + &b) + &(b + &a)); - - // a * 0 = 0 - assert_eq!(a * &zero, zero); - // a * 1 = a - assert_eq!(a * &one, a); - // a * 2 = a.double() - assert_eq!(a * &two, a.double()); - // a * a^-1 = 1 - assert_eq!(a * &a.inverse().unwrap(), one); - // a * a = a^2 - assert_eq!(a * &a, a.square()); - // a * a * a = a^3 - assert_eq!(a * &(a * &a), a.pow([0x3, 0x0, 0x0, 0x0])); - // a * b = b * a - assert_eq!(a * &b, b * &a); - // (a * b) * a = a * (b * a) - assert_eq!((a * &b) * &a, a * &(b * &a)); - // (a + b)^2 = a^2 + 2ab + b^2 - assert_eq!( - (a + &b).square(), - a.square() + &((a * &b) + &(a * &b)) + &b.square() - ); - // (a - b)^2 = (-(b - a))^2 - assert_eq!((a - &b).square(), (-(b - &a)).square()); - random_field_tests::(); -} -pub 
fn fft_field_test() { - assert_eq!( - F::TWO_ADIC_ROOT_OF_UNITY.pow([1 << F::TWO_ADICITY]), - F::one() - ); - - if let Some(small_subgroup_base) = F::SMALL_SUBGROUP_BASE { - let small_subgroup_base_adicity = F::SMALL_SUBGROUP_BASE_ADICITY.unwrap(); - let large_subgroup_root_of_unity = F::LARGE_SUBGROUP_ROOT_OF_UNITY.unwrap(); - let pow = - (1 << F::TWO_ADICITY) * (small_subgroup_base as u64).pow(small_subgroup_base_adicity); - assert_eq!(large_subgroup_root_of_unity.pow([pow]), F::one()); - - for i in 0..F::TWO_ADICITY { - for j in 0..small_subgroup_base_adicity { - use core::convert::TryFrom; - let size = usize::try_from(1 << i as usize).unwrap() - * usize::try_from((small_subgroup_base as u64).pow(j)).unwrap(); - let root = F::get_root_of_unity(size as u64).unwrap(); - assert_eq!(root.pow([size as u64]), F::one()); + #[test] + fn test_mul_properties() { + use ark_std::UniformRand; + let mut rng = test_rng(); + let zero = <$field>::zero(); + let one = <$field>::one(); + assert_eq!(one.inverse().unwrap(), one); + assert!(one.is_one()); + + assert!(<$field>::ONE.is_one()); + assert_eq!(<$field>::ONE, one); + + for _ in 0..ITERATIONS { + // Associativity + let a = <$field>::rand(&mut rng); + let b = <$field>::rand(&mut rng); + let c = <$field>::rand(&mut rng); + assert_eq!((a * b) * c, a * (b * c)); + + // Commutativity + assert_eq!(a * b, b * a); + + // Identity + assert_eq!(one * a, a); + assert_eq!(one * b, b); + assert_eq!(one * c, c); + + assert_eq!(zero * a, zero); + assert_eq!(zero * b, zero); + assert_eq!(zero * c, zero); + + // Inverses + assert_eq!(a * a.inverse().unwrap(), one); + assert_eq!(b * b.inverse().unwrap(), one); + assert_eq!(c * c.inverse().unwrap(), one); + + // Associativity and commutativity simultaneously + let t0 = (a * b) * c; + let t1 = (a * c) * b; + let t2 = (b * c) * a; + assert_eq!(t0, t1); + assert_eq!(t1, t2); + + // Squaring + assert_eq!(a * a, a.square()); + assert_eq!(b * b, b.square()); + assert_eq!(c * c, c.square()); + + // Distributivity + assert_eq!(a * (b + c), a * b + a * c); + assert_eq!(b * (a + c), b * a + b * c); + assert_eq!(c * (a + b), c * a + c * b); + assert_eq!((a + b).square(), a.square() + b.square() + a * b.double()); + assert_eq!((b + c).square(), c.square() + b.square() + c * b.double()); + assert_eq!((c + a).square(), a.square() + c.square() + a * c.double()); } } - } else { - for i in 0..F::TWO_ADICITY { - let size = 1 << i; - let root = F::get_root_of_unity(size).unwrap(); - assert_eq!(root.pow([size as u64]), F::one()); - } - } -} - -pub fn primefield_test() { - from_str_test::(); - let one = F::one(); - assert_eq!(F::from(one.into_bigint()), one); - - let mut rng = ark_std::test_rng(); - edge_case_sum_of_products_tests::(); - - fft_field_test::(); -} -pub fn montgomery_primefield_test, const N: usize>() { - use ark_ff::FpConfig; - use num_bigint::BigUint; - use num_integer::Integer; - let modulus: BigUint = T::MODULUS.into(); - let r = BigUint::from(2u8).modpow(&((N * 64) as u64).into(), &modulus); - let r2 = (&r * &r) % &modulus; - assert_eq!(r, T::R.into()); - assert_eq!(r2, T::R2.into()); - assert_eq!( - Fp::, N>::MODULUS_BIT_SIZE as u64, - modulus.bits() - ); - - if &modulus % 4u8 == BigUint::from(3u8) { - assert_eq!( - BigUint::from(T::MODULUS_PLUS_ONE_DIV_FOUR.unwrap()), - (&modulus + 1u8) / 4u8 - ); - } - - let modulus_minus_one = &modulus - 1u8; - assert_eq!( - BigUint::from(Fp::, N>::MODULUS_MINUS_ONE_DIV_TWO), - &modulus_minus_one / 2u32 - ); - - let mut two_adicity = 0; - let mut trace = modulus_minus_one; - while 
trace.is_even() { - trace /= 2u8; - two_adicity += 1; - } - assert_eq!(two_adicity, MontBackend::::TWO_ADICITY); - assert_eq!(BigUint::from(Fp::, N>::TRACE), trace); - let trace_minus_one_div_two = (&trace - 1u8) / 2u8; - assert_eq!( - BigUint::from(Fp::, N>::TRACE_MINUS_ONE_DIV_TWO), - trace_minus_one_div_two - ); - - let two_adic_root_of_unity: BigUint = >::TWO_ADIC_ROOT_OF_UNITY.into(); - let generator: BigUint = >::GENERATOR.into_bigint().into(); - assert_eq!(two_adic_root_of_unity, generator.modpow(&trace, &modulus)); - match (T::SMALL_SUBGROUP_BASE, T::SMALL_SUBGROUP_BASE_ADICITY) { - (Some(base), Some(adicity)) => { - let mut e = generator; - for _i in 0..adicity { - e = e.modpow(&base.into(), &modulus) + #[test] + fn test_pow() { + use ark_std::UniformRand; + let mut rng = test_rng(); + for _ in 0..(ITERATIONS / 10) { + for i in 0..20 { + // Exponentiate by various small numbers and ensure it is + // consistent with repeated multiplication. + let a = <$field>::rand(&mut rng); + let target = a.pow(&[i]); + let mut c = <$field>::one(); + for _ in 0..i { + c *= a; + } + assert_eq!(c, target); + + } + let a = <$field>::rand(&mut rng); + + // Exponentiating by the modulus should have no effect; + let mut result = a; + for i in 0..<$field>::extension_degree() { + result = result.pow(<$field>::characteristic()) + } + assert_eq!(a, result); + + // Commutativity + let e1: [u64; 10] = rng.gen(); + let e2: [u64; 10] = rng.gen(); + assert_eq!(a.pow(&e1).pow(&e2), a.pow(&e2).pow(&e1)); + + // Distributivity + let e3: [u64; 10] = rng.gen(); + let a_to_e1 = a.pow(e1); + let a_to_e2 = a.pow(e2); + let a_to_e1_plus_e2 = a.pow(e1) * a.pow(e2); + assert_eq!(a_to_e1_plus_e2.pow(&e3), a_to_e1.pow(&e3) * a_to_e2.pow(&e3)); } - }, - (None, None) => {}, - (_, _) => { - panic!("Should specify both `SMALL_SUBGROUP_BASE` and `SMALL_SUBGROUP_BASE_ADICITY`") - }, - } -} - -pub fn sqrt_field_test(elem: F) { - let square = elem.square(); - let sqrt = square.sqrt().unwrap(); - assert!(sqrt == elem || sqrt == -elem); - if let Some(sqrt) = elem.sqrt() { - assert!(sqrt.square() == elem || sqrt.square() == -elem); - } - random_sqrt_tests::(); -} - -pub fn frobenius_test>(characteristic: C, maxpower: usize) { - let mut rng = ark_std::test_rng(); - - for _ in 0..ITERATIONS { - let a = F::rand(&mut rng); - - let mut a_0 = a; - a_0.frobenius_map(0); - assert_eq!(a, a_0); - - let mut a_q = a.pow(&characteristic); - for power in 1..maxpower { - let mut a_qi = a; - a_qi.frobenius_map(power); - assert_eq!(a_qi, a_q, "failed on power {}", power); - - a_q = a_q.pow(&characteristic); } - } -} - -pub fn field_serialization_test(buf_size: usize) { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let a = F::rand(&mut rng); - { - let mut serialized = vec![0u8; buf_size]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize(&mut cursor).unwrap(); - - let mut cursor = Cursor::new(&serialized[..]); - let b = F::deserialize(&mut cursor).unwrap(); - assert_eq!(a, b); + #[test] + fn test_sum_of_products_tests() { + use ark_std::UniformRand; + let rng = &mut test_rng(); + for _ in 0..ITERATIONS { + for length in 1..20 { + let a = (0..length).map(|_| <$field>::rand(rng)).collect::>(); + let b = (0..length).map(|_| <$field>::rand(rng)).collect::>(); + let result_1 = <$field>::sum_of_products(&a, &b); + let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::<$field>(); + assert_eq!(result_1, result_2, "length: {length}"); + } + + let two_inv = <$field>::from(2u64).inverse().unwrap(); + let neg_one = 
-<$field>::one(); + let a_max = neg_one * two_inv - <$field>::one(); + let b_max = neg_one * two_inv - <$field>::one(); + for length in 1..20 { + let a = vec![a_max; length]; + let b = vec![b_max; length]; + let result_1 = <$field>::sum_of_products(&a, &b); + let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::<$field>(); + assert_eq!(result_1, result_2, "length: {length}"); + } + } } - { - let mut serialized = vec![0u8; a.uncompressed_size()]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize_uncompressed(&mut cursor).unwrap(); - - let mut cursor = Cursor::new(&serialized[..]); - let b = F::deserialize_uncompressed(&mut cursor).unwrap(); - assert_eq!(a, b); + #[test] + fn test_sqrt() { + if <$field>::SQRT_PRECOMP.is_some() { + use ark_std::UniformRand; + let rng = &mut test_rng(); + + assert!(<$field>::zero().sqrt().unwrap().is_zero()); + + for _ in 0..ITERATIONS { + // Ensure sqrt(a^2) = a or -a + let a = <$field>::rand(rng); + let b = a.square(); + let sqrt = b.sqrt().unwrap(); + assert!(a == sqrt || -a == sqrt); + + if let Some(mut b) = a.sqrt() { + b.square_in_place(); + assert_eq!(a, b); + } + + let a = <$field>::rand(rng); + let b = a.square(); + assert_eq!(b.legendre(), LegendreSymbol::QuadraticResidue); + } + } } - - { - let mut serialized = vec![0u8; buf_size + 1]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize_with_flags(&mut cursor, SWFlags::from_y_sign(true)) - .unwrap(); - let mut cursor = Cursor::new(&serialized[..]); - let (b, flags) = F::deserialize_with_flags::<_, SWFlags>(&mut cursor).unwrap(); - assert_eq!(flags.is_positive(), Some(true)); - assert!(!flags.is_infinity()); - assert_eq!(a, b); + }; + ($field: ty; fft) => { + $crate::__test_field!($field); + + #[test] + fn test_fft() { + use ark_ff::FftField; + assert_eq!( + <$field>::TWO_ADIC_ROOT_OF_UNITY.pow([1 << <$field>::TWO_ADICITY]), + <$field>::one() + ); + + if let Some(small_subgroup_base) = <$field>::SMALL_SUBGROUP_BASE { + let small_subgroup_base_adicity = <$field>::SMALL_SUBGROUP_BASE_ADICITY.unwrap(); + let large_subgroup_root_of_unity = <$field>::LARGE_SUBGROUP_ROOT_OF_UNITY.unwrap(); + let pow = + (1 << <$field>::TWO_ADICITY) * (small_subgroup_base as u64).pow(small_subgroup_base_adicity); + assert_eq!(large_subgroup_root_of_unity.pow([pow]), <$field>::one()); + + for i in 0..<$field>::TWO_ADICITY { + for j in 0..small_subgroup_base_adicity { + use core::convert::TryFrom; + let size = usize::try_from(1 << i as usize).unwrap() + * usize::try_from((small_subgroup_base as u64).pow(j)).unwrap(); + let root = <$field>::get_root_of_unity(size as u64).unwrap(); + assert_eq!(root.pow([size as u64]), <$field>::one()); + } + } + } else { + for i in 0..<$field>::TWO_ADICITY { + let size = 1 << i; + let root = <$field>::get_root_of_unity(size).unwrap(); + assert_eq!(root.pow([size as u64]), <$field>::one()); + } + } + } + }; + ($field: ty; prime) => { + $crate::__test_field!($field; fft); + + #[test] + fn test_sum_of_products_edge_case() { + use ark_ff::BigInteger; + let mut a_max = <$field>::ZERO.into_bigint(); + for (i, limb) in a_max.as_mut().iter_mut().enumerate() { + if i == <$field as PrimeField>::BigInt::NUM_LIMBS - 1 { + *limb = u64::MAX >> (64 - ((<$field>::MODULUS_BIT_SIZE - 1) % 64)); + } else { + *limb = u64::MAX; + } + } + let a_max = <$field>::from_bigint(a_max).unwrap(); + let b_max = -<$field>::one(); // p - 1. 
+ for length in 1..100 { + let a = vec![a_max; length]; + let b = vec![b_max; length]; + let result_1 = <$field>::sum_of_products(&a, &b); + let result_2 = a.into_iter().zip(b).map(|(a, b)| a * b).sum::<$field>(); + assert_eq!(result_1, result_2, "length: {length}"); + } } - #[derive(Default, Clone, Copy, Debug)] - struct DummyFlags; - impl Flags for DummyFlags { - const BIT_SIZE: usize = 200; - - fn u8_bitmask(&self) -> u8 { - 0 + #[test] + fn test_constants() { + use ark_ff::{FpConfig, BigInteger, SqrtPrecomputation}; + use $crate::num_bigint::BigUint; + use $crate::num_integer::Integer; + + let modulus: BigUint = <$field>::MODULUS.into(); + let modulus_minus_one = &modulus - 1u8; + assert_eq!(BigUint::from(<$field>::MODULUS_MINUS_ONE_DIV_TWO), &modulus_minus_one / 2u32); + assert_eq!(<$field>::MODULUS_BIT_SIZE as u64, modulus.bits()); + if let Some(SqrtPrecomputation::Case3Mod4 { modulus_plus_one_div_four }) = <$field>::SQRT_PRECOMP { + assert_eq!(modulus_plus_one_div_four, &((&modulus + 1u8) / 4u8).to_u64_digits()); } - fn from_u8(_value: u8) -> Option { - Some(DummyFlags) + let mut two_adicity = 0; + let mut trace = modulus_minus_one; + while trace.is_even() { + trace /= 2u8; + two_adicity += 1; + } + assert_eq!(two_adicity, <$field>::TWO_ADICITY); + assert_eq!(BigUint::from(<$field>::TRACE), trace); + let trace_minus_one_div_two = (&trace - 1u8) / 2u8; + assert_eq!(BigUint::from(<$field>::TRACE_MINUS_ONE_DIV_TWO), trace_minus_one_div_two); + + let two_adic_root_of_unity: BigUint = <$field>::TWO_ADIC_ROOT_OF_UNITY.into(); + let generator: BigUint = <$field>::GENERATOR.into_bigint().into(); + assert_eq!(two_adic_root_of_unity, generator.modpow(&trace, &modulus)); + match (<$field>::SMALL_SUBGROUP_BASE, <$field>::SMALL_SUBGROUP_BASE_ADICITY) { + (Some(base), Some(adicity)) => { + let mut e = generator; + for _i in 0..adicity { + e = e.modpow(&base.into(), &modulus) + } + }, + (None, None) => {}, + (_, _) => { + panic!("Should specify both `SMALL_SUBGROUP_BASE` and `SMALL_SUBGROUP_BASE_ADICITY`") + }, } } - - use ark_serialize::SerializationError; - { - let mut serialized = vec![0; buf_size]; - assert!(if let SerializationError::NotEnoughSpace = a - .serialize_with_flags(&mut &mut serialized[..], DummyFlags) - .unwrap_err() - { - true - } else { - false - }); - assert!(if let SerializationError::NotEnoughSpace = - F::deserialize_with_flags::<_, DummyFlags>(&mut &serialized[..]).unwrap_err() - { - true - } else { - false - }); + }; + ($field: ty; mont_prime_field) => { + $crate::__test_field!($field; prime); + + #[test] + pub fn test_montgomery_config() { + use ark_ff::{FpConfig, BigInteger}; + use $crate::num_bigint::{BigUint, BigInt}; + use $crate::num_integer::Integer; + use $crate::num_traits::{Signed, cast::ToPrimitive}; + + let limbs = <$field as PrimeField>::BigInt::NUM_LIMBS; + let modulus: BigUint = <$field>::MODULUS.into(); + let r = BigUint::from(2u8).modpow(&((limbs * 64) as u64).into(), &modulus); + let r2 = (&r * &r) % &modulus; + let inv = { + // We compute this as follows. + // First, MODULUS mod 2^64 is just the lower 64 bits of MODULUS. + // Hence MODULUS mod 2^64 = MODULUS.0[0] mod 2^64. + // + // Next, computing the inverse mod 2^64 involves exponentiating by + // the multiplicative group order, which is euler_totient(2^64) - 1. + // Now, euler_totient(2^64) = 1 << 63, and so + // euler_totient(2^64) - 1 = (1 << 63) - 1 = 1111111... (63 digits). + // We compute this powering via standard square and multiply. 
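+ // Concretely: the odd residues form the unit group of Z/2^64, which has
+ // order 2^63, so MODULUS^(2^63 - 1) = MODULUS^(-1) (mod 2^64). The 63
+ // square-and-multiply iterations below compute exactly that power, and
+ // the final negation yields INV = -MODULUS^(-1) (mod 2^64).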
+ let mut inv = 1u128; + let two_to_64 = 1u128 << 64; + for _ in 0..63 { + // Square + inv = inv.checked_mul(inv).unwrap() % two_to_64; + // Multiply + inv = inv.checked_mul(<$field>::MODULUS.0[0] as u128).unwrap() % &two_to_64; + }; + let mut inv = inv as i128; + let two_to_64 = two_to_64 as i128; + inv = (-inv) % two_to_64; + inv as u64 + }; + let group_order = 0b111111111111111111111111111111111111111111111111111111111111111u64; + let group_order_lower = ((group_order << 32) >> 32) as u32; // clear the upper 32 bits + let group_order_upper = ((group_order) >> 32) as u32; // drop the lower 32 bits + let modulus_lower_limb = <$field>::MODULUS.0[0]; + let modulus_lower_limb_to2_32 = modulus_lower_limb.wrapping_pow(u32::MAX).wrapping_mul(modulus_lower_limb); + let inv2 = modulus_lower_limb + .wrapping_pow(group_order_lower) + .wrapping_mul(modulus_lower_limb_to2_32.wrapping_pow(group_order_upper)) + .wrapping_neg(); + + assert_eq!(r, <$field>::R.into()); + assert_eq!(r2, <$field>::R2.into()); + assert_eq!(inv, <$field>::INV.into()); + assert_eq!(inv2, <$field>::INV); } + } +} - { - let mut serialized = vec![0; buf_size - 1]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize(&mut cursor).unwrap_err(); - - let mut cursor = Cursor::new(&serialized[..]); - F::deserialize(&mut cursor).unwrap_err(); +#[macro_export] +macro_rules! test_field { + ($mod_name: ident; $field: ty $(; $tail:tt)*) => { + mod $mod_name { + use super::*; + use ark_ff::{ + fields::{FftField, Field, LegendreSymbol, PrimeField}, + Fp, MontBackend, MontConfig, + }; + use ark_serialize::{buffer_bit_byte_size, Flags, SWFlags}; + use ark_std::{io::Cursor, rand::Rng, vec::Vec, test_rng, vec, Zero, One, UniformRand}; + const ITERATIONS: usize = 1000; + + $crate::__test_field!($field $(; $tail)*); } - } + }; } diff --git a/test-templates/src/groups.rs b/test-templates/src/groups.rs new file mode 100644 index 000000000..6735a1a0f --- /dev/null +++ b/test-templates/src/groups.rs @@ -0,0 +1,395 @@ +#[macro_export] +#[doc(hidden)] +macro_rules! 
__test_group { + ($group: ty) => { + type ScalarField = <$group as Group>::ScalarField; + fn test_add_properties() { + let mut rng = &mut ark_std::test_rng(); + let zero = <$group>::zero(); + for _ in 0..ITERATIONS { + let a = <$group>::rand(rng); + let b = <$group>::rand(rng); + let c = <$group>::rand(rng); + + // Associativity + assert_eq!((a + b) + c, a + (b + c)); + + // Commutativity + assert_eq!(a + b, b + a); + + // Identity + assert_eq!(zero + a, a); + assert_eq!(zero + b, b); + assert_eq!(zero + c, c); + assert_eq!(a + zero, a); + assert_eq!(b + zero, b); + assert_eq!(c + zero, c); + + // Negation + assert_eq!(-a + a, zero); + assert_eq!(-b + b, zero); + assert_eq!(-c + c, zero); + assert_eq!(-zero, zero); + + // Associativity and commutativity simultaneously + let t0 = (a + &b) + &c; // (a + b) + c + let t1 = (a + &c) + &b; // (a + c) + b + let t2 = (b + &c) + &a; // (b + c) + a + + assert_eq!(t0, t1); + assert_eq!(t1, t2); + + // Doubling + assert_eq!(a.double(), a + a); + assert_eq!(b.double(), b + b); + assert_eq!(c.double(), c + c); + assert_eq!(zero.double(), zero); + assert_eq!((-zero).double(), zero); + } + } + + #[test] + fn test_sub_properties() { + use ark_std::UniformRand; + let mut rng = test_rng(); + let zero = <$group>::zero(); + + for _ in 0..ITERATIONS{ + // Anti-commutativity + let a = <$group>::rand(&mut rng); + let b = <$group>::rand(&mut rng); + assert!(((a - b) + (b - a)).is_zero()); + + // Identity + assert_eq!(zero - a, -a); + assert_eq!(zero - b, -b); + + assert_eq!(a - zero, a); + assert_eq!(b - zero, b); + } + } + + #[test] + fn test_mul_properties() { + use ark_std::UniformRand; + let mut rng = test_rng(); + let zero = ScalarField::zero(); + let one = ScalarField::one(); + assert_eq!(one.inverse().unwrap(), one); + assert!(one.is_one()); + + for _ in 0..ITERATIONS { + // Associativity + let a = <$group>::rand(&mut rng); + let b = ScalarField::rand(&mut rng); + let c = ScalarField::rand(&mut rng); + assert_eq!((a * b) * c, a * (b * c)); + + // Identity + assert_eq!(a * one, a); + + assert_eq!(a * zero, <$group>::zero()); + + // Inverses + assert_eq!((a * b.inverse().unwrap()) * b, a); + + // Distributivity + assert_eq!(a * (b + c), a * b + a * c); + + // s ( a + b) using wNAF for several window values in [2,5] + for w in 2..=5 { + let context = WnafContext::new(w); + assert_eq!(a * b, context.mul(a, &b)); + + let table = context.table(a); + assert_eq!(a * b, context.mul_with_table(&table, &b).unwrap()); + + if w > 2 { + let bad_context = WnafContext::new(w - 1); + let bad_table = bad_context.table(a); + assert_eq!(context.mul_with_table(&bad_table, &b), None); + } + } + } + } + + #[test] + fn test_serialization() { + let buf_size = <$group>::zero().serialized_size(); + + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = <$group>::rand(&mut rng); + { + let mut serialized = vec![0; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = <$group>::deserialize(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let a = <$group>::zero(); + let mut serialized = vec![0; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap(); + let mut cursor = Cursor::new(&serialized[..]); + let b = <$group>::deserialize(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let a = <$group>::zero(); + let mut serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&mut serialized[..]); + 
a.serialize(&mut cursor).unwrap_err(); + } + + { + let serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&serialized[..]); + <$group>::deserialize(&mut cursor).unwrap_err(); + } + + { + let mut serialized = vec![0; a.uncompressed_size()]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_uncompressed(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let b = <$group>::deserialize_uncompressed(&mut cursor).unwrap(); + assert_eq!(a, b); + } + + { + let a = <$group>::zero(); + let mut serialized = vec![0; a.uncompressed_size()]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_uncompressed(&mut cursor).unwrap(); + let mut cursor = Cursor::new(&serialized[..]); + let b = <$group>::deserialize_uncompressed(&mut cursor).unwrap(); + assert_eq!(a, b); + } + } + } + }; + ($group:ty; msm) => { + #[test] + fn test_var_base_msm() { + $crate::msm::test_var_base_msm::<$group>(); + } + + #[test] + fn test_chunked_pippenger() { + $crate::msm::test_chunked_pippenger::<$group>(); + } + + #[test] + fn test_hashmap_pippenger() { + $crate::msm::test_hashmap_pippenger::<$group>(); + } + }; + ($group:ty; curve) => { + $crate::__test_group!($group; msm); + type Affine = <$group as CurveGroup>::Affine; + type Config = <$group as CurveGroup>::Config; + type BaseField = <$group as CurveGroup>::BaseField; + + #[test] + fn test_affine_conversion() { + let mut rng = &mut ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let g = <$group>::rand(&mut rng); + let g_affine = g.into_affine(); + let g_projective = g_affine.into_group(); + assert_eq!(g, g_projective); + } + + // Batch normalization + for _ in 0..10 { + let mut v = (0..ITERATIONS) + .map(|_| <$group>::rand(&mut rng).double()) + .collect::>(); + + use ark_std::rand::distributions::{Distribution, Uniform}; + let between = Uniform::from(0..ITERATIONS); + // Sprinkle in some normalized points + for _ in 0..5 { + v[between.sample(&mut rng)] = <$group>::zero(); + } + for _ in 0..5 { + let s = between.sample(&mut rng); + v[s] = v[s].into_affine().into_group(); + } + + let expected_v = v.iter().map(|v| v.into_affine()).collect::>(); + let actual_v = <$group>::normalize_batch(&v); + + assert_eq!(actual_v, expected_v); + } + } + + #[test] + pub fn test_from_random_bytes() { + let buf_size = Affine::identity().serialized_size(); + + let mut rng = ark_std::test_rng(); + + for _ in 0..ITERATIONS { + let a = <$group>::rand(&mut rng); + let mut a = a.into_affine(); + { + let mut serialized = vec![0; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize(&mut cursor).unwrap(); + + let mut cursor = Cursor::new(&serialized[..]); + let p1 = Affine::deserialize(&mut cursor).unwrap(); + let p2 = Affine::from_random_bytes(&serialized).unwrap(); + assert_eq!(p1, p2); + } + } + } + + #[test] + fn test_cofactor_ops() { + let rng = &mut ark_std::test_rng(); + for _ in 0..ITERATIONS { + let a = Affine::rand(rng); + assert_eq!(a.mul_by_cofactor_to_group(), a.mul_bigint(&Config::COFACTOR)); + assert_eq!(a.mul_by_cofactor(), a.mul_bigint(&Config::COFACTOR)); + assert_eq!(a.mul_by_cofactor().mul_by_cofactor_inv(), a); + assert_eq!(a.mul_by_cofactor_inv().mul_by_cofactor(), a); + assert_eq!(a.mul_by_cofactor_inv(), a * Config::COFACTOR_INV); + + assert!(a.clear_cofactor().is_in_correct_subgroup_assuming_on_curve()); + } + } + + #[test] + fn test_mixed_addition() { + let rng = &mut ark_std::test_rng(); + for _ in 0..ITERATIONS { + let a = Affine::rand(rng); + let b = <$group>::rand(rng); + 
assert_eq!(a + b, a.into_group() + b); + assert_eq!(b + a, a.into_group() + b); + } + } + }; + ($group:ty; sw) => { + $crate::__test_group!($group; curve); + + #[test] + fn test_sw_properties() { + let mut rng = &mut ark_std::test_rng(); + + let generator = <$group>::generator().into_affine(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + + let mut x = BaseField::zero(); + let mut i = 0; + loop { + // y^2 = x^3 + a * x + b + let rhs = x * x.square() + x * Config::COEFF_A + Config::COEFF_B; + + if let Some(y) = rhs.sqrt() { + let p = Affine::new_unchecked(x, if y < -y { y } else { -y }); + if !<<$group as CurveGroup>::Config as CurveConfig>::cofactor_is_one() { + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + } + + let g1 = p.mul_by_cofactor_to_group(); + if !g1.is_zero() { + let g1 = Affine::from(g1); + assert!(g1.is_in_correct_subgroup_assuming_on_curve()); + break; + } + } + + i += 1; + x += BaseField::one(); + } + + for _ in 0..ITERATIONS { + let f = BaseField::rand(rng); + assert_eq!(Config::mul_by_a(&f), f * Config::COEFF_A); + assert_eq!(Config::add_b(&f), f + Config::COEFF_B); + } + } + }; + ($group:ty; te) => { + $crate::__test_group!($group, curve); + + #[test] + fn test_te_properties() { + let mut rng = &mut ark_std::test_rng(); + + let generator = <$group>::generator(); + assert!(generator.is_on_curve()); + assert!(generator.is_in_correct_subgroup_assuming_on_curve()); + let mut y = BaseField::zero(); + let one = BaseField::one(); + let mut i = 0; + loop { + + let y2 = y.square(); + + let numerator = one - y2; + let denominator = Config::COEFF_A - (y2 * Config::COEFF_D); + + let candidate_point = denominator + .inverse() + .map(|denom| denom * &numerator) + .and_then(|x2| x2.sqrt()) + .map(|x| { + let negx = -x; + let x = if (x < negx) ^ greatest { x } else { negx }; + Affine::new_unchecked(x, y) + }); + if let Some(p) = candidate_point { + assert!(!p.is_in_correct_subgroup_assuming_on_curve()); + let g1 = p.mul_by_cofactor_to_projective(); + if !g1.is_zero() { + assert_eq!(i, $const); + let g1 = <$group>::Affine::from(g1); + + assert!(g1.is_in_correct_subgroup_assuming_on_curve()); + + assert_eq!(g1, $group::generator()); + break; + } + } + i += 1; + x += BaseField::one(); + } + + for _ in 0..ITERATIONS { + let f = BaseField::rand(rng); + assert_eq!(Config::mul_by_a(&f), f * Config::COEFF_A); + assert_eq!(Config::add_b(&f), f + Config::COEFF_B); + } + } + } +} + +#[macro_export] +macro_rules! test_group { + ($mod_name: ident; $group: ty $(; $tail:tt)*) => { + mod $mod_name { + use super::*; + use ark_ff::*; + use ark_ec::{Group, CurveGroup, ScalarMul, AffineRepr, CurveConfig, short_weierstrass::SWCurveConfig, twisted_edwards::TECurveConfig, scalar_mul::{*, wnaf::*}}; + use ark_serialize::*; + use ark_std::{io::Cursor, rand::Rng, vec::Vec, test_rng, vec, Zero, One, UniformRand}; + const ITERATIONS: usize = 500; + + $crate::__test_group!($group $(; $tail)*); + } + }; +} diff --git a/test-templates/src/lib.rs b/test-templates/src/lib.rs index 87d49b672..13fc45d85 100644 --- a/test-templates/src/lib.rs +++ b/test-templates/src/lib.rs @@ -1,606 +1,11 @@ -pub mod curves; +#[macro_use] +pub mod groups; +#[macro_use] pub mod fields; pub mod msm; +#[macro_use] +pub mod pairing; -#[macro_export] -macro_rules! 
generate_g1_test { - () => {}; - - (curve_tests; $($tail:tt)*) => { - #[test] - fn test_g1_projective_curve() { - curve_tests::(); - } - generate_g1_test!($($tail)*); - }; - - (sw_tests; $($tail:tt)*) => { - #[test] - fn test_g1_projective_sw() { - sw_tests::(); - } - generate_g1_test!($($tail)*); - }; - - - (edwards_tests; $($tail:tt)*) => { - #[test] - fn test_g1_projective_edwards() { - edwards_tests::(); - } - generate_g1_test!($($tail)*); - }; - - ($curve_name: ident; $($tail:tt)*) => { - #[test] - fn test_g1_affine_curve() { - test_var_base_msm::(); - ark_algebra_test_templates::msm::test_chunked_pippenger::(); - ark_algebra_test_templates::msm::test_hashmap_pippenger::(); - } - - #[test] - fn test_g1_generator() { - let generator = G1Affine::prime_subgroup_generator(); - assert!(generator.is_on_curve()); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); - } - - generate_g1_test!($($tail)*); - }; -} - -#[macro_export] -macro_rules! generate_g2_test { - () => {}; - - (curve_tests; $($tail:tt)*) => { - #[test] - fn test_g2_projective_curve() { - curve_tests::(); - } - generate_g2_test!($($tail)*); - }; - - (sw_tests; $($tail:tt)*) => { - #[test] - fn test_g2_projective_sw() { - sw_tests::(); - } - generate_g2_test!($($tail)*); - }; - - - (edwards_tests; $($tail:tt)*) => { - #[test] - fn test_g2_projective_edwards() { - edwards_tests::(); - } - generate_g2_test!($($tail)*); - }; - - ($curve_name: ident; $($tail:tt)*) => { - #[test] - fn test_g2_generator() { - let generator = G2Affine::prime_subgroup_generator(); - assert!(generator.is_on_curve()); - assert!(generator.is_in_correct_subgroup_assuming_on_curve()); - } - - generate_g2_test!($($tail)*); - }; -} - -#[macro_export] -macro_rules! generate_bilinearity_test { - () => {}; - - ($curve_name: ident, $field_name: ident) => { - #[test] - fn test_bilinearity() { - let mut rng = test_rng(); - let a: G1Projective = rng.gen(); - let b: G2Projective = rng.gen(); - let s: Fr = rng.gen(); - - let mut sa = a; - sa.mul_assign(s); - let mut sb = b; - sb.mul_assign(s); - - let ans1 = $curve_name::pairing(sa, b); - let ans2 = $curve_name::pairing(a, sb); - let ans3 = $curve_name::pairing(a, b).pow(s.into_bigint()); - - assert_eq!(ans1, ans2); - assert_eq!(ans2, ans3); - - assert_ne!(ans1, $field_name::one()); - assert_ne!(ans2, $field_name::one()); - assert_ne!(ans3, $field_name::one()); - - assert_eq!(ans1.pow(Fr::characteristic()), $field_name::one()); - assert_eq!(ans2.pow(Fr::characteristic()), $field_name::one()); - assert_eq!(ans3.pow(Fr::characteristic()), $field_name::one()); - } - }; -} - -#[macro_export] -macro_rules! generate_product_of_pairings_test { - () => {}; - - ($curve_name: ident) => { - #[test] - fn test_product_of_pairings() { - let rng = &mut test_rng(); - - let a = G1Projective::rand(rng).into_affine(); - let b = G2Projective::rand(rng).into_affine(); - let c = G1Projective::rand(rng).into_affine(); - let d = G2Projective::rand(rng).into_affine(); - let ans1 = $curve_name::pairing(a, b) * &$curve_name::pairing(c, d); - let ans2 = - $curve_name::product_of_pairings(&[(a.into(), b.into()), (c.into(), d.into())]); - assert_eq!(ans1, ans2); - } - }; -} - -#[macro_export] -macro_rules! 
generate_g1_generator_raw_test { - () => {}; - - ($curve_name: ident, $const: expr) => { - #[test] - fn test_g1_generator_raw() { - let mut x = Fq::zero(); - let mut i = 0; - loop { - // y^2 = x^3 + b - let mut rhs = x; - rhs.square_in_place(); - rhs.mul_assign(&x); - rhs.add_assign(&g1::Parameters::COEFF_B); - - if let Some(y) = rhs.sqrt() { - let p = G1Affine::new_unchecked(x, if y < -y { y } else { -y }); - assert!(!p.is_in_correct_subgroup_assuming_on_curve()); - - let g1 = p.mul_by_cofactor_to_projective(); - if !g1.is_zero() { - assert_eq!(i, $const); - let g1 = G1Affine::from(g1); - - assert!(g1.is_in_correct_subgroup_assuming_on_curve()); - - assert_eq!(g1, G1Affine::prime_subgroup_generator()); - break; - } - } - - i += 1; - x.add_assign(&Fq::one()); - } - } - }; -} - -#[macro_export] -macro_rules! generate_field_test { - () => { - pub(crate) const ITERATIONS: usize = 5; - }; - - (fq2; $($tail:tt)*) => { - #[test] - fn test_fq2() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let a: Fq2 = UniformRand::rand(&mut rng); - let b: Fq2 = UniformRand::rand(&mut rng); - field_test(a, b); - sqrt_field_test(a); - } - frobenius_test::(Fq::characteristic(), 13); - } - - generate_field_test!($($tail)*); - }; - - (fq3; $($tail:tt)*) => { - #[test] - fn test_fq3() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let a: Fq3 = UniformRand::rand(&mut rng); - let b: Fq3 = UniformRand::rand(&mut rng); - field_test(a, b); - sqrt_field_test(a); - } - frobenius_test::(Fq::characteristic(), 13); - } - - generate_field_test!($($tail)*); - }; - - (fq4; $($tail:tt)*) => { - #[test] - fn test_fq4() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let g: Fq4 = UniformRand::rand(&mut rng); - let h: Fq4 = UniformRand::rand(&mut rng); - field_test(g, h); - } - frobenius_test::(Fq::characteristic(), 13); - } - - generate_field_test!($($tail)*); - }; - - // Fq6 which is a cubic extension of Fq2. - (fq6; $($tail:tt)*) => { - #[test] - fn test_fq6() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let g: Fq6 = UniformRand::rand(&mut rng); - let h: Fq6 = UniformRand::rand(&mut rng); - field_test(g, h); - } - frobenius_test::(Fq::characteristic(), 13); - } - - #[test] - #[should_panic(expected = "not implemented")] - fn test_fq6_sqrt() { - let mut rng = ark_std::test_rng(); - let g: Fq6 = UniformRand::rand(&mut rng); - sqrt_field_test(g); - } - - generate_field_test!($($tail)*); - }; - - // Fq6 which is a quadratic extension of Fq3. 
- (fq6_2_on_3; $($tail:tt)*) => { - #[test] - fn test_fq6() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let g: Fq6 = UniformRand::rand(&mut rng); - let h: Fq6 = UniformRand::rand(&mut rng); - field_test(g, h); - sqrt_field_test(g); - } - frobenius_test::(Fq::characteristic(), 13); - } - }; - - (fq12; $($tail:tt)*) => { - #[test] - fn test_fq12() { - let mut rng = test_rng(); - for _ in 0..ITERATIONS { - let g: Fq12 = rng.gen(); - let h: Fq12 = rng.gen(); - field_test(g, h); - } - frobenius_test::(Fq::characteristic(), 13); - } - - generate_field_test!($($tail)*); - }; - - ($curve_name: ident; $($tail:tt)*) => { - #[test] - fn test_fr() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let a: Fr = UniformRand::rand(&mut rng); - let b: Fr = UniformRand::rand(&mut rng); - field_test(a, b); - primefield_test::(); - sqrt_field_test(b); - } - } - - #[test] - fn test_fq() { - let mut rng = ark_std::test_rng(); - for _ in 0..ITERATIONS { - let a: Fq = UniformRand::rand(&mut rng); - let b: Fq = UniformRand::rand(&mut rng); - field_test(a, b); - primefield_test::(); - sqrt_field_test(a); - } - } - - #[test] - fn test_fq_add_assign() { - // Test associativity - - let mut rng = test_rng(); - - for _ in 0..1000 { - // Generate a, b, c and ensure (a + b) + c == a + (b + c). - let a = Fq::rand(&mut rng); - let b = Fq::rand(&mut rng); - let c = Fq::rand(&mut rng); - - let mut tmp1 = a; - tmp1.add_assign(&b); - tmp1.add_assign(&c); - - let mut tmp2 = b; - tmp2.add_assign(&c); - tmp2.add_assign(&a); - - assert_eq!(tmp1, tmp2); - } - } - - #[test] - fn test_fq_sub_assign() { - let mut rng = test_rng(); - - for _ in 0..1000 { - // Ensure that (a - b) + (b - a) = 0. - let a = Fq::rand(&mut rng); - let b = Fq::rand(&mut rng); - - let mut tmp1 = a; - tmp1.sub_assign(&b); - - let mut tmp2 = b; - tmp2.sub_assign(&a); - - tmp1.add_assign(&tmp2); - assert!(tmp1.is_zero()); - } - } - - #[test] - fn test_fq_mul_assign() { - let mut rng = test_rng(); - - for _ in 0..1000000 { - // Ensure that (a * b) * c = a * (b * c) - let a = Fq::rand(&mut rng); - let b = Fq::rand(&mut rng); - let c = Fq::rand(&mut rng); - - let mut tmp1 = a; - tmp1.mul_assign(&b); - tmp1.mul_assign(&c); - - let mut tmp2 = b; - tmp2.mul_assign(&c); - tmp2.mul_assign(&a); - - assert_eq!(tmp1, tmp2); - } - - for _ in 0..1000000 { - // Ensure that r * (a + b + c) = r*a + r*b + r*c - - let r = Fq::rand(&mut rng); - let mut a = Fq::rand(&mut rng); - let mut b = Fq::rand(&mut rng); - let mut c = Fq::rand(&mut rng); - - let mut tmp1 = a; - tmp1.add_assign(&b); - tmp1.add_assign(&c); - tmp1.mul_assign(&r); - - a.mul_assign(&r); - b.mul_assign(&r); - c.mul_assign(&r); - - a.add_assign(&b); - a.add_assign(&c); - - assert_eq!(tmp1, a); - } - } - - #[test] - fn test_fq_squaring() { - let mut rng = test_rng(); - - for _ in 0..1000000 { - // Ensure that (a * a) = a^2 - let a = Fq::rand(&mut rng); - - let mut tmp = a; - tmp.square_in_place(); - - let mut tmp2 = a; - tmp2.mul_assign(&a); - - assert_eq!(tmp, tmp2); - } - } - - #[test] - fn test_fq_inverse() { - assert!(Fq::zero().inverse().is_none()); - - let mut rng = test_rng(); - - let one = Fq::one(); - - for _ in 0..1000 { - // Ensure that a * a^-1 = 1 - let mut a = Fq::rand(&mut rng); - let ainv = a.inverse().unwrap(); - a.mul_assign(&ainv); - assert_eq!(a, one); - } - } - - #[test] - fn test_fq_double_in_place() { - let mut rng = test_rng(); - - for _ in 0..1000 { - // Ensure doubling a is equivalent to adding a to itself. 
- let mut a = Fq::rand(&mut rng); - let mut b = a; - b.add_assign(&a); - a.double_in_place(); - assert_eq!(a, b); - } - } - - #[test] - fn test_fq_negate() { - { - let a = -Fq::zero(); - - assert!(a.is_zero()); - } - - let mut rng = test_rng(); - - for _ in 0..1000 { - // Ensure (a - (-a)) = 0. - let mut a = Fq::rand(&mut rng); - let b = -a; - a.add_assign(&b); - - assert!(a.is_zero()); - } - } - - #[test] - fn test_fq_pow() { - let mut rng = test_rng(); - - for i in 0..1000 { - // Exponentiate by various small numbers and ensure it consists with repeated - // multiplication. - let a = Fq::rand(&mut rng); - let target = a.pow(&[i]); - let mut c = Fq::one(); - for _ in 0..i { - c.mul_assign(&a); - } - assert_eq!(c, target); - } - - for _ in 0..1000 { - // Exponentiating by the modulus should have no effect in a prime field. - let a = Fq::rand(&mut rng); - - assert_eq!(a, a.pow(Fq::characteristic())); - } - } - - #[test] - fn test_fq_sqrt() { - let mut rng = test_rng(); - - assert_eq!(Fq::zero().sqrt().unwrap(), Fq::zero()); - - for _ in 0..1000 { - // Ensure sqrt(a^2) = a or -a - let a = Fq::rand(&mut rng); - let nega = -a; - let mut b = a; - b.square_in_place(); - - let b = b.sqrt().unwrap(); - - assert!(a == b || nega == b); - } - - for _ in 0..1000 { - // Ensure sqrt(a)^2 = a for random a - let a = Fq::rand(&mut rng); - - if let Some(mut tmp) = a.sqrt() { - tmp.square_in_place(); - - assert_eq!(a, tmp); - } - } - } - - generate_field_test!($($tail)*); - }; - (mont($fq_num_limbs:expr, $fr_num_limbs:expr); $($tail:tt)*) => { - #[test] - fn test_fq_mont() { - montgomery_primefield_test::(); - } - - #[test] - fn test_fr_mont() { - montgomery_primefield_test::(); - } - - generate_field_test!($($tail)*); - } -} - -#[macro_export] -macro_rules! generate_field_serialization_test { - () => {}; - - (fq2; $($tail:tt)*) => { - #[test] - fn test_fq2_serialization() { - let byte_size = Fq2::zero().serialized_size(); - field_serialization_test::(byte_size); - } - - generate_field_serialization_test!($($tail)*); - }; - - (fq6; $($tail:tt)*) => { - #[test] - fn test_fq6_serialization() { - let byte_size = Fq6::zero().serialized_size(); - field_serialization_test::(byte_size); - } - - generate_field_serialization_test!($($tail)*); - }; - - (fq12; $($tail:tt)*) => { - #[test] - fn test_fq12_serialization() { - let byte_size = Fq12::zero().serialized_size(); - field_serialization_test::(byte_size); - } - - generate_field_serialization_test!($($tail)*); - }; - - ($curve_name: ident; $($tail:tt)*) => { - #[test] - fn test_field_serialization() { - let mut rng = test_rng(); - for _ in 0..ITERATIONS { - let a: Fr = rng.gen(); - let b: Fq = rng.gen(); - - let byte_size = a.serialized_size(); - let (_, buffer_size) = buffer_bit_byte_size(Fr::MODULUS_BIT_SIZE as usize); - assert_eq!(byte_size, buffer_size); - field_serialization_test::(byte_size); - - let byte_size = b.serialized_size(); - let (_, buffer_size) = buffer_bit_byte_size(Fq::MODULUS_BIT_SIZE as usize); - assert_eq!(byte_size, buffer_size); - field_serialization_test::(byte_size); - } - } - - generate_field_serialization_test!($($tail)*); - }; -} +pub use num_bigint; +pub use num_integer; +pub use num_traits; diff --git a/test-templates/src/msm.rs b/test-templates/src/msm.rs index 246b26334..4bbc780c0 100644 --- a/test-templates/src/msm.rs +++ b/test-templates/src/msm.rs @@ -1,23 +1,19 @@ use ark_ec::{ - msm::{ChunkedPippenger, HashMapPippenger, VariableBaseMSM}, - AffineCurve, ProjectiveCurve, + scalar_mul::variable_base::{ChunkedPippenger, 
HashMapPippenger, VariableBaseMSM},
+    ScalarMul,
 };
-use ark_ff::{PrimeField, UniformRand, Zero};
+use ark_ff::{PrimeField, UniformRand};
 
-fn naive_var_base_msm<G: AffineCurve>(bases: &[G], scalars: &[G::ScalarField]) -> G::Projective {
-    let mut acc = G::Projective::zero();
+fn naive_var_base_msm<G: VariableBaseMSM>(bases: &[G::MulBase], scalars: &[G::ScalarField]) -> G {
+    let mut acc = G::zero();
     for (base, scalar) in bases.iter().zip(scalars.iter()) {
-        acc += base.mul_bigint(&scalar.into_bigint());
+        acc += *base * scalar;
     }
     acc
 }
 
-pub fn test_var_base_msm<G>()
-where
-    G: AffineCurve,
-    G::Projective: VariableBaseMSM,
-{
+pub fn test_var_base_msm<G: VariableBaseMSM>() {
     const SAMPLES: usize = 1 << 10;
 
     let mut rng = ark_std::test_rng();
@@ -25,23 +21,16 @@ where
     let v = (0..SAMPLES)
         .map(|_| G::ScalarField::rand(&mut rng))
         .collect::<Vec<_>>();
-    let g = (0..SAMPLES)
-        .map(|_| G::Projective::rand(&mut rng))
-        .collect::<Vec<_>>();
-    let g = <G::Projective>::batch_normalization_into_affine(&g);
+    let g = (0..SAMPLES).map(|_| G::rand(&mut rng)).collect::<Vec<_>>();
+    let g = G::batch_convert_to_mul_base(&g);
 
-    let naive = naive_var_base_msm(g.as_slice(), v.as_slice());
-    let fast = <G::Projective>::msm(g.as_slice(), v.as_slice());
+    let naive = naive_var_base_msm::<G>(g.as_slice(), v.as_slice());
+    let fast = G::msm(g.as_slice(), v.as_slice());
 
-    // assert!(ark_std::panic::catch_unwind(|| <G::Projective>::msm(&g[.. SAMPLES], &v[.. SAMPLES-1])).is_err());
-    assert_eq!(naive.into_affine(), fast.into_affine());
+    assert_eq!(naive, fast);
 }
 
-pub fn test_chunked_pippenger<G>()
-where
-    G: AffineCurve,
-    G::Projective: VariableBaseMSM,
-{
+pub fn test_chunked_pippenger<G: VariableBaseMSM>() {
     const SAMPLES: usize = 1 << 10;
 
     let mut rng = ark_std::test_rng();
@@ -49,26 +38,20 @@ where
     let v = (0..SAMPLES)
         .map(|_| G::ScalarField::rand(&mut rng).into_bigint())
         .collect::<Vec<_>>();
-    let g = (0..SAMPLES)
-        .map(|_| G::Projective::rand(&mut rng))
-        .collect::<Vec<_>>();
-    let g = <G::Projective>::batch_normalization_into_affine(&g);
+    let g = (0..SAMPLES).map(|_| G::rand(&mut rng)).collect::<Vec<_>>();
+    let g = G::batch_convert_to_mul_base(&g);
 
-    let arkworks = <G::Projective>::msm_bigint(g.as_slice(), v.as_slice());
+    let arkworks = G::msm_bigint(g.as_slice(), v.as_slice());
 
     let mut p = ChunkedPippenger::<G>::new(1 << 20);
     for (s, g) in v.iter().zip(g) {
         p.add(g, s);
     }
     let mine = p.finalize();
-    assert_eq!(arkworks.into_affine(), mine.into_affine());
+    assert_eq!(arkworks, mine);
 }
 
-pub fn test_hashmap_pippenger<G>()
-where
-    G: AffineCurve,
-    G::Projective: VariableBaseMSM,
-{
+pub fn test_hashmap_pippenger<G: VariableBaseMSM>() {
     const SAMPLES: usize = 1 << 10;
 
     let mut rng = ark_std::test_rng();
@@ -81,17 +64,15 @@ where
             x.into_bigint()
         })
         .collect::<Vec<_>>();
-    let g = (0..SAMPLES)
-        .map(|_| G::Projective::rand(&mut rng))
-        .collect::<Vec<_>>();
-    let g = <G::Projective>::batch_normalization_into_affine(&g);
+    let g = (0..SAMPLES).map(|_| G::rand(&mut rng)).collect::<Vec<_>>();
+    let g = G::batch_convert_to_mul_base(&g);
 
-    let arkworks = <G::Projective>::msm_bigint(g.as_slice(), v.as_slice());
+    let arkworks = G::msm_bigint(g.as_slice(), v.as_slice());
 
     let mut p = HashMapPippenger::<G>::new(1 << 20);
     for (s, g) in v_scal.iter().zip(g) {
         p.add(g, s);
     }
     let mine = p.finalize();
-    assert_eq!(arkworks.into_affine(), mine.into_affine());
+    assert_eq!(arkworks, mine);
 }
diff --git a/test-templates/src/pairing.rs b/test-templates/src/pairing.rs
new file mode 100644
index 000000000..8386969ca
--- /dev/null
+++ b/test-templates/src/pairing.rs
@@ -0,0 +1,54 @@
+#[macro_export]
+macro_rules!
test_pairing {
+    ($mod_name: ident; $Pairing: ty) => {
+        mod $mod_name {
+            pub const ITERATIONS: usize = 100;
+            use ark_ec::{pairing::*, CurveGroup, Group};
+            use ark_ff::{Field, PrimeField};
+            use ark_std::{test_rng, One, UniformRand, Zero};
+            #[test]
+            fn test_bilinearity() {
+                let mut rng = test_rng();
+                for _ in 0..ITERATIONS {
+                    let a: <$Pairing as Pairing>::G1 = UniformRand::rand(&mut rng);
+                    let b: <$Pairing as Pairing>::G2 = UniformRand::rand(&mut rng);
+                    let s: <$Pairing as Pairing>::ScalarField = UniformRand::rand(&mut rng);
+
+                    let sa = a * s;
+                    let sb = b * s;
+
+                    let ans1 = <$Pairing>::pairing(sa, b);
+                    let ans2 = <$Pairing>::pairing(a, sb);
+                    let ans3 = <$Pairing>::pairing(a, b) * s;
+
+                    assert_eq!(ans1, ans2);
+                    assert_eq!(ans2, ans3);
+
+                    assert_ne!(ans1, PairingOutput::zero());
+                    assert_ne!(ans2, PairingOutput::zero());
+                    assert_ne!(ans3, PairingOutput::zero());
+                    let group_order = <<$Pairing as Pairing>::ScalarField>::characteristic();
+
+                    assert_eq!(ans1.mul_bigint(group_order), PairingOutput::zero());
+                    assert_eq!(ans2.mul_bigint(group_order), PairingOutput::zero());
+                    assert_eq!(ans3.mul_bigint(group_order), PairingOutput::zero());
+                }
+            }
+
+            #[test]
+            fn test_multi_pairing() {
+                let rng = &mut test_rng();
+
+                for _ in 0..ITERATIONS {
+                    let a = <$Pairing as Pairing>::G1::rand(rng).into_affine();
+                    let b = <$Pairing as Pairing>::G2::rand(rng).into_affine();
+                    let c = <$Pairing as Pairing>::G1::rand(rng).into_affine();
+                    let d = <$Pairing as Pairing>::G2::rand(rng).into_affine();
+                    let ans1 = <$Pairing>::pairing(a, b) + &<$Pairing>::pairing(c, d);
+                    let ans2 = <$Pairing>::multi_pairing(&[a, c], &[b, d]);
+                    assert_eq!(ans1, ans2);
+                }
+            }
+        }
+    };
+}
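Example downstream usage of the macros introduced above (a minimal sketch, not part of the diff; the `ark_bls12_381` crate and its `Bls12_381`, `G1Projective`, and `G2Projective` types are only assumed example names):

    // Hypothetical tests/curves.rs in a curve crate that depends on
    // ark-algebra-test-templates; crate and type names here are illustrative.
    use ark_algebra_test_templates::{test_group, test_pairing};
    use ark_bls12_381::{Bls12_381, G1Projective, G2Projective};

    // The `sw` tail also pulls in the `curve` and `msm` arms of `__test_group!`,
    // so each invocation expands to a module of #[test] functions.
    test_group!(g1; G1Projective; sw);
    test_group!(g2; G2Projective; sw);
    test_pairing!(pairing; Bls12_381);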