From 401a2f0a3a4941319be1751b8e44a26d50622009 Mon Sep 17 00:00:00 2001 From: gpestana Date: Thu, 9 Feb 2023 13:36:06 -0300 Subject: [PATCH 01/24] Implements the approval voting methods in sp_npos_elections --- .../npos-elections/src/approval_voting.rs | 90 +++++++++++++++++++ primitives/npos-elections/src/lib.rs | 4 + primitives/npos-elections/src/phragmen.rs | 2 +- primitives/npos-elections/src/tests.rs | 33 ++++++- 4 files changed, 126 insertions(+), 3 deletions(-) create mode 100644 primitives/npos-elections/src/approval_voting.rs diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs new file mode 100644 index 0000000000000..00dab169f7612 --- /dev/null +++ b/primitives/npos-elections/src/approval_voting.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of the approval voting election method. +//! +//! This method allows voters to select many candidates and backing each of them with the same +//! vote weight. The candidates with the most backing are the election winners. + +use crate::{setup_inputs, ElectionResult, IdentifierT, PerThing128, VoteWeight}; +use sp_arithmetic::{traits::{Bounded, Zero}, Rounding, helpers_128bit::multiply_by_rational_with_rounding}; +use sp_std::cmp::Reverse; + +/// Execute an approvals voting election scheme. The return type is a list of winners and a weight +/// distribution vector of all voters who contribute to the winners. +/// +/// - Returning winners are sorted based on desirability. Voters are unsorted. +/// - The returning winners are zipped with their final backing stake. Yet, to get the exact final +/// weight distribution from the winner's point of view, one needs to build a support map. See +/// [`crate::SupportMap`] for more info. Note that this backing stake is computed in +/// ExtendedBalance and may be slightly different that what will be computed from the support map, +/// due to accuracy loss. +/// +/// This can only fail of the normalization fails. This can happen if for any of the resulting +/// assignments, `assignment.distribution.map(|p| p.deconstruct()).sum()` fails to fit inside +/// `UpperOf
<P>
`. A user of this crate may statically assert that this can never happen and safely +/// `expect` this to return `Ok`. +pub fn approval_voting( + to_elect: usize, + candidates: Vec, + voters: Vec<(AccountId, VoteWeight, impl IntoIterator)>, +) -> Result, crate::Error> { + let to_elect = to_elect.min(candidates.len()); + + let (mut candidates, mut voters) = setup_inputs(candidates, voters); + + candidates.sort_by_key(|c| Reverse(c.borrow().approval_stake)); + + let mut winners_count = 0; + let winners = candidates + .into_iter() + .take_while(|_| { + winners_count += 1; + winners_count <= to_elect + }) + .map(|w| { + w.borrow_mut().elected = true; + w + }) + .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().approval_stake)) + .collect(); + + for voter in &mut voters { + for edge in &mut voter.edges { + if edge.candidate.borrow().elected { + edge.weight = multiply_by_rational_with_rounding( + voter.budget, + edge.load.n(), + voter.load.n(), + Rounding::Down, + ).unwrap_or(Bounded::max_value()); + } else { + edge.weight = Zero::zero() + } + } + } + + let mut assignments = voters + .into_iter() + .filter_map(|v| v.into_assignment()) + .collect::>(); + let _ = assignments + .iter_mut() + .try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?; + + Ok(ElectionResult { winners, assignments }) +} diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index d0c9ed18caddc..5fbbd277cae06 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -24,6 +24,8 @@ //! - [`balance`](balancing::balance): Implements the star balancing algorithm. This iterative //! process can push a solution toward being more "balanced", which in turn can increase its //! score. +//! - [`approval_voting`](approval_voting::approval_voting): Implements an approval voting electoral +//! system where voters can back multiple candidates with the same stake. //! //! ### Terminology //! @@ -89,6 +91,7 @@ mod mock; #[cfg(test)] mod tests; +pub mod approval_voting; mod assignments; pub mod balancing; pub mod helpers; @@ -99,6 +102,7 @@ pub mod pjr; pub mod reduce; pub mod traits; +pub use approval_voting::*; pub use assignments::{Assignment, StakedAssignment}; pub use balancing::*; pub use helpers::*; diff --git a/primitives/npos-elections/src/phragmen.rs b/primitives/npos-elections/src/phragmen.rs index ca32780ed84b4..99c6522bdde77 100644 --- a/primitives/npos-elections/src/phragmen.rs +++ b/primitives/npos-elections/src/phragmen.rs @@ -57,7 +57,7 @@ const DEN: ExtendedBalance = ExtendedBalance::max_value(); /// - The returning weight distribution is _normalized_, meaning that it is guaranteed that the sum /// of the ratios in each voter's distribution sums up to exactly `P::one()`. /// -/// This can only fail of the normalization fails. This can happen if for any of the resulting +/// This can only fail if the normalization fails. This can happen if for any of the resulting /// assignments, `assignment.distribution.map(|p| p.deconstruct()).sum()` fails to fit inside /// `UpperOf
<P>
`. A user of this crate may statically assert that this can never happen and safely /// `expect` this to return `Ok`. diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 6f2e4fca77115..fca201de1e751 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -18,12 +18,41 @@ //! Tests for npos-elections. use crate::{ - balancing, helpers::*, mock::*, seq_phragmen, seq_phragmen_core, setup_inputs, to_support_map, - Assignment, BalancingConfig, ElectionResult, ExtendedBalance, StakedAssignment, Support, Voter, + approval_voting::*, balancing, helpers::*, mock::*, seq_phragmen, seq_phragmen_core, + setup_inputs, to_support_map, Assignment, BalancingConfig, ElectionResult, ExtendedBalance, + StakedAssignment, Support, Voter, }; use sp_arithmetic::{PerU16, Perbill, Percent, Permill}; use substrate_test_utils::assert_eq_uvec; +#[test] +fn approval_voting_works() { + let candidates = vec![1, 2, 3, 4]; + let voters = vec![ + (10, vec![1, 2]), + (20, vec![1, 2]), + (30, vec![1, 2, 3]), + (40, vec![4]), + ]; + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (40, 40)]); + + let voters = voters.iter().map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())).collect::>(); + + let ElectionResult::<_, Perbill> { winners, assignments } = + approval_voting(3, candidates, voters).unwrap(); + + assert_eq_uvec!(winners, vec![(1, 60), (2, 60), (4, 40)]); + assert_eq_uvec!( + assignments, + vec![ + Assignment { who: 10u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, + Assignment { who: 20u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, + Assignment { who: 30u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, + Assignment { who: 40u64, distribution: vec![(4, Perbill::from_percent(100))] }, + ] + ); +} + #[test] fn float_phragmen_poc_works() { let candidates = vec![1, 2, 3]; From 5e5d0ff57a0ddf5386bfa124a3bdb03e2e7cceba Mon Sep 17 00:00:00 2001 From: gpestana Date: Sun, 12 Feb 2023 17:58:02 -0300 Subject: [PATCH 02/24] Replaces phragmen elections in pallet-elections tests for approval voting --- .../benchmarking/src/lib.rs | 15 +++++++++++- frame/election-provider-support/src/lib.rs | 24 +++++++++++++++++++ .../election-provider-support/src/weights.rs | 8 +++++++ frame/elections/src/lib.rs | 6 ++--- .../npos-elections/src/approval_voting.rs | 2 +- 5 files changed, 50 insertions(+), 5 deletions(-) diff --git a/frame/election-provider-support/benchmarking/src/lib.rs b/frame/election-provider-support/benchmarking/src/lib.rs index 5323513da98d5..7f27cf7521032 100644 --- a/frame/election-provider-support/benchmarking/src/lib.rs +++ b/frame/election-provider-support/benchmarking/src/lib.rs @@ -23,7 +23,7 @@ use codec::Decode; use frame_benchmarking::v1::{benchmarks, Vec}; -use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen}; +use frame_election_provider_support::{ApprovalVoting, NposSolver, PhragMMS, SequentialPhragmen}; pub struct Pallet(frame_system::Pallet); pub trait Config: frame_system::Config {} @@ -88,4 +88,17 @@ benchmarks! { ::solve(d as usize, targets, voters).is_ok() ); } + + approval_voting { + let v in (VOTERS[0]) .. VOTERS[1]; + let t in (TARGETS[0]) .. TARGETS[1]; + let d in (VOTES_PER_VOTER[0]) .. 
VOTES_PER_VOTER[1]; + + let (voters, targets) = set_up_voters_targets::(v, t, d as usize); + }: { + assert!( + ApprovalVoting:: + ::solve(d as usize, targets, voters).is_ok() + ); + } } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 9d5d6c018e5e1..5c86cf94d0f0f 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -659,6 +659,30 @@ impl( + sp_std::marker::PhantomData<(AccountId, Accuracy)> +); + +impl NposSolver for ApprovalVoting +{ + type AccountId = AccountId; + type Accuracy = Accuracy; + type Error = sp_npos_elections::Error; + fn solve( + winners: usize, + targets: Vec, + voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, + ) -> Result, Self::Error> { + sp_npos_elections::approval_voting(winners, targets, voters) + } + + fn weight(voters: u32, targets: u32, vote_degree: u32) -> Weight { + T::approval_voting(voters, targets, vote_degree) + } +} + /// A voter, at the level of abstraction of this crate. pub type Voter = (AccountId, VoteWeight, BoundedVec); diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index 44075ba871228..26e637310ecb0 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -47,6 +47,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight; fn phragmms(v: u32, t: u32, d: u32, ) -> Weight; + fn approval_voting(v: u32, t: u32, d: u32, ) -> Weight; } /// Weights for pallet_election_provider_support_benchmarking using the Substrate node and recommended hardware. @@ -70,6 +71,10 @@ impl WeightInfo for SubstrateWeight { // Standard Error: 6_649_000 .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) } + + fn approval_voting(v: u32, t: u32, d: u32, ) -> Weight { + Weight::zero() + } } // For backwards compatibility and tests @@ -92,4 +97,7 @@ impl WeightInfo for () { // Standard Error: 6_649_000 .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) } + fn approval_voting(v: u32, t: u32, d: u32, ) -> Weight { + Weight::zero() + } } diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index d9713988433f7..41968019f4c6d 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -1286,7 +1286,7 @@ impl ContainsLengthBound for Pallet { mod tests { use super::*; use crate as elections; - use frame_election_provider_support::{weights::SubstrateWeight, SequentialPhragmen}; + use frame_election_provider_support::{weights::SubstrateWeight, ApprovalVoting}; use frame_support::{ assert_noop, assert_ok, dispatch::DispatchResultWithPostInfo, @@ -1398,7 +1398,7 @@ mod tests { parameter_types! 
{ pub const ElectionsPalletId: LockIdentifier = *b"phrelect"; - pub const MaxVoters: u32 = 512; + pub const MaxVoters: u32 = 128; pub const MaxCandidates: u32 = 64; } @@ -1420,7 +1420,7 @@ mod tests { type WeightInfo = (); type MaxVoters = MaxVoters; type MaxCandidates = MaxCandidates; - type ElectionSolver = SequentialPhragmen; + type ElectionSolver = ApprovalVoting; type SolverWeightInfo = SubstrateWeight; type MaxVotesPerVoter = ConstU32<16>; } diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs index 00dab169f7612..3a23c7c500853 100644 --- a/primitives/npos-elections/src/approval_voting.rs +++ b/primitives/npos-elections/src/approval_voting.rs @@ -22,7 +22,7 @@ use crate::{setup_inputs, ElectionResult, IdentifierT, PerThing128, VoteWeight}; use sp_arithmetic::{traits::{Bounded, Zero}, Rounding, helpers_128bit::multiply_by_rational_with_rounding}; -use sp_std::cmp::Reverse; +use sp_std::{vec::Vec, cmp::Reverse}; /// Execute an approvals voting election scheme. The return type is a list of winners and a weight /// distribution vector of all voters who contribute to the winners. From 57822160a4f73a625874ba385d503d420e0bc7bf Mon Sep 17 00:00:00 2001 From: gpestana Date: Sun, 12 Feb 2023 18:50:31 -0300 Subject: [PATCH 03/24] nits for checks --- frame/election-provider-support/src/weights.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index 26e637310ecb0..ad26debf382a1 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -72,7 +72,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) } - fn approval_voting(v: u32, t: u32, d: u32, ) -> Weight { + fn approval_voting(_v: u32, _t: u32, _d: u32, ) -> Weight { Weight::zero() } } @@ -97,7 +97,7 @@ impl WeightInfo for () { // Standard Error: 6_649_000 .saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64)) } - fn approval_voting(v: u32, t: u32, d: u32, ) -> Weight { + fn approval_voting(_v: u32, _t: u32, _d: u32, ) -> Weight { Weight::zero() } } From a7643af6dad59cdedafc2fba78e3be9fa31c9be4 Mon Sep 17 00:00:00 2001 From: gpestana Date: Sun, 12 Feb 2023 18:55:40 -0300 Subject: [PATCH 04/24] rfmt --- frame/election-provider-support/src/lib.rs | 35 ++++++------ .../npos-elections/src/approval_voting.rs | 53 ++++++++++--------- primitives/npos-elections/src/tests.rs | 29 ++++++---- 3 files changed, 63 insertions(+), 54 deletions(-) diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 5c86cf94d0f0f..a0f4dfb974c8d 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -661,26 +661,25 @@ impl( - sp_std::marker::PhantomData<(AccountId, Accuracy)> -); +pub struct ApprovalVoting(sp_std::marker::PhantomData<(AccountId, Accuracy)>); -impl NposSolver for ApprovalVoting +impl NposSolver + for ApprovalVoting { - type AccountId = AccountId; - type Accuracy = Accuracy; - type Error = sp_npos_elections::Error; - fn solve( - winners: usize, - targets: Vec, - voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, - ) -> Result, Self::Error> { - sp_npos_elections::approval_voting(winners, targets, voters) - } - - fn weight(voters: u32, targets: u32, vote_degree: u32) -> Weight { - T::approval_voting(voters, 
targets, vote_degree) - } + type AccountId = AccountId; + type Accuracy = Accuracy; + type Error = sp_npos_elections::Error; + fn solve( + winners: usize, + targets: Vec, + voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, + ) -> Result, Self::Error> { + sp_npos_elections::approval_voting(winners, targets, voters) + } + + fn weight(voters: u32, targets: u32, vote_degree: u32) -> Weight { + T::approval_voting(voters, targets, vote_degree) + } } /// A voter, at the level of abstraction of this crate. diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs index 3a23c7c500853..da4b0992bee4b 100644 --- a/primitives/npos-elections/src/approval_voting.rs +++ b/primitives/npos-elections/src/approval_voting.rs @@ -21,8 +21,12 @@ //! vote weight. The candidates with the most backing are the election winners. use crate::{setup_inputs, ElectionResult, IdentifierT, PerThing128, VoteWeight}; -use sp_arithmetic::{traits::{Bounded, Zero}, Rounding, helpers_128bit::multiply_by_rational_with_rounding}; -use sp_std::{vec::Vec, cmp::Reverse}; +use sp_arithmetic::{ + helpers_128bit::multiply_by_rational_with_rounding, + traits::{Bounded, Zero}, + Rounding, +}; +use sp_std::{cmp::Reverse, vec::Vec}; /// Execute an approvals voting election scheme. The return type is a list of winners and a weight /// distribution vector of all voters who contribute to the winners. @@ -56,33 +60,32 @@ pub fn approval_voting( winners_count += 1; winners_count <= to_elect }) - .map(|w| { - w.borrow_mut().elected = true; - w - }) + .map(|w| { + w.borrow_mut().elected = true; + w + }) .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().approval_stake)) .collect(); - for voter in &mut voters { - for edge in &mut voter.edges { - if edge.candidate.borrow().elected { - edge.weight = multiply_by_rational_with_rounding( - voter.budget, - edge.load.n(), - voter.load.n(), - Rounding::Down, - ).unwrap_or(Bounded::max_value()); - } else { - edge.weight = Zero::zero() - } - } - } + for voter in &mut voters { + for edge in &mut voter.edges { + if edge.candidate.borrow().elected { + edge.weight = multiply_by_rational_with_rounding( + voter.budget, + edge.load.n(), + voter.load.n(), + Rounding::Down, + ) + .unwrap_or(Bounded::max_value()); + } else { + edge.weight = Zero::zero() + } + } + } - let mut assignments = voters - .into_iter() - .filter_map(|v| v.into_assignment()) - .collect::>(); - let _ = assignments + let mut assignments = + voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); + let _ = assignments .iter_mut() .try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?; diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index fca201de1e751..81b065fa5a75a 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -28,15 +28,13 @@ use substrate_test_utils::assert_eq_uvec; #[test] fn approval_voting_works() { let candidates = vec![1, 2, 3, 4]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 2]), - (30, vec![1, 2, 3]), - (40, vec![4]), - ]; - let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (40, 40)]); + let voters = vec![(10, vec![1, 2]), (20, vec![1, 2]), (30, vec![1, 2, 3]), (40, vec![4])]; + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (40, 40)]); - let voters = voters.iter().map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())).collect::>(); + let voters = voters + .iter() + .map(|(ref v, ref vs)| (*v, 
stake_of(v), vs.clone())) + .collect::>(); let ElectionResult::<_, Perbill> { winners, assignments } = approval_voting(3, candidates, voters).unwrap(); @@ -45,9 +43,18 @@ fn approval_voting_works() { assert_eq_uvec!( assignments, vec![ - Assignment { who: 10u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, - Assignment { who: 20u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, - Assignment { who: 30u64, distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] }, + Assignment { + who: 10u64, + distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + }, + Assignment { + who: 20u64, + distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + }, + Assignment { + who: 30u64, + distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + }, Assignment { who: 40u64, distribution: vec![(4, Perbill::from_percent(100))] }, ] ); From e22b87c7412b57b1d8d82da3eff6c786cba65d88 Mon Sep 17 00:00:00 2001 From: gpestana Date: Sun, 12 Feb 2023 19:51:08 -0300 Subject: [PATCH 05/24] Uses ApprovalVoting in node runtime --- bin/node/runtime/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4ae26abc882ba..c1bc984b7b85a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -25,7 +25,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::{ onchain, weights::SubstrateWeight, BalancingConfig, ElectionDataProvider, SequentialPhragmen, - VoteWeight, + VoteWeight, ApprovalVoting, }; use frame_support::{ construct_runtime, @@ -1036,7 +1036,7 @@ impl pallet_elections::Config for Runtime { type MaxVoters = MaxVoters; type MaxVotesPerVoter = MaxVotesPerVoter; type MaxCandidates = MaxCandidates; - type ElectionSolver = SequentialPhragmen; + type ElectionSolver = ApprovalVoting; type SolverWeightInfo = SubstrateWeight; type WeightInfo = pallet_elections::weights::SubstrateWeight; } From 46697cc7c2b7e487a8523317738e95175c4aded6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Tue, 14 Feb 2023 05:24:21 +0000 Subject: [PATCH 06/24] Update primitives/npos-elections/src/approval_voting.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- primitives/npos-elections/src/approval_voting.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs index da4b0992bee4b..9e5ed047035cd 100644 --- a/primitives/npos-elections/src/approval_voting.rs +++ b/primitives/npos-elections/src/approval_voting.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2023 Parity Technologies (UK) Ltd. +// Copyright (C) 2023 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); From 0b4a52da6d4c29b8eb270598af8ff9af01385a13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Tue, 14 Feb 2023 06:45:35 +0100 Subject: [PATCH 07/24] reverts num max voters tests --- frame/elections/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index 41968019f4c6d..683c7de46c52f 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -1398,7 +1398,7 @@ mod tests { parameter_types! { pub const ElectionsPalletId: LockIdentifier = *b"phrelect"; - pub const MaxVoters: u32 = 128; + pub const MaxVoters: u32 = 512; pub const MaxCandidates: u32 = 64; } From e337c269cef4698c7d5dd75b0b8576fe8f131d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 8 Feb 2023 16:40:45 -0300 Subject: [PATCH 08/24] Rework generated API docs (#13178) --- frame/contracts/proc-macro/src/lib.rs | 219 +++++++++++++++----------- frame/contracts/src/wasm/mod.rs | 7 +- 2 files changed, 132 insertions(+), 94 deletions(-) diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index de7b9b881d305..c5f52f43a7d87 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -25,10 +25,12 @@ extern crate alloc; use alloc::{ + collections::BTreeMap, format, string::{String, ToString}, vec::Vec, }; +use core::cmp::Reverse; use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::{quote, quote_spanned, ToTokens}; @@ -160,7 +162,7 @@ struct EnvDef { /// Parsed host function definition. struct HostFn { item: syn::ItemFn, - module: String, + version: u8, name: String, returns: HostFnReturn, is_stable: bool, @@ -208,7 +210,7 @@ impl HostFn { let span = item.span(); let mut attrs = item.attrs.clone(); attrs.retain(|a| !a.path.is_ident("doc")); - let mut maybe_module = None; + let mut maybe_version = None; let mut is_stable = true; let mut alias_to = None; let mut not_deprecated = true; @@ -216,12 +218,11 @@ impl HostFn { let ident = attr.path.get_ident().ok_or(err(span, msg))?.to_string(); match ident.as_str() { "version" => { - if maybe_module.is_some() { + if maybe_version.is_some() { return Err(err(span, "#[version] can only be specified once")) } - let ver: u8 = - attr.parse_args::().and_then(|lit| lit.base10_parse())?; - maybe_module = Some(format!("seal{}", ver)); + maybe_version = + Some(attr.parse_args::().and_then(|lit| lit.base10_parse())?); }, "unstable" => { if !is_stable { @@ -341,7 +342,7 @@ impl HostFn { Ok(Self { item, - module: maybe_module.unwrap_or_else(|| "seal0".to_string()), + version: maybe_version.unwrap_or_default(), name, returns, is_stable, @@ -355,6 +356,10 @@ impl HostFn { _ => Err(err(span, &msg)), } } + + fn module(&self) -> String { + format!("seal{}", self.version) + } } impl EnvDef { @@ -409,83 +414,116 @@ fn is_valid_special_arg(idx: usize, arg: &FnArg) -> bool { matches!(*pat.ty, syn::Type::Infer(_)) } -/// Expands documentation for host functions. 
-fn expand_docs(def: &mut EnvDef) -> TokenStream2 { - let mut modules = def.host_funcs.iter().map(|f| f.module.clone()).collect::>(); - modules.sort(); - modules.dedup(); - - let doc_selector = |a: &syn::Attribute| a.path.is_ident("doc"); - let docs = modules.iter().map(|m| { - let funcs = def.host_funcs.iter_mut().map(|f| { - if *m == f.module { - // Remove auxiliary args: `ctx: _` and `memory: _` - f.item.sig.inputs = f - .item - .sig - .inputs - .iter() - .skip(2) - .map(|p| p.clone()) - .collect::>(); - let func_decl = f.item.sig.to_token_stream(); - let func_doc = if let Some(origin_fn) = &f.alias_to { - let alias_doc = format!( - "This is just an alias function to [`{0}()`][`Self::{0}`] with backwards-compatible prefixed identifier.", - origin_fn, - ); - quote! { #[doc = #alias_doc] } - - } else { - let func_docs = f.item.attrs.iter().filter(|a| doc_selector(a)).map(|d| { - let docs = d.to_token_stream(); - quote! { #docs } - }); - let unstable_notice = if !f.is_stable { - let warning = "\n # Unstable\n\n \ - This function is unstable and it is a subject to change (or removal) in the future.\n \ - Do not deploy a contract using it to a production chain."; - quote! { #[doc = #warning] } - } else { - quote! {} - }; - quote! { - #( #func_docs )* - #unstable_notice - } - }; - quote! { - #func_doc - #func_decl; - } - } else { - quote! {} - } - }); +fn expand_func_doc(func: &HostFn) -> TokenStream2 { + // Remove auxiliary args: `ctx: _` and `memory: _` + let func_decl = { + let mut sig = func.item.sig.clone(); + sig.inputs = sig + .inputs + .iter() + .skip(2) + .map(|p| p.clone()) + .collect::>(); + sig.to_token_stream() + }; + let func_doc = { + let func_docs = if let Some(origin_fn) = &func.alias_to { + let alias_doc = format!( + "This is just an alias function to [`{0}()`][`Self::{0}`] with backwards-compatible prefixed identifier.", + origin_fn, + ); + quote! { #[doc = #alias_doc] } + } else { + let docs = func.item.attrs.iter().filter(|a| a.path.is_ident("doc")).map(|d| { + let docs = d.to_token_stream(); + quote! { #docs } + }); + quote! { #( #docs )* } + }; + let deprecation_notice = if !func.not_deprecated { + let warning = "\n # Deprecated\n\n \ + This function is deprecated and will be removed in future versions.\n \ + No new code or contracts with this API can be deployed."; + quote! { #[doc = #warning] } + } else { + quote! {} + }; + let import_notice = { + let info = format!( + "\n# Wasm Import Statement\n```wat\n(import \"seal{}\" \"{}\" (func ...))\n```", + func.version, func.name, + ); + quote! { #[doc = #info] } + }; + let unstable_notice = if !func.is_stable { + let warning = "\n # Unstable\n\n \ + This function is unstable and it is a subject to change (or removal) in the future.\n \ + Do not deploy a contract using it to a production chain."; + quote! { #[doc = #warning] } + } else { + quote! {} + }; + quote! { + #deprecation_notice + #func_docs + #import_notice + #unstable_notice + } + }; + quote! { + #func_doc + #func_decl; + } +} - let module = Ident::new(m, Span::call_site()); - let module_doc = format!( - "Documentation of the API available to contracts by importing `{}` WASM module.", - module - ); +/// Expands documentation for host functions. 
+fn expand_docs(def: &EnvDef) -> TokenStream2 { + // Create the `Current` trait with only the newest versions + // we sort so that only the newest versions make it into `docs` + let mut current_docs = BTreeMap::new(); + let mut funcs: Vec<_> = def.host_funcs.iter().filter(|f| f.alias_to.is_none()).collect(); + funcs.sort_unstable_by_key(|func| Reverse(func.version)); + for func in funcs { + if current_docs.contains_key(&func.name) { + continue + } + current_docs.insert(func.name.clone(), expand_func_doc(&func)); + } + let current_docs = current_docs.values(); + // Create the `legacy` module with all functions + // Maps from version to list of functions that have this version + let mut legacy_doc = BTreeMap::>::new(); + for func in def.host_funcs.iter() { + legacy_doc.entry(func.version).or_default().push(expand_func_doc(&func)); + } + let legacy_doc = legacy_doc.into_iter().map(|(version, funcs)| { + let doc = format!("All functions available in the **seal{}** module", version); + let version = Ident::new(&format!("Version{version}"), Span::call_site()); quote! { - #[doc = #module_doc] - pub mod #module { - use crate::wasm::runtime::{TrapReason, ReturnCode}; - /// Every function in this trait represents (at least) one function that can be imported by a contract. - /// - /// The function's identifier is to be set as the name in the import definition. - /// Where it is specifically indicated, an _alias_ function having `seal_`-prefixed identifier and - /// just the same signature and body, is also available (for backwards-compatibility purposes). - pub trait Api { - #( #funcs )* - } + #[doc = #doc] + pub trait #version { + #( #funcs )* } } }); + quote! { - #( #docs )* + /// Contains only the latest version of each function. + /// + /// In reality there are more functions available but they are all obsolete: When a function + /// is updated a new **version** is added and the old versions stays available as-is. + /// We only list the newest version here. Some functions are available under additional + /// names (aliases) for historic reasons which are omitted here. + /// + /// If you want an overview of all the functions available to a contact all you need + /// to look at is this trait. It contains only the latest version of each + /// function and no aliases. If you are writing a contract(language) from scratch + /// this is where you should look at. + pub trait Current { + #( #current_docs )* + } + #( #legacy_doc )* } } @@ -493,25 +531,26 @@ fn expand_docs(def: &mut EnvDef) -> TokenStream2 { /// Should generate source code for: /// - implementations of the host functions to be added to the wasm runtime environment (see /// `expand_impls()`). -fn expand_env(def: &mut EnvDef, docs: bool) -> TokenStream2 { +fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { let impls = expand_impls(def); let docs = docs.then_some(expand_docs(def)).unwrap_or(TokenStream2::new()); quote! { pub struct Env; #impls - /// Contains the documentation of the API available to contracts. + /// Documentation of the API (host functions) available to contracts. /// - /// In order to generate this documentation, pass `doc` attribute to the [`#[define_env]`][`macro@define_env`] macro: - /// `#[define_env(doc)]`, and then run `cargo doc`. + /// The `Current` trait might be the most useful doc to look at. The versioned + /// traits only exist for reference: If trying to find out if a specific version of + /// `pallet-contracts` contains a certain function. /// - /// This module is not meant to be used by any code. 
Rather, it is meant to be consumed by humans through rustdoc. + /// # Note /// - /// Every function described in this module's sub module's traits uses this sub module's identifier - /// as its imported module name. The identifier of the function is the function's imported name. - /// According to the [WASM spec of imports](https://webassembly.github.io/spec/core/text/modules.html#text-import). + /// This module is not meant to be used by any code. Rather, it is meant to be + /// consumed by humans through rustdoc. #[cfg(doc)] pub mod api_doc { + use super::{TrapReason, ReturnCode}; #docs } } @@ -520,7 +559,7 @@ fn expand_env(def: &mut EnvDef, docs: bool) -> TokenStream2 { /// Generates for every host function: /// - real implementation, to register it in the contract execution environment; /// - dummy implementation, to be used as mocks for contract validation step. -fn expand_impls(def: &mut EnvDef) -> TokenStream2 { +fn expand_impls(def: &EnvDef) -> TokenStream2 { let impls = expand_functions(def, true, quote! { crate::wasm::Runtime }); let dummy_impls = expand_functions(def, false, quote! { () }); @@ -553,16 +592,12 @@ fn expand_impls(def: &mut EnvDef) -> TokenStream2 { } } -fn expand_functions( - def: &mut EnvDef, - expand_blocks: bool, - host_state: TokenStream2, -) -> TokenStream2 { +fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) -> TokenStream2 { let impls = def.host_funcs.iter().map(|f| { // skip the context and memory argument let params = f.item.sig.inputs.iter().skip(2); let (module, name, body, wasm_output, output) = ( - &f.module, + f.module(), &f.name, &f.item.block, f.returns.to_wasm_sig(), diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 6b9cefcdd2d96..9bf36b47f0b06 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -24,8 +24,13 @@ mod runtime; #[cfg(feature = "runtime-benchmarks")] pub use crate::wasm::code_cache::reinstrument; + #[cfg(doc)] pub use crate::wasm::runtime::api_doc; + +#[cfg(test)] +pub use tests::MockExt; + pub use crate::wasm::{ prepare::TryInstantiate, runtime::{ @@ -45,8 +50,6 @@ use frame_support::dispatch::{DispatchError, DispatchResult}; use sp_core::Get; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; -#[cfg(test)] -pub use tests::MockExt; use wasmi::{ Config as WasmiConfig, Engine, Instance, Linker, Memory, MemoryType, Module, StackLimits, Store, }; From 8511c9b03c63eb0b658675a0f22794a2ffa11f2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 9 Feb 2023 11:00:55 +0100 Subject: [PATCH 09/24] pallet-scheduler: Ensure we request a preimage (#13340) * pallet-scheduler: Ensure we request a preimage The scheduler was not requesting a preimage. When a preimage is requested, a user can deposit it without paying any fees. 
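For clarity, the intended flow after this change (a sketch only, using the names from the diff below; not additional code to apply): when a call is scheduled as a hash lookup, the scheduler now also requests the preimage, so it can later be noted without a deposit.

```rust
// Sketch of the new scheduling flow in `do_schedule` / `do_schedule_named`:
let lookup_hash = call.lookup_hash();        // `None` for inline calls
let res = Self::place_task(when, task).map_err(|x| x.0)?;
if let Some(hash) = lookup_hash {
    // Request the call's preimage so it can be provided free of charge.
    T::Preimages::request(&hash);
}
Ok(res)
```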
* Review changes --- frame/scheduler/src/lib.rs | 23 ++++++++++++++++-- frame/scheduler/src/tests.rs | 23 +++++++++++++----- frame/support/src/traits/preimages.rs | 35 ++++++++++++++++++--------- 3 files changed, 62 insertions(+), 19 deletions(-) diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index afc4fd66e76cb..dfec98d4e443a 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -777,6 +777,8 @@ impl Pallet { ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; + let lookup_hash = call.lookup_hash(); + // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) @@ -790,7 +792,14 @@ impl Pallet { origin, _phantom: PhantomData, }; - Self::place_task(when, task).map_err(|x| x.0) + let res = Self::place_task(when, task).map_err(|x| x.0)?; + + if let Some(hash) = lookup_hash { + // Request the call to be made available. + T::Preimages::request(&hash); + } + + Ok(res) } fn do_cancel( @@ -862,6 +871,8 @@ impl Pallet { let when = Self::resolve_time(when)?; + let lookup_hash = call.lookup_hash(); + // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) @@ -876,7 +887,14 @@ impl Pallet { origin, _phantom: Default::default(), }; - Self::place_task(when, task).map_err(|x| x.0) + let res = Self::place_task(when, task).map_err(|x| x.0)?; + + if let Some(hash) = lookup_hash { + // Request the call to be made available. + T::Preimages::request(&hash); + } + + Ok(res) } fn do_cancel_named(origin: Option, id: TaskName) -> DispatchResult { @@ -1027,6 +1045,7 @@ impl Pallet { } else { Agenda::::remove(when); } + postponed == 0 } diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index e5467fc8db55f..a261c6718616d 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -58,9 +58,10 @@ fn scheduling_with_preimages_works() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); let hash = ::Hashing::hash_of(&call); let len = call.using_encoded(|x| x.len()) as u32; - let hashed = Preimage::pick(hash, len); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); + // Important to use here `Bounded::Lookup` to ensure that we request the hash. + let hashed = Bounded::Lookup { hash, len }; assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); assert!(Preimage::is_requested(&hash)); run_to_block(3); assert!(logger::log().is_empty()); @@ -479,8 +480,10 @@ fn scheduler_handles_periodic_unavailable_preimage() { let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 }); let hash = ::Hashing::hash_of(&call); let len = call.using_encoded(|x| x.len()) as u32; - let bound = Preimage::pick(hash, len); - assert_ok!(Preimage::note(call.encode().into())); + // Important to use here `Bounded::Lookup` to ensure that we request the hash. + let bound = Bounded::Lookup { hash, len }; + // The preimage isn't requested yet. + assert!(!Preimage::is_requested(&hash)); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), @@ -489,11 +492,18 @@ fn scheduler_handles_periodic_unavailable_preimage() { root(), bound.clone(), )); + + // The preimage is requested. + assert!(Preimage::is_requested(&hash)); + + // Note the preimage. + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), call.encode())); + // Executes 1 times till block 4. 
run_to_block(4); assert_eq!(logger::log().len(), 1); - // Unnote the preimage. + // Unnote the preimage Preimage::unnote(&hash); // Does not ever execute again. @@ -1129,7 +1139,8 @@ fn postponed_named_task_cannot_be_rescheduled() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let len = call.using_encoded(|x| x.len()) as u32; - let hashed = Preimage::pick(hash, len); + // Important to use here `Bounded::Lookup` to ensure that we request the hash. + let hashed = Bounded::Lookup { hash, len }; let name: [u8; 32] = hash.as_ref().try_into().unwrap(); let address = Scheduler::do_schedule_named( diff --git a/frame/support/src/traits/preimages.rs b/frame/support/src/traits/preimages.rs index 594532ba96903..ce3537c792c08 100644 --- a/frame/support/src/traits/preimages.rs +++ b/frame/support/src/traits/preimages.rs @@ -26,6 +26,9 @@ use sp_std::borrow::Cow; pub type Hash = H256; pub type BoundedInline = crate::BoundedVec>; +/// The maximum we expect a single legacy hash lookup to be. +const MAX_LEGACY_LEN: u32 = 1_000_000; + #[derive( Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, scale_info::TypeInfo, RuntimeDebug, )] @@ -67,20 +70,25 @@ impl Bounded { /// Returns the hash of the preimage. /// /// The hash is re-calculated every time if the preimage is inlined. - pub fn hash(&self) -> H256 { + pub fn hash(&self) -> Hash { use Bounded::*; match self { - Legacy { hash, .. } => *hash, + Lookup { hash, .. } | Legacy { hash, .. } => *hash, Inline(x) => blake2_256(x.as_ref()).into(), - Lookup { hash, .. } => *hash, } } -} -// The maximum we expect a single legacy hash lookup to be. -const MAX_LEGACY_LEN: u32 = 1_000_000; + /// Returns the hash to lookup the preimage. + /// + /// If this is a `Bounded::Inline`, `None` is returned as no lookup is required. + pub fn lookup_hash(&self) -> Option { + use Bounded::*; + match self { + Lookup { hash, .. } | Legacy { hash, .. } => Some(*hash), + Inline(_) => None, + } + } -impl Bounded { /// Returns the length of the preimage or `None` if the length is unknown. pub fn len(&self) -> Option { match self { @@ -168,8 +176,11 @@ pub trait QueryPreimage { } } - /// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. This may not - /// be `peek`-able or `realize`-able. + /// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. + /// + /// It also directly requests the given `hash` using [`Self::request`]. + /// + /// This may not be `peek`-able or `realize`-able. fn pick(hash: Hash, len: u32) -> Bounded { Self::request(&hash); Bounded::Lookup { hash, len } @@ -228,10 +239,12 @@ pub trait StorePreimage: QueryPreimage { Self::unrequest(hash) } - /// Convert an otherwise unbounded or large value into a type ready for placing in storage. The - /// result is a type whose `MaxEncodedLen` is 131 bytes. + /// Convert an otherwise unbounded or large value into a type ready for placing in storage. + /// + /// The result is a type whose `MaxEncodedLen` is 131 bytes. /// /// NOTE: Once this API is used, you should use either `drop` or `realize`. + /// The value is also noted using [`Self::note`]. 
fn bound(t: T) -> Result, DispatchError> { let data = t.encode(); let len = data.len() as u32; From 165155326cd5e27a47029c5820d637d3cace4598 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Thu, 9 Feb 2023 12:47:39 +0100 Subject: [PATCH 10/24] [Fix] Try-state feature-gated for BagsList (#13296) * [Fix] Try-state feature-gated for BagsList * fix comment * fix try_state remote-tests * feature-gate try-state remote test for bags-list * remove try-state from a migration * more SortedListProvider fixes * more fixes * more fixes to allow do_try_state usage in other crates * do-try-state for fuzz * more fixes * more fixes * remove feature-flag * do-try-state * fix review comments * Update frame/bags-list/src/mock.rs Co-authored-by: Anton --------- Co-authored-by: parity-processbot <> Co-authored-by: Anton --- frame/bags-list/Cargo.toml | 2 +- frame/bags-list/fuzzer/src/main.rs | 2 +- frame/bags-list/remote-tests/Cargo.toml | 2 +- frame/bags-list/remote-tests/src/try_state.rs | 5 ++-- frame/bags-list/src/lib.rs | 10 +++++++- frame/bags-list/src/list/mod.rs | 17 +++++++------ frame/bags-list/src/list/tests.rs | 25 ++++++++++--------- frame/bags-list/src/mock.rs | 2 +- frame/election-provider-support/Cargo.toml | 1 + frame/election-provider-support/src/lib.rs | 3 ++- frame/staking/Cargo.toml | 2 +- frame/staking/src/migrations.rs | 1 - frame/staking/src/pallet/impls.rs | 4 +++ 13 files changed, 46 insertions(+), 30 deletions(-) diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index e3a4965f6c086..379698b1d39e1 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -75,4 +75,4 @@ fuzz = [ "sp-tracing", "frame-election-provider-support/fuzz", ] -try-runtime = [ "frame-support/try-runtime" ] +try-runtime = [ "frame-support/try-runtime", "frame-election-provider-support/try-runtime" ] diff --git a/frame/bags-list/fuzzer/src/main.rs b/frame/bags-list/fuzzer/src/main.rs index 9f7ca464cc2b8..c78e2a13076d5 100644 --- a/frame/bags-list/fuzzer/src/main.rs +++ b/frame/bags-list/fuzzer/src/main.rs @@ -88,7 +88,7 @@ fn main() { }, } - assert!(BagsList::try_state().is_ok()); + assert!(BagsList::do_try_state().is_ok()); }) }); } diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml index 9fb6d0154b302..6e951b43a4aeb 100644 --- a/frame/bags-list/remote-tests/Cargo.toml +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # frame pallet-staking = { path = "../../staking", version = "4.0.0-dev" } -pallet-bags-list = { path = "../../bags-list", version = "4.0.0-dev" } +pallet-bags-list = { path = "../../bags-list", version = "4.0.0-dev", features = ["fuzz"] } frame-election-provider-support = { path = "../../election-provider-support", version = "4.0.0-dev" } frame-system = { path = "../../system", version = "4.0.0-dev" } frame-support = { path = "../../support", version = "4.0.0-dev" } diff --git a/frame/bags-list/remote-tests/src/try_state.rs b/frame/bags-list/remote-tests/src/try_state.rs index 514c80d72ab67..9ed877a43afe1 100644 --- a/frame/bags-list/remote-tests/src/try_state.rs +++ b/frame/bags-list/remote-tests/src/try_state.rs @@ -16,7 +16,6 @@ //! Test to execute the sanity-check of the voter bag. 
-use frame_election_provider_support::SortedListProvider; use frame_support::{ storage::generator::StorageMap, traits::{Get, PalletInfoAccess}, @@ -51,7 +50,9 @@ pub async fn execute( ext.execute_with(|| { sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); - pallet_bags_list::Pallet::::try_state().unwrap(); + + pallet_bags_list::Pallet::::do_try_state().unwrap(); + log::info!(target: crate::LOG_TARGET, "executed bags-list sanity check with no errors."); crate::display_and_check_bags::(currency_unit, currency_name); diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 14f8a613eb798..f04c685ae386c 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -274,6 +274,13 @@ pub mod pallet { } } +#[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] +impl, I: 'static> Pallet { + pub fn do_try_state() -> Result<(), &'static str> { + List::::do_try_state() + } +} + impl, I: 'static> Pallet { /// Move an account from one bag to another, depositing an event on success. /// @@ -348,8 +355,9 @@ impl, I: 'static> SortedListProvider for Pallet List::::unsafe_regenerate(all, score_of) } + #[cfg(feature = "try-runtime")] fn try_state() -> Result<(), &'static str> { - List::::try_state() + Self::do_try_state() } fn unsafe_clear() { diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 272526ad1a636..4082a5324091b 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -220,9 +220,6 @@ impl, I: 'static> List { crate::ListBags::::remove(removed_bag); } - #[cfg(feature = "std")] - debug_assert_eq!(Self::try_state(), Ok(())); - num_affected } @@ -514,7 +511,8 @@ impl, I: 'static> List { /// * length of this list is in sync with `ListNodes::count()`, /// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure /// all bags and nodes are checked per *any* update to `List`. - pub(crate) fn try_state() -> Result<(), &'static str> { + #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] + pub(crate) fn do_try_state() -> Result<(), &'static str> { let mut seen_in_list = BTreeSet::new(); ensure!( Self::iter().map(|node| node.id).all(|id| seen_in_list.insert(id)), @@ -542,7 +540,7 @@ impl, I: 'static> List { thresholds.into_iter().filter_map(|t| Bag::::get(t)) }; - let _ = active_bags.clone().try_for_each(|b| b.try_state())?; + let _ = active_bags.clone().try_for_each(|b| b.do_try_state())?; let nodes_in_bags_count = active_bags.clone().fold(0u32, |acc, cur| acc + cur.iter().count() as u32); @@ -553,7 +551,7 @@ impl, I: 'static> List { // check that all nodes are sane. We check the `ListNodes` storage item directly in case we // have some "stale" nodes that are not in a bag. for (_id, node) in crate::ListNodes::::iter() { - node.try_state()? + node.do_try_state()? } Ok(()) @@ -751,7 +749,8 @@ impl, I: 'static> Bag { /// * Ensures head has no prev. /// * Ensures tail has no next. /// * Ensures there are no loops, traversal from head to tail is correct. - fn try_state(&self) -> Result<(), &'static str> { + #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] + fn do_try_state(&self) -> Result<(), &'static str> { frame_support::ensure!( self.head() .map(|head| head.prev().is_none()) @@ -790,6 +789,7 @@ impl, I: 'static> Bag { } /// Check if the bag contains a node with `id`. 
+ #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] fn contains(&self, id: &T::AccountId) -> bool { self.iter().any(|n| n.id() == id) } @@ -894,7 +894,8 @@ impl, I: 'static> Node { self.bag_upper } - fn try_state(&self) -> Result<(), &'static str> { + #[cfg(any(test, feature = "try-runtime", feature = "fuzz"))] + fn do_try_state(&self) -> Result<(), &'static str> { let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; let id = self.id(); diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs index 966ea1a74c71c..3c4aa7c86634d 100644 --- a/frame/bags-list/src/list/tests.rs +++ b/frame/bags-list/src/list/tests.rs @@ -137,6 +137,7 @@ fn migrate_works() { BagThresholds::set(NEW_THRESHOLDS); // and we call List::::migrate(old_thresholds); + assert_eq!(List::::do_try_state(), Ok(())); // then assert_eq!( @@ -352,13 +353,13 @@ mod list { #[test] fn try_state_works() { ExtBuilder::default().build_and_execute_no_post_check(|| { - assert_ok!(List::::try_state()); + assert_ok!(List::::do_try_state()); }); // make sure there are no duplicates. ExtBuilder::default().build_and_execute_no_post_check(|| { Bag::::get(10).unwrap().insert_unchecked(2, 10); - assert_eq!(List::::try_state(), Err("duplicate identified")); + assert_eq!(List::::do_try_state(), Err("duplicate identified")); }); // ensure count is in sync with `ListNodes::count()`. @@ -372,7 +373,7 @@ mod list { CounterForListNodes::::mutate(|counter| *counter += 1); assert_eq!(crate::ListNodes::::count(), 5); - assert_eq!(List::::try_state(), Err("iter_count != stored_count")); + assert_eq!(List::::do_try_state(), Err("iter_count != stored_count")); }); } @@ -804,7 +805,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 13, 14]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.do_try_state()); // and the node isn't mutated when its removed assert_eq!(node_4, node_4_pre_remove); @@ -814,7 +815,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13, 14]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.do_try_state()); // when removing a tail that is not pointing at the head let node_14 = Node::::get(&14).unwrap(); @@ -822,7 +823,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.do_try_state()); // when removing a tail that is pointing at the head let node_13 = Node::::get(&13).unwrap(); @@ -830,7 +831,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3]); - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.do_try_state()); // when removing a node that is both the head & tail let node_3 = Node::::get(&3).unwrap(); @@ -846,7 +847,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![1, 12]); - assert_ok!(bag_10.try_state()); + assert_ok!(bag_10.do_try_state()); // when removing a head that is pointing at the tail let node_1 = Node::::get(&1).unwrap(); @@ -854,7 +855,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![12]); - assert_ok!(bag_10.try_state()); + assert_ok!(bag_10.do_try_state()); // and since we updated the bag's head/tail, we need to write this storage so we // can correctly `get` it again in later checks bag_10.put(); @@ -865,7 +866,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 18, 19]); - assert_ok!(bag_2000.try_state()); + assert_ok!(bag_2000.do_try_state()); // when removing a node that is pointing at tail, but not head let node_18 = Node::::get(&18).unwrap(); @@ -873,7 +874,7 @@ 
mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 19]); - assert_ok!(bag_2000.try_state()); + assert_ok!(bag_2000.do_try_state()); // finally, when reading from storage, the state of all bags is as expected assert_eq!( @@ -905,7 +906,7 @@ mod bags { // .. and the bag it was removed from let bag_1000 = Bag::::get(1_000).unwrap(); // is sane - assert_ok!(bag_1000.try_state()); + assert_ok!(bag_1000.do_try_state()); // and has the correct head and tail. assert_eq!(bag_1000.head, Some(3)); assert_eq!(bag_1000.tail, Some(4)); diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index 8cc96a988e72a..32f6bb09e4772 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -148,7 +148,7 @@ impl ExtBuilder { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); - List::::try_state().expect("Try-state post condition failed") + List::::do_try_state().expect("do_try_state post condition failed") }) } diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index c7f47e721d1aa..114caee793f1a 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -43,3 +43,4 @@ std = [ "sp-std/std", ] runtime-benchmarks = [] +try-runtime = [] diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index a0f4dfb974c8d..14018949e6da3 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -562,7 +562,8 @@ pub trait SortedListProvider { /// unbounded amount of storage accesses. fn unsafe_clear(); - /// Check internal state of list. Only meant for debugging. + /// Check internal state of the list. Only meant for debugging. + #[cfg(feature = "try-runtime")] fn try_state() -> Result<(), &'static str>; /// If `who` changes by the returned amount they are guaranteed to have a worst case change diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 3d5cf1161e8e5..79c0bb5c2a32d 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -74,4 +74,4 @@ runtime-benchmarks = [ "rand_chacha", "sp-staking/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = ["frame-support/try-runtime", "frame-election-provider-support/try-runtime"] diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 6253c3feed17d..a8d360c098137 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -386,7 +386,6 @@ pub mod v8 { Nominators::::iter().map(|(id, _)| id), Pallet::::weight_of_fn(), ); - debug_assert_eq!(T::VoterList::try_state(), Ok(())); StorageVersion::::put(ObsoleteReleases::V8_0_0); crate::log!( diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index e59c2c5a0620e..6a35e2b861565 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1451,9 +1451,11 @@ impl SortedListProvider for UseValidatorsMap { // nothing to do upon regenerate. 0 } + #[cfg(feature = "try-runtime")] fn try_state() -> Result<(), &'static str> { Ok(()) } + fn unsafe_clear() { #[allow(deprecated)] Validators::::remove_all(); @@ -1525,6 +1527,8 @@ impl SortedListProvider for UseNominatorsAndValidatorsM // nothing to do upon regenerate. 
0 } + + #[cfg(feature = "try-runtime")] fn try_state() -> Result<(), &'static str> { Ok(()) } From eb4cff9101004e56d9daaaaae17c6027faced199 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Fri, 10 Feb 2023 11:11:10 -0300 Subject: [PATCH 11/24] bump version of zombienet and update snaps links (#13359) --- .gitlab-ci.yml | 2 +- zombienet/0001-basic-warp-sync/test-warp-sync.toml | 6 +++--- .../test-validators-warp-sync.toml | 6 +++--- .../test-block-building-warp-sync.toml | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ae25a2c5953f5..ab3fd84d6e9ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -55,7 +55,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.22" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.34" .shared-default: &shared-default retry: diff --git a/zombienet/0001-basic-warp-sync/test-warp-sync.toml b/zombienet/0001-basic-warp-sync/test-warp-sync.toml index ae2810b6ecccd..272b5862e8e89 100644 --- a/zombienet/0001-basic-warp-sync/test-warp-sync.toml +++ b/zombienet/0001-basic-warp-sync/test-warp-sync.toml @@ -11,18 +11,18 @@ chain_spec_path = "zombienet/0001-basic-warp-sync/chain-spec.json" [[relaychain.nodes]] name = "alice" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "bob" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" #we need at least 3 nodes for warp sync [[relaychain.nodes]] name = "charlie" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "dave" diff --git a/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml b/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml index dae6e406f04da..df4414f5c8b5f 100644 --- a/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml +++ b/zombienet/0002-validators-warp-sync/test-validators-warp-sync.toml @@ -22,14 +22,14 @@ chain_spec_path = "zombienet/0002-validators-warp-sync/chain-spec.json" [[relaychain.nodes]] name = "charlie" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "dave" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + 
db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "eve" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" diff --git a/zombienet/0003-block-building-warp-sync/test-block-building-warp-sync.toml b/zombienet/0003-block-building-warp-sync/test-block-building-warp-sync.toml index 66176845cc78d..5119c919c70c4 100644 --- a/zombienet/0003-block-building-warp-sync/test-block-building-warp-sync.toml +++ b/zombienet/0003-block-building-warp-sync/test-block-building-warp-sync.toml @@ -12,17 +12,17 @@ chain_spec_path = "zombienet/0003-block-building-warp-sync/chain-spec.json" [[relaychain.nodes]] name = "alice" validator = true - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "bob" validator = true - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "charlie" validator = false - db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-0bb3f0be2ce41b5615b224215bcc8363aa0416a6.tgz" + db_snapshot="https://storage.googleapis.com/zombienet-db-snaps/substrate/0001-basic-warp-sync/chains-a233bbacff650aac6bcb56b4e4ef7db7dc143cfb.tgz" [[relaychain.nodes]] name = "dave" From e52858cc923dcb98329834cab5ea5cfe3726892b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Sat, 11 Feb 2023 18:35:04 +0100 Subject: [PATCH 12/24] Fix longest chain finalization target lookup (#13289) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Finalization target should be chosed as some ancestor of SelectChain::best_chain * More test assertions * Improve docs * Removed stale docs * Rename 'target' to 'base' in lookup method * Fix typo * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Rename 'target_hash' to 'base_hash' in 'SelectChain::finality_target()' * Apply suggestions from code review Co-authored-by: Anton * Docs improvement * Doc fix * Apply suggestions from code review Co-authored-by: Bastian Köcher * Apply more code suggestions --------- Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Anton Co-authored-by: Bastian Köcher --- client/consensus/common/src/longest_chain.rs | 80 ++++++- client/finality-grandpa/src/tests.rs | 2 +- client/service/test/src/client/mod.rs | 217 ++++++++++++------ primitives/blockchain/src/backend.rs | 84 ++----- .../consensus/common/src/select_chain.rs | 8 +- 5 files changed, 242 insertions(+), 149 deletions(-) diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index fab2c3a4c06fd..ece2486209ff3 100644 --- 
a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -21,7 +21,7 @@ use sc_client_api::backend; use sp_blockchain::{Backend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SelectChain}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{marker::PhantomData, sync::Arc}; /// Implement Longest Chain Select implementation @@ -48,15 +48,19 @@ where LongestChain { backend, _phantom: Default::default() } } - fn best_block_header(&self) -> sp_blockchain::Result<::Header> { + fn best_hash(&self) -> sp_blockchain::Result<::Hash> { let info = self.backend.blockchain().info(); let import_lock = self.backend.get_import_lock(); let best_hash = self .backend .blockchain() - .best_containing(info.best_hash, None, import_lock)? + .longest_containing(info.best_hash, import_lock)? .unwrap_or(info.best_hash); + Ok(best_hash) + } + fn best_header(&self) -> sp_blockchain::Result<::Header> { + let best_hash = self.best_hash()?; Ok(self .backend .blockchain() @@ -64,6 +68,65 @@ where .expect("given block hash was fetched from block in db; qed")) } + /// Returns the highest descendant of the given block that is a valid + /// candidate to be finalized. + /// + /// In this context, being a valid target means being an ancestor of + /// the best chain according to the `best_header` method. + /// + /// If `maybe_max_number` is `Some(max_block_number)` the search is + /// limited to block `number <= max_block_number`. In other words + /// as if there were no blocks greater than `max_block_number`. + fn finality_target( + &self, + base_hash: Block::Hash, + maybe_max_number: Option>, + ) -> sp_blockchain::Result { + use sp_blockchain::Error::{Application, MissingHeader}; + let blockchain = self.backend.blockchain(); + + let mut current_head = self.best_header()?; + let mut best_hash = current_head.hash(); + + let base_header = blockchain + .header(base_hash)? + .ok_or_else(|| MissingHeader(base_hash.to_string()))?; + let base_number = *base_header.number(); + + if let Some(max_number) = maybe_max_number { + if max_number < base_number { + let msg = format!( + "Requested a finality target using max number {} below the base number {}", + max_number, base_number + ); + return Err(Application(msg.into())) + } + + while current_head.number() > &max_number { + best_hash = *current_head.parent_hash(); + current_head = blockchain + .header(best_hash)? + .ok_or_else(|| MissingHeader(format!("{best_hash:?}")))?; + } + } + + while current_head.hash() != base_hash { + if *current_head.number() < base_number { + let msg = format!( + "Requested a finality target using a base {:?} not in the best chain {:?}", + base_hash, best_hash, + ); + return Err(Application(msg.into())) + } + let current_hash = *current_head.parent_hash(); + current_head = blockchain + .header(current_hash)? 
+ .ok_or_else(|| MissingHeader(format!("{best_hash:?}")))?; + } + + Ok(best_hash) + } + fn leaves(&self) -> Result::Hash>, sp_blockchain::Error> { self.backend.blockchain().leaves() } @@ -80,20 +143,15 @@ where } async fn best_chain(&self) -> Result<::Header, ConsensusError> { - LongestChain::best_block_header(self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string())) + LongestChain::best_header(self).map_err(|e| ConsensusError::ChainLookup(e.to_string())) } async fn finality_target( &self, - target_hash: Block::Hash, + base_hash: Block::Hash, maybe_max_number: Option>, ) -> Result { - let import_lock = self.backend.get_import_lock(); - self.backend - .blockchain() - .best_containing(target_hash, maybe_max_number, import_lock) - .map(|maybe_hash| maybe_hash.unwrap_or(target_hash)) + LongestChain::finality_target(self, base_hash, maybe_max_number) .map_err(|e| ConsensusError::ChainLookup(e.to_string())) } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 07ea4649148fb..62ef4709e1909 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -237,7 +237,7 @@ impl SelectChain for MockSelectChain { async fn finality_target( &self, - _target_hash: Hash, + _base_hash: Hash, _maybe_max_number: Option>, ) -> Result { Ok(self.finality_target.lock().take().unwrap()) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 97c22a1cb509e..12b92afc458b4 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -581,7 +581,7 @@ fn uncles_with_multiple_forks() { } #[test] -fn best_containing_on_longest_chain_with_single_chain_3_blocks() { +fn finality_target_on_longest_chain_with_single_chain_3_blocks() { // block tree: // G -> A1 -> A2 @@ -606,7 +606,7 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { } #[test] -fn best_containing_on_longest_chain_with_multiple_forks() { +fn finality_target_on_longest_chain_with_multiple_forks() { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 @@ -731,8 +731,13 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert!(leaves.contains(&d2.hash())); assert_eq!(leaves.len(), 4); - let finality_target = |target_hash, number| { - block_on(longest_chain_select.finality_target(target_hash, number)).unwrap() + // On error we return a quite improbable hash + let error_hash = Hash::from([0xff; 32]); + let finality_target = |target_hash, number| match block_on( + longest_chain_select.finality_target(target_hash, number), + ) { + Ok(hash) => hash, + Err(_) => error_hash, }; // search without restriction @@ -742,11 +747,11 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(a5.hash(), finality_target(a3.hash(), None)); assert_eq!(a5.hash(), finality_target(a4.hash(), None)); assert_eq!(a5.hash(), finality_target(a5.hash(), None)); - assert_eq!(b4.hash(), finality_target(b2.hash(), None)); - assert_eq!(b4.hash(), finality_target(b3.hash(), None)); - assert_eq!(b4.hash(), finality_target(b4.hash(), None)); - assert_eq!(c3.hash(), finality_target(c3.hash(), None)); - assert_eq!(d2.hash(), finality_target(d2.hash(), None)); + assert_eq!(error_hash, finality_target(b2.hash(), None)); + assert_eq!(error_hash, finality_target(b3.hash(), None)); + assert_eq!(error_hash, finality_target(b4.hash(), None)); + assert_eq!(error_hash, finality_target(c3.hash(), None)); + assert_eq!(error_hash, finality_target(d2.hash(), None)); // search only blocks 
with number <= 5. equivalent to without restriction for this scenario assert_eq!(a5.hash(), finality_target(genesis_hash, Some(5))); @@ -755,11 +760,11 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(a5.hash(), finality_target(a3.hash(), Some(5))); assert_eq!(a5.hash(), finality_target(a4.hash(), Some(5))); assert_eq!(a5.hash(), finality_target(a5.hash(), Some(5))); - assert_eq!(b4.hash(), finality_target(b2.hash(), Some(5))); - assert_eq!(b4.hash(), finality_target(b3.hash(), Some(5))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(5))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(5))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(5))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(5))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(5))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(5))); + assert_eq!(error_hash, finality_target(c3.hash(), Some(5))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(5))); // search only blocks with number <= 4 assert_eq!(a4.hash(), finality_target(genesis_hash, Some(4))); @@ -767,73 +772,72 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(a4.hash(), finality_target(a2.hash(), Some(4))); assert_eq!(a4.hash(), finality_target(a3.hash(), Some(4))); assert_eq!(a4.hash(), finality_target(a4.hash(), Some(4))); - assert_eq!(a5.hash(), finality_target(a5.hash(), Some(4))); - assert_eq!(b4.hash(), finality_target(b2.hash(), Some(4))); - assert_eq!(b4.hash(), finality_target(b3.hash(), Some(4))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(4))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(4))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(4))); + assert_eq!(error_hash, finality_target(a5.hash(), Some(4))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(4))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(4))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(4))); + assert_eq!(error_hash, finality_target(c3.hash(), Some(4))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(4))); // search only blocks with number <= 3 assert_eq!(a3.hash(), finality_target(genesis_hash, Some(3))); assert_eq!(a3.hash(), finality_target(a1.hash(), Some(3))); assert_eq!(a3.hash(), finality_target(a2.hash(), Some(3))); assert_eq!(a3.hash(), finality_target(a3.hash(), Some(3))); - assert_eq!(a4.hash(), finality_target(a4.hash(), Some(3))); - assert_eq!(a5.hash(), finality_target(a5.hash(), Some(3))); - assert_eq!(b3.hash(), finality_target(b2.hash(), Some(3))); - assert_eq!(b3.hash(), finality_target(b3.hash(), Some(3))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(3))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(3))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(3))); + assert_eq!(error_hash, finality_target(a4.hash(), Some(3))); + assert_eq!(error_hash, finality_target(a5.hash(), Some(3))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(3))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(3))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(3))); + assert_eq!(error_hash, finality_target(c3.hash(), Some(3))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(3))); // search only blocks with number <= 2 assert_eq!(a2.hash(), finality_target(genesis_hash, Some(2))); assert_eq!(a2.hash(), finality_target(a1.hash(), Some(2))); assert_eq!(a2.hash(), finality_target(a2.hash(), Some(2))); - assert_eq!(a3.hash(), 
finality_target(a3.hash(), Some(2))); - assert_eq!(a4.hash(), finality_target(a4.hash(), Some(2))); - assert_eq!(a5.hash(), finality_target(a5.hash(), Some(2))); - assert_eq!(b2.hash(), finality_target(b2.hash(), Some(2))); - assert_eq!(b3.hash(), finality_target(b3.hash(), Some(2))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(2))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(2))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(2))); + assert_eq!(error_hash, finality_target(a3.hash(), Some(2))); + assert_eq!(error_hash, finality_target(a4.hash(), Some(2))); + assert_eq!(error_hash, finality_target(a5.hash(), Some(2))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(2))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(2))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(2))); + assert_eq!(error_hash, finality_target(c3.hash(), Some(2))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(2))); // search only blocks with number <= 1 assert_eq!(a1.hash(), finality_target(genesis_hash, Some(1))); assert_eq!(a1.hash(), finality_target(a1.hash(), Some(1))); - assert_eq!(a2.hash(), finality_target(a2.hash(), Some(1))); - assert_eq!(a3.hash(), finality_target(a3.hash(), Some(1))); - assert_eq!(a4.hash(), finality_target(a4.hash(), Some(1))); - assert_eq!(a5.hash(), finality_target(a5.hash(), Some(1))); - - assert_eq!(b2.hash(), finality_target(b2.hash(), Some(1))); - assert_eq!(b3.hash(), finality_target(b3.hash(), Some(1))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(1))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(1))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(1))); + assert_eq!(error_hash, finality_target(a2.hash(), Some(1))); + assert_eq!(error_hash, finality_target(a3.hash(), Some(1))); + assert_eq!(error_hash, finality_target(a4.hash(), Some(1))); + assert_eq!(error_hash, finality_target(a5.hash(), Some(1))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(1))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(1))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(1))); + assert_eq!(error_hash, finality_target(c3.hash(), Some(1))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(1))); // search only blocks with number <= 0 assert_eq!(genesis_hash, finality_target(genesis_hash, Some(0))); - assert_eq!(a1.hash(), finality_target(a1.hash(), Some(0))); - assert_eq!(a2.hash(), finality_target(a2.hash(), Some(0))); - assert_eq!(a3.hash(), finality_target(a3.hash(), Some(0))); - assert_eq!(a4.hash(), finality_target(a4.hash(), Some(0))); - assert_eq!(a5.hash(), finality_target(a5.hash(), Some(0))); - assert_eq!(b2.hash(), finality_target(b2.hash(), Some(0))); - assert_eq!(b3.hash(), finality_target(b3.hash(), Some(0))); - assert_eq!(b4.hash(), finality_target(b4.hash(), Some(0))); - assert_eq!(c3.hash(), finality_target(c3.hash(), Some(0))); - assert_eq!(d2.hash(), finality_target(d2.hash(), Some(0))); + assert_eq!(error_hash, finality_target(a1.hash(), Some(0))); + assert_eq!(error_hash, finality_target(a2.hash(), Some(0))); + assert_eq!(error_hash, finality_target(a3.hash(), Some(0))); + assert_eq!(error_hash, finality_target(a4.hash(), Some(0))); + assert_eq!(error_hash, finality_target(a5.hash(), Some(0))); + assert_eq!(error_hash, finality_target(b2.hash(), Some(0))); + assert_eq!(error_hash, finality_target(b3.hash(), Some(0))); + assert_eq!(error_hash, finality_target(b4.hash(), Some(0))); + assert_eq!(error_hash, finality_target(c3.hash(), 
Some(0))); + assert_eq!(error_hash, finality_target(d2.hash(), Some(0))); } #[test] -fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { +fn finality_target_on_longest_chain_with_max_depth_higher_than_best() { // block tree: // G -> A1 -> A2 - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -845,10 +849,93 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { let genesis_hash = client.chain_info().genesis_hash; - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(10))).unwrap(), - ); + assert_eq!(a2.hash(), block_on(chain_select.finality_target(genesis_hash, Some(10))).unwrap(),); +} + +#[test] +fn finality_target_with_best_not_on_longest_chain() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // -> B2 -> (B3) -> B4 + // ^best + + let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let genesis_hash = client.chain_info().genesis_hash; + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); + + // A2 -> A3 + let a3 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); + + // A3 -> A4 + let a4 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); + + // A3 -> A5 + let a5 = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); + + assert_eq!(a5.hash(), block_on(chain_select.finality_target(genesis_hash, None)).unwrap()); + assert_eq!(a5.hash(), block_on(chain_select.finality_target(a1.hash(), None)).unwrap()); + assert_eq!(a5.hash(), block_on(chain_select.finality_target(a2.hash(), None)).unwrap()); + assert_eq!(a5.hash(), block_on(chain_select.finality_target(a3.hash(), None)).unwrap()); + assert_eq!(a5.hash(), block_on(chain_select.finality_target(a4.hash(), None)).unwrap()); + assert_eq!(a5.hash(), block_on(chain_select.finality_target(a5.hash(), None)).unwrap()); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + block_on(client.import_as_best(BlockOrigin::Own, b3.clone())).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + let (header, extrinsics) = b4.clone().deconstruct(); + let mut import_params = BlockImportParams::new(BlockOrigin::Own, header); + 
import_params.body = Some(extrinsics); + import_params.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + block_on(client.import_block(import_params, Default::default())).unwrap(); + + // double check that B3 is still the best... + assert_eq!(client.info().best_hash, b3.hash()); + + assert_eq!(b4.hash(), block_on(chain_select.finality_target(genesis_hash, None)).unwrap()); + assert_eq!(b4.hash(), block_on(chain_select.finality_target(a1.hash(), None)).unwrap()); + assert!(block_on(chain_select.finality_target(a2.hash(), None)).is_err()); + assert_eq!(b4.hash(), block_on(chain_select.finality_target(b2.hash(), None)).unwrap()); + assert_eq!(b4.hash(), block_on(chain_select.finality_target(b3.hash(), None)).unwrap()); + assert_eq!(b4.hash(), block_on(chain_select.finality_target(b4.hash(), None)).unwrap()); } #[test] @@ -995,7 +1082,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { .block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); - // A2 is the current best since it's the longest chain + // A2 is the current best since it's the (first) longest chain assert_eq!(client.chain_info().best_hash, a2.hash()); // we finalize block B1 which is on a different branch from current best @@ -1012,8 +1099,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // `SelectChain` should report B2 as best block though assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b2.hash()); - // after we build B3 on top of B2 and import it - // it should be the new best block, + // after we build B3 on top of B2 and import it, it should be the new best block let b3 = client .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) .unwrap() @@ -1022,6 +1108,9 @@ fn finalizing_diverged_block_should_trigger_reorg() { .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); + // `SelectChain` should report B3 as best block though + assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b3.hash()); + assert_eq!(client.chain_info().best_hash, b3.hash()); ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index ec9c8ac0d5780..7339d4c1a6804 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -183,96 +183,43 @@ pub trait Backend: /// Return hashes of all blocks that are children of the block with `parent_hash`. fn children(&self, parent_hash: Block::Hash) -> Result>; - /// Get the most recent block hash of the best (longest) chains - /// that contain block with the given `target_hash`. + /// Get the most recent block hash of the longest chain that contains + /// a block with the given `base_hash`. /// /// The search space is always limited to blocks which are in the finalized /// chain or descendents of it. /// - /// If `maybe_max_block_number` is `Some(max_block_number)` - /// the search is limited to block `numbers <= max_block_number`. - /// in other words as if there were no blocks greater `max_block_number`. - /// Returns `Ok(None)` if `target_hash` is not found in search space. - /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) - fn best_containing( + /// Returns `Ok(None)` if `base_hash` is not found in search space. 
+ // TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) + fn longest_containing( &self, - target_hash: Block::Hash, - maybe_max_number: Option>, + base_hash: Block::Hash, import_lock: &RwLock<()>, ) -> Result> { - let target_header = { - match self.header(target_hash)? { - Some(x) => x, - // target not in blockchain - None => return Ok(None), - } + let Some(base_header) = self.header(base_hash)? else { + return Ok(None) }; - if let Some(max_number) = maybe_max_number { - // target outside search range - if target_header.number() > &max_number { - return Ok(None) - } - } - let leaves = { // ensure no blocks are imported during this code block. // an import could trigger a reorg which could change the canonical chain. // we depend on the canonical chain staying the same during this code block. let _import_guard = import_lock.read(); - let info = self.info(); - - // this can be `None` if the best chain is shorter than the target header. - let maybe_canon_hash = self.hash(*target_header.number())?; - - if maybe_canon_hash.as_ref() == Some(&target_hash) { - // if a `max_number` is given we try to fetch the block at the - // given depth, if it doesn't exist or `max_number` is not - // provided, we continue to search from all leaves below. - if let Some(max_number) = maybe_max_number { - if let Some(header) = self.hash(max_number)? { - return Ok(Some(header)) - } - } - } else if info.finalized_number >= *target_header.number() { - // header is on a dead fork. + if info.finalized_number > *base_header.number() { + // `base_header` is on a dead fork. return Ok(None) } - self.leaves()? }; // for each chain. longest chain first. shortest last for leaf_hash in leaves { - // start at the leaf let mut current_hash = leaf_hash; - - // if search is not restricted then the leaf is the best - let mut best_hash = leaf_hash; - - // go backwards entering the search space - // waiting until we are <= max_number - if let Some(max_number) = maybe_max_number { - loop { - let current_header = self - .header(current_hash)? - .ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?; - - if current_header.number() <= &max_number { - best_hash = current_header.hash(); - break - } - - current_hash = *current_header.parent_hash(); - } - } - // go backwards through the chain (via parent links) loop { - // until we find target - if current_hash == target_hash { - return Ok(Some(best_hash)) + if current_hash == base_hash { + return Ok(Some(leaf_hash)) } let current_header = self @@ -280,7 +227,7 @@ pub trait Backend: .ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?; // stop search in this chain once we go below the target's block number - if current_header.number() < target_header.number() { + if current_header.number() < base_header.number() { break } @@ -293,9 +240,8 @@ pub trait Backend: // // FIXME #1558 only issue this warning when not on a dead fork warn!( - "Block {:?} exists in chain but not found when following all \ - leaves backwards. Number limit = {:?}", - target_hash, maybe_max_number, + "Block {:?} exists in chain but not found when following all leaves backwards", + base_hash, ); Ok(None) diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index f366cd34c51ea..5beab6705046d 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -43,14 +43,14 @@ pub trait SelectChain: Sync + Send + Clone { /// finalize. 
async fn best_chain(&self) -> Result<::Header, Error>; - /// Get the best descendent of `target_hash` that we should attempt to - /// finalize next, if any. It is valid to return the given `target_hash` + /// Get the best descendent of `base_hash` that we should attempt to + /// finalize next, if any. It is valid to return the given `base_hash` /// itself if no better descendent exists. async fn finality_target( &self, - target_hash: ::Hash, + base_hash: ::Hash, _maybe_max_number: Option>, ) -> Result<::Hash, Error> { - Ok(target_hash) + Ok(base_hash) } } From 87190e9b88e8bb58be1cacc261a60e5ddb8cafdd Mon Sep 17 00:00:00 2001 From: girazoki Date: Sun, 12 Feb 2023 11:43:03 +0100 Subject: [PATCH 13/24] SetMembers configurable origin (#13159) * SetMembers configurable origin * root origin comment replaced * fmt --- bin/node/runtime/src/lib.rs | 3 +++ frame/alliance/src/mock.rs | 1 + frame/collective/src/lib.rs | 7 +++++-- frame/collective/src/tests.rs | 5 ++++- frame/utility/src/tests.rs | 1 + 5 files changed, 14 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c1bc984b7b85a..d862037ad384e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -993,6 +993,7 @@ impl pallet_collective::Config for Runtime { type MaxMembers = CouncilMaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; + type SetMembersOrigin = EnsureRoot; } parameter_types! { @@ -1057,6 +1058,7 @@ impl pallet_collective::Config for Runtime { type MaxMembers = TechnicalMaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; + type SetMembersOrigin = EnsureRoot; } type EnsureRootOrHalfCouncil = EitherOfDiverse< @@ -1658,6 +1660,7 @@ impl pallet_collective::Config for Runtime { type MaxMembers = AllianceMaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = pallet_collective::weights::SubstrateWeight; + type SetMembersOrigin = EnsureRoot; } parameter_types! { diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index e708d29d529fe..0f774dc4853fa 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -104,6 +104,7 @@ impl pallet_collective::Config for Test { type MaxMembers = MaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = (); + type SetMembersOrigin = EnsureRoot; } parameter_types! { diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 0fe05dbb01ac0..7d625a69a4bf7 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -215,6 +215,9 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// Origin allowed to set collective members + type SetMembersOrigin: EnsureOrigin<::RuntimeOrigin>; } #[pallet::genesis_config] @@ -349,7 +352,7 @@ pub mod pallet { /// - `old_count`: The upper bound for the previous number of members in storage. Used for /// weight estimation. /// - /// Requires root origin. + /// The dispatch of this call must be `SetMembersOrigin`. /// /// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but /// the weight estimations rely on it to estimate dispatchable weight. 
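// A minimal wiring sketch for the new `SetMembersOrigin` associated type (illustrative
// only; `Runtime`, `CouncilCollective` and `AccountId` follow the node-runtime config
// touched elsewhere in this patch). Keeping the previous root-only behaviour looks like:
//
//     impl pallet_collective::Config<CouncilCollective> for Runtime {
//         // ... other associated types unchanged ...
//         type SetMembersOrigin = frame_system::EnsureRoot<AccountId>;
//     }
//
// Any other `EnsureOrigin` implementation, e.g. a collective super-majority, can be
// substituted without further pallet changes.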
@@ -389,7 +392,7 @@ pub mod pallet { prime: Option, old_count: MemberCount, ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; + T::SetMembersOrigin::ensure_origin(origin)?; if new_members.len() > T::MaxMembers::get() as usize { log::error!( target: LOG_TARGET, diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 5c90de9f91bf7..b7cdeb3383375 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::{ traits::{ConstU32, ConstU64, GenesisBuild, StorageVersion}, Hashable, }; -use frame_system::{EventRecord, Phase}; +use frame_system::{EnsureRoot, EventRecord, Phase}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -127,6 +127,7 @@ impl Config for Test { type MaxMembers = MaxMembers; type DefaultVote = PrimeDefaultVote; type WeightInfo = (); + type SetMembersOrigin = EnsureRoot; } impl Config for Test { type RuntimeOrigin = RuntimeOrigin; @@ -137,6 +138,7 @@ impl Config for Test { type MaxMembers = MaxMembers; type DefaultVote = MoreThanMajorityThenPrimeDefaultVote; type WeightInfo = (); + type SetMembersOrigin = EnsureRoot; } impl mock_democracy::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -151,6 +153,7 @@ impl Config for Test { type MaxMembers = MaxMembers; type DefaultVote = PrimeDefaultVote; type WeightInfo = (); + type SetMembersOrigin = EnsureRoot; } pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index f9d6a16c1a0d4..c63ed24c6aafb 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -217,6 +217,7 @@ impl pallet_collective::Config for Test { type MaxMembers = MaxMembers; type DefaultVote = pallet_collective::PrimeDefaultVote; type WeightInfo = (); + type SetMembersOrigin = frame_system::EnsureRoot; } impl example::Config for Test {} From 3b282df86c3776bad8de3e769e1861446d7498e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Sun, 12 Feb 2023 12:35:01 +0000 Subject: [PATCH 14/24] grandpa: don't error if best block and finality target are inconsistent (#13364) * grandpa: don't error if best block and finality target are inconsistent * grandpa: add test for best block override * grandpa: make clippy happy * grandpa: log selectchain override as debug instead of warn --- client/finality-grandpa/src/environment.rs | 35 ++++- client/finality-grandpa/src/tests.rs | 144 +++++++++++++++++++-- 2 files changed, 166 insertions(+), 13 deletions(-) diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 40ec720c4e7fe..a9aa204f12e2d 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1213,7 +1213,7 @@ where // NOTE: this is purposefully done after `finality_target` to prevent a case // where in-between these two requests there is a block import and // `finality_target` returns something higher than `best_chain`. 
- let best_header = match select_chain.best_chain().await { + let mut best_header = match select_chain.best_chain().await { Ok(best_header) => best_header, Err(err) => { warn!( @@ -1227,12 +1227,30 @@ where }, }; - if target_header.number() > best_header.number() { - return Err(Error::Safety( - "SelectChain returned a finality target higher than its best block".into(), - )) + let is_descendent_of = is_descendent_of(&*client, None); + + if target_header.number() > best_header.number() || + target_header.number() == best_header.number() && + target_header.hash() != best_header.hash() || + !is_descendent_of(&target_header.hash(), &best_header.hash())? + { + debug!( + target: LOG_TARGET, + "SelectChain returned a finality target inconsistent with its best block. Restricting best block to target block" + ); + + best_header = target_header.clone(); } + debug!( + target: LOG_TARGET, + "SelectChain: finality target: #{} ({}), best block: #{} ({})", + target_header.number(), + target_header.hash(), + best_header.number(), + best_header.hash(), + ); + // check if our vote is currently being limited due to a pending change, // in which case we will restrict our target header to the given limit if let Some(target_number) = limit.filter(|limit| limit < target_header.number()) { @@ -1254,6 +1272,13 @@ where .header(*target_header.parent_hash())? .expect("Header known to exist after `finality_target` call; qed"); } + + debug!( + target: LOG_TARGET, + "Finality target restricted to #{} ({}) due to pending authority set change", + target_header.number(), + target_header.hash() + ) } // restrict vote according to the given voting rule, if the voting rule diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 62ef4709e1909..d132abd940ba3 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -244,6 +244,35 @@ impl SelectChain for MockSelectChain { } } +// A mock voting rule that allows asserting an expected value for best block +#[derive(Clone, Default)] +struct AssertBestBlock(Arc>>); + +impl VotingRule for AssertBestBlock +where + B: HeaderBackend, +{ + fn restrict_vote( + &self, + _backend: Arc, + _base: &::Header, + best_target: &::Header, + _current_target: &::Header, + ) -> VotingRuleResult { + if let Some(expected) = *self.0.lock() { + assert_eq!(best_target.hash(), expected); + } + + Box::pin(std::future::ready(None)) + } +} + +impl AssertBestBlock { + fn set_expected_best_block(&self, hash: Hash) { + *self.0.lock() = Some(hash); + } +} + const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { @@ -1566,16 +1595,115 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { .1, 10, ); +} - // returning a finality target that's higher than the best block is an - // inconsistent state that should be handled - let hashof42 = client.expect_block_hash_from_id(&BlockId::Number(42)).unwrap(); - select_chain.set_best_chain(client.expect_header(hashof21).unwrap()); - select_chain.set_finality_target(client.expect_header(hashof42).unwrap().hash()); +#[tokio::test] +async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_target() { + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let link = 
peer.data.lock().take().unwrap(); + let client = peer.client().as_client().clone(); + let select_chain = MockSelectChain::default(); + let voting_rule = AssertBestBlock::default(); + let env = test_environment_with_select_chain( + &link, + None, + network_service.clone(), + select_chain.clone(), + voting_rule.clone(), + ); + + // create a chain that is 10 blocks long + peer.push_blocks(10, false); + + let hashof5_a = client.expect_block_hash_from_id(&BlockId::Number(5)).unwrap(); + let hashof10_a = client.expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); + + // create a fork starting at block 4 that is 6 blocks long + let fork = peer.generate_blocks_at( + BlockId::Number(4), + 6, + BlockOrigin::File, + |builder| { + let mut block = builder.build().unwrap().block; + block.header.digest_mut().push(DigestItem::Other(vec![1])); + block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ); + + let hashof5_b = *fork.first().unwrap(); + let hashof10_b = *fork.last().unwrap(); + + // returning a finality target that's higher than the best block is inconsistent, + // therefore the best block should be set to be the same block as the target + select_chain.set_best_chain(client.expect_header(hashof5_a).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof10_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof10_a); + + // the voting rule will internally assert that the best block that was passed was `hashof10_a`, + // instead of the one returned by `SelectChain` + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof10_a, + ); + + // best block and finality target are blocks at the same height but on different forks, + // we should override the initial best block (#5B) with the target block (#5A) + select_chain.set_best_chain(client.expect_header(hashof5_b).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof5_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof5_a, + ); - assert_matches!( - env.best_chain_containing(peer.client().info().finalized_hash).await, - Err(CommandOrError::Error(Error::Safety(_))) + // best block is higher than finality target but it's on a different fork, + // we should override the initial best block (#5A) with the target block (#5B) + select_chain.set_best_chain(client.expect_header(hashof10_b).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof5_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof5_a, + ); + + // best block is higher than finality target and it's on the same fork, + // the best block passed to the voting rule should not be overriden + select_chain.set_best_chain(client.expect_header(hashof10_a).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof10_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof5_a, ); } From 4d83204567f8c7ba3a86802f3736b48a35b8f25e Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sun, 12 Feb 2023 12:01:06 -0300 Subject: [PATCH 15/24] Improve Weight Template and API (#13355) * improve weights template and 
api * follow template --- .maintain/frame-weight-template.hbs | 10 ++----- primitives/weights/src/weight_v2.rs | 26 ++++++++++++++----- .../benchmarking-cli/src/pallet/template.hbs | 5 +--- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index ff69fcb4a2d3d..186e3cb23b333 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -53,11 +53,8 @@ impl WeightInfo for SubstrateWeight { // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. - {{#if (ne benchmark.base_calculated_proof_size "0")}} - Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) - {{else}} Weight::from_ref_time({{underscore benchmark.base_weight}}) - {{/if}} + .saturating_add(Weight::from_proof_size({{benchmark.base_calculated_proof_size}})) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(Weight::from_ref_time({{underscore cw.slope}}).saturating_mul({{cw.name}}.into())) @@ -99,11 +96,8 @@ impl WeightInfo for () { // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. - {{#if (ne benchmark.base_calculated_proof_size "0")}} - Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) - {{else}} Weight::from_ref_time({{underscore benchmark.base_weight}}) - {{/if}} + .saturating_add(Weight::from_proof_size({{benchmark.base_calculated_proof_size}})) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(Weight::from_ref_time({{underscore cw.slope}}).saturating_mul({{cw.name}}.into())) diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index 33e61b6954a37..3a15b117955f2 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -247,18 +247,32 @@ impl Weight { Self { ref_time: 0, proof_size: 0 } } - /// Constant version of Add with u64. + /// Constant version of Add for `ref_time` component with u64. /// /// Is only overflow safe when evaluated at compile-time. - pub const fn add(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size + scalar } + pub const fn add_ref_time(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size } } - /// Constant version of Sub with u64. + /// Constant version of Add for `proof_size` component with u64. /// /// Is only overflow safe when evaluated at compile-time. 
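// Usage sketch for the per-component constant helpers introduced in this hunk (values
// are arbitrary; `from_parts` is the existing `(ref_time, proof_size)` constructor):
//
//     const BASE: Weight = Weight::from_parts(10_000, 100);
//     const TOTAL: Weight = BASE.add_ref_time(5_000).add_proof_size(32);
//     // TOTAL == Weight::from_parts(15_000, 132)
//     const TRIMMED: Weight = TOTAL.sub_proof_size(32); // ref_time stays 15_000
//
// Unlike the removed scalar `add`/`sub`, each call now touches exactly one component.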
- pub const fn sub(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size - scalar } + pub const fn add_proof_size(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time, proof_size: self.proof_size + scalar } + } + + /// Constant version of Sub for `ref_time` component with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn sub_ref_time(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size } + } + + /// Constant version of Sub for `proof_size` component with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn sub_proof_size(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time, proof_size: self.proof_size - scalar } } /// Constant version of Div with u64. diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 00dd1a3532d95..0bec34eaae774 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -38,11 +38,8 @@ impl {{pallet}}::WeightInfo for WeightInfo { // Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}` // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. - {{#if (ne benchmark.base_calculated_proof_size "0")}} - Weight::from_parts({{underscore benchmark.base_weight}}, {{benchmark.base_calculated_proof_size}}) - {{else}} Weight::from_ref_time({{underscore benchmark.base_weight}}) - {{/if}} + .saturating_add(Weight::from_proof_size({{benchmark.base_calculated_proof_size}})) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(Weight::from_ref_time({{underscore cw.slope}}).saturating_mul({{cw.name}}.into())) From 53e059af844394df1bf40c8e7a6ddc704386cda3 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 13 Feb 2023 10:54:47 +0100 Subject: [PATCH 16/24] [ci] Change label checker (#13360) * [ci] Change label checker * rm pr autolabel * fix specs file name to substrate --- .github/workflows/auto-label-prs.yml | 21 ------------ .github/workflows/check-D-labels.yml | 48 ++++++++++++++++++++++++++++ .github/workflows/check-labels.yml | 38 ++++++++++++++++++---- 3 files changed, 79 insertions(+), 28 deletions(-) delete mode 100644 .github/workflows/auto-label-prs.yml create mode 100644 .github/workflows/check-D-labels.yml diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml deleted file mode 100644 index 50539b80b98b7..0000000000000 --- a/.github/workflows/auto-label-prs.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Label PRs -on: - pull_request: - types: [opened,ready_for_review] - -jobs: - label-new-prs: - runs-on: ubuntu-latest - steps: - - name: Label drafts - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 - if: github.event.pull_request.draft == true - with: - add-labels: 'A3-inprogress' - remove-labels: 'A0-pleasereview' - - name: Label PRs - uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 - if: github.event.pull_request.draft == false && ! 
contains(github.event.pull_request.labels.*.name, 'A2-insubstantial') - with: - add-labels: 'A0-pleasereview' - remove-labels: 'A3-inprogress' diff --git a/.github/workflows/check-D-labels.yml b/.github/workflows/check-D-labels.yml new file mode 100644 index 0000000000000..7bb358ce1182e --- /dev/null +++ b/.github/workflows/check-D-labels.yml @@ -0,0 +1,48 @@ +name: Check D labels + +on: + pull_request: + types: [labeled, opened, synchronize, unlabeled] + paths: + - frame/** + - primitives/** + +env: + IMAGE: paritytech/ruled_labels:0.4.0 + +jobs: + check-labels: + runs-on: ubuntu-latest + steps: + - name: Pull image + run: docker pull $IMAGE + + - name: Check labels + env: + MOUNT: /work + GITHUB_PR: ${{ github.event.pull_request.number }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + API_BASE: https://api.github.com/repos + REPO: ${{ github.repository }} + RULES_PATH: labels/ruled_labels + CHECK_SPECS: specs_substrate.yaml + run: | + echo "REPO: ${REPO}" + echo "GITHUB_PR: ${GITHUB_PR}" + # Clone repo with labels specs + git clone https://github.com/paritytech/labels + # Fetch the labels for the PR under test + labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") + + if [ -z "${labels}" ]; then + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label + fi + + labels_args=${labels: :-1} + printf "Checking labels: %s\n" "${labels_args}" + + # Prevent the shell from splitting labels with spaces + IFS="," + + # --dev is more useful to debug mode to debug + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index de204ce9d3776..55b8f7389fa7f 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -4,18 +4,42 @@ on: pull_request: types: [labeled, opened, synchronize, unlabeled] +env: + IMAGE: paritytech/ruled_labels:0.4.0 + jobs: check-labels: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.ref }} - repository: ${{ github.event.pull_request.head.repo.full_name }} + - name: Pull image + run: docker pull $IMAGE + - name: Check labels - run: bash ${{ github.workspace }}/scripts/ci/github/check_labels.sh env: + MOUNT: /work GITHUB_PR: ${{ github.event.pull_request.number }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - HEAD_SHA: ${{ github.event.pull_request.head.sha }} + API_BASE: https://api.github.com/repos + REPO: ${{ github.repository }} + RULES_PATH: labels/ruled_labels + CHECK_SPECS: specs_substrate.yaml + run: | + echo "REPO: ${REPO}" + echo "GITHUB_PR: ${GITHUB_PR}" + # Clone repo with labels specs + git clone https://github.com/paritytech/labels + # Fetch the labels for the PR under test + labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",") + + if [ -z "${labels}" ]; then + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags PR --no-label + fi + + labels_args=${labels: :-1} + printf "Checking labels: %s\n" "${labels_args}" + + # Prevent the shell from splitting labels with spaces + IFS="," + + # --dev is more useful to debug mode to debug + docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev 
--tags PR From c92e2b6fa3d7c7f6dc139ad7362ca38964206ad6 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 13 Feb 2023 12:21:20 +0200 Subject: [PATCH 17/24] client/beefy: request justifs from peers further in consensus (#13343) For on-demand justifications, peer selection is based on witnessed gossip votes. This commit changes the condition for selecting a peer to request justification for `block` from "last voted on >= `block`" to "peer last voted on strict > `block`". When allowing `>=` we see nodes continuously spamming unsuccessful on-demand requests to nodes which are still voting on a block without having a justification available. One way to fix the spam would be to add some rate-limiting or backoff period when requesting justifications. The other solution (present in this commit) is to simply request justifications from peers that are voting on future blocks so we know they're _guaranteed_ to have the wanted mandatory justification available to send back. Signed-off-by: acatangiu --- client/beefy/src/communication/peers.rs | 32 +++++++++---------- .../outgoing_requests_engine.rs | 3 +- client/beefy/src/tests.rs | 12 +++---- client/beefy/src/worker.rs | 2 +- 4 files changed, 24 insertions(+), 25 deletions(-) diff --git a/client/beefy/src/communication/peers.rs b/client/beefy/src/communication/peers.rs index d7927ea72e661..50158126a0a07 100644 --- a/client/beefy/src/communication/peers.rs +++ b/client/beefy/src/communication/peers.rs @@ -57,11 +57,11 @@ impl KnownPeers { self.live.remove(peer); } - /// Return _filtered and cloned_ list of peers that have voted on `block` or higher. - pub fn at_least_at_block(&self, block: NumberFor) -> VecDeque { + /// Return _filtered and cloned_ list of peers that have voted on higher than `block`. + pub fn further_than(&self, block: NumberFor) -> VecDeque { self.live .iter() - .filter_map(|(k, v)| (v.last_voted_on >= block).then_some(k)) + .filter_map(|(k, v)| (v.last_voted_on > block).then_some(k)) .cloned() .collect() } @@ -92,22 +92,22 @@ mod tests { assert!(peers.contains(&bob)); assert!(peers.contains(&charlie)); - // Get peers at block >= 5 - let at_5 = peers.at_least_at_block(5); + // Get peers at block > 4 + let further_than_4 = peers.further_than(4); // Should be Bob and Charlie - assert_eq!(at_5.len(), 2); - assert!(at_5.contains(&bob)); - assert!(at_5.contains(&charlie)); + assert_eq!(further_than_4.len(), 2); + assert!(further_than_4.contains(&bob)); + assert!(further_than_4.contains(&charlie)); // 'Tracked' Alice seen voting for 10. 
peers.note_vote_for(alice, 10); - // Get peers at block >= 9 - let at_9 = peers.at_least_at_block(9); + // Get peers at block > 9 + let further_than_9 = peers.further_than(9); // Should be Charlie and Alice - assert_eq!(at_9.len(), 2); - assert!(at_9.contains(&charlie)); - assert!(at_9.contains(&alice)); + assert_eq!(further_than_9.len(), 2); + assert!(further_than_9.contains(&charlie)); + assert!(further_than_9.contains(&alice)); // Remove Alice peers.remove(&alice); @@ -115,9 +115,9 @@ mod tests { assert!(!peers.contains(&alice)); // Get peers at block >= 9 - let at_9 = peers.at_least_at_block(9); + let further_than_9 = peers.further_than(9); // Now should be just Charlie - assert_eq!(at_9.len(), 1); - assert!(at_9.contains(&charlie)); + assert_eq!(further_than_9.len(), 1); + assert!(further_than_9.contains(&charlie)); } } diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index c7dc269b495cc..766480f781ff5 100644 --- a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -80,7 +80,7 @@ impl OnDemandJustificationsEngine { fn reset_peers_cache_for_block(&mut self, block: NumberFor) { // TODO (issue #12296): replace peer selection with generic one that involves all protocols. - self.peers_cache = self.live_peers.lock().at_least_at_block(block); + self.peers_cache = self.live_peers.lock().further_than(block); } fn try_next_peer(&mut self) -> Option { @@ -199,7 +199,6 @@ impl OnDemandJustificationsEngine { let (peer, req_info, resp) = match &mut self.state { State::Idle => { futures::pending!(); - // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. return None }, State::AwaitingResponse(peer, req_info, receiver) => { diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 69cc9b2f652c4..932897c775895 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -893,7 +893,7 @@ async fn on_demand_beefy_justification_sync() { [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), 0).unwrap(); let session_len = 5; - let min_block_delta = 5; + let min_block_delta = 4; let mut net = BeefyTestNet::new(4); @@ -912,7 +912,7 @@ async fn on_demand_beefy_justification_sync() { let dave_index = 3; // push 30 blocks - let hashes = net.generate_blocks_and_sync(30, session_len, &validator_set, false).await; + let hashes = net.generate_blocks_and_sync(35, session_len, &validator_set, false).await; let fast_peers = fast_peers.into_iter().enumerate(); let net = Arc::new(Mutex::new(net)); @@ -921,7 +921,7 @@ async fn on_demand_beefy_justification_sync() { finalize_block_and_wait_for_beefy( &net, fast_peers.clone(), - &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[24]], + &[hashes[1], hashes[6], hashes[10], hashes[17], hashes[23]], &[1, 5, 10, 15, 20], ) .await; @@ -941,12 +941,12 @@ async fn on_demand_beefy_justification_sync() { // freshly spun up Dave now needs to listen for gossip to figure out the state of their peers. // Have the other peers do some gossip so Dave finds out about their progress. 
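// With the stricter selection rule above, the on-demand engine only asks peers recorded
// as voting on a block strictly greater than the requested one (roughly
// `live_peers.lock().further_than(block)`), so Dave has to observe votes for later
// blocks before any peer qualifies to serve him a mandatory justification.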
- finalize_block_and_wait_for_beefy(&net, fast_peers, &[hashes[25]], &[25]).await; + finalize_block_and_wait_for_beefy(&net, fast_peers, &[hashes[25], hashes[29]], &[25, 29]).await; // Now verify Dave successfully finalized #1 (through on-demand justification request). wait_for_best_beefy_blocks(dave_best_blocks, &net, &[1]).await; - // Give Dave all tasks some cpu cycles to burn through their events queues, + // Give all tasks some cpu cycles to burn through their events queues, run_for(Duration::from_millis(100), &net).await; // then verify Dave catches up through on-demand justification requests. finalize_block_and_wait_for_beefy( @@ -959,7 +959,7 @@ async fn on_demand_beefy_justification_sync() { let all_peers = all_peers.into_iter().enumerate(); // Now that Dave has caught up, sanity check voting works for all of them. - finalize_block_and_wait_for_beefy(&net, all_peers, &[hashes[30]], &[30]).await; + finalize_block_and_wait_for_beefy(&net, all_peers, &[hashes[30], hashes[34]], &[30]).await; } #[tokio::test] diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index be5443d3dead8..f367c8b46d037 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -555,7 +555,6 @@ where let block_number = vote.commitment.block_number; match rounds.add_vote(vote) { VoteImportResult::RoundConcluded(signed_commitment) => { - self.gossip_validator.conclude_round(block_number); metric_set!(self, beefy_round_concluded, block_number); let finality_proof = VersionedFinalityProof::V1(signed_commitment); @@ -602,6 +601,7 @@ where // Finalize inner round and update voting_oracle state. self.persisted_state.voting_oracle.finalize(block_num)?; + self.gossip_validator.conclude_round(block_num); if block_num > self.best_beefy_block() { // Set new best BEEFY block number. From 483bbfb4dec4918ef12d2d2dd0c7eec9549be61d Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 13 Feb 2023 12:27:52 +0100 Subject: [PATCH 18/24] [ci] Change GHA to add J2 labels instead Z0 (#13375) --- .github/workflows/auto-label-issues.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml index 2633bf55f0789..629966ed39618 100644 --- a/.github/workflows/auto-label-issues.yml +++ b/.github/workflows/auto-label-issues.yml @@ -14,4 +14,4 @@ jobs: uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.issue.author_association == 'NONE' with: - add-labels: 'Z0-unconfirmed' + add-labels: "J2-unconfirmed" From a056cd4ef9eda0298bb45f49d8173c9bddc6de9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Feb 2023 12:39:15 +0100 Subject: [PATCH 19/24] sc-client-db: Fix `PruningMode::ArchiveCanonical` (#13361) * sc-client-db: Fix `PruningMode::ArchiveCanonical` When running a node with `--state-pruning archive-canonical` it was directly failing on genesis. There was an issue in the state-db `pin` implementation. It was not checking the state of a block correctly when running with archive canonical (and also not for every other block after they are canonicalized). 
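The shape of the fix, as a minimal self-contained sketch (the `Pruning`, `StateDb` and `hint` below are toy stand-ins, not the real `state-db` types): when there is no pruning window to consult, which is exactly what `ArchiveCanonical` looks like once a block is canonicalized, the pin check must fall back to the caller-supplied hint instead of assuming the block is gone.

    #[allow(dead_code)]
    enum HaveBlock { No, Yes, Maybe }

    struct Pruning;
    impl Pruning {
        fn have_block(&self) -> HaveBlock { HaveBlock::Maybe }
    }

    // `pruning: None` models a mode without a pruning window, e.g. archive-canonical.
    struct StateDb { pruning: Option<Pruning> }

    impl StateDb {
        // Old behaviour: no pruning window meant "block not found", so pinning failed.
        fn have_block_old(&self, hint: impl Fn() -> bool) -> bool {
            self.pruning.as_ref().map_or(false, |p| match p.have_block() {
                HaveBlock::No => false,
                HaveBlock::Yes => true,
                HaveBlock::Maybe => hint(),
            })
        }

        // Fixed behaviour: with no pruning window, defer to the caller-supplied hint.
        fn have_block_new(&self, hint: impl Fn() -> bool) -> bool {
            self.pruning.as_ref().map_or_else(
                || hint(),
                |p| match p.have_block() {
                    HaveBlock::No => false,
                    HaveBlock::Yes => true,
                    HaveBlock::Maybe => hint(),
                },
            )
        }
    }

    fn main() {
        let db = StateDb { pruning: None };
        let hint = || true; // e.g. "this block's state root exists in the database"
        assert!(!db.have_block_old(hint)); // pinning used to bail out here
        assert!(db.have_block_new(hint)); // and now succeeds
    }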
* FMT --- client/db/src/lib.rs | 367 +++++++++++++++---------------------- client/state-db/src/lib.rs | 15 +- 2 files changed, 160 insertions(+), 222 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index f54f1bdb131c7..03de383f2c7f4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1929,13 +1929,13 @@ impl Backend { Ok(()) } - fn empty_state(&self) -> ClientResult, Block>> { + fn empty_state(&self) -> RecordStatsState, Block> { let root = EmptyStorage::::new().0; // Empty trie let db_state = DbStateBuilder::::new(self.storage.clone(), root) .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - Ok(RecordStatsState::new(state, None, self.state_usage.clone())) + RecordStatsState::new(state, None, self.state_usage.clone()) } } @@ -2063,7 +2063,7 @@ impl sc_client_api::backend::Backend for Backend { fn begin_operation(&self) -> ClientResult { Ok(BlockImportOperation { pending_block: None, - old_state: self.empty_state()?, + old_state: self.empty_state(), db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), @@ -2082,7 +2082,7 @@ impl sc_client_api::backend::Backend for Backend { block: Block::Hash, ) -> ClientResult<()> { if block == Default::default() { - operation.old_state = self.empty_state()?; + operation.old_state = self.empty_state(); } else { operation.old_state = self.state_at(block)?; } @@ -2439,6 +2439,7 @@ impl sc_client_api::backend::Backend for Backend { .unwrap_or(None) .is_some() }; + if let Ok(()) = self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) { @@ -2593,25 +2594,30 @@ pub(crate) mod tests { use sp_runtime::testing::Digest; let digest = Digest::default(); - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); + let mut header = + Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root }; let block_hash = if number == 0 { Default::default() } else { parent_hash }; let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_hash).unwrap(); - op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); } + + // Insert some fake data to ensure that the block can be found in the state column. 
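+		// Keying the entry by `block_hash` gives each test block its own non-empty state root;
+		// previously every test header used the empty trie root.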
+ let (root, overlay) = op.old_state.storage_root( + vec![(block_hash.as_ref(), Some(block_hash.as_ref()))].into_iter(), + StateVersion::V1, + ); + op.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + + op.set_block_data(header.clone(), Some(body), None, None, NewBlockState::Best) + .unwrap(); + backend.commit_operation(op)?; - Ok(header_hash) + Ok(header.hash()) } pub fn insert_header_no_head( @@ -2623,18 +2629,31 @@ pub(crate) mod tests { use sp_runtime::testing::Digest; let digest = Digest::default(); - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); + let mut header = + Header { number, parent_hash, state_root: Default::default(), digest, extrinsics_root }; let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Normal).unwrap(); + + let root = backend + .state_at(parent_hash) + .unwrap_or_else(|_| { + if parent_hash == Default::default() { + backend.empty_state() + } else { + panic!("Unknown block: {parent_hash:?}") + } + }) + .storage_root( + vec![(parent_hash.as_ref(), Some(parent_hash.as_ref()))].into_iter(), + StateVersion::V1, + ) + .0; + header.state_root = root.into(); + + op.set_block_data(header.clone(), None, None, None, NewBlockState::Normal) + .unwrap(); backend.commit_operation(op).unwrap(); - header_hash + + header.hash() } #[test] @@ -3369,204 +3388,131 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } + let pruning_modes = + vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll]; - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - for i in 1..5 { - op.mark_finalized(blocks[i], None).unwrap(); + for pruning_mode in pruning_modes { + let backend = Backend::::new_test_with_tx_storage(pruning_mode, 0); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; } - backend.commit_operation(op).unwrap(); - } - let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - } - #[test] - fn prune_blocks_on_finalize_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + for i in 1..5 { + op.mark_finalized(blocks[i], None).unwrap(); + } + 
backend.commit_operation(op).unwrap(); + } + let bc = backend.blockchain(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - for i in 1..3 { - op.mark_finalized(blocks[i], None).unwrap(); + if matches!(pruning_mode, BlocksPruning::Some(_)) { + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + } else { + for i in 0..5 { + assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + } + } } - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] - fn prune_blocks_on_finalize_with_fork_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( + fn prune_blocks_on_finalize_with_fork() { + sp_tracing::try_init_simple(); + + let pruning_modes = + vec![BlocksPruning::Some(2), BlocksPruning::KeepFinalized, BlocksPruning::KeepAll]; + + for pruning in pruning_modes { + let backend = Backend::::new_test_with_tx_storage(pruning, 10); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + // insert a fork at block 2 + let fork_hash_root = + insert_block(&backend, 2, blocks[1], None, H256::random(), vec![2.into()], None) + .unwrap(); + insert_block( &backend, - i, - prev_hash, + 3, + fork_hash_root, None, - Default::default(), - vec![i.into()], + H256::random(), + vec![3.into(), 11.into()], None, ) .unwrap(); - blocks.push(hash); - prev_hash = hash; - } - - // insert a fork at block 2 - let fork_hash_root = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![2.into()], - None, - ) - .unwrap(); - insert_block( - &backend, - 3, - fork_hash_root, - None, - H256::random(), - vec![3.into(), 11.into()], - None, - ) - .unwrap(); - - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); - backend.commit_operation(op).unwrap(); - - let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - - for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[i]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); - } - assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + let bc = 
backend.blockchain(); + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); - assert_eq!(bc.info().best_number, 4); - for i in 0..5 { - assert!(bc.hash(i).unwrap().is_some()); - } - } + for i in 1..5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); + backend.commit_operation(op).unwrap(); + } - #[test] - fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); - let mut blocks = Vec::new(); - let mut prev_hash = Default::default(); - for i in 0..5 { - let hash = insert_block( - &backend, - i, - prev_hash, - None, - Default::default(), - vec![i.into()], - None, - ) - .unwrap(); - blocks.push(hash); - prev_hash = hash; - } + if matches!(pruning, BlocksPruning::Some(_)) { + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); - // insert a fork at block 2 - let fork_hash_root = insert_block( - &backend, - 2, - blocks[1], - None, - sp_core::H256::random(), - vec![2.into()], - None, - ) - .unwrap(); - insert_block( - &backend, - 3, - fork_hash_root, - None, - H256::random(), - vec![3.into(), 11.into()], - None, - ) - .unwrap(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_head(blocks[4]).unwrap(); - backend.commit_operation(op).unwrap(); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + } else { + for i in 0..5 { + assert_eq!(Some(vec![(i as u64).into()]), bc.body(blocks[i]).unwrap()); + } + } - for i in 1..5 { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, blocks[4]).unwrap(); - op.mark_finalized(blocks[i], None).unwrap(); - backend.commit_operation(op).unwrap(); - } + if matches!(pruning, BlocksPruning::KeepAll) { + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); + } else { + assert_eq!(None, bc.body(fork_hash_root).unwrap()); + } - let bc = backend.blockchain(); - assert_eq!(None, bc.body(blocks[0]).unwrap()); - assert_eq!(None, bc.body(blocks[1]).unwrap()); - assert_eq!(None, bc.body(blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(bc.info().best_number, 4); + for i in 0..5 { + assert!(bc.hash(i).unwrap().is_some()); + } + } } #[test] @@ -3841,13 +3787,7 @@ pub(crate) mod tests { assert!(matches!(backend.commit_operation(op), Err(sp_blockchain::Error::SetHeadTooOld))); // Insert 2 as best again. - let header = Header { - number: 2, - parent_hash: block1, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; + let header = backend.blockchain().header(block2).unwrap().unwrap(); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); backend.commit_operation(op).unwrap(); @@ -3864,13 +3804,7 @@ pub(crate) mod tests { assert_eq!(backend.blockchain().info().finalized_hash, block0); // Insert 1 as final again. 
- let header = Header { - number: 1, - parent_hash: block0, - state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), - digest: Default::default(), - extrinsics_root: Default::default(), - }; + let header = backend.blockchain().header(block1).unwrap().unwrap(); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header, None, None, None, NewBlockState::Final).unwrap(); @@ -4021,7 +3955,8 @@ pub(crate) mod tests { #[test] fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() { - let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll]; + let pruning_modes = + [BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized]; for pruning_mode in pruning_modes { eprintln!("Running with pruning mode: {:?}", pruning_mode); diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1befd6dff3bc8..6186a45f3d646 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -152,6 +152,7 @@ impl From for Error { } /// Pinning error type. +#[derive(Debug)] pub enum PinError { /// Trying to pin invalid block. InvalidBlock, @@ -389,7 +390,8 @@ impl StateDbSync { } } else { match self.pruning.as_ref() { - None => IsPruned::NotPruned, + // We don't know for sure. + None => IsPruned::MaybePruned, Some(pruning) => match pruning.have_block(hash, number) { HaveBlock::No => IsPruned::Pruned, HaveBlock::Yes => IsPruned::NotPruned, @@ -457,13 +459,14 @@ impl StateDbSync { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { let have_block = self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| { - match pruning.have_block(hash, number) { + self.pruning.as_ref().map_or_else( + || hint(), + |pruning| match pruning.have_block(hash, number) { HaveBlock::No => false, HaveBlock::Yes => true, HaveBlock::Maybe => hint(), - } - }); + }, + ); if have_block { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { @@ -642,7 +645,7 @@ impl StateDb { /// Check if block is pruned away. pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { - return self.db.read().is_pruned(hash, number) + self.db.read().is_pruned(hash, number) } /// Reset in-memory changes to the last disk-backed state. From aceb7615c42f783d23f0e53bd264d2d6d23f60a3 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 13 Feb 2023 17:09:34 +0100 Subject: [PATCH 20/24] subkey: only decode hex if requested, CLI `0x` prefixed hex for all `stdout` (#13258) * Only decode hex if requested Signed-off-by: Oliver Tale-Yazdi * Cleanup code Signed-off-by: Oliver Tale-Yazdi * Add some tests Signed-off-by: Oliver Tale-Yazdi * Add license Signed-off-by: Oliver Tale-Yazdi * Docs Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * bump version, breaking (tiny) change in output. 
* Move integration tests to own folder Signed-off-by: Oliver Tale-Yazdi --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Dan Shields --- Cargo.lock | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/cli/src/commands/mod.rs | 1 + client/cli/src/commands/sign.rs | 75 +++++++--- client/cli/src/commands/test/mod.rs | 21 +++ client/cli/src/commands/test/sig_verify.rs | 152 +++++++++++++++++++++ client/cli/src/commands/utils.rs | 19 +-- client/cli/src/commands/verify.rs | 69 ++++++++-- client/cli/src/params/message_params.rs | 121 ++++++++++++++++ client/cli/src/params/mod.rs | 3 +- 10 files changed, 412 insertions(+), 53 deletions(-) create mode 100644 client/cli/src/commands/test/mod.rs create mode 100644 client/cli/src/commands/test/sig_verify.rs create mode 100644 client/cli/src/params/message_params.rs diff --git a/Cargo.lock b/Cargo.lock index d659b33d1de6c..8ceb685b25604 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10561,7 +10561,7 @@ dependencies = [ [[package]] name = "subkey" -version = "2.0.2" +version = "3.0.0" dependencies = [ "clap 4.1.4", "sc-cli", diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 77c323821a508..5d2d0f7779ced 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "subkey" -version = "2.0.2" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 8e84afa34e24a..eeb9f3be3eb14 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -31,6 +31,7 @@ mod purge_chain_cmd; mod revert_cmd; mod run_cmd; mod sign; +mod test; pub mod utils; mod vanity; mod verify; diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index 2c3ff3a1575fd..b3da31e56658b 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -17,9 +17,13 @@ // along with this program. If not, see . //! Implementation of the `sign` subcommand -use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams}; +use crate::{ + error, params::MessageParams, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams, +}; +use array_bytes::bytes2hex; use clap::Parser; use sp_core::crypto::SecretString; +use std::io::{BufRead, Write}; /// The `sign` command #[derive(Debug, Clone, Parser)] @@ -31,14 +35,9 @@ pub struct SignCmd { #[arg(long)] suri: Option, - /// Message to sign, if not provided you will be prompted to - /// pass the message via STDIN - #[arg(long)] - message: Option, - - /// The message on STDIN is hex-encoded data - #[arg(long)] - hex: bool, + #[allow(missing_docs)] + #[clap(flatten)] + pub message_params: MessageParams, #[allow(missing_docs)] #[clap(flatten)] @@ -52,15 +51,26 @@ pub struct SignCmd { impl SignCmd { /// Run the command pub fn run(&self) -> error::Result<()> { - let message = utils::read_message(self.message.as_ref(), self.hex)?; + let sig = self.sign(|| std::io::stdin().lock())?; + std::io::stdout().lock().write_all(sig.as_bytes())?; + Ok(()) + } + + /// Sign a message. + /// + /// The message can either be provided as immediate argument via CLI or otherwise read from the + /// reader created by `create_reader`. The reader will only be created in case that the message + /// is not passed as immediate. 
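+	/// The signature is returned as a `0x`-prefixed hex string.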
+ pub(crate) fn sign(&self, create_reader: F) -> error::Result + where + R: BufRead, + F: FnOnce() -> R, + { + let message = self.message_params.message_from(create_reader)?; let suri = utils::read_uri(self.suri.as_ref())?; let password = self.keystore_params.read_password()?; - let signature = - with_crypto_scheme!(self.crypto_scheme.scheme, sign(&suri, password, message))?; - - println!("{}", signature); - Ok(()) + with_crypto_scheme!(self.crypto_scheme.scheme, sign(&suri, password, message)) } } @@ -70,26 +80,47 @@ fn sign( message: Vec, ) -> error::Result { let pair = utils::pair_from_suri::
<P>
(suri, password)?; - Ok(array_bytes::bytes2hex("", pair.sign(&message).as_ref())) + Ok(bytes2hex("0x", pair.sign(&message).as_ref())) } #[cfg(test)] mod test { use super::*; + const SEED: &str = "0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a"; + #[test] - fn sign() { - let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; + fn sign_arg() { + let cmd = SignCmd::parse_from(&[ + "sign", + "--suri", + &SEED, + "--message", + &SEED, + "--password", + "12345", + "--hex", + ]); + let sig = cmd.sign(|| std::io::stdin().lock()).expect("Must sign"); + + assert!(sig.starts_with("0x"), "Signature must start with 0x"); + assert!(array_bytes::hex2bytes(&sig).is_ok(), "Signature is valid hex"); + } - let sign = SignCmd::parse_from(&[ + #[test] + fn sign_stdin() { + let cmd = SignCmd::parse_from(&[ "sign", "--suri", - seed, + SEED, "--message", - &seed[2..], + &SEED, "--password", "12345", ]); - assert!(sign.run().is_ok()); + let sig = cmd.sign(|| SEED.as_bytes()).expect("Must sign"); + + assert!(sig.starts_with("0x"), "Signature must start with 0x"); + assert!(array_bytes::hex2bytes(&sig).is_ok(), "Signature is valid hex"); } } diff --git a/client/cli/src/commands/test/mod.rs b/client/cli/src/commands/test/mod.rs new file mode 100644 index 0000000000000..762918f70ff0c --- /dev/null +++ b/client/cli/src/commands/test/mod.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Integration tests for subkey commands. + +mod sig_verify; diff --git a/client/cli/src/commands/test/sig_verify.rs b/client/cli/src/commands/test/sig_verify.rs new file mode 100644 index 0000000000000..8d7247480be87 --- /dev/null +++ b/client/cli/src/commands/test/sig_verify.rs @@ -0,0 +1,152 @@ +// This file is part of Substrate. + +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![cfg(test)] + +//! Integration test that the `sign` and `verify` sub-commands work together. 
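+//!
+//! The helpers below drive the real `SignCmd` and `VerifyCmd` while varying whether the message
+//! is passed as a CLI argument or via `stdin`, and whether it is treated as raw bytes or as hex.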
+ +use crate::*; +use clap::Parser; + +const SEED: &str = "0xe5be9a5092b81bca64be81d212e7f2f9eba183bb7a90954f7b76361f6edb5c0a"; +const ALICE: &str = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"; +const BOB: &str = "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty"; + +/// Sign a valid UFT-8 message which can be `hex` and passed either via `stdin` or as an argument. +fn sign(msg: &str, hex: bool, stdin: bool) -> String { + sign_raw(msg.as_bytes(), hex, stdin) +} + +/// Sign a raw message which can be `hex` and passed either via `stdin` or as an argument. +fn sign_raw(msg: &[u8], hex: bool, stdin: bool) -> String { + let mut args = vec!["sign", "--suri", SEED]; + if !stdin { + args.push("--message"); + args.push(std::str::from_utf8(msg).expect("Can only pass valid UTF-8 as arg")); + } + if hex { + args.push("--hex"); + } + let cmd = SignCmd::parse_from(&args); + cmd.sign(|| msg).expect("Static data is good; Must sign; qed") +} + +/// Verify a valid UFT-8 message which can be `hex` and passed either via `stdin` or as an argument. +fn verify(msg: &str, hex: bool, stdin: bool, who: &str, sig: &str) -> bool { + verify_raw(msg.as_bytes(), hex, stdin, who, sig) +} + +/// Verify a raw message which can be `hex` and passed either via `stdin` or as an argument. +fn verify_raw(msg: &[u8], hex: bool, stdin: bool, who: &str, sig: &str) -> bool { + let mut args = vec!["verify", sig, who]; + if !stdin { + args.push("--message"); + args.push(std::str::from_utf8(msg).expect("Can only pass valid UTF-8 as arg")); + } + if hex { + args.push("--hex"); + } + let cmd = VerifyCmd::parse_from(&args); + cmd.verify(|| msg).is_ok() +} + +/// Test that sig/verify works with UTF-8 bytes passed as arg. +#[test] +fn sig_verify_arg_utf8_work() { + let sig = sign("Something", false, false); + + assert!(verify("Something", false, false, ALICE, &sig)); + assert!(!verify("Something", false, false, BOB, &sig)); + + assert!(!verify("Wrong", false, false, ALICE, &sig)); + assert!(!verify("Not hex", true, false, ALICE, &sig)); + assert!(!verify("0x1234", true, false, ALICE, &sig)); + assert!(!verify("Wrong", false, false, BOB, &sig)); + assert!(!verify("Not hex", true, false, BOB, &sig)); + assert!(!verify("0x1234", true, false, BOB, &sig)); +} + +/// Test that sig/verify works with UTF-8 bytes passed via stdin. +#[test] +fn sig_verify_stdin_utf8_work() { + let sig = sign("Something", false, true); + + assert!(verify("Something", false, true, ALICE, &sig)); + assert!(!verify("Something", false, true, BOB, &sig)); + + assert!(!verify("Wrong", false, true, ALICE, &sig)); + assert!(!verify("Not hex", true, true, ALICE, &sig)); + assert!(!verify("0x1234", true, true, ALICE, &sig)); + assert!(!verify("Wrong", false, true, BOB, &sig)); + assert!(!verify("Not hex", true, true, BOB, &sig)); + assert!(!verify("0x1234", true, true, BOB, &sig)); +} + +/// Test that sig/verify works with hex bytes passed as arg. +#[test] +fn sig_verify_arg_hex_work() { + let sig = sign("0xaabbcc", true, false); + + assert!(verify("0xaabbcc", true, false, ALICE, &sig)); + assert!(verify("aabBcc", true, false, ALICE, &sig)); + assert!(verify("0xaAbbCC", true, false, ALICE, &sig)); + assert!(!verify("0xaabbcc", true, false, BOB, &sig)); + + assert!(!verify("0xaabbcc", false, false, ALICE, &sig)); +} + +/// Test that sig/verify works with hex bytes passed via stdin. 
+#[test] +fn sig_verify_stdin_hex_work() { + let sig = sign("0xaabbcc", true, true); + + assert!(verify("0xaabbcc", true, true, ALICE, &sig)); + assert!(verify("aabBcc", true, true, ALICE, &sig)); + assert!(verify("0xaAbbCC", true, true, ALICE, &sig)); + assert!(!verify("0xaabbcc", true, true, BOB, &sig)); + + assert!(!verify("0xaabbcc", false, true, ALICE, &sig)); +} + +/// Test that sig/verify works with random bytes. +#[test] +fn sig_verify_stdin_non_utf8_work() { + use rand::RngCore; + let mut rng = rand::thread_rng(); + + for _ in 0..100 { + let mut raw = [0u8; 32]; + rng.fill_bytes(&mut raw); + let sig = sign_raw(&raw, false, true); + + assert!(verify_raw(&raw, false, true, ALICE, &sig)); + assert!(!verify_raw(&raw, false, true, BOB, &sig)); + } +} + +/// Test that sig/verify works with invalid UTF-8 bytes. +#[test] +fn sig_verify_stdin_invalid_utf8_work() { + let raw = vec![192u8, 193]; + assert!(String::from_utf8(raw.clone()).is_err(), "Must be invalid UTF-8"); + + let sig = sign_raw(&raw, false, true); + + assert!(verify_raw(&raw, false, true, ALICE, &sig)); + assert!(!verify_raw(&raw, false, true, BOB, &sig)); +} diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 1ce2b23221691..8ced185d7f3bc 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -31,7 +31,7 @@ use sp_core::{ Pair, }; use sp_runtime::{traits::IdentifyAccount, MultiSigner}; -use std::{io::Read, path::PathBuf}; +use std::path::PathBuf; /// Public key type for Runtime pub type PublicFor
<P> = <P as Pair>
::Public; @@ -273,23 +273,6 @@ where format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) } -/// checks if message is Some, otherwise reads message from stdin and optionally decodes hex -pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result, Error> { - let mut message = vec![]; - match msg { - Some(m) => { - message = array_bytes::hex2bytes(m.as_str())?; - }, - None => { - std::io::stdin().lock().read_to_end(&mut message)?; - if should_decode { - message = array_bytes::hex2bytes(array_bytes::hex_bytes2hex_str(&message)?)?; - } - }, - } - Ok(message) -} - /// Allows for calling $method with appropriate crypto impl. #[macro_export] macro_rules! with_crypto_scheme { diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index 82554fbf268fa..183bf507bd114 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -18,9 +18,10 @@ //! implementation of the `verify` subcommand -use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag}; +use crate::{error, params::MessageParams, utils, with_crypto_scheme, CryptoSchemeFlag}; use clap::Parser; use sp_core::crypto::{ByteArray, Ss58Codec}; +use std::io::BufRead; /// The `verify` command #[derive(Debug, Clone, Parser)] @@ -37,14 +38,9 @@ pub struct VerifyCmd { /// If not given, you will be prompted for the URI. uri: Option, - /// Message to verify, if not provided you will be prompted to - /// pass the message via STDIN - #[arg(long)] - message: Option, - - /// The message on STDIN is hex-encoded data - #[arg(long)] - hex: bool, + #[allow(missing_docs)] + #[clap(flatten)] + pub message_params: MessageParams, #[allow(missing_docs)] #[clap(flatten)] @@ -54,7 +50,20 @@ pub struct VerifyCmd { impl VerifyCmd { /// Run the command pub fn run(&self) -> error::Result<()> { - let message = utils::read_message(self.message.as_ref(), self.hex)?; + self.verify(|| std::io::stdin().lock()) + } + + /// Verify a signature for a message. + /// + /// The message can either be provided as immediate argument via CLI or otherwise read from the + /// reader created by `create_reader`. The reader will only be created in case that the message + /// is not passed as immediate. + pub(crate) fn verify(&self, create_reader: F) -> error::Result<()> + where + R: BufRead, + F: FnOnce() -> R, + { + let message = self.message_params.message_from(create_reader)?; let sig_data = array_bytes::hex2bytes(&self.sig)?; let uri = utils::read_uri(self.uri.as_ref())?; let uri = if let Some(uri) = uri.strip_prefix("0x") { uri } else { &uri }; @@ -86,3 +95,43 @@ where Ok(()) } + +#[cfg(test)] +mod test { + use super::*; + + const ALICE: &str = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"; + const SIG1: &str = "0x4eb25a2285a82374888880af0024eb30c3a21ce086eae3862888d345af607f0ad6fb081312f11730932564f24a9f8ebcee2d46861413ae61307eca58db2c3e81"; + const SIG2: &str = "0x026342225155056ea797118c1c8c8b3cc002aa2020c36f4217fa3c302783a572ad3dcd38c231cbaf86cadb93984d329c963ceac0685cc1ee4c1ed50fa443a68f"; + + // Verify work with `--message` argument. + #[test] + fn verify_immediate() { + let cmd = VerifyCmd::parse_from(&["verify", SIG1, ALICE, "--message", "test message"]); + assert!(cmd.run().is_ok(), "Alice' signature should verify"); + } + + // Verify work without `--message` argument. 
+ #[test] + fn verify_stdin() { + let cmd = VerifyCmd::parse_from(&["verify", SIG1, ALICE]); + let message = "test message"; + assert!(cmd.verify(|| message.as_bytes()).is_ok(), "Alice' signature should verify"); + } + + // Verify work with `--message` argument for hex message. + #[test] + fn verify_immediate_hex() { + let cmd = VerifyCmd::parse_from(&["verify", SIG2, ALICE, "--message", "0xaabbcc", "--hex"]); + assert!(cmd.run().is_ok(), "Alice' signature should verify"); + } + + // Verify work without `--message` argument for hex message. + #[test] + fn verify_stdin_hex() { + let cmd = VerifyCmd::parse_from(&["verify", SIG2, ALICE, "--hex"]); + assert!(cmd.verify(|| "0xaabbcc".as_bytes()).is_ok()); + assert!(cmd.verify(|| "aabbcc".as_bytes()).is_ok()); + assert!(cmd.verify(|| "0xaABBcC".as_bytes()).is_ok()); + } +} diff --git a/client/cli/src/params/message_params.rs b/client/cli/src/params/message_params.rs new file mode 100644 index 0000000000000..9f88c24dcd222 --- /dev/null +++ b/client/cli/src/params/message_params.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2023 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Params to configure how a message should be passed into a command. + +use crate::error::Error; +use array_bytes::{hex2bytes, hex_bytes2hex_str}; +use clap::Parser; +use std::io::BufRead; + +/// Params to configure how a message should be passed into a command. +#[derive(Parser, Debug, Clone)] +pub struct MessageParams { + /// Message to process. Will be read from STDIN otherwise. + /// + /// The message is assumed to be raw bytes per default. Use `--hex` for hex input. Can + /// optionally be prefixed with `0x` in the hex case. + #[arg(long)] + message: Option, + + /// The message is hex-encoded data. + #[arg(long)] + hex: bool, +} + +impl MessageParams { + /// Produces the message by either using its immediate value or reading from stdin. + /// + /// This function should only be called once and the result cached. + pub(crate) fn message_from(&self, create_reader: F) -> Result, Error> + where + R: BufRead, + F: FnOnce() -> R, + { + let raw = match &self.message { + Some(raw) => raw.as_bytes().to_vec(), + None => { + let mut raw = vec![]; + create_reader().read_to_end(&mut raw)?; + raw + }, + }; + if self.hex { + hex2bytes(hex_bytes2hex_str(&raw)?).map_err(Into::into) + } else { + Ok(raw) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Test that decoding an immediate message works. 
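+	/// Every case from `test_closures` is exercised with the message passed as an immediate value.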
+ #[test] + fn message_decode_immediate() { + for (name, input, hex, output) in test_closures() { + println!("Testing: immediate_{}", name); + let params = MessageParams { message: Some(input.into()), hex }; + let message = params.message_from(|| std::io::stdin().lock()); + + match output { + Some(output) => { + let message = message.expect(&format!("{}: should decode but did not", name)); + assert_eq!(message, output, "{}: decoded a wrong message", name); + }, + None => { + message.err().expect(&format!("{}: should not decode but did", name)); + }, + } + } + } + + /// Test that decoding a message from a stream works. + #[test] + fn message_decode_stream() { + for (name, input, hex, output) in test_closures() { + println!("Testing: stream_{}", name); + let params = MessageParams { message: None, hex }; + let message = params.message_from(|| input.as_bytes()); + + match output { + Some(output) => { + let message = message.expect(&format!("{}: should decode but did not", name)); + assert_eq!(message, output, "{}: decoded a wrong message", name); + }, + None => { + message.err().expect(&format!("{}: should not decode but did", name)); + }, + } + } + } + + /// Returns (test_name, input, hex, output). + fn test_closures() -> Vec<(&'static str, &'static str, bool, Option<&'static [u8]>)> { + vec![ + ("decode_no_hex_works", "Hello this is not hex", false, Some(b"Hello this is not hex")), + ("decode_no_hex_with_hex_string_works", "0xffffffff", false, Some(b"0xffffffff")), + ("decode_hex_works", "0x00112233", true, Some(&[0, 17, 34, 51])), + ("decode_hex_without_prefix_works", "00112233", true, Some(&[0, 17, 34, 51])), + ("decode_hex_uppercase_works", "0xaAbbCCDd", true, Some(&[170, 187, 204, 221])), + ("decode_hex_wrong_len_errors", "0x0011223", true, None), + ] + } +} diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 3197deb101bcc..4ec7d1d958e8f 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -18,6 +18,7 @@ mod database_params; mod import_params; mod keystore_params; +mod message_params; mod network_params; mod node_key_params; mod offchain_worker_params; @@ -35,7 +36,7 @@ use sp_runtime::{ use std::{fmt::Debug, str::FromStr}; pub use crate::params::{ - database_params::*, import_params::*, keystore_params::*, network_params::*, + database_params::*, import_params::*, keystore_params::*, message_params::*, network_params::*, node_key_params::*, offchain_worker_params::*, pruning_params::*, shared_params::*, transaction_pool_params::*, }; From 819a42ae217b32e9b19a559287b22956b69240f1 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Mon, 13 Feb 2023 23:22:51 +0100 Subject: [PATCH 21/24] [Feature] Introduce storage_alias for CountedStorageMap (#13366) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [Feature] Introduce storagage_alias for CountedStorageMap * bit more dry * bit more dry * address review comments * some tests and fixes * fix ui tests * Apply suggestions from code review Co-authored-by: Bastian Köcher * compare metadata --------- Co-authored-by: parity-processbot <> Co-authored-by: Bastian Köcher --- frame/support/procedural/src/lib.rs | 6 ++ .../procedural/src/pallet/expand/storage.rs | 15 ++-- frame/support/procedural/src/storage_alias.rs | 81 ++++++++++++++++++- .../support/src/storage/types/counted_map.rs | 10 +++ frame/support/test/tests/pallet.rs | 18 ++++- .../forbid_underscore_as_prefix.stderr | 2 +- 6 files changed, 119 insertions(+), 13 deletions(-) diff --git 
a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 8fa46b48d005f..67e592f4ca2db 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -69,6 +69,12 @@ fn get_cargo_env_var(version_env: &str) -> std::result::Result String { + format!("CounterFor{}", prefix) +} + /// Declares strongly-typed wrappers around codec-compatible types in storage. /// /// ## Example diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 195a62431f279..0bb19ad6c29cd 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -15,9 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::{ - parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, - Def, +use crate::{ + counter_prefix, + pallet::{ + parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, + Def, + }, }; use quote::ToTokens; use std::{collections::HashMap, ops::IndexMut}; @@ -39,12 +42,6 @@ fn counter_prefix_ident(storage_ident: &syn::Ident) -> syn::Ident { ) } -/// Generate the counter_prefix related to the storage. -/// counter_prefix is used by counted storage map. -fn counter_prefix(prefix: &str) -> String { - format!("CounterFor{}", prefix) -} - /// Check for duplicated storage prefixes. This step is necessary since users can specify an /// alternative storage prefix using the #[pallet::storage_prefix] syntax, and we need to ensure /// that the prefix specified by the user is not a duplicate of an existing one. diff --git a/frame/support/procedural/src/storage_alias.rs b/frame/support/procedural/src/storage_alias.rs index e0df0123595b9..eaf8591a1d999 100644 --- a/frame/support/procedural/src/storage_alias.rs +++ b/frame/support/procedural/src/storage_alias.rs @@ -17,6 +17,7 @@ //! Implementation of the `storage_alias` attribute macro. +use crate::counter_prefix; use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; @@ -136,6 +137,7 @@ impl ToTokens for SimpleGenerics { mod storage_types { syn::custom_keyword!(StorageValue); syn::custom_keyword!(StorageMap); + syn::custom_keyword!(CountedStorageMap); syn::custom_keyword!(StorageDoubleMap); syn::custom_keyword!(StorageNMap); } @@ -168,6 +170,21 @@ enum StorageType { _trailing_comma: Option, _gt_token: Token![>], }, + CountedMap { + _kw: storage_types::CountedStorageMap, + _lt_token: Token![<], + prefix: SimplePath, + prefix_generics: Option, + _hasher_comma: Token![,], + hasher_ty: Type, + _key_comma: Token![,], + key_ty: Type, + _value_comma: Token![,], + value_ty: Type, + query_type: Option<(Token![,], Type)>, + _trailing_comma: Option, + _gt_token: Token![>], + }, DoubleMap { _kw: storage_types::StorageDoubleMap, _lt_token: Token![<], @@ -235,12 +252,22 @@ impl StorageType { >; } }, + Self::CountedMap { + value_ty, query_type, hasher_ty, key_ty, prefix_generics, .. + } | Self::Map { value_ty, query_type, hasher_ty, key_ty, prefix_generics, .. } => { let query_type = query_type.as_ref().map(|(c, t)| quote!(#c #t)); + let map_type = Ident::new( + match self { + Self::Map { .. } => "StorageMap", + _ => "CountedStorageMap", + }, + Span::call_site(), + ); quote! 
{ #( #attributes )* - #visibility type #storage_name #storage_generics = #crate_::storage::types::StorageMap< + #visibility type #storage_name #storage_generics = #crate_::storage::types::#map_type< #storage_instance #prefix_generics, #hasher_ty, #key_ty, @@ -296,6 +323,7 @@ impl StorageType { match self { Self::Value { prefix, .. } | Self::Map { prefix, .. } | + Self::CountedMap { prefix, .. } | Self::NMap { prefix, .. } | Self::DoubleMap { prefix, .. } => prefix, } @@ -306,6 +334,7 @@ impl StorageType { match self { Self::Value { prefix_generics, .. } | Self::Map { prefix_generics, .. } | + Self::CountedMap { prefix_generics, .. } | Self::NMap { prefix_generics, .. } | Self::DoubleMap { prefix_generics, .. } => prefix_generics.as_ref(), } @@ -363,6 +392,22 @@ impl Parse for StorageType { _trailing_comma: input.peek(Token![,]).then(|| input.parse()).transpose()?, _gt_token: input.parse()?, }) + } else if lookahead.peek(storage_types::CountedStorageMap) { + Ok(Self::CountedMap { + _kw: input.parse()?, + _lt_token: input.parse()?, + prefix: input.parse()?, + prefix_generics: parse_pallet_generics(input)?, + _hasher_comma: input.parse()?, + hasher_ty: input.parse()?, + _key_comma: input.parse()?, + key_ty: input.parse()?, + _value_comma: input.parse()?, + value_ty: input.parse()?, + query_type: parse_query_type(input)?, + _trailing_comma: input.peek(Token![,]).then(|| input.parse()).transpose()?, + _gt_token: input.parse()?, + }) } else if lookahead.peek(storage_types::StorageDoubleMap) { Ok(Self::DoubleMap { _kw: input.parse()?, @@ -476,6 +521,7 @@ pub fn storage_alias(input: TokenStream) -> Result { input.storage_type.prefix(), input.storage_type.prefix_generics(), &input.visibility, + matches!(input.storage_type, StorageType::CountedMap { .. }), )?; let definition = input.storage_type.generate_type_declaration( @@ -511,6 +557,7 @@ fn generate_storage_instance( prefix: &SimplePath, prefix_generics: Option<&TypeGenerics>, visibility: &Visibility, + is_counted_map: bool, ) -> Result { if let Some(ident) = prefix.get_ident().filter(|i| *i == "_") { return Err(Error::new(ident.span(), "`_` is not allowed as prefix by `storage_alias`.")) @@ -546,9 +593,37 @@ fn generate_storage_instance( let where_clause = storage_where_clause.map(|w| quote!(#w)).unwrap_or_default(); - let name = Ident::new(&format!("{}_Storage_Instance", storage_name), Span::call_site()); + let name_str = format!("{}_Storage_Instance", storage_name); + let name = Ident::new(&name_str, Span::call_site()); let storage_name_str = storage_name.to_string(); + let counter_code = is_counted_map.then(|| { + let counter_name = Ident::new(&counter_prefix(&name_str), Span::call_site()); + let counter_storage_name_str = counter_prefix(&storage_name_str); + + quote! { + #visibility struct #counter_name< #impl_generics >( + #crate_::sp_std::marker::PhantomData<(#type_generics)> + ) #where_clause; + + impl<#impl_generics> #crate_::traits::StorageInstance + for #counter_name< #type_generics > #where_clause + { + fn pallet_prefix() -> &'static str { + #pallet_prefix + } + + const STORAGE_PREFIX: &'static str = #counter_storage_name_str; + } + + impl<#impl_generics> #crate_::storage::types::CountedStorageMapInstance + for #name< #type_generics > #where_clause + { + type CounterPrefix = #counter_name < #type_generics >; + } + } + }); + // Implement `StorageInstance` trait. let code = quote! 
{ #visibility struct #name< #impl_generics >( @@ -564,6 +639,8 @@ fn generate_storage_instance( const STORAGE_PREFIX: &'static str = #storage_name_str; } + + #counter_code }; Ok(StorageInstance { name, code }) diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index 3361c4093dce0..7d7d9ebcf70cf 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -543,6 +543,16 @@ mod test { 97 } } + #[crate::storage_alias] + type ExampleCountedMap = CountedStorageMap; + + #[test] + fn storage_alias_works() { + TestExternalities::default().execute_with(|| { + assert_eq!(ExampleCountedMap::count(), 0); + ExampleCountedMap::insert(3, 10); + }) + } #[test] fn test_value_query() { diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index c0376d5aa450f..36150b28058a7 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -19,7 +19,7 @@ use frame_support::{ dispatch::{ DispatchClass, DispatchInfo, GetDispatchInfo, Parameter, Pays, UnfilteredDispatchable, }, - pallet_prelude::ValueQuery, + pallet_prelude::{StorageInfoTrait, ValueQuery}, storage::unhashed, traits::{ ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, @@ -1906,14 +1906,30 @@ fn assert_type_all_pallets_without_system_reversed_is_correct() { #[test] fn test_storage_alias() { + use frame_support::Twox64Concat; + #[frame_support::storage_alias] type Value where ::AccountId: From + SomeAssociation1, = StorageValue, u32, ValueQuery>; + #[frame_support::storage_alias] + type SomeCountedStorageMap + where + ::AccountId: From + SomeAssociation1, + = CountedStorageMap, Twox64Concat, u8, u32>; + TestExternalities::default().execute_with(|| { pallet::Value::::put(10); assert_eq!(10, Value::::get()); + + pallet2::SomeCountedStorageMap::::insert(10, 100); + assert_eq!(Some(100), SomeCountedStorageMap::::get(10)); + assert_eq!(1, SomeCountedStorageMap::::count()); + assert_eq!( + SomeCountedStorageMap::::storage_info(), + pallet2::SomeCountedStorageMap::::storage_info() + ); }) } diff --git a/frame/support/test/tests/storage_alias_ui/forbid_underscore_as_prefix.stderr b/frame/support/test/tests/storage_alias_ui/forbid_underscore_as_prefix.stderr index 3aa517ecfa314..3b5e3e9c23cca 100644 --- a/frame/support/test/tests/storage_alias_ui/forbid_underscore_as_prefix.stderr +++ b/frame/support/test/tests/storage_alias_ui/forbid_underscore_as_prefix.stderr @@ -1,4 +1,4 @@ -error: expected one of: `StorageValue`, `StorageMap`, `StorageDoubleMap`, `StorageNMap` +error: expected one of: `StorageValue`, `StorageMap`, `CountedStorageMap`, `StorageDoubleMap`, `StorageNMap` --> tests/storage_alias_ui/forbid_underscore_as_prefix.rs:2:14 | 2 | type Ident = CustomStorage; From b6587231607682d534395f1d7c56d22f6b282897 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Tue, 14 Feb 2023 14:42:05 +0100 Subject: [PATCH 22/24] simplifies approval edge weight calculation --- .../npos-elections/src/approval_voting.rs | 20 +++---------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs index 9e5ed047035cd..c7a2189dbafc9 100644 --- a/primitives/npos-elections/src/approval_voting.rs +++ b/primitives/npos-elections/src/approval_voting.rs @@ -21,11 +21,7 @@ //! vote weight. The candidates with the most backing are the election winners. 
use crate::{setup_inputs, ElectionResult, IdentifierT, PerThing128, VoteWeight}; -use sp_arithmetic::{ - helpers_128bit::multiply_by_rational_with_rounding, - traits::{Bounded, Zero}, - Rounding, -}; +use sp_arithmetic::traits::Zero; use sp_std::{cmp::Reverse, vec::Vec}; /// Execute an approvals voting election scheme. The return type is a list of winners and a weight @@ -53,13 +49,9 @@ pub fn approval_voting( candidates.sort_by_key(|c| Reverse(c.borrow().approval_stake)); - let mut winners_count = 0; let winners = candidates .into_iter() - .take_while(|_| { - winners_count += 1; - winners_count <= to_elect - }) + .take(to_elect) .map(|w| { w.borrow_mut().elected = true; w @@ -70,13 +62,7 @@ pub fn approval_voting( for voter in &mut voters { for edge in &mut voter.edges { if edge.candidate.borrow().elected { - edge.weight = multiply_by_rational_with_rounding( - voter.budget, - edge.load.n(), - voter.load.n(), - Rounding::Down, - ) - .unwrap_or(Bounded::max_value()); + edge.weight = voter.budget } else { edge.weight = Zero::zero() } From 87503ee648c8431a490870e6f80ab8352061a7b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Tue, 14 Feb 2023 22:24:49 +0100 Subject: [PATCH 23/24] Adds test --- .../election-provider-support/src/onchain.rs | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 483c402fe249c..f28023449ec14 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -185,7 +185,7 @@ impl ElectionProvider for OnChainExecution { #[cfg(test)] mod tests { use super::*; - use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; + use crate::{ApprovalVoting, ElectionProvider, PhragMMS, SequentialPhragmen}; use frame_support::{assert_noop, parameter_types, traits::ConstU32}; use sp_npos_elections::Support; use sp_runtime::Perbill; @@ -235,6 +235,7 @@ mod tests { struct PhragmenParams; struct PhragMMSParams; + struct ApprovalVotingParams; parameter_types! 
{ pub static MaxWinners: u32 = 10; @@ -261,6 +262,16 @@ mod tests { type TargetsBound = ConstU32<400>; } + impl Config for ApprovalVotingParams { + type System = Runtime; + type Solver = ApprovalVoting; + type DataProvider = mock_data_provider::DataProvider; + type WeightInfo = (); + type MaxWinners = MaxWinners; + type VotersBound = ConstU32<600>; + type TargetsBound = ConstU32<400>; + } + mod mock_data_provider { use frame_support::{bounded_vec, traits::ConstU32}; @@ -333,4 +344,20 @@ mod tests { ); }) } + + #[test] + fn onchain_approval_voting_works() { + sp_io::TestExternalities::new_empty().execute_with(|| { + DesiredTargets::set(3); + + assert_eq!( + as ElectionProvider>::elect().unwrap(), + vec![ + (10, Support { total: 20, voters: vec![(1, 5), (3, 15)] }), + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (30, Support { total: 25, voters: vec![(2, 10), (3, 15)] }) + ] + ) + }) + } } From 186cf8d263b1d4976a515a1d9b099024679dbd35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Wed, 15 Feb 2023 01:37:03 +0100 Subject: [PATCH 24/24] Removes vote normalization for approval voting --- frame/election-provider-support/src/onchain.rs | 1 + primitives/npos-elections/src/approval_voting.rs | 9 ++++----- primitives/npos-elections/src/tests.rs | 15 ++++++++++++--- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index f28023449ec14..6102a1c67f5f3 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -350,6 +350,7 @@ mod tests { sp_io::TestExternalities::new_empty().execute_with(|| { DesiredTargets::set(3); + // note that the `OnChainExecution::elect` implementation normalizes the vote weights. assert_eq!( as ElectionProvider>::elect().unwrap(), vec![ diff --git a/primitives/npos-elections/src/approval_voting.rs b/primitives/npos-elections/src/approval_voting.rs index c7a2189dbafc9..8c1d496d77f75 100644 --- a/primitives/npos-elections/src/approval_voting.rs +++ b/primitives/npos-elections/src/approval_voting.rs @@ -27,6 +27,9 @@ use sp_std::{cmp::Reverse, vec::Vec}; /// Execute an approvals voting election scheme. The return type is a list of winners and a weight /// distribution vector of all voters who contribute to the winners. /// +/// - The vote assignment distribution for each vote is always 100%, since a voter backs a candidate +/// with its full stake, regardless of how many candidates are backed by the same stake. However, +/// the caller may normalize votes on site if required. /// - Returning winners are sorted based on desirability. Voters are unsorted. /// - The returning winners are zipped with their final backing stake. Yet, to get the exact final /// weight distribution from the winner's point of view, one needs to build a support map. 
See @@ -69,11 +72,7 @@ pub fn approval_voting( } } - let mut assignments = - voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); - let _ = assignments - .iter_mut() - .try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?; + let assignments = voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); Ok(ElectionResult { winners, assignments }) } diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 81b065fa5a75a..bd821f2571335 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -45,15 +45,24 @@ fn approval_voting_works() { vec![ Assignment { who: 10u64, - distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + distribution: vec![ + (1, Perbill::from_percent(100)), + (2, Perbill::from_percent(100)) + ] }, Assignment { who: 20u64, - distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + distribution: vec![ + (1, Perbill::from_percent(100)), + (2, Perbill::from_percent(100)) + ] }, Assignment { who: 30u64, - distribution: vec![(1, Perbill::from_percent(50)), (2, Perbill::from_percent(50))] + distribution: vec![ + (1, Perbill::from_percent(100)), + (2, Perbill::from_percent(100)) + ] }, Assignment { who: 40u64, distribution: vec![(4, Perbill::from_percent(100))] }, ]