From 7b22fbfc6ba567af8ac0a693ef6593e179f46a53 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Feb 2020 11:56:59 +0100 Subject: [PATCH 01/25] Fix missing overrides of NetworkBehaviour (#4829) --- client/network/src/debug_info.rs | 12 ++++++++ client/network/src/discovery.rs | 29 ++++++++++++++++++- client/network/src/protocol.rs | 10 ++++++- .../src/protocol/legacy_proto/tests.rs | 12 ++++++-- 4 files changed, 59 insertions(+), 4 deletions(-) diff --git a/client/network/src/debug_info.rs b/client/network/src/debug_info.rs index 9cc39baae6237..b06e275d1d6ac 100644 --- a/client/network/src/debug_info.rs +++ b/client/network/src/debug_info.rs @@ -17,12 +17,14 @@ use fnv::FnvHashMap; use futures::prelude::*; use libp2p::Multiaddr; +use libp2p::core::nodes::listeners::ListenerId; use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; use log::{debug, trace, error}; +use std::error; use std::collections::hash_map::Entry; use std::pin::Pin; use std::task::{Context, Poll}; @@ -251,6 +253,16 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static { self.identify.inject_new_external_addr(addr); } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.ping.inject_listener_error(id, err); + self.identify.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId) { + self.ping.inject_listener_closed(id); + self.identify.inject_listener_closed(id); + } + fn poll( &mut self, cx: &mut Context, diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 531e769cceafa..ab5ab0a1f456a 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -47,7 +47,7 @@ use futures::prelude::*; use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, PublicKey}; +use libp2p::core::{nodes::listeners::ListenerId, ConnectedPoint, Multiaddr, PeerId, PublicKey}; use libp2p::swarm::{ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::kad::{Kademlia, KademliaEvent, Quorum, Record}; use libp2p::kad::GetClosestPeersError; @@ -266,6 +266,15 @@ where NetworkBehaviour::inject_replaced(&mut self.kademlia, peer_id, closed, opened) } + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error + ) { + NetworkBehaviour::inject_addr_reach_failure(&mut self.kademlia, peer_id, addr, error) + } + fn inject_node_event( &mut self, peer_id: PeerId, @@ -278,10 +287,28 @@ where let new_addr = addr.clone() .with(Protocol::P2p(self.local_peer_id.clone().into())); info!(target: "sub-libp2p", "Discovered new external address for our node: {}", new_addr); + NetworkBehaviour::inject_new_external_addr(&mut self.kademlia, addr) } fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { info!(target: "sub-libp2p", "No longer listening on {}", addr); + NetworkBehaviour::inject_expired_listen_addr(&mut self.kademlia, addr) + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + NetworkBehaviour::inject_dial_failure(&mut self.kademlia, peer_id) + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + NetworkBehaviour::inject_new_listen_addr(&mut 
self.kademlia, addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + NetworkBehaviour::inject_listener_error(&mut self.kademlia, id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId) { + NetworkBehaviour::inject_listener_closed(&mut self.kademlia, id); } fn poll( diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2df8f6597c508..5e8df2831ba63 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -20,7 +20,7 @@ use crate::utils::interval; use bytes::{Bytes, BytesMut}; use futures::prelude::*; use libp2p::{Multiaddr, PeerId}; -use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; +use libp2p::core::{ConnectedPoint, nodes::{listeners::ListenerId, Substream}, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use sp_core::storage::{StorageKey, ChildInfo}; @@ -2004,6 +2004,14 @@ Protocol { fn inject_new_external_addr(&mut self, addr: &Multiaddr) { self.behaviour.inject_new_external_addr(addr) } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + self.behaviour.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId) { + self.behaviour.inject_listener_closed(id); + } } impl, H: ExHashT> DiscoveryNetBehaviour for Protocol { diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/legacy_proto/tests.rs index 6a2174f30c937..18e32f1d0189f 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/legacy_proto/tests.rs @@ -18,13 +18,13 @@ use futures::{prelude::*, ready}; use codec::{Encode, Decode}; -use libp2p::core::nodes::Substream; +use libp2p::core::nodes::{Substream, listeners::ListenerId}; use libp2p::core::{ConnectedPoint, transport::boxed::Boxed, muxing::StreamMuxerBox}; use libp2p::swarm::{Swarm, ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction}; use libp2p::{PeerId, Multiaddr, Transport}; use rand::seq::SliceRandom; -use std::{io, task::Context, task::Poll, time::Duration}; +use std::{error, io, task::Context, task::Poll, time::Duration}; use crate::message::Message; use crate::protocol::legacy_proto::{LegacyProto, LegacyProtoOut}; use sp_test_primitives::Block; @@ -204,6 +204,14 @@ impl NetworkBehaviour for CustomProtoWithAddr { fn inject_new_external_addr(&mut self, addr: &Multiaddr) { self.inner.inject_new_external_addr(addr) } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.inner.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId) { + self.inner.inject_listener_closed(id); + } } #[test] From eefb472c33f8acb1852e90526af46f2ac878c155 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Feb 2020 16:56:49 +0100 Subject: [PATCH 02/25] Remove support for secio (#4831) --- client/network/src/transport.rs | 36 +++++++++++++-------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index d632f9b75c0c9..b11b1870511be 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -17,12 +17,10 @@ use futures::prelude::*; use libp2p::{ InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, - 
mplex, identity, secio, yamux, bandwidth, wasm_ext + mplex, identity, yamux, bandwidth, wasm_ext }; #[cfg(not(target_os = "unknown"))] use libp2p::{tcp, dns, websocket, noise}; -#[cfg(not(target_os = "unknown"))] -use libp2p::core::{either::EitherError, either::EitherOutput}; use libp2p::core::{self, upgrade, transport::boxed::Boxed, transport::OptionalTransport, muxing::StreamMuxerBox}; use std::{io, sync::Arc, time::Duration, usize}; @@ -52,7 +50,6 @@ pub fn build_transport( rare panic here is basically zero"); noise::NoiseConfig::ix(noise_keypair) }; - let secio_config = secio::SecioConfig::new(keypair); // Build configuration objects for multiplexing mechanisms. let mut mplex_config = mplex::MplexConfig::new(); @@ -93,28 +90,23 @@ pub fn build_transport( // For non-WASM, we support both secio and noise. #[cfg(not(target_os = "unknown"))] let transport = transport.and_then(move |stream, endpoint| { - let upgrade = core::upgrade::SelectUpgrade::new(noise_config, secio_config); - core::upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) - .map(|out| match out? { - // We negotiated noise - EitherOutput::First((remote_id, out)) => { - let remote_key = match remote_id { - noise::RemoteIdentity::IdentityKey(key) => key, - _ => return Err(upgrade::UpgradeError::Apply(EitherError::A(noise::NoiseError::InvalidKey))) - }; - Ok((EitherOutput::First(out), remote_key.into_peer_id())) - } - // We negotiated secio - EitherOutput::Second((remote_id, out)) => - Ok((EitherOutput::Second(out), remote_id)) + core::upgrade::apply(stream, noise_config, endpoint, upgrade::Version::V1) + .and_then(|(remote_id, out)| async move { + let remote_key = match remote_id { + noise::RemoteIdentity::IdentityKey(key) => key, + _ => return Err(upgrade::UpgradeError::Apply(noise::NoiseError::InvalidKey)) + }; + Ok((out, remote_key.into_peer_id())) }) }); - // For WASM, we only support secio for now. + // We refuse all WASM connections for now. It is intended that we negotiate noise in the + // future. 
See https://github.com/libp2p/rust-libp2p/issues/1414 #[cfg(target_os = "unknown")] - let transport = transport.and_then(move |stream, endpoint| { - core::upgrade::apply(stream, secio_config, endpoint, upgrade::Version::V1) - .map_ok(|(id, stream)| ((stream, id))) + let transport = transport.and_then(move |_, _| async move { + let r: Result<(wasm_ext::Connection, PeerId), _> = + Err(io::Error::new(io::ErrorKind::Other, format!("No encryption protocol supported"))); + r }); // Multiplexing From b9ee3dfeca8b4d105111564238ec2943c2130adc Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Feb 2020 17:25:14 +0100 Subject: [PATCH 03/25] Print an error if listener is closed (#4830) * Print an error if listener is closed * Oops, forgot to commit this --- client/network/src/discovery.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab5ab0a1f456a..de49913b265e9 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -59,7 +59,7 @@ use libp2p::core::{nodes::Substream, muxing::StreamMuxerBox}; #[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; -use log::{debug, info, trace, warn}; +use log::{debug, info, trace, warn, error}; use std::{cmp, collections::VecDeque, time::Duration}; use std::task::{Context, Poll}; use sp_core::hexdisplay::HexDisplay; @@ -304,10 +304,12 @@ where } fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + error!(target: "sub-libp2p", "Error on libp2p listener {:?}: {}", id, err); NetworkBehaviour::inject_listener_error(&mut self.kademlia, id, err); } fn inject_listener_closed(&mut self, id: ListenerId) { + error!(target: "sub-libp2p", "Libp2p listener {:?} closed", id); NetworkBehaviour::inject_listener_closed(&mut self.kademlia, id); } From 584ac4f8d925387efcbf5af805f06173e42c338d Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 5 Feb 2020 17:26:07 +0100 Subject: [PATCH 04/25] Avoid losing values in intermediate take (#4833) --- primitives/consensus/common/src/block_import.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 952a044e9b035..dabe6331e8150 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -228,15 +228,14 @@ impl BlockImportParams { /// Take interemdiate by given key, and remove it from the processing list. pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { - if self.intermediates.contains_key(key) { - self.intermediates.remove(key) - .ok_or(Error::NoIntermediate) - .and_then(|value| { - value.downcast::() - .map_err(|_| Error::InvalidIntermediate) - }) - } else { - Err(Error::NoIntermediate) + let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; + + match v.downcast::() { + Ok(v) => Ok(v), + Err(v) => { + self.intermediates.insert(k, v); + Err(Error::InvalidIntermediate) + }, } } From 4e0e8c179faaad362e3013024c70abda6493fa1b Mon Sep 17 00:00:00 2001 From: Sergei Pepyakin Date: Wed, 5 Feb 2020 17:26:48 +0100 Subject: [PATCH 05/25] Get rid of in-substrate usages of `core_intrinsics` feature (#4823) * Remove usage of unneeded Rust feature core_intrinsics * core::intrinsics::abort -> arch::wasm32::unreachable * Don't publish `core::intrinsics`. 
* Disable panic_handler and alloc_error_handler for no_std non wasm builds --- primitives/application-crypto/Cargo.toml | 6 +++++- primitives/io/src/lib.rs | 5 ++--- primitives/sandbox/src/lib.rs | 1 - primitives/std/src/lib.rs | 2 +- primitives/std/without_std.rs | 1 - 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 9377bcff8a378..1fa9a6631c4ef 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -21,5 +21,9 @@ std = [ "full_crypto", "sp-core/std", "codec/std", "serde", "sp-std/std", "sp-io # or Intel SGX. # For the regular wasm runtime builds this should not be used. full_crypto = [ - "sp-core/full_crypto" + "sp-core/full_crypto", + # Don't add `panic_handler` and `alloc_error_handler` since they are expected to be provided + # by the user anyway. + "sp-io/disable_panic_handler", + "sp-io/disable_oom", ] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index dce67133d39a3..1b531725fefc8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -20,7 +20,6 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] -#![cfg_attr(not(feature = "std"), feature(core_intrinsics))] #![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] @@ -892,7 +891,7 @@ pub fn panic(info: &core::panic::PanicInfo) -> ! { unsafe { let message = sp_std::alloc::format!("{}", info); logging::log(LogLevel::Error, "runtime", message.as_bytes()); - core::intrinsics::abort() + core::arch::wasm32::unreachable(); } } @@ -902,7 +901,7 @@ pub fn panic(info: &core::panic::PanicInfo) -> ! { pub fn oom(_: core::alloc::Layout) -> ! { unsafe { logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); - core::intrinsics::abort(); + core::arch::wasm32::unreachable(); } } diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index 17712ad3655e7..e7cd684b458a4 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -36,7 +36,6 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(core_intrinsics))] use sp_std::prelude::*; diff --git a/primitives/std/src/lib.rs b/primitives/std/src/lib.rs index 3fcf5daeb4b00..856b09540355c 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -18,7 +18,7 @@ //! or client/alloc to be used with any code that depends on the runtime. 
#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(core_intrinsics))] + #![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] diff --git a/primitives/std/without_std.rs b/primitives/std/without_std.rs index ad587531ddc97..4424ca0e7de68 100755 --- a/primitives/std/without_std.rs +++ b/primitives/std/without_std.rs @@ -27,7 +27,6 @@ pub use core::convert; pub use core::default; pub use core::fmt; pub use core::hash; -pub use core::intrinsics; pub use core::iter; pub use core::marker; pub use core::mem; From 53008ba412c54f534e6033d9055ea1f403dcfe34 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 5 Feb 2020 17:27:50 +0100 Subject: [PATCH 06/25] Additional RPC for dumping all main storage key pairs under a prefix (#4803) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Merge branch 'gav-split-balanecs-vesting' into gav-upsub # Conflicts: # Cargo.lock # cli/Cargo.toml # collator/Cargo.toml # primitives/Cargo.toml # runtime/common/Cargo.toml # runtime/common/src/claims.rs # runtime/kusama/Cargo.toml # runtime/polkadot/Cargo.toml # service/Cargo.toml * Update client/src/client.rs * Update client/src/client.rs * Fix merge conflict Co-authored-by: Bastian Köcher --- client/rpc-api/src/state/mod.rs | 4 ++++ client/rpc/src/state/mod.rs | 15 +++++++++++++++ client/rpc/src/state/state_full.rs | 11 +++++++++++ client/rpc/src/state/state_light.rs | 8 ++++++++ client/src/client.rs | 23 +++++++++++++++++++++-- 5 files changed, 59 insertions(+), 2 deletions(-) diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 6901738efef26..b2cf8ce909b20 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -44,6 +44,10 @@ pub trait StateApi { #[rpc(name = "state_getKeys")] fn storage_keys(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + /// Returns the keys with prefix, leave empty to get all the keys + #[rpc(name = "state_getPairs")] + fn storage_pairs(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + /// Returns the keys with prefix with pagination support. /// Up to `count` keys will be returned. /// If `start_key` is passed, return next keys in storage in lexicographic order. diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index df26daa929526..8f621cc8afc96 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -63,6 +63,13 @@ pub trait StateBackend: Send + Sync + 'static prefix: StorageKey, ) -> FutureResult>; + /// Returns the keys with prefix along with their values, leave empty to get all the pairs. + fn storage_pairs( + &self, + block: Option, + prefix: StorageKey, + ) -> FutureResult>; + /// Returns the keys with prefix with pagination support. 
fn storage_keys_paged( &self, @@ -255,6 +262,14 @@ impl StateApi for State self.backend.storage_keys(block, key_prefix) } + fn storage_pairs( + &self, + key_prefix: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.storage_pairs(block, key_prefix) + } + fn storage_keys_paged( &self, prefix: Option, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 87c75e77a6584..3d5613626e044 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -254,6 +254,17 @@ impl StateBackend for FullState, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) + .map_err(client_err))) + } + fn storage_keys_paged( &self, block: Option, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index f25d6e2186cc1..7b2455a8fce38 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -199,6 +199,14 @@ impl StateBackend for LightState, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + fn storage_keys_paged( &self, _block: Option, diff --git a/client/src/client.rs b/client/src/client.rs index ca8b0c5dd7cfd..9e30c7b2ea8af 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -280,6 +280,22 @@ impl Client where Ok(keys) } + /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. + pub fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) + -> sp_blockchain::Result> + { + let state = self.state_at(id)?; + let keys = state + .keys(&key_prefix.0) + .into_iter() + .map(|k| { + let d = state.storage(&k).ok().flatten().unwrap_or_default(); + (StorageKey(k), StorageData(d)) + }) + .collect(); + Ok(keys) + } + /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. pub fn storage_keys_iter<'a>( &self, @@ -296,7 +312,9 @@ impl Client where } /// Given a `BlockId` and a key, return the value under the key in that block. - pub fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result> { + pub fn storage(&self, id: &BlockId, key: &StorageKey) + -> sp_blockchain::Result> + { Ok(self.state_at(id)? .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData) @@ -305,7 +323,8 @@ impl Client where /// Given a `BlockId` and a key, return the value under the hash in that block. pub fn storage_hash(&self, id: &BlockId, key: &StorageKey) - -> sp_blockchain::Result> { + -> sp_blockchain::Result> + { Ok(self.state_at(id)? .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? ) From 7019e30466feb4fc09d28a5e9e13afd54de54f93 Mon Sep 17 00:00:00 2001 From: Sergei Pepyakin Date: Wed, 5 Feb 2020 18:20:25 +0100 Subject: [PATCH 07/25] executor: Simplify the SandboxCapabilities interface (#4825) * Don't require `store` and `store_mut` in `SandboxCapabilities`. 
* Simplify the sandbox a bit --- client/executor/common/src/sandbox.rs | 123 +++++++++--------- client/executor/wasmi/src/lib.rs | 34 ++--- .../wasmtime/src/function_executor.rs | 35 ++--- 3 files changed, 81 insertions(+), 111 deletions(-) diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index 89285a75fc9a8..f920a47ca7655 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -27,7 +27,7 @@ use wasmi::{ Externals, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, memory_units::Pages, }; -use sp_wasm_interface::{Pointer, WordSize}; +use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; /// Index of a function inside the supervisor. /// @@ -144,48 +144,10 @@ impl ImportResolver for Imports { /// This trait encapsulates sandboxing capabilities. /// /// Note that this functions are only called in the `supervisor` context. -pub trait SandboxCapabilities { +pub trait SandboxCapabilities: FunctionContext { /// Represents a function reference into the supervisor environment. type SupervisorFuncRef; - /// Returns a reference to an associated sandbox `Store`. - fn store(&self) -> &Store; - - /// Returns a mutable reference to an associated sandbox `Store`. - fn store_mut(&mut self) -> &mut Store; - - /// Allocate space of the specified length in the supervisor memory. - /// - /// # Errors - /// - /// Returns `Err` if allocation not possible or errors during heap management. - /// - /// Returns pointer to the allocated block. - fn allocate(&mut self, len: WordSize) -> Result>; - - /// Deallocate space specified by the pointer that was previously returned by [`allocate`]. - /// - /// # Errors - /// - /// Returns `Err` if deallocation not possible or because of errors in heap management. - /// - /// [`allocate`]: #tymethod.allocate - fn deallocate(&mut self, ptr: Pointer) -> Result<()>; - - /// Write `data` into the supervisor memory at offset specified by `ptr`. - /// - /// # Errors - /// - /// Returns `Err` if `ptr + data.len()` is out of bounds. - fn write_memory(&mut self, ptr: Pointer, data: &[u8]) -> Result<()>; - - /// Read `len` bytes from the supervisor memory. - /// - /// # Errors - /// - /// Returns `Err` if `ptr + len` is out of bounds. - fn read_memory(&self, ptr: Pointer, len: WordSize) -> Result>; - /// Invoke a function in the supervisor environment. /// /// This first invokes the dispatch_thunk function, passing in the function index of the @@ -270,8 +232,14 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { // Move serialized arguments inside the memory and invoke dispatch thunk and // then free allocated memory. 
let invoke_args_len = invoke_args_data.len() as WordSize; - let invoke_args_ptr = self.supervisor_externals.allocate(invoke_args_len)?; - self.supervisor_externals.write_memory(invoke_args_ptr, &invoke_args_data)?; + let invoke_args_ptr = self + .supervisor_externals + .allocate_memory(invoke_args_len) + .map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?; + self + .supervisor_externals + .write_memory(invoke_args_ptr, &invoke_args_data) + .map_err(|_| trap("Can't write invoke args into memory"))?; let result = self.supervisor_externals.invoke( &self.sandbox_instance.dispatch_thunk, invoke_args_ptr, @@ -279,7 +247,10 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { state, func_idx, )?; - self.supervisor_externals.deallocate(invoke_args_ptr)?; + self + .supervisor_externals + .deallocate_memory(invoke_args_ptr) + .map_err(|_| trap("Can't deallocate memory for dispatch thunk's invoke arguments"))?; // dispatch_thunk returns pointer to serialized arguments. // Unpack pointer and len of the serialized result data. @@ -292,9 +263,11 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { }; let serialized_result_val = self.supervisor_externals - .read_memory(serialized_result_val_ptr, serialized_result_val_len)?; + .read_memory(serialized_result_val_ptr, serialized_result_val_len) + .map_err(|_| trap("Can't read the serialized result from dispatch thunk"))?; self.supervisor_externals - .deallocate(serialized_result_val_ptr)?; + .deallocate_memory(serialized_result_val_ptr) + .map_err(|_| trap("Can't deallocate memory for dispatch thunk's result"))?; deserialize_result(&serialized_result_val) } @@ -433,6 +406,46 @@ fn decode_environment_definition( )) } +/// An environment in which the guest module is instantiated. +pub struct GuestEnvironment { + imports: Imports, + guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, +} + +impl GuestEnvironment { + /// Decodes an environment definition from the given raw bytes. + /// + /// Returns `Err` if the definition cannot be decoded. + pub fn decode( + store: &Store, + raw_env_def: &[u8], + ) -> std::result::Result { + let (imports, guest_to_supervisor_mapping) = + decode_environment_definition(raw_env_def, &store.memories)?; + Ok(Self { + imports, + guest_to_supervisor_mapping, + }) + } +} + +/// An unregistered sandboxed instance. +/// +/// To finish off the instantiation the user must call `register`. +#[must_use] +pub struct UnregisteredInstance { + sandbox_instance: Rc>, +} + +impl UnregisteredInstance { + /// Finalizes instantiation of this module. + pub fn register(self, store: &mut Store) -> u32 { + // At last, register the instance. + let instance_idx = store.register_sandbox_instance(self.sandbox_instance); + instance_idx + } +} + /// Instantiate a guest module and return it's index in the store. /// /// The guest module's code is specified in `wasm`. Environment that will be available to @@ -447,18 +460,16 @@ fn decode_environment_definition( /// - Module in `wasm` is invalid or couldn't be instantiated. 
/// /// [`EnvironmentDefinition`]: ../sandbox/struct.EnvironmentDefinition.html -pub fn instantiate( +pub fn instantiate<'a, FE: SandboxCapabilities>( supervisor_externals: &mut FE, dispatch_thunk: FE::SupervisorFuncRef, wasm: &[u8], - raw_env_def: &[u8], + host_env: GuestEnvironment, state: u32, -) -> std::result::Result { - let (imports, guest_to_supervisor_mapping) = - decode_environment_definition(raw_env_def, &supervisor_externals.store().memories)?; - +) -> std::result::Result, InstantiationError> { let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?; - let instance = ModuleInstance::new(&module, &imports).map_err(|_| InstantiationError::Instantiation)?; + let instance = ModuleInstance::new(&module, &host_env.imports) + .map_err(|_| InstantiationError::Instantiation)?; let sandbox_instance = Rc::new(SandboxInstance { // In general, it's not a very good idea to use `.not_started_instance()` for anything @@ -466,7 +477,7 @@ pub fn instantiate( // for the purpose of running `start` function which should be ok. instance: instance.not_started_instance().clone(), dispatch_thunk, - guest_to_supervisor_mapping, + guest_to_supervisor_mapping: host_env.guest_to_supervisor_mapping, }); with_guest_externals( @@ -480,11 +491,7 @@ pub fn instantiate( }, )?; - // At last, register the instance. - let instance_idx = supervisor_externals - .store_mut() - .register_sandbox_instance(sandbox_instance); - Ok(instance_idx) + Ok(UnregisteredInstance { sandbox_instance }) } /// This struct keeps track of all sandboxed components. diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 1bcb1aab8afd5..6fbfdbc1cced4 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -66,31 +66,6 @@ impl<'a> FunctionExecutor<'a> { impl<'a> sandbox::SandboxCapabilities for FunctionExecutor<'a> { type SupervisorFuncRef = wasmi::FuncRef; - fn store(&self) -> &sandbox::Store { - &self.sandbox_store - } - fn store_mut(&mut self) -> &mut sandbox::Store { - &mut self.sandbox_store - } - fn allocate(&mut self, len: WordSize) -> Result, Error> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.allocate(mem, len).map_err(Into::into) - }) - } - fn deallocate(&mut self, ptr: Pointer) -> Result<(), Error> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.deallocate(mem, ptr).map_err(Into::into) - }) - } - fn write_memory(&mut self, ptr: Pointer, data: &[u8]) -> Result<(), Error> { - self.memory.set(ptr.into(), data).map_err(Into::into) - } - fn read_memory(&self, ptr: Pointer, len: WordSize) -> Result, Error> { - self.memory.get(ptr.into(), len as usize).map_err(Into::into) - } - fn invoke( &mut self, dispatch_thunk: &Self::SupervisorFuncRef, @@ -259,8 +234,15 @@ impl<'a> Sandbox for FunctionExecutor<'a> { .clone() }; + let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store, raw_env_def) { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; + let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, raw_env_def, state) { + match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) + .map(|i| i.register(&mut self.sandbox_store)) + { Ok(instance_idx) => instance_idx, Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, diff --git a/client/executor/wasmtime/src/function_executor.rs b/client/executor/wasmtime/src/function_executor.rs index 
7bbf5a456bf56..b4971f8b8a65e 100644 --- a/client/executor/wasmtime/src/function_executor.rs +++ b/client/executor/wasmtime/src/function_executor.rs @@ -118,32 +118,6 @@ impl<'a> FunctionExecutor<'a> { impl<'a> SandboxCapabilities for FunctionExecutor<'a> { type SupervisorFuncRef = SupervisorFuncRef; - fn store(&self) -> &sandbox::Store { - &self.sandbox_store - } - - fn store_mut(&mut self) -> &mut sandbox::Store { - &mut self.sandbox_store - } - - fn allocate(&mut self, len: WordSize) -> Result> { - self.heap.allocate(self.memory, len).map_err(Into::into) - } - - fn deallocate(&mut self, ptr: Pointer) -> Result<()> { - self.heap.deallocate(self.memory, ptr).map_err(Into::into) - } - - fn write_memory(&mut self, ptr: Pointer, data: &[u8]) -> Result<()> { - write_memory_from(self.memory, ptr, data) - } - - fn read_memory(&self, ptr: Pointer, len: WordSize) -> Result> { - let mut output = vec![0; len as usize]; - read_memory_into(self.memory, ptr, output.as_mut())?; - Ok(output) - } - fn invoke( &mut self, dispatch_thunk: &Self::SupervisorFuncRef, @@ -327,8 +301,15 @@ impl<'a> Sandbox for FunctionExecutor<'a> { SupervisorFuncRef(func_ref) }; + let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store, raw_env_def) { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; + let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, raw_env_def, state) { + match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) + .map(|i| i.register(&mut self.sandbox_store)) + { Ok(instance_idx) => instance_idx, Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, From 0d19d9566f8628a7b33a7730d1201a7f63af2968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Wed, 5 Feb 2020 20:47:11 +0000 Subject: [PATCH 08/25] grandpa: bump version to v0.11.1 (#4813) --- client/finality-grandpa/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7c427dfd5cd2d..1249bff751d52 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -29,11 +29,11 @@ sc-network = { version = "0.8", path = "../network" } sc-network-gossip = { version = "0.8", path = "../network-gossip" } sp-finality-tracker = { version = "2.0.0", path = "../../primitives/finality-tracker" } sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } -finality-grandpa = { version = "0.11.0", features = ["derive-codec"] } +finality-grandpa = { version = "0.11.1", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] -finality-grandpa = { version = "0.11.0", features = ["derive-codec", "test-helpers"] } +finality-grandpa = { version = "0.11.1", features = ["derive-codec", "test-helpers"] } sc-network = { version = "0.8", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } From 1fa99067a86962a18f5d7ef4db83bb9797b93dbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Wed, 5 Feb 2020 23:10:35 +0000 Subject: [PATCH 09/25] node: disable grandpa automatic finality fallback (#4835) * node: disable grandpa automatic finality fallback * node: bump spec_version --- bin/node/runtime/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs 
index 65e09e99d2b79..40342c1ce84b1 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -79,7 +79,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 213, + spec_version: 214, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; @@ -476,7 +476,7 @@ parameter_types! { } impl pallet_finality_tracker::Trait for Runtime { - type OnFinalizationStalled = Grandpa; + type OnFinalizationStalled = (); type WindowSize = WindowSize; type ReportLatency = ReportLatency; } From c9e8aa3a8e6a2b3a049c2a84dd7ccbcb7d90ac1f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 6 Feb 2020 13:07:52 +0100 Subject: [PATCH 10/25] do join_all (#4832) --- client/transaction-pool/graph/src/pool.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index 815b5871eab99..91ce58518a0a0 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -378,8 +378,13 @@ impl Pool { let block_number = self.resolve_block_number(at)?; let mut result = HashMap::new(); - for xt in xts { - let (hash, validated_tx) = self.verify_one(at, block_number, xt, force).await; + for (hash, validated_tx) in + futures::future::join_all( + xts.into_iter() + .map(|xt| self.verify_one(at, block_number, xt, force)) + ) + .await + { result.insert(hash, validated_tx); } From 3fc91c76b1ed62a8db9d6b78910e8b7d7c928911 Mon Sep 17 00:00:00 2001 From: Jimmy Chu Date: Thu, 6 Feb 2020 20:13:44 +0800 Subject: [PATCH 11/25] Node template folders restructuring (#4811) * Restructure node-template so it is clear that node, runtime, and pallets are separated * Separating to mock and tests * restructuring runtime to top-level * updated release script * updated Cargo.lock --- .maintain/node-template-release/src/main.rs | 16 +++- Cargo.lock | 14 +++ Cargo.toml | 3 +- bin/node-template/Cargo.toml | 37 -------- bin/node-template/node/Cargo.toml | 38 ++++++++ bin/node-template/{ => node}/build.rs | 0 .../{ => node}/src/chain_spec.rs | 0 bin/node-template/{ => node}/src/cli.rs | 0 bin/node-template/{ => node}/src/command.rs | 0 bin/node-template/{ => node}/src/main.rs | 0 bin/node-template/{ => node}/src/service.rs | 0 bin/node-template/pallets/template/Cargo.toml | 44 ++++++++++ .../template/src/lib.rs} | 88 ++----------------- .../pallets/template/src/mock.rs | 52 +++++++++++ .../pallets/template/src/tests.rs | 26 ++++++ bin/node-template/runtime/Cargo.toml | 7 +- bin/node-template/runtime/src/lib.rs | 6 +- 17 files changed, 205 insertions(+), 126 deletions(-) delete mode 100644 bin/node-template/Cargo.toml create mode 100644 bin/node-template/node/Cargo.toml rename bin/node-template/{ => node}/build.rs (100%) rename bin/node-template/{ => node}/src/chain_spec.rs (100%) rename bin/node-template/{ => node}/src/cli.rs (100%) rename bin/node-template/{ => node}/src/command.rs (100%) rename bin/node-template/{ => node}/src/main.rs (100%) rename bin/node-template/{ => node}/src/service.rs (100%) create mode 100644 bin/node-template/pallets/template/Cargo.toml rename bin/node-template/{runtime/src/template.rs => pallets/template/src/lib.rs} (59%) create mode 100644 bin/node-template/pallets/template/src/mock.rs create mode 100644 bin/node-template/pallets/template/src/tests.rs diff --git 
a/.maintain/node-template-release/src/main.rs b/.maintain/node-template-release/src/main.rs index db42b155e65e7..a1d85bf33fe33 100644 --- a/.maintain/node-template-release/src/main.rs +++ b/.maintain/node-template-release/src/main.rs @@ -1,7 +1,7 @@ use structopt::StructOpt; use std::{ - path::{PathBuf, Path}, collections::HashMap, fs::{File, self}, io::{Read, Write}, + path::{PathBuf, Path}, collections::HashMap, fs::{File, OpenOptions, self}, io::{Read, Write}, process::Command }; @@ -88,7 +88,7 @@ fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, c // remove `Cargo.toml` cargo_toml_path.pop(); - for &table in &["dependencies", "build-dependencies"] { + for &table in &["dependencies", "build-dependencies", "dev-dependencies"] { let mut dependencies: toml::value::Table = match cargo_toml .remove(table) .and_then(|v| v.try_into().ok()) { @@ -212,11 +212,21 @@ fn main() { let node_template_path = build_dir.path().join(node_template_folder); copy_node_template(&options.node_template, build_dir.path()); - let cargo_tomls = find_cargo_tomls(build_dir.path().to_owned()); + let mut cargo_tomls = find_cargo_tomls(build_dir.path().to_owned()); let commit_id = get_git_commit_id(&options.node_template); let top_level_cargo_toml_path = node_template_path.join("Cargo.toml"); + // Check if top level Cargo.toml exists. If not, create one in the destination + if !cargo_tomls.contains(&top_level_cargo_toml_path) { + // create the top_level_cargo_toml + OpenOptions::new().create(true).write(true).open(top_level_cargo_toml_path.clone()) + .expect("Create root level `Cargo.toml` failed."); + + // push into our data structure + cargo_tomls.push(PathBuf::from(top_level_cargo_toml_path.clone())); + } + cargo_tomls.iter().for_each(|t| { let mut cargo_toml = parse_cargo_toml(&t); replace_path_dependencies_with_git(&t, &commit_id, &mut cargo_toml); diff --git a/Cargo.lock b/Cargo.lock index ed976689870d0..f521817b2cab6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3554,6 +3554,7 @@ dependencies = [ "pallet-indices", "pallet-randomness-collective-flip", "pallet-sudo", + "pallet-template", "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", @@ -4337,6 +4338,19 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-template" +version = "2.0.0" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "safe-mix", + "sp-core", + "sp-io", + "sp-runtime", +] + [[package]] name = "pallet-timestamp" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 0bd6b4c0b0db3..552f1eadc7ca9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,8 @@ [workspace] members = [ - "bin/node-template", + "bin/node-template/node", "bin/node-template/runtime", + "bin/node-template/pallets/template", "bin/node/cli", "bin/node/executor", "bin/node/primitives", diff --git a/bin/node-template/Cargo.toml b/bin/node-template/Cargo.toml deleted file mode 100644 index 7001099b0b1ca..0000000000000 --- a/bin/node-template/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "node-template" -version = "2.0.0" -authors = ["Anonymous"] -build = "build.rs" -edition = "2018" -license = "Unlicense" - -[[bin]] -name = "node-template" -path = "src/main.rs" - -[dependencies] -futures = "0.3.1" -log = "0.4.8" -sc-cli = { version = "0.8.0", path = "../../client/cli" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-executor = { version = "0.8", path = "../../client/executor" } -sc-service = { version = "0.8", path = "../../client/service" } -sp-inherents 
= { version = "2.0.0", path = "../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-network = { version = "0.8", path = "../../client/network" } -sc-consensus-aura = { version = "0.8", path = "../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8", path = "../../primitives/consensus/aura" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../client/finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -sc-client = { version = "0.8", path = "../../client/" } -node-template-runtime = { version = "2.0.0", path = "runtime" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-basic-authorship = { path = "../../client/basic-authorship" } -structopt = "0.3.8" - -[build-dependencies] -vergen = "3.0.4" -build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../utils/build-script-utils" } diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml new file mode 100644 index 0000000000000..9ad4a0e8a55ad --- /dev/null +++ b/bin/node-template/node/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "node-template" +version = "2.0.0" +authors = ["Anonymous"] +edition = "2018" +license = "Unlicense" +build = "build.rs" + +[[bin]] +name = "node-template" + +[dependencies] +futures = "0.3.1" +log = "0.4.8" +structopt = "0.3.8" + +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.8", path = "../../../client/executor" } +sc-service = { version = "0.8", path = "../../../client/service" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8", path = "../../../client/network" } +sc-consensus-aura = { version = "0.8", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sc-client = { version = "0.8", path = "../../../client/" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-basic-authorship = { path = "../../../client/basic-authorship" } + +node-template-runtime = { version = "2.0.0", path = "../runtime" } + +[build-dependencies] +vergen = "3.0.4" +build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/build.rs b/bin/node-template/node/build.rs similarity index 100% rename from bin/node-template/build.rs rename to bin/node-template/node/build.rs diff --git a/bin/node-template/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs similarity index 100% rename from bin/node-template/src/chain_spec.rs rename to 
bin/node-template/node/src/chain_spec.rs diff --git a/bin/node-template/src/cli.rs b/bin/node-template/node/src/cli.rs similarity index 100% rename from bin/node-template/src/cli.rs rename to bin/node-template/node/src/cli.rs diff --git a/bin/node-template/src/command.rs b/bin/node-template/node/src/command.rs similarity index 100% rename from bin/node-template/src/command.rs rename to bin/node-template/node/src/command.rs diff --git a/bin/node-template/src/main.rs b/bin/node-template/node/src/main.rs similarity index 100% rename from bin/node-template/src/main.rs rename to bin/node-template/node/src/main.rs diff --git a/bin/node-template/src/service.rs b/bin/node-template/node/src/service.rs similarity index 100% rename from bin/node-template/src/service.rs rename to bin/node-template/node/src/service.rs diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml new file mode 100644 index 0000000000000..8ea3f3adabc52 --- /dev/null +++ b/bin/node-template/pallets/template/Cargo.toml @@ -0,0 +1,44 @@ +[package] +authors = ['Anonymous'] +edition = '2018' +name = 'pallet-template' +version = '2.0.0' + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +safe-mix = { default-features = false, version = '1.0.0' } + +[dependencies.frame-support] +default-features = false +version = '2.0.0' +path = "../../../../frame/support" + +[dependencies.system] +default-features = false +package = 'frame-system' +version = '2.0.0' +path = "../../../../frame/system" + +[dev-dependencies.sp-core] +default-features = false +version = '2.0.0' +path = "../../../../primitives/core" + +[dev-dependencies.sp-io] +default-features = false +version = '2.0.0' +path = "../../../../primitives/io" + +[dev-dependencies.sp-runtime] +default-features = false +version = '2.0.0' +path = "../../../../primitives/runtime" + +[features] +default = ['std'] +std = [ + 'codec/std', + 'frame-support/std', + 'safe-mix/std', + 'system/std' +] diff --git a/bin/node-template/runtime/src/template.rs b/bin/node-template/pallets/template/src/lib.rs similarity index 59% rename from bin/node-template/runtime/src/template.rs rename to bin/node-template/pallets/template/src/lib.rs index 4ed8066578633..a1615b4c1f0bc 100644 --- a/bin/node-template/runtime/src/template.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -1,16 +1,23 @@ +#![cfg_attr(not(feature = "std"), no_std)] + /// A runtime module template with necessary imports /// Feel free to remove or edit this file as needed. /// If you change the name of this file, make sure to update its references in runtime/src/lib.rs /// If you remove this file, you can remove those references - /// For more guidance on Substrate modules, see the example module /// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch}; use system::ensure_signed; +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + /// The pallet's configuration trait. pub trait Trait: system::Trait { // Add other types and constants required to configure this pallet. @@ -95,82 +102,3 @@ decl_module! 
{ } } } - - -/// Tests for this pallet -#[cfg(test)] -mod tests { - use super::*; - - use sp_core::H256; - use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, weights::Weight}; - use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, - }; - - impl_outer_origin! { - pub enum Origin for Test {} - } - - // For testing the module, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of modules we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - } - impl system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - } - impl Trait for Test { - type Event = (); - } - type TemplateModule = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() - } - - #[test] - fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - // Just a dummy test for the dummy funtion `do_something` - // calling the `do_something` function with a value 42 - assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); - // asserting that the stored value is equal to what we stored - assert_eq!(TemplateModule::something(), Some(42)); - }); - } - - #[test] - fn correct_error_for_none_value() { - new_test_ext().execute_with(|| { - // Ensure the correct error is thrown on None value - assert_noop!( - TemplateModule::cause_error(Origin::signed(1)), - Error::::NoneValue - ); - }); - } -} diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs new file mode 100644 index 0000000000000..2cbfc89d5b3ae --- /dev/null +++ b/bin/node-template/pallets/template/src/mock.rs @@ -0,0 +1,52 @@ +// Creating mock runtime here + +use crate::{Module, Trait}; +use sp_core::H256; +use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, +}; + +impl_outer_origin! { + pub enum Origin for Test {} +} + +// For testing the module, we construct most of a mock runtime. This means +// first constructing a configuration type (`Test`) which `impl`s each of the +// configuration traits of modules we want to use. +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); +} +impl system::Trait for Test { + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); +} +impl Trait for Test { + type Event = (); +} +pub type TemplateModule = Module; + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + system::GenesisConfig::default().build_storage::().unwrap().into() +} diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs new file mode 100644 index 0000000000000..44a423c948fbf --- /dev/null +++ b/bin/node-template/pallets/template/src/tests.rs @@ -0,0 +1,26 @@ +// Tests to be written here + +use crate::{Error, mock::*}; +use frame_support::{assert_ok, assert_noop}; + +#[test] +fn it_works_for_default_value() { + new_test_ext().execute_with(|| { + // Just a dummy test for the dummy funtion `do_something` + // calling the `do_something` function with a value 42 + assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); + // asserting that the stored value is equal to what we stored + assert_eq!(TemplateModule::something(), Some(42)); + }); +} + +#[test] +fn correct_error_for_none_value() { + new_test_ext().execute_with(|| { + // Ensure the correct error is thrown on None value + assert_noop!( + TemplateModule::cause_error(Origin::signed(1)), + Error::::NoneValue + ); + }); +} diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 299e78996a8cf..ddecb0e4cff4c 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -6,6 +6,8 @@ edition = "2018" license = "Unlicense" [dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } + aura = { version = "2.0.0", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } @@ -16,8 +18,6 @@ sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", p system = { version = "2.0.0", default-features = false, package = "frame-system", path = "../../../frame/system" } timestamp = { version = "2.0.0", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } transaction-payment = { version = "2.0.0", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } - -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = 
"2.0.0", default-features = false, path = "../../../primitives/api" } @@ -33,6 +33,8 @@ sp-std = { version = "2.0.0", default-features = false, path = "../../../primiti sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } + [build-dependencies] wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -64,4 +66,5 @@ std = [ "system/std", "timestamp/std", "transaction-payment/std", + "template/std", ] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index ea4535e26d94f..a863ec40a70a9 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -37,6 +37,9 @@ pub use frame_support::{ weights::Weight, }; +/// Importing a template pallet +pub use template; + /// An index to a block. pub type BlockNumber = u32; @@ -63,9 +66,6 @@ pub type Hash = sp_core::H256; /// Digest item type. pub type DigestItem = generic::DigestItem; -/// Used for the module template in `./template.rs` -mod template; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades From 666b1c9f86258c48d65efb50f1e86ac25b25332e Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 6 Feb 2020 14:06:53 +0100 Subject: [PATCH 12/25] Fixed a few warnings (#4841) --- client/cli/src/node_key.rs | 3 ++- client/network/src/behaviour.rs | 2 +- frame/authority-discovery/src/lib.rs | 1 - frame/executive/src/lib.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/client/cli/src/node_key.rs b/client/cli/src/node_key.rs index 88102acc63676..4401481ca56ce 100644 --- a/client/cli/src/node_key.rs +++ b/client/cli/src/node_key.rs @@ -91,9 +91,10 @@ where net_config_dir.as_ref().map(|d| d.as_ref().join(name)) } +#[cfg(test)] mod tests { - use super::*; use sc_network::config::identity::ed25519; + use super::*; #[test] fn tests_node_name_good() { diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index c31fd84eff951..8b903cec351f5 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -25,7 +25,7 @@ use libp2p::core::{Multiaddr, PeerId, PublicKey}; use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}; use libp2p::core::{nodes::Substream, muxing::StreamMuxerBox}; -use log::{debug, warn}; +use log::debug; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justification}; use std::{iter, task::Context, task::Poll}; diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index c427043397075..22ea3d3bbafbb 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -103,7 +103,6 @@ mod tests { use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; type AuthorityDiscovery = Module; - type SessionIndex = u32; #[derive(Clone, Eq, PartialEq)] pub struct Test; diff --git a/frame/executive/src/lib.rs 
b/frame/executive/src/lib.rs index 936da70211c42..93db1418561ec 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -357,7 +357,7 @@ mod tests { use sp_core::H256; use sp_runtime::{ generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, - traits::{Bounded, Header as HeaderT, BlakeTwo256, IdentityLookup, ConvertInto}, + traits::{Header as HeaderT, BlakeTwo256, IdentityLookup, ConvertInto}, transaction_validity::{InvalidTransaction, UnknownTransaction, TransactionValidityError}, }; use frame_support::{ From 7d83cabf678a0cd51d017b5dcf59d1a5de376a13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 6 Feb 2020 14:26:41 +0100 Subject: [PATCH 13/25] Fix memory leak in runtime interface (#4837) * Fix memory leak in runtime interface We used `slice::from_raw_parts` in runtime-interface which did not free the memory afterwards. This pr changes it to `Vec::from_raw_parts` to make sure `drop` is called properly and the values are freed. * Check that `len` is non-zero * Adds comment --- primitives/runtime-interface/src/impls.rs | 19 +++++----- primitives/runtime-interface/src/pass_by.rs | 11 ++++-- .../runtime-interface/test-wasm/src/lib.rs | 35 +++++++++++++++++++ primitives/runtime-interface/test/src/lib.rs | 15 ++++++++ 4 files changed, 68 insertions(+), 12 deletions(-) diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 35bd96bd05e4e..084b5e11eb3b1 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -38,9 +38,6 @@ use sp_std::{any::TypeId, mem, vec::Vec}; #[cfg(feature = "std")] use sp_std::borrow::Cow; -#[cfg(not(feature = "std"))] -use sp_std::{slice, boxed::Box}; - // Make sure that our assumptions for storing a pointer + its size in `u64` is valid. #[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] assert_eq_size!(usize, u32); @@ -196,11 +193,16 @@ impl FromFFIValue for Vec { let (ptr, len) = unpack_ptr_and_len(arg); let len = len as usize; + if len == 0 { + return Vec::new(); + } + + let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; + if TypeId::of::() == TypeId::of::() { - unsafe { mem::transmute(Vec::from_raw_parts(ptr as *mut u8, len, len)) } + unsafe { mem::transmute(data) } } else { - let slice = unsafe { slice::from_raw_parts(ptr as *const u8, len) }; - Self::decode(&mut &slice[..]).expect("Host to wasm values are encoded correctly; qed") + Self::decode(&mut &data[..]).expect("Host to wasm values are encoded correctly; qed") } } } @@ -302,10 +304,9 @@ macro_rules! impl_traits_for_arrays { impl FromFFIValue for [u8; $n] { fn from_ffi_value(arg: u32) -> [u8; $n] { let mut res = [0u8; $n]; - res.copy_from_slice(unsafe { slice::from_raw_parts(arg as *const u8, $n) }); + let data = unsafe { Vec::from_raw_parts(arg as *mut u8, $n, $n) }; - // Make sure we free the pointer. 
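To make the ownership difference behind this leak fix concrete, here is a small self-contained sketch (illustrative only, not the actual host/wasm code path): a `slice::from_raw_parts` view only borrows memory that was handed over as raw parts, so nothing ever frees it, while rebuilding an owning `Vec` lets `drop` return the allocation. `Box::into_raw` stands in for the real FFI hand-off.

```rust
// Minimal sketch of the leak being fixed in this commit (illustrative only).
// `into_boxed_slice()` guarantees capacity == length, matching the
// `Vec::from_raw_parts(ptr, len, len)` reconstruction used in the patch.
fn main() {
    let boxed: Box<[u8]> = vec![1u8, 2, 3, 4].into_boxed_slice();
    let len = boxed.len();
    let ptr = Box::into_raw(boxed) as *mut u8;

    // A `slice::from_raw_parts(ptr, len)` view here would only *borrow* the
    // allocation: no destructor ever runs and the bytes leak.
    //
    // Rebuilding an owning `Vec` instead means `drop` runs at the end of
    // scope and the memory is returned to the allocator.
    let owned: Vec<u8> = unsafe { Vec::from_raw_parts(ptr, len, len) };
    assert_eq!(&owned[..], &[1, 2, 3, 4]);
}
```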
- let _ = unsafe { Box::from_raw(arg as *mut u8) }; + res.copy_from_slice(&data); res } diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs index 597a0284eee2a..d6767b5ebbe93 100644 --- a/primitives/runtime-interface/src/pass_by.rs +++ b/primitives/runtime-interface/src/pass_by.rs @@ -32,7 +32,7 @@ use sp_wasm_interface::{FunctionContext, Pointer, Result}; use sp_std::{marker::PhantomData, convert::TryFrom}; #[cfg(not(feature = "std"))] -use sp_std::{slice, vec::Vec}; +use sp_std::vec::Vec; /// Derive macro for implementing [`PassBy`] with the [`Codec`] strategy. /// @@ -255,8 +255,13 @@ impl PassByImpl for Codec { let (ptr, len) = unpack_ptr_and_len(arg); let len = len as usize; - let slice = unsafe { slice::from_raw_parts(ptr as *const u8, len) }; - T::decode(&mut &slice[..]).expect("Host to wasm values are encoded correctly; qed") + let encoded = if len == 0 { + Vec::new() + } else { + unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) } + }; + + T::decode(&mut &encoded[..]).expect("Host to wasm values are encoded correctly; qed") } } diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 67fbfdcfec602..c6e2c9909f200 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -39,6 +39,17 @@ pub trait TestApi { data } + /// Returns 16kb data. + /// + /// # Note + /// + /// We return a `Vec` because this will use the code path that uses SCALE + /// to pass the data between native/wasm. (Vec is passed without encoding the + /// data) + fn return_16kb() -> Vec { + vec![0; 4 * 1024] + } + /// Set the storage at key with value. fn set_storage(&mut self, key: &[u8], data: &[u8]) { self.place_storage(key.to_vec(), Some(data.to_vec())); @@ -211,4 +222,28 @@ wasm_export_functions! { assert_eq!(*val, test_api::get_and_return_i128(*val)); } } + + fn test_vec_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 { + len += test_api::return_16kb().len(); + } + assert_eq!(1024 * 1024 * 4, len); + } + + fn test_encoded_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 { + len += test_api::return_option_input(vec![0; 16 * 1024]).map(|v| v.len()).unwrap(); + } + assert_eq!(1024 * 1024 * 16, len); + } + + fn test_array_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 * 1024 { + len += test_api::get_and_return_array([0; 34])[1]; + } + assert_eq!(0, len); + } } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 48c120b2c9fd1..559a4281e09f2 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -113,3 +113,18 @@ fn test_overwrite_native_function_implementation() { fn test_u128_i128_as_parameter_and_return_value() { call_wasm_method::("test_u128_i128_as_parameter_and_return_value"); } + +#[test] +fn test_vec_return_value_memory_is_freed() { + call_wasm_method::("test_vec_return_value_memory_is_freed"); +} + +#[test] +fn test_encoded_return_value_memory_is_freed() { + call_wasm_method::("test_encoded_return_value_memory_is_freed"); +} + +#[test] +fn test_array_return_value_memory_is_freed() { + call_wasm_method::("test_array_return_value_memory_is_freed"); +} From faf608eeeda0031629f7608321c7ab41fac6024f Mon Sep 17 00:00:00 2001 From: Marcio Diaz Date: Thu, 6 Feb 2020 14:48:19 +0100 Subject: [PATCH 14/25] Fix broken factory by adding keystore back. 
(#4840) --- bin/node/cli/src/command.rs | 2 ++ client/cli/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 3395717f2f7ed..7a22710ec1699 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -50,6 +50,8 @@ where cli_args.shared_params.dev, )?; + sc_cli::fill_config_keystore_in_memory(&mut config)?; + match ChainSpec::from(config.expect_chain_spec().id()) { Some(ref c) if c == &ChainSpec::Development || c == &ChainSpec::LocalTestnet => {}, _ => panic!("Factory is only supported for development and local testnet."), diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c602d52ed9ea5..785a6fb073bc9 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -445,7 +445,7 @@ fn input_keystore_password() -> Result { } /// Use in memory keystore config when it is not required at all. -fn fill_config_keystore_in_memory(config: &mut sc_service::Configuration) +pub fn fill_config_keystore_in_memory(config: &mut sc_service::Configuration) -> Result<(), String> { match &mut config.keystore { From 099cd0f2ba2a041f087978ef9d53473d0a6d0aee Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Thu, 6 Feb 2020 15:46:49 +0100 Subject: [PATCH 15/25] CLI improvements & fixes (#4812) These are a few changes I missed during the refactoring. 1. Initialization issue and boilerplate Most importantly: part of the `Configuration` initialization was done in `sc_cli::init`. This means the user cannot benefit from this initialization boilerplate if they have multiple `Configuration` since `sc_cli::init` can only be called once. 2. Boilerplate for `VersionInfo` and `Configuration` I'm also answering the criticism from @bkchr on the initialization using version: https://github.com/paritytech/substrate/pull/4692/files/bea809d4c14a2ede953227ac885e3b3f9771c548#r372047238 This will allow initializing a `Configuration` and provide the version by default. 3. Loading the `chain_spec` explicitly In the past it was done automatically but in some cases we want to delay this. I moved the code to `Configuration.load_spec()` so it can be called later on. `chain_spec` can also be written directly to the `Configuration` without using this `load_spec` helper. 4. [deleted] 5. Fixing an issue that prevented the user from overriding the port In the refactoring I introduced a bug by mistake that could potentially prevent the CLI user from overriding the ports if defaults were provided for these ports (only on cumulus). 6. Change task_executor from Box to Arc This is useful for cumulus where we have 2 nodes with 2 separate Configuration that need to spawn tasks to the same runtime (see the sketch right after this list). 7. Renamed TasksExecutorRequired to TaskExecutor For consistency.
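A minimal sketch of point 6, assuming a Tokio runtime and made-up "node A"/"node B" names; it only illustrates why an `Arc`'d spawner closure can be cloned into several configurations, and is not the actual service code:

```rust
use std::{future::Future, pin::Pin, sync::Arc};

// The shape used in the patch: a shared, cloneable task spawner.
type TaskExecutor = Arc<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>;

fn main() {
    // Illustrative only: any runtime with a cloneable spawn handle would do.
    let runtime = tokio::runtime::Runtime::new().expect("failed to create runtime");
    let handle = runtime.handle().clone();

    let task_executor: TaskExecutor = Arc::new(move |fut| {
        handle.spawn(fut);
    });

    // Two hypothetical node configurations sharing the same executor; a `Box`
    // could only be moved into one of them.
    let exec_for_node_a = task_executor.clone();
    let exec_for_node_b = task_executor.clone();

    exec_for_node_a(Box::pin(async { println!("task spawned by node A") }));
    exec_for_node_b(Box::pin(async { println!("task spawned by node B") }));

    // Give the fire-and-forget tasks a moment to run before the runtime drops.
    std::thread::sleep(std::time::Duration::from_millis(100));
}
```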
This is related to https://github.com/paritytech/cumulus/issues/24 This is the continuation (and hopefully the end of) #4692 --- .gitlab-ci.yml | 9 - .maintain/check_for_exit.sh | 16 -- Cargo.lock | 74 ++++++- bin/node-template/node/src/command.rs | 3 +- bin/node/cli/Cargo.toml | 2 + bin/node/cli/src/command.rs | 7 +- .../tests/running_the_node_and_interrupt.rs | 58 ++++++ client/cli/src/lib.rs | 193 +++++++++++------- client/cli/src/params.rs | 5 +- client/cli/src/runtime.rs | 6 +- client/service/src/builder.rs | 6 +- client/service/src/config.rs | 56 ++++- client/service/src/error.rs | 2 +- client/service/src/lib.rs | 2 +- client/service/test/src/lib.rs | 12 +- utils/browser/src/lib.rs | 6 +- 16 files changed, 321 insertions(+), 136 deletions(-) delete mode 100755 .maintain/check_for_exit.sh create mode 100644 bin/node/cli/tests/running_the_node_and_interrupt.rs diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 828c830948a13..ced3e33eaabaa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -235,15 +235,6 @@ check-web-wasm: - time cargo build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features "browser" --target=wasm32-unknown-unknown - sccache -s -node-exits: - stage: test - <<: *docker-env - except: - - /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - script: - - ./.maintain/check_for_exit.sh - - test-full-crypto-feature: stage: test <<: *docker-env diff --git a/.maintain/check_for_exit.sh b/.maintain/check_for_exit.sh deleted file mode 100755 index edc2130e57113..0000000000000 --- a/.maintain/check_for_exit.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -# Script that checks that a node exits after `SIGINT` was send. - -set -e - -cargo build -./target/debug/substrate --dev & -PID=$! - -# Let the chain running for 60 seconds -sleep 60 - -# Send `SIGINT` and give the process 30 seconds to end -kill -INT $PID -timeout 30 tail --pid=$PID -f /dev/null diff --git a/Cargo.lock b/Cargo.lock index f521817b2cab6..3f94778b8430c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -150,6 +150,19 @@ dependencies = [ "syn", ] +[[package]] +name = "assert_cmd" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6283bac8dd7226470d491bc4737816fea4ca1fba7a2847f2e9097fd6bfb4624c" +dependencies = [ + "doc-comment", + "escargot", + "predicates", + "predicates-core", + "predicates-tree", +] + [[package]] name = "assert_matches" version = "1.3.0" @@ -1227,6 +1240,18 @@ dependencies = [ "libc", ] +[[package]] +name = "escargot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74cf96bec282dcdb07099f7e31d9fed323bca9435a09aba7b6d99b7617bca96d" +dependencies = [ + "lazy_static", + "log 0.4.8", + "serde", + "serde_json", +] + [[package]] name = "evm" version = "0.14.2" @@ -3318,10 +3343,24 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "nix" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" +dependencies = [ + "bitflags", + "cc", + "cfg-if", + "libc", + "void", +] + [[package]] name = "node-cli" version = "2.0.0" dependencies = [ + "assert_cmd", "browser-utils", "frame-support", "frame-system", @@ -3329,6 +3368,7 @@ dependencies = [ "hex-literal", "jsonrpc-core", "log 0.4.8", + "nix", "node-executor", "node-primitives", "node-rpc", @@ -4816,6 +4856,32 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" +[[package]] +name = "predicates" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9bfe52247e5cc9b2f943682a85a5549fb9662245caf094504e69a2f03fe64d4" +dependencies = [ + "difference", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06075c3a3e92559ff8929e7a280684489ea27fe44805174c3ebd9328dcb37178" + +[[package]] +name = "predicates-tree" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e63c4859013b38a76eca2414c64911fba30def9e3202ac461a2d22831220124" +dependencies = [ + "predicates-core", + "treeline", +] + [[package]] name = "pretty_assertions" version = "0.6.1" @@ -8118,6 +8184,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" +[[package]] +name = "treeline" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" + [[package]] name = "trie-bench" version = "0.19.0" @@ -8204,7 +8276,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" dependencies = [ - "rand 0.3.23", + "rand 0.7.3", ] [[package]] diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 86058929b0871..585b8e1ca8eb9 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -25,8 +25,7 @@ pub fn run(version: VersionInfo) -> error::Result<()> { let opt = sc_cli::from_args::(&version); - let mut config = sc_service::Configuration::default(); - config.impl_name = "node-template"; + let config = sc_service::Configuration::new(&version); match opt.subcommand { Some(subcommand) => sc_cli::run_subcommand( diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index ef6e90f91a943..cf666ffdc518e 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -95,6 +95,8 @@ sc-consensus-babe = { version = "0.8", features = ["test-helpers"], path = "../. 
sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } futures = "0.3.1" tempfile = "3.1.0" +assert_cmd = "0.12" +nix = "0.17" [build-dependencies] build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 7a22710ec1699..eb18d6d8b33d1 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -28,8 +28,7 @@ where let args: Vec<_> = args.collect(); let opt = sc_cli::from_iter::(args.clone(), &version); - let mut config = sc_service::Configuration::default(); - config.impl_name = "substrate-node"; + let mut config = sc_service::Configuration::new(&version); match opt.subcommand { None => sc_cli::run( @@ -41,8 +40,8 @@ where &version, ), Some(Subcommand::Factory(cli_args)) => { - sc_cli::init(&mut config, load_spec, &cli_args.shared_params, &version)?; - + sc_cli::init(&cli_args.shared_params, &version)?; + sc_cli::load_spec(&mut config, &cli_args.shared_params, load_spec)?; sc_cli::fill_import_params( &mut config, &cli_args.import_params, diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs new file mode 100644 index 0000000000000..6b0d6963966d3 --- /dev/null +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -0,0 +1,58 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use assert_cmd::cargo::cargo_bin; +use std::convert::TryInto; +use std::process::{Child, Command, ExitStatus}; +use std::thread::sleep; +use std::time::Duration; + +#[test] +#[cfg(unix)] +fn running_the_node_works_and_can_be_interrupted() { + use nix::sys::signal::{kill, Signal::{self, SIGINT, SIGTERM}}; + use nix::unistd::Pid; + + fn wait_for(child: &mut Child, secs: usize) -> Option { + for _ in 0..secs { + match child.try_wait().unwrap() { + Some(status) => return Some(status), + None => sleep(Duration::from_secs(1)), + } + } + eprintln!("Took to long to exit. 
Killing..."); + let _ = child.kill(); + child.wait().unwrap(); + + None + } + + fn run_command_and_kill(signal: Signal) { + let mut cmd = Command::new(cargo_bin("substrate")).spawn().unwrap(); + sleep(Duration::from_secs(30)); + assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); + assert_eq!( + wait_for(&mut cmd, 30).map(|x| x.success()), + Some(true), + "the pocess must exit gracefully after signal {}", + signal, + ); + } + + run_command_and_kill(SIGINT); + run_command_and_kill(SIGTERM); +} diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 785a6fb073bc9..7f726893368a0 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -35,6 +35,7 @@ use sc_service::{ RuntimeGenesis, ChainSpecExtension, PruningMode, ChainSpec, AbstractService, Roles as ServiceRoles, }; +pub use sc_service::config::VersionInfo; use sc_network::{ self, multiaddr::Protocol, @@ -74,32 +75,11 @@ const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network"; /// default sub directory to store database const DEFAULT_DB_CONFIG_PATH : &'static str = "db"; /// default sub directory for the key store -const DEFAULT_KEYSTORE_CONFIG_PATH : &'static str = "keystore"; +const DEFAULT_KEYSTORE_CONFIG_PATH : &'static str = "keystore"; /// The maximum number of characters for a node name. const NODE_NAME_MAX_LENGTH: usize = 32; -/// Executable version. Used to pass version information from the root crate. -#[derive(Clone)] -pub struct VersionInfo { - /// Implementaiton name. - pub name: &'static str, - /// Implementation version. - pub version: &'static str, - /// SCM Commit hash. - pub commit: &'static str, - /// Executable file name. - pub executable_name: &'static str, - /// Executable file description. - pub description: &'static str, - /// Executable file author. - pub author: &'static str, - /// Support URL. - pub support_url: &'static str, - /// Copyright starting year (x-current year) - pub copyright_start_year: i32, -} - fn get_chain_key(cli: &SharedParams) -> String { match cli.chain { Some(ref chain) => chain.clone(), @@ -120,8 +100,12 @@ fn generate_node_name() -> String { result } -/// Load spec give shared params and spec factory. -pub fn load_spec(cli: &SharedParams, factory: F) -> error::Result> where +/// Load spec to `Configuration` from shared params and spec factory. +pub fn load_spec<'a, G, E, F>( + mut config: &'a mut Configuration, + cli: &SharedParams, + factory: F, +) -> error::Result<&'a ChainSpec> where G: RuntimeGenesis, E: ChainSpecExtension, F: FnOnce(&str) -> Result>, String>, @@ -131,7 +115,13 @@ pub fn load_spec(cli: &SharedParams, factory: F) -> error::Result spec, None => ChainSpec::from_json_file(PathBuf::from(chain_key))? 
}; - Ok(spec) + + config.network.boot_nodes = spec.boot_nodes().to_vec(); + config.telemetry_endpoints = spec.telemetry_endpoints().clone(); + + config.chain_spec = Some(spec); + + Ok(config.chain_spec.as_ref().unwrap()) } fn base_path(cli: &SharedParams, version: &VersionInfo) -> PathBuf { @@ -243,8 +233,8 @@ where SL: AbstractService + Unpin, SF: AbstractService + Unpin, { - init(&mut config, spec_factory, &run_cmd.shared_params, version)?; - + init(&run_cmd.shared_params, version)?; + load_spec(&mut config, &run_cmd.shared_params, spec_factory)?; run_cmd.run(config, new_light, new_full, version) } @@ -266,30 +256,21 @@ where <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, ::Hash: std::str::FromStr, { - init(&mut config, spec_factory, &subcommand.get_shared_params(), version)?; + let shared_params = subcommand.get_shared_params(); + init(shared_params, version)?; + load_spec(&mut config, shared_params, spec_factory)?; subcommand.run(config, builder) } -/// Initialize substrate and its configuration +/// Initialize substrate. This must be done only once. /// /// This method: /// /// 1. set the panic handler /// 2. raise the FD limit /// 3. initialize the logger -/// 4. update the configuration provided with the chain specification, config directory, -/// information (version, commit), database's path, boot nodes and telemetry endpoints -pub fn init( - mut config: &mut Configuration, - spec_factory: F, - shared_params: &SharedParams, - version: &VersionInfo, -) -> error::Result<()> -where - G: RuntimeGenesis, - E: ChainSpecExtension, - F: FnOnce(&str) -> Result>, String>, +pub fn init(shared_params: &SharedParams, version: &VersionInfo) -> error::Result<()> { let full_version = sc_service::config::full_version_from_strs( version.version, @@ -300,21 +281,6 @@ where fdlimit::raise_fd_limit(); init_logger(shared_params.log.as_ref().map(|v| v.as_ref()).unwrap_or("")); - config.chain_spec = Some(load_spec(shared_params, spec_factory)?); - config.config_dir = Some(base_path(shared_params, version)); - config.impl_commit = version.commit; - config.impl_version = version.version; - - config.database = DatabaseConfig::Path { - path: config - .in_chain_config_dir(DEFAULT_DB_CONFIG_PATH) - .expect("We provided a base_path/config_dir."), - cache_size: None, - }; - - config.network.boot_nodes = config.expect_chain_spec().boot_nodes().to_vec(); - config.telemetry_endpoints = config.expect_chain_spec().telemetry_endpoints().clone(); - Ok(()) } @@ -419,8 +385,6 @@ fn fill_network_configuration( ]; } - config.public_addresses = Vec::new(); - config.client_version = client_id; config.node_key = node_key::node_key_config(cli.node_key_params, &config.net_config_path)?; @@ -496,10 +460,8 @@ pub fn fill_import_params( where G: RuntimeGenesis, { - match config.database { - DatabaseConfig::Path { ref mut cache_size, .. } => - *cache_size = Some(cli.database_cache_size), - DatabaseConfig::Custom(_) => {}, + if let Some(DatabaseConfig::Path { ref mut cache_size, .. 
}) = config.database { + *cache_size = Some(cli.database_cache_size); } config.state_cache_size = cli.state_cache_size; @@ -549,14 +511,30 @@ where Ok(()) } -/// Update and prepare a `Configuration` with command line parameters of `RunCmd` +/// Update and prepare a `Configuration` with command line parameters of `RunCmd` and `VersionInfo` pub fn update_config_for_running_node( mut config: &mut Configuration, cli: RunCmd, + version: &VersionInfo, ) -> error::Result<()> where G: RuntimeGenesis, { + if config.config_dir.is_none() { + config.config_dir = Some(base_path(&cli.shared_params, version)); + } + + if config.database.is_none() { + // NOTE: the loading of the DatabaseConfig is voluntarily delayed to here + // in case config.config_dir has been customized + config.database = Some(DatabaseConfig::Path { + path: config + .in_chain_config_dir(DEFAULT_DB_CONFIG_PATH) + .expect("We provided a base_path/config_dir."), + cache_size: None, + }); + } + fill_config_keystore_password_and_path(&mut config, &cli)?; let keyring = cli.get_keyring(); @@ -580,16 +558,13 @@ where (_, Some(keyring)) => keyring.to_string(), (None, None) => generate_node_name(), }; - match node_key::is_node_name_valid(&config.name) { - Ok(_) => (), - Err(msg) => Err( - error::Error::Input( - format!("Invalid node name '{}'. Reason: {}. If unsure, use none.", - config.name, - msg - ) + if let Err(msg) = node_key::is_node_name_valid(&config.name) { + return Err(error::Error::Input( + format!("Invalid node name '{}'. Reason: {}. If unsure, use none.", + config.name, + msg, ) - )? + )); } // set sentry mode (i.e. act as an authority but **never** actively participate) @@ -625,16 +600,16 @@ where } }); - if config.rpc_http.is_none() { + if config.rpc_http.is_none() || cli.rpc_port.is_some() { let rpc_interface: &str = interface_str(cli.rpc_external, cli.unsafe_rpc_external, cli.validator)?; config.rpc_http = Some(parse_address(&format!("{}:{}", rpc_interface, 9933), cli.rpc_port)?); } - if config.rpc_ws.is_none() { + if config.rpc_ws.is_none() || cli.ws_port.is_some() { let ws_interface: &str = interface_str(cli.ws_external, cli.unsafe_ws_external, cli.validator)?; config.rpc_ws = Some(parse_address(&format!("{}:{}", ws_interface, 9944), cli.ws_port)?); } - if config.grafana_port.is_none() { + if config.grafana_port.is_none() || cli.grafana_port.is_some() { let grafana_interface: &str = if cli.grafana_external { "0.0.0.0" } else { "127.0.0.1" }; config.grafana_port = Some( parse_address(&format!("{}:{}", grafana_interface, 9955), cli.grafana_port)? 
@@ -781,6 +756,17 @@ fn kill_color(s: &str) -> String { mod tests { use super::*; + const TEST_VERSION_INFO: &'static VersionInfo = &VersionInfo { + name: "node-test", + version: "0.1.0", + commit: "some_commit", + executable_name: "node-test", + description: "description", + author: "author", + support_url: "http://example.org", + copyright_start_year: 2020, + }; + #[test] fn keystore_path_is_generated_correctly() { let chain_spec = ChainSpec::from_genesis( @@ -805,6 +791,7 @@ mod tests { update_config_for_running_node( &mut node_config, run_cmds.clone(), + TEST_VERSION_INFO, ).unwrap(); let expected_path = match keystore_path { @@ -815,4 +802,60 @@ mod tests { assert_eq!(expected_path, node_config.keystore.path().unwrap().to_owned()); } } + + #[test] + fn ensure_load_spec_provide_defaults() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + vec!["boo".to_string()], + Some(TelemetryEndpoints::new(vec![("foo".to_string(), 42)])), + None, + None, + None::<()>, + ); + + let args: Vec<&str> = vec![]; + let cli = RunCmd::from_iter(args); + + let mut config = Configuration::new(TEST_VERSION_INFO); + load_spec(&mut config, &cli.shared_params, |_| Ok(Some(chain_spec))).unwrap(); + + assert!(config.chain_spec.is_some()); + assert!(!config.network.boot_nodes.is_empty()); + assert!(config.telemetry_endpoints.is_some()); + } + + #[test] + fn ensure_update_config_for_running_node_provides_defaults() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + vec![], + None, + None, + None, + None::<()>, + ); + + let args: Vec<&str> = vec![]; + let cli = RunCmd::from_iter(args); + + let mut config = Configuration::new(TEST_VERSION_INFO); + config.chain_spec = Some(chain_spec); + update_config_for_running_node(&mut config, cli, TEST_VERSION_INFO).unwrap(); + + assert!(config.config_dir.is_some()); + assert!(config.database.is_some()); + if let Some(DatabaseConfig::Path { ref cache_size, .. }) = config.database { + assert!(cache_size.is_some()); + } else { + panic!("invalid config.database variant"); + } + assert!(!config.name.is_empty()); + assert!(config.network.config_path.is_some()); + assert!(!config.network.listen_addresses.is_empty()); + } } diff --git a/client/cli/src/params.rs b/client/cli/src/params.rs index eddd8578b3917..3a4aa319c6e8f 100644 --- a/client/cli/src/params.rs +++ b/client/cli/src/params.rs @@ -936,6 +936,7 @@ impl RunCmd { crate::update_config_for_running_node( &mut config, self, + &version, )?; crate::run_node(config, new_light, new_full, &version) @@ -1003,7 +1004,7 @@ impl ExportBlocksCmd { crate::fill_config_keystore_in_memory(&mut config)?; - if let DatabaseConfig::Path { ref path, .. } = &config.database { + if let DatabaseConfig::Path { ref path, .. } = config.expect_database() { info!("DB path: {}", path.display()); } let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); @@ -1124,7 +1125,7 @@ impl PurgeChainCmd { crate::fill_config_keystore_in_memory(&mut config)?; - let db_path = match config.database { + let db_path = match config.expect_database() { DatabaseConfig::Path { path, .. } => path, _ => { eprintln!("Cannot purge custom database implementation"); diff --git a/client/cli/src/runtime.rs b/client/cli/src/runtime.rs index 3eee94c0495ea..62a2245c9e174 100644 --- a/client/cli/src/runtime.rs +++ b/client/cli/src/runtime.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
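The RPC/WS/Grafana port handling earlier in this commit follows one small rule: keep an already-configured value unless the CLI explicitly asks for something else. A hedged, standalone restatement of that rule (field names and the default port are made up for the sketch):

```rust
// Mirrors the `config.rpc_http.is_none() || cli.rpc_port.is_some()` checks
// added in client/cli/src/lib.rs, in isolation.
#[derive(Debug, Default)]
struct PartialConfig {
    rpc_http_port: Option<u16>,
}

fn fill_rpc_port(config: &mut PartialConfig, cli_port: Option<u16>) {
    // Fill when unset, overwrite when the user passed an explicit port.
    if config.rpc_http_port.is_none() || cli_port.is_some() {
        config.rpc_http_port = Some(cli_port.unwrap_or(9933));
    }
}

fn main() {
    let mut config = PartialConfig { rpc_http_port: Some(8000) };

    fill_rpc_port(&mut config, None);
    assert_eq!(config.rpc_http_port, Some(8000)); // a pre-set value survives

    fill_rpc_port(&mut config, Some(9999));
    assert_eq!(config.rpc_http_port, Some(9999)); // an explicit CLI flag wins
}
```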
+use std::sync::Arc; + use futures::{Future, future, future::FutureExt}; use futures::select; use futures::pin_mut; @@ -91,7 +93,7 @@ where config.task_executor = { let runtime_handle = runtime.handle().clone(); - Some(Box::new(move |fut| { runtime_handle.spawn(fut); })) + Some(Arc::new(move |fut| { runtime_handle.spawn(fut); })) }; let f = future_builder(config)?; @@ -117,7 +119,7 @@ where config.task_executor = { let runtime_handle = runtime.handle().clone(); - Some(Box::new(move |fut| { runtime_handle.spawn(fut); })) + Some(Arc::new(move |fut| { runtime_handle.spawn(fut); })) }; let service = service_builder(config)?; diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 7a6ca8dc791c4..ad1d225134ffd 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -196,7 +196,7 @@ fn new_full_parts( state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match &config.database { + source: match config.expect_database() { DatabaseConfig::Path { path, cache_size } => sc_client_db::DatabaseSettingsSrc::Path { path: path.clone(), @@ -307,7 +307,7 @@ where TGen: RuntimeGenesis, TCSExt: Extension { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match &config.database { + source: match config.expect_database() { DatabaseConfig::Path { path, cache_size } => sc_client_db::DatabaseSettingsSrc::Path { path: path.clone(), @@ -1187,7 +1187,7 @@ ServiceBuilder< task_executor: if let Some(exec) = config.task_executor { exec } else { - return Err(Error::TasksExecutorRequired); + return Err(Error::TaskExecutorRequired); }, rpc_handlers, _rpc: rpc, diff --git a/client/service/src/config.rs b/client/service/src/config.rs index cb8170f7f4966..f4043d533e190 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -28,6 +28,27 @@ use sp_core::crypto::Protected; use target_info::Target; use sc_telemetry::TelemetryEndpoints; +/// Executable version. Used to pass version information from the root crate. +#[derive(Clone)] +pub struct VersionInfo { + /// Implementation name. + pub name: &'static str, + /// Implementation version. + pub version: &'static str, + /// SCM Commit hash. + pub commit: &'static str, + /// Executable file name. + pub executable_name: &'static str, + /// Executable file description. + pub description: &'static str, + /// Executable file author. + pub author: &'static str, + /// Support URL. + pub support_url: &'static str, + /// Copyright starting year (x-current year) + pub copyright_start_year: i32, +} + /// Service configuration. pub struct Configuration { /// Implementation name @@ -39,7 +60,7 @@ pub struct Configuration { /// Node roles. pub roles: Roles, /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. - pub task_executor: Option + Send>>) + Send>>, + pub task_executor: Option + Send>>) + Send + Sync>>, /// Extrinsic pool configuration. pub transaction_pool: TransactionPoolOptions, /// Network configuration. @@ -49,7 +70,7 @@ pub struct Configuration { /// Configuration for the keystore. pub keystore: KeystoreConfig, /// Configuration for the database. 
- pub database: DatabaseConfig, + pub database: Option, /// Size of internal state cache in Bytes pub state_cache_size: usize, /// Size in percent of cache size dedicated to child tries @@ -147,7 +168,7 @@ pub enum DatabaseConfig { impl Default for Configuration { /// Create a default config fn default() -> Self { - let configuration = Configuration { + Configuration { impl_name: "parity-substrate", impl_version: "0.0.0", impl_commit: "", @@ -159,10 +180,7 @@ impl Default for Configuration { transaction_pool: Default::default(), network: Default::default(), keystore: KeystoreConfig::None, - database: DatabaseConfig::Path { - path: Default::default(), - cache_size: Default::default(), - }, + database: None, state_cache_size: Default::default(), state_cache_child_ratio: Default::default(), pruning: PruningMode::default(), @@ -183,14 +201,21 @@ impl Default for Configuration { dev_key_seed: None, tracing_targets: Default::default(), tracing_receiver: Default::default(), - }; - - configuration + } } - } impl Configuration { + /// Create a default config using `VersionInfo` + pub fn new(version: &VersionInfo) -> Self { + let mut config = Configuration::default(); + config.impl_name = version.name; + config.impl_version = version.version; + config.impl_commit = version.commit; + + config + } + /// Returns full version string of this configuration. pub fn full_version(&self) -> String { full_version_from_strs(self.impl_version, self.impl_commit) @@ -220,6 +245,15 @@ impl Configuration { pub fn expect_chain_spec(&self) -> &ChainSpec { self.chain_spec.as_ref().expect("chain_spec must be specified") } + + /// Return a reference to the `DatabaseConfig` of this `Configuration`. + /// + /// ### Panics + /// + /// This method panic if the `database` is `None` + pub fn expect_database(&self) -> &DatabaseConfig { + self.database.as_ref().expect("database must be specified") + } } /// Returns platform info diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 14b03d7e95de7..059e1c19e490d 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -42,7 +42,7 @@ pub enum Error { SelectChainRequired, /// Tasks executor is missing. #[display(fmt="Tasks executor hasn't been provided.")] - TasksExecutorRequired, + TaskExecutorRequired, /// Other error. Other(String), } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index dc158f1300a90..577f36572acbb 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -96,7 +96,7 @@ pub struct Service { /// Receiver for futures that must be spawned as background tasks. to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, /// How to spawn background tasks. 
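The new `expect_database` accessor pairs an `Option` field with a panicking getter, so that fields like `database` can be filled in late (once the base path is known) while misuse fails loudly. A hedged standalone sketch of that pattern, with illustrative types rather than the real `sc_service` ones:

```rust
#[derive(Debug)]
enum DatabaseConfig {
    Path { path: std::path::PathBuf, cache_size: Option<usize> },
}

#[derive(Default)]
struct Config {
    database: Option<DatabaseConfig>,
}

impl Config {
    /// Panics if `database` was never filled in, mirroring `expect_database()`.
    fn expect_database(&self) -> &DatabaseConfig {
        self.database.as_ref().expect("database must be specified")
    }
}

fn main() {
    let mut config = Config::default();

    // Filling the field is deliberately delayed, as update_config_for_running_node
    // does once the config directory is known.
    config.database = Some(DatabaseConfig::Path {
        path: "/tmp/chain-db".into(),
        cache_size: None,
    });

    println!("{:?}", config.expect_database());
}
```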
- task_executor: Box + Send>>) + Send>, + task_executor: Arc + Send>>) + Send + Sync>, rpc_handlers: sc_rpc_server::RpcHandler, _rpc: Box, _telemetry: Option, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index cb458d533fdf1..2976e66a2982f 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -133,7 +133,7 @@ fn node_config ( index: usize, spec: &ChainSpec, role: Roles, - task_executor: Box + Send>>) + Send>, + task_executor: Arc + Send>>) + Send + Sync>, key_seed: Option, base_port: u16, root: &TempDir, @@ -183,10 +183,10 @@ fn node_config ( password: None }, config_dir: Some(root.clone()), - database: DatabaseConfig::Path { + database: Some(DatabaseConfig::Path { path: root.join("db"), cache_size: None - }, + }), state_cache_size: 16777216, state_cache_child_ratio: None, pruning: Default::default(), @@ -256,7 +256,7 @@ impl TestNet where for (key, authority) in authorities { let task_executor = { let executor = executor.clone(); - Box::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) + Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) }; let node_config = node_config( self.nodes, @@ -280,7 +280,7 @@ impl TestNet where for full in full { let task_executor = { let executor = executor.clone(); - Box::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) + Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) }; let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, task_executor, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); @@ -296,7 +296,7 @@ impl TestNet where for light in light { let task_executor = { let executor = executor.clone(); - Box::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) + Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) }; let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, task_executor, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 4f985871f5a22..d7ffdca1aa389 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -51,18 +51,18 @@ where allow_private_ipv4: true, enable_mdns: false, }; - config.task_executor = Some(Box::new(move |fut| { + config.task_executor = Some(Arc::new(move |fut| { wasm_bindgen_futures::spawn_local(fut) })); config.telemetry_external_transport = Some(transport); config.roles = Roles::LIGHT; config.name = format!("{} (Browser)", name); - config.database = { + config.database = Some({ info!("Opening Indexed DB database '{}'...", name); let db = kvdb_web::Database::open(name, 10) .await?; DatabaseConfig::Custom(Arc::new(db)) - }; + }); config.keystore = KeystoreConfig::InMemory; Ok(config) From 13d88accd2fa81be421b1102ff4c590a69260f09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 6 Feb 2020 16:08:11 +0100 Subject: [PATCH 16/25] Allocator improvements/clean ups (#4838) * FreeingBumpAllocator: Initialize the heads to `u32::max_value()` `self.heads` can point to an element with the index `0` in the heap. This would make the allocator fail to reuse this element. 
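To see why the sentinel matters, here is a deliberately tiny, hedged sketch of a freeing-bump style allocator (not the real `sp-allocator` code): offset 0 is a perfectly valid allocation, so using 0 as the "empty free list" marker would make a freed block at offset 0 impossible to reuse, whereas `u32::max_value()` can never collide with a real offset.

```rust
const EMPTY: u32 = u32::max_value(); // sentinel: "this free list is empty"

struct TinyAllocator {
    bumper: u32,
    // One free-list head per size class (say 8 and 16 bytes).
    heads: [u32; 2],
}

impl TinyAllocator {
    fn new() -> Self {
        TinyAllocator { bumper: 0, heads: [EMPTY; 2] }
    }

    fn allocate(&mut self, class: usize, size: u32) -> u32 {
        if self.heads[class] != EMPTY {
            // Reuse the head of the free list. (The real allocator then reads
            // the next link out of the heap; this sketch keeps lists one deep.)
            let ptr = self.heads[class];
            self.heads[class] = EMPTY;
            ptr
        } else {
            // Nothing to reuse: bump-allocate fresh space.
            let ptr = self.bumper;
            self.bumper += size;
            ptr
        }
    }

    fn deallocate(&mut self, class: usize, ptr: u32) {
        // Push the block onto its size class's free list. (The real allocator
        // stores the previous head inside the freed block itself.)
        self.heads[class] = ptr;
    }
}

fn main() {
    let mut alloc = TinyAllocator::new();
    let first = alloc.allocate(0, 8);
    assert_eq!(first, 0);                // the very first block lives at offset 0
    alloc.deallocate(0, first);
    assert_eq!(alloc.allocate(0, 8), 0); // with the max_value() sentinel it is reused
}
```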
* Simplify the `PREFIX_SIZE` handling --- primitives/allocator/src/freeing_bump.rs | 43 +++++++++++++++--------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 46c314c2c2e33..f51dc222a25f6 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -106,7 +106,7 @@ impl FreeingBumpHeapAllocator { FreeingBumpHeapAllocator { bumper: 0, - heads: [0; N], + heads: [u32::max_value(); N], ptr_offset, total_size: 0, } @@ -138,31 +138,30 @@ impl FreeingBumpHeapAllocator { } let list_index = (item_size.trailing_zeros() - 3) as usize; - let ptr: u32 = if self.heads[list_index] != 0 { + let ptr: u32 = if self.heads[list_index] != u32::max_value() { // Something from the free list - let item = self.heads[list_index]; - let ptr = item + PREFIX_SIZE; + let ptr = self.heads[list_index]; assert!( - ptr + item_size <= max_heap_size, + ptr + item_size + PREFIX_SIZE <= max_heap_size, "Pointer is looked up in list of free entries, into which only valid values are inserted; qed" ); - self.heads[list_index] = self.get_heap_u64(mem, item)? + self.heads[list_index] = self.get_heap_u64(mem, ptr)? .try_into() .map_err(|_| error("read invalid free list pointer"))?; ptr } else { // Nothing to be freed. Bump. - self.bump(item_size, max_heap_size)? + PREFIX_SIZE + self.bump(item_size, max_heap_size)? }; - self.set_heap_u64(mem, ptr - PREFIX_SIZE, list_index as u64)?; + self.set_heap_u64(mem, ptr, list_index as u64)?; self.total_size = self.total_size + item_size + PREFIX_SIZE; trace!("Heap size is {} bytes after allocation", self.total_size); - Ok(Pointer::new(self.ptr_offset + ptr)) + Ok(Pointer::new(self.ptr_offset + ptr + PREFIX_SIZE)) } /// Deallocates the space which was allocated for a pointer. @@ -173,18 +172,18 @@ impl FreeingBumpHeapAllocator { /// - `ptr` - pointer to the allocated chunk pub fn deallocate(&mut self, mem: &mut [u8], ptr: Pointer) -> Result<(), Error> { let ptr = u32::from(ptr) - self.ptr_offset; - if ptr < PREFIX_SIZE { - return Err(error("Invalid pointer for deallocation")); - } + let ptr = ptr.checked_sub(PREFIX_SIZE).ok_or_else(|| + error("Invalid pointer for deallocation") + )?; - let list_index: usize = self.get_heap_u64(mem, ptr - PREFIX_SIZE)? + let list_index: usize = self.get_heap_u64(mem, ptr)? 
.try_into() .map_err(|_| error("read invalid list index"))?; if list_index > self.heads.len() { return Err(error("read invalid list index")); } - self.set_heap_u64(mem, ptr - PREFIX_SIZE, self.heads[list_index] as u64)?; - self.heads[list_index] = ptr - PREFIX_SIZE; + self.set_heap_u64(mem, ptr, self.heads[list_index] as u64)?; + self.heads[list_index] = ptr; let item_size = Self::get_item_size_from_index(list_index); self.total_size = self.total_size.checked_sub(item_size as u32 + PREFIX_SIZE) @@ -357,7 +356,7 @@ mod tests { // then // should have re-allocated assert_eq!(ptr3, to_pointer(padded_offset + 16 + PREFIX_SIZE)); - assert_eq!(heap.heads, [0; N]); + assert_eq!(heap.heads, [u32::max_value(); N]); } #[test] @@ -563,4 +562,16 @@ mod tests { assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION); } + #[test] + fn deallocate_needs_to_maintain_linked_list() { + let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // Allocate and free some pointers + let ptrs = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::>(); + ptrs.into_iter().for_each(|ptr| heap.deallocate(&mut mem, ptr).unwrap()); + + // Second time we should be able to allocate all of them again. + let _ = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::>(); + } } From 203445bb155249e0e69592864a23510fd6c4c46e Mon Sep 17 00:00:00 2001 From: gabriel klawitter Date: Thu, 6 Feb 2020 16:47:44 +0100 Subject: [PATCH 17/25] ci: enable build for pre-tags (#4836) * build for pre-tags * shallow clone rustdocs --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ced3e33eaabaa..62a6c2de32ddf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -65,6 +65,7 @@ variables: only: - master - /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - /^pre-v[0-9]+\.[0-9]+-[0-9a-f]+$/ - web @@ -515,7 +516,7 @@ publish-gh-doc: insteadOf = "https://github.com/" EOC - unset GITHUB_TOKEN - - git clone https://github.com/substrate-developer-hub/rustdocs.git + - git clone --depth 1 https://github.com/substrate-developer-hub/rustdocs.git - rsync -ax --delete ./crate-docs/ ./rustdocs/${CI_COMMIT_REF_NAME}/ - cd ./rustdocs; git add . - git commit -m "update rustdoc ${CI_COMMIT_REF_NAME}" From 8d7bf66719ff5469c71ea50f5651c5df638c65dd Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 6 Feb 2020 16:48:38 +0100 Subject: [PATCH 18/25] Refactor epoch changes to a separate crate (#4785) * Init epoch changes module * Initial integration of new epoch changes module for BABE * Fix all initial compile errors * rename: digest -> digests * Fix babe tests * Bump impl_version * Fix more test issues * Remove test flag for tree It unfortunately won't work for multiple crates. 
* Update cargo lock * Fix duplicate parking_lot version * Add missing license header --- Cargo.lock | 13 ++ Cargo.toml | 3 +- bin/node/runtime/src/lib.rs | 2 +- client/consensus/babe/Cargo.toml | 1 + client/consensus/babe/src/authorship.rs | 18 +- client/consensus/babe/src/aux_schema.rs | 16 +- client/consensus/babe/src/lib.rs | 76 +++++-- client/consensus/babe/src/tests.rs | 5 +- client/consensus/babe/src/verification.rs | 12 +- client/consensus/epochs/Cargo.toml | 14 ++ .../epoch_changes.rs => epochs/src/lib.rs} | 203 ++++++++++-------- frame/babe/src/lib.rs | 15 +- frame/babe/src/tests.rs | 4 +- .../babe/src/{digest.rs => digests.rs} | 57 +++-- primitives/consensus/babe/src/lib.rs | 44 +--- 15 files changed, 265 insertions(+), 218 deletions(-) create mode 100644 client/consensus/epochs/Cargo.toml rename client/consensus/{babe/src/epoch_changes.rs => epochs/src/lib.rs} (79%) rename primitives/consensus/babe/src/{digest.rs => digests.rs} (85%) diff --git a/Cargo.lock b/Cargo.lock index 3f94778b8430c..4771b78586ca1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5805,6 +5805,7 @@ dependencies = [ "sc-block-builder", "sc-client", "sc-client-api", + "sc-consensus-epochs", "sc-consensus-slots", "sc-consensus-uncles", "sc-executor", @@ -5832,6 +5833,18 @@ dependencies = [ "tokio 0.1.22", ] +[[package]] +name = "sc-consensus-epochs" +version = "0.8.0" +dependencies = [ + "fork-tree", + "parity-scale-codec", + "parking_lot 0.10.0", + "sc-client-api", + "sp-blockchain", + "sp-runtime", +] + [[package]] name = "sc-consensus-manual-seal" version = "0.8.0" diff --git a/Cargo.toml b/Cargo.toml index 552f1eadc7ca9..47e3fe3f0efce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,9 @@ members = [ "client/consensus/babe", "client/consensus/manual-seal", "client/consensus/pow", - "client/consensus/slots", "client/consensus/uncles", + "client/consensus/slots", + "client/consensus/epochs", "client/db", "client/executor", "client/executor/common", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 40342c1ce84b1..7b718ca21f371 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -80,7 +80,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 214, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, }; diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 551754c7b65a3..23bf9cea2841f 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -22,6 +22,7 @@ sc-telemetry = { version = "2.0.0", path = "../../telemetry" } sc-keystore = { version = "2.0.0", path = "../../keystore" } sc-client-api = { version = "2.0.0", path = "../../api" } sc-client = { version = "0.8", path = "../../" } +sc-consensus-epochs = { version = "0.8", path = "../epochs" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 62667ef3978c0..8b28aefa2f77a 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -17,13 +17,17 @@ //! BABE authority selection and slot claiming. 
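For orientation, the primary-slot claim that `calculate_primary_threshold` and `check_primary_threshold` implement in this file boils down to the usual BABE condition: an authority holding a fraction theta of the total weight may author a slot when its VRF output, read as a `u128`, falls below `2^128 * (1 - (1 - c)^theta)`. The sketch below is illustrative only; it uses floating point where the real code uses exact arithmetic, and the numbers in `main` are made up.

```rust
fn primary_threshold(c: f64, authority_weight: u64, total_weight: u64) -> u128 {
    let theta = authority_weight as f64 / total_weight as f64;
    // Probability of winning the slot: 1 - (1 - c)^theta.
    let p = 1.0 - (1.0 - c).powf(theta);
    // Scale into the u128 space the VRF output is compared against.
    (p * (u128::max_value() as f64)) as u128
}

fn main() {
    // With c = 0.25 and 10 equally weighted authorities, each authority should
    // win roughly 2.8% of primary slots.
    let threshold = primary_threshold(0.25, 1, 10);
    println!("primary claim is valid iff vrf_output_as_u128 < {}", threshold);
}
```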
use merlin::Transcript; -use sp_consensus_babe::{AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX}; -use sp_consensus_babe::{Epoch, SlotNumber, AuthorityPair, BabePreDigest, BabeConfiguration}; +use sp_consensus_babe::{ + AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX, + SlotNumber, AuthorityPair, BabeConfiguration +}; +use sp_consensus_babe::digests::PreDigest; use sp_core::{U256, blake2_256}; use codec::Encode; use schnorrkel::vrf::VRFInOut; use sp_core::Pair; use sc_keystore::KeyStorePtr; +use super::Epoch; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -104,7 +108,7 @@ fn claim_secondary_slot( authorities: &[(AuthorityId, BabeAuthorityWeight)], keystore: &KeyStorePtr, randomness: [u8; 32], -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { if authorities.is_empty() { return None; } @@ -124,7 +128,7 @@ fn claim_secondary_slot( }) { if pair.public() == *expected_author { - let pre_digest = BabePreDigest::Secondary { + let pre_digest = PreDigest::Secondary { slot_number, authority_index: authority_index as u32, }; @@ -145,7 +149,7 @@ pub(super) fn claim_slot( epoch: &Epoch, config: &BabeConfiguration, keystore: &KeyStorePtr, -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { claim_primary_slot(slot_number, epoch, config.c, keystore) .or_else(|| { if config.secondary_slots { @@ -175,7 +179,7 @@ fn claim_primary_slot( epoch: &Epoch, c: (u64, u64), keystore: &KeyStorePtr, -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { let Epoch { authorities, randomness, epoch_index, .. } = epoch; let keystore = keystore.read(); @@ -196,7 +200,7 @@ fn claim_primary_slot( let pre_digest = get_keypair(&pair) .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) .map(|s| { - BabePreDigest::Primary { + PreDigest::Primary { slot_number, vrf_output: s.0.to_output(), vrf_proof: s.1, diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 170c2bf42d4e0..2f64157f22951 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -16,6 +16,8 @@ //! Schema for BABE epoch changes in the aux-db. +use std::sync::Arc; +use parking_lot::Mutex; use log::info; use codec::{Decode, Encode}; @@ -23,8 +25,8 @@ use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sp_runtime::traits::Block as BlockT; use sp_consensus_babe::BabeBlockWeight; - -use super::{epoch_changes::EpochChangesFor, SharedEpochChanges}; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; +use crate::Epoch; const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; @@ -49,14 +51,14 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> /// Load or initialize persistent epoch change data from backend. pub(crate) fn load_epoch_changes( backend: &B, -) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? - .map(Into::into) +) -> ClientResult> { + let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? + .map(|v| Arc::new(Mutex::new(v))) .unwrap_or_else(|| { info!(target: "babe", "Creating empty BABE epoch changes on what appears to be first startup." 
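The aux-db schema below persists the epoch-change tree by SCALE-encoding it under a fixed key and handing the bytes to a `write_aux` closure. A hedged, self-contained sketch of that encode/store/decode round trip, assuming the `parity-scale-codec` crate renamed to `codec` (as elsewhere in this series) and using a plain `HashMap` in place of the real `AuxStore`:

```rust
use codec::{Decode, Encode};

const KEY: &[u8] = b"example_aux_key"; // stands in for BABE_EPOCH_CHANGES

fn write_aux_value<T: Encode, R>(value: &T, write_aux: impl FnOnce(&[(&[u8], &[u8])]) -> R) -> R {
    // SCALE-encode the value and let the caller decide how to persist it.
    let encoded = value.encode();
    write_aux(&[(KEY, encoded.as_slice())])
}

fn read_aux_value<T: Decode>(raw: &[u8]) -> Option<T> {
    T::decode(&mut &raw[..]).ok()
}

fn main() {
    let mut store = std::collections::HashMap::new();

    write_aux_value(&42u64, |pairs| {
        for (k, v) in pairs {
            store.insert(k.to_vec(), v.to_vec());
        }
    });

    assert_eq!(read_aux_value::<u64>(&store[&KEY.to_vec()]), Some(42u64));
}
```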
); - SharedEpochChanges::new() + SharedEpochChanges::::default() }); // rebalance the tree after deserialization. this isn't strictly necessary @@ -70,7 +72,7 @@ pub(crate) fn load_epoch_changes( /// Update the epoch changes on disk after a change. pub(crate) fn write_epoch_changes( - epoch_changes: &EpochChangesFor, + epoch_changes: &EpochChangesFor, write_aux: F, ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 848097231109f..dbf61692eb788 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -59,8 +59,10 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, BabePreDigest, SlotNumber, BabeConfiguration, - CompatibleDigestItem, + BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, BabeConfiguration, + AuthorityId, AuthorityPair, AuthoritySignature, + BabeAuthorityWeight, VRF_OUTPUT_LENGTH, + digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, }; pub use sp_consensus::SyncOracle; use std::{collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}}; @@ -101,26 +103,58 @@ use log::{warn, debug, info, trace}; use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, }; -use epoch_changes::descendent_query; +use sc_consensus_epochs::{descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT}; use sp_blockchain::{ Result as ClientResult, Error as ClientError, HeaderBackend, ProvideCache, HeaderMetadata }; use schnorrkel::SignatureError; - +use codec::{Encode, Decode}; use sp_api::ApiExt; mod aux_schema; mod verification; -mod epoch_changes; mod authorship; #[cfg(test)] mod tests; -pub use sp_consensus_babe::{ - AuthorityId, AuthorityPair, AuthoritySignature, Epoch, NextEpochDescriptor, -}; -pub use epoch_changes::{EpochChanges, EpochChangesFor, SharedEpochChanges}; +/// BABE epoch information +#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index + pub epoch_index: u64, + /// The starting slot of the epoch, + pub start_slot: SlotNumber, + /// The duration of this epoch + pub duration: SlotNumber, + /// The authorities and their weights + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type SlotNumber = SlotNumber; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } +} #[derive(derive_more::Display, Debug)] enum Error { @@ -343,7 +377,7 @@ struct BabeWorker { sync_oracle: SO, force_authoring: bool, keystore: KeyStorePtr, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } @@ -361,7 +395,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork Error: std::error::Error + Send + From + From + 'static, { type EpochData = Epoch; - type Claim = (BabePreDigest, AuthorityPair); + type Claim = (PreDigest, AuthorityPair); type SyncOracle = SO; type CreateProposer = Pin> + Send + 'static @@ -533,12 +567,12 @@ 
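The `Epoch` struct and its `increment` above tile the slot axis into fixed-length, non-overlapping windows, with the end slot exclusive. A small worked sketch of that arithmetic (the helper name is made up):

```rust
fn epoch_index_for_slot(genesis_slot: u64, duration: u64, slot: u64) -> u64 {
    (slot - genesis_slot) / duration
}

fn main() {
    let (genesis_slot, duration) = (1_000u64, 200u64);
    assert_eq!(epoch_index_for_slot(genesis_slot, duration, 1_000), 0);
    assert_eq!(epoch_index_for_slot(genesis_slot, duration, 1_199), 0); // end_slot is exclusive
    assert_eq!(epoch_index_for_slot(genesis_slot, duration, 1_200), 1); // next epoch's start_slot
}
```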
impl SlotWorker for BabeWorker where /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. -fn find_pre_digest(header: &B::Header) -> Result> +fn find_pre_digest(header: &B::Header) -> Result> { // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { - return Ok(BabePreDigest::Secondary { + return Ok(PreDigest::Secondary { slot_number: 0, authority_index: 0, }); @@ -597,7 +631,7 @@ impl SlotCompatible for TimeSource { #[derive(Clone)] pub struct BabeLink { time_source: TimeSource, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } /// A verifier for Babe blocks. @@ -606,7 +640,7 @@ pub struct BabeVerifier { api: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, time_source: TimeSource, } @@ -711,7 +745,7 @@ impl Verifier for BabeVerifier::Runtime)?; + .map_err(Error::::Runtime)?; let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) .map_err(Error::::Extraction)?; @@ -855,7 +889,7 @@ pub struct BabeBlockImport { inner: I, client: Arc>, api: Arc, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } @@ -875,7 +909,7 @@ impl BabeBlockImport { fn new( client: Arc>, api: Arc, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, block_import: I, config: Config, ) -> Self { @@ -1114,7 +1148,7 @@ impl BlockImport for BabeBlockImport( client: &Client, - epoch_changes: &mut EpochChangesFor, + epoch_changes: &mut EpochChangesFor, ) -> Result<(), ConsensusError> where Block: BlockT, E: CallExecutor + Send + Sync, @@ -1161,7 +1195,7 @@ pub fn block_import( RA: Send + Sync, Client: AuxStore, { - let epoch_changes = aux_schema::load_epoch_changes(&*client)?; + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; let link = BabeLink { epoch_changes: epoch_changes.clone(), time_source: Default::default(), @@ -1245,7 +1279,7 @@ pub mod test_helpers { client: &C, keystore: &KeyStorePtr, link: &BabeLink, - ) -> Option where + ) -> Option where B: BlockT, C: ProvideRuntimeApi + ProvideCache + diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 3339c06d650dc..701155e7ccad9 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -59,7 +59,7 @@ type Mutator = Arc; #[derive(Clone)] struct DummyFactory { client: Arc, - epoch_changes: crate::SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, mutator: Mutator, } @@ -105,7 +105,6 @@ impl DummyProposer { > > { - use codec::Encode; let block_builder = self.factory.client.new_block_at( &BlockId::Hash(self.parent_hash), pre_digests, @@ -558,7 +557,7 @@ fn propose_and_import_block( let pre_digest = sp_runtime::generic::Digest { logs: vec![ Item::babe_pre_digest( - BabePreDigest::Secondary { + PreDigest::Secondary { authority_index: 0, slot_number, }, diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index ee5a99ec9d533..70418b8aea1e3 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -18,11 +18,11 @@ use schnorrkel::vrf::{VRFOutput, VRFProof}; use sp_runtime::{traits::Header, traits::DigestItemFor}; use sp_core::{Pair, Public}; -use 
sp_consensus_babe::{Epoch, BabePreDigest, CompatibleDigestItem, AuthorityId}; -use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair}; +use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; +use sp_consensus_babe::digests::{PreDigest, CompatibleDigestItem}; use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; -use super::{find_pre_digest, babe_err, BlockT, Error}; +use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; use super::authorship::{make_transcript, calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; /// BABE verification parameters @@ -32,7 +32,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// the pre-digest of the header being verified. this is optional - if prior /// verification code had to read it, it can be included here to avoid duplicate /// work. - pub(super) pre_digest: Option, + pub(super) pre_digest: Option, /// the slot number of the current time. pub(super) slot_now: SlotNumber, /// epoch descriptor of the epoch this block _should_ be under, if it's valid. @@ -93,7 +93,7 @@ pub(super) fn check_header( }; match &pre_digest { - BabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { debug!(target: "babe", "Verifying Primary block"); let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); @@ -106,7 +106,7 @@ pub(super) fn check_header( config.c, )?; }, - BabePreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { + PreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { debug!(target: "babe", "Verifying Secondary block"); let digest = (*authority_index, *slot_number); diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml new file mode 100644 index 0000000000000..e08553a241d1c --- /dev/null +++ b/client/consensus/epochs/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "sc-consensus-epochs" +version = "0.8.0" +authors = ["Parity Technologies "] +description = "Generic epochs-based utilities for consensus" +edition = "2018" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +parking_lot = "0.10.0" +fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" } diff --git a/client/consensus/babe/src/epoch_changes.rs b/client/consensus/epochs/src/lib.rs similarity index 79% rename from client/consensus/babe/src/epoch_changes.rs rename to client/consensus/epochs/src/lib.rs index 01e957c4998ed..cf3d9f5c4c2c2 100644 --- a/client/consensus/babe/src/epoch_changes.rs +++ b/client/consensus/epochs/src/lib.rs @@ -14,20 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Handling epoch changes in BABE. -//! -//! This exposes the `SharedEpochChanges`, which is a wrapper around a -//! persistent DAG superimposed over the forks of the blockchain. +//! Generic utilities for epoch-based consensus engines. 
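To make the extracted abstraction concrete, here is a minimal sketch of what a consensus engine has to supply in order to reuse this crate: a type implementing the `Epoch` trait introduced just below. It mirrors the fixed-duration test fixture added further down in this diff and assumes the crate is pulled in as `sc_consensus_epochs`; the type name is purely illustrative.

use sc_consensus_epochs::Epoch as EpochT;

/// A toy epoch with a fixed duration; nothing needs to be carried over
/// between epochs, so the next-epoch descriptor is `()`.
#[derive(Clone)]
struct FixedDurationEpoch {
    start_slot: u64,
    duration: u64,
}

impl EpochT for FixedDurationEpoch {
    type NextEpochDescriptor = ();
    type SlotNumber = u64;

    fn increment(&self, _: ()) -> Self {
        FixedDurationEpoch {
            start_slot: self.start_slot + self.duration,
            duration: self.duration,
        }
    }

    fn start_slot(&self) -> u64 {
        self.start_slot
    }

    // The end slot is exclusive: the epoch covers `start_slot() .. end_slot()`.
    fn end_slot(&self) -> u64 {
        self.start_slot + self.duration
    }
}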
-use std::sync::Arc; -use sp_consensus_babe::{Epoch, SlotNumber, NextEpochDescriptor}; -use fork_tree::ForkTree; -use parking_lot::{Mutex, MutexGuard}; -use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; +use std::{sync::Arc, ops::Add}; +use parking_lot::Mutex; use codec::{Encode, Decode}; +use fork_tree::ForkTree; use sc_client_api::utils::is_descendent_of; use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; -use std::ops::Add; +use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; /// A builder for `is_descendent_of` functions. pub trait IsDescendentOfBuilder { @@ -48,13 +43,13 @@ pub trait IsDescendentOfBuilder { } /// Produce a descendent query object given the client. -pub(crate) fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { +pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { HeaderBackendDescendentBuilder(client, std::marker::PhantomData) } /// Wrapper to get around unconstrained type errors when implementing /// `IsDescendentOfBuilder` for header backends. -pub(crate) struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); +pub struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); impl<'a, H, Block> IsDescendentOfBuilder for HeaderBackendDescendentBuilder<&'a H, Block> where @@ -71,49 +66,73 @@ impl<'a, H, Block> IsDescendentOfBuilder } } +/// Epoch data, distinguish whether it is genesis or not. +pub trait Epoch { + /// Descriptor for the next epoch. + type NextEpochDescriptor; + /// Type of the slot number. + type SlotNumber: Ord; + + /// Increment the epoch data, using the next epoch descriptor. + fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; + + /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, + /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. + fn end_slot(&self) -> Self::SlotNumber; + /// Produce the "start slot" of the epoch. + fn start_slot(&self) -> Self::SlotNumber; +} + /// An unimported genesis epoch. -pub struct UnimportedGenesis(Epoch); +pub struct UnimportedGenesisEpoch(Epoch); /// The viable epoch under which a block can be verified. /// /// If this is the first non-genesis block in the chain, then it will /// hold an `UnimportedGenesis` epoch. -pub enum ViableEpoch { - Genesis(UnimportedGenesis), +pub enum ViableEpoch { + /// Genesis viable epoch data. + Genesis(UnimportedGenesisEpoch), + /// Regular viable epoch data. Regular(Epoch), } -impl From for ViableEpoch { - fn from(epoch: Epoch) -> ViableEpoch { +impl From for ViableEpoch { + fn from(epoch: Epoch) -> ViableEpoch { ViableEpoch::Regular(epoch) } } -impl AsRef for ViableEpoch { +impl AsRef for ViableEpoch { fn as_ref(&self) -> &Epoch { match *self { - ViableEpoch::Genesis(UnimportedGenesis(ref e)) => e, + ViableEpoch::Genesis(UnimportedGenesisEpoch(ref e)) => e, ViableEpoch::Regular(ref e) => e, } } } -impl ViableEpoch { +impl ViableEpoch where + Epoch: crate::Epoch + Clone, +{ /// Extract the underlying epoch, disregarding the fact that a genesis /// epoch may be unimported. pub fn into_inner(self) -> Epoch { match self { - ViableEpoch::Genesis(UnimportedGenesis(e)) => e, + ViableEpoch::Genesis(UnimportedGenesisEpoch(e)) => e, ViableEpoch::Regular(e) => e, } } /// Increment the epoch, yielding an `IncrementedEpoch` to be imported /// into the fork-tree. 
- pub fn increment(&self, next_descriptor: NextEpochDescriptor) -> IncrementedEpoch { + pub fn increment( + &self, + next_descriptor: Epoch::NextEpochDescriptor + ) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { - ViableEpoch::Genesis(UnimportedGenesis(ref epoch_0)) => + ViableEpoch::Genesis(UnimportedGenesisEpoch(ref epoch_0)) => PersistedEpoch::Genesis(epoch_0.clone(), next), ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), }; @@ -123,12 +142,11 @@ impl ViableEpoch { } /// The datatype encoded on disk. -// This really shouldn't be public, but the encode/decode derives force it to be. #[derive(Clone, Encode, Decode)] -pub enum PersistedEpoch { - // epoch_0, epoch_1, +pub enum PersistedEpoch { + /// Genesis persisted epoch data. epoch_0, epoch_1. Genesis(Epoch, Epoch), - // epoch_n + /// Regular persisted epoch data. epoch_n. Regular(Epoch), } @@ -136,9 +154,9 @@ pub enum PersistedEpoch { /// /// Create this with `ViableEpoch::increment`. #[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] -pub struct IncrementedEpoch(PersistedEpoch); +pub struct IncrementedEpoch(PersistedEpoch); -impl AsRef for IncrementedEpoch { +impl AsRef for IncrementedEpoch { fn as_ref(&self) -> &Epoch { match self.0 { PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, @@ -151,7 +169,7 @@ impl AsRef for IncrementedEpoch { /// the hash and block number of the block signaling the epoch change, and the /// epoch that was signalled at that block. /// -/// BABE special-cases the first epoch, epoch_0, by saying that it starts at +/// The first epoch, epoch_0, is special cased by saying that it starts at /// slot number of the first block in the chain. When bootstrapping a chain, /// there can be multiple competing block #1s, so we have to ensure that the overlayed /// DAG doesn't get confused. @@ -163,8 +181,8 @@ impl AsRef for IncrementedEpoch { /// /// Further epochs (epoch_2, ..., epoch_n) each get their own entry. #[derive(Clone, Encode, Decode)] -pub struct EpochChanges { - inner: ForkTree, +pub struct EpochChanges { + inner: ForkTree>, } // create a fake header hash which hasn't been included in the chain. @@ -176,13 +194,23 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } -impl EpochChanges where +impl Default for EpochChanges where + Hash: PartialEq, + Number: Ord, +{ + fn default() -> Self { + EpochChanges { inner: ForkTree::new() } + } +} + +impl EpochChanges where Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, Number: Ord + One + Zero + Add + Copy, + Epoch: crate::Epoch + Clone, { - /// Create a new epoch-change tracker. - fn new() -> Self { - EpochChanges { inner: ForkTree::new() } + /// Create a new epoch change. 
+ pub fn new() -> Self { + Self::default() } /// Rebalances the tree of epoch changes so that it is sorted by length of @@ -199,12 +227,12 @@ impl EpochChanges where descendent_of_builder: D, hash: &Hash, number: Number, - slot: SlotNumber, + slot: Epoch::SlotNumber, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(None); - let predicate = |epoch: &PersistedEpoch| match *epoch { + let predicate = |epoch: &PersistedEpoch| match *epoch { PersistedEpoch::Genesis(_, ref epoch_1) => slot >= epoch_1.end_slot(), PersistedEpoch::Regular(ref epoch_n) => @@ -233,10 +261,10 @@ impl EpochChanges where descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: SlotNumber, + slot_number: Epoch::SlotNumber, make_genesis: G, - ) -> Result, fork_tree::Error> - where G: FnOnce(SlotNumber) -> Epoch + ) -> Result>, fork_tree::Error> + where G: FnOnce(Epoch::SlotNumber) -> Epoch { // find_node_where will give you the node in the fork-tree which is an ancestor // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, @@ -250,7 +278,7 @@ impl EpochChanges where if parent_number == Zero::zero() { // need to insert the genesis epoch. let genesis_epoch = make_genesis(slot_number); - return Ok(Some(ViableEpoch::Genesis(UnimportedGenesis(genesis_epoch)))); + return Ok(Some(ViableEpoch::Genesis(UnimportedGenesisEpoch(genesis_epoch)))); } // We want to find the deepest node in the tree which is an ancestor @@ -258,11 +286,11 @@ impl EpochChanges where // slot of our block. The genesis special-case doesn't need to look // at epoch_1 -- all we're doing here is figuring out which node // we need. - let predicate = |epoch: &PersistedEpoch| match *epoch { + let predicate = |epoch: &PersistedEpoch| match *epoch { PersistedEpoch::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot_number, + epoch_0.start_slot() <= slot_number, PersistedEpoch::Regular(ref epoch_n) => - epoch_n.start_slot <= slot_number, + epoch_n.start_slot() <= slot_number, }; self.inner.find_node_where( @@ -276,7 +304,7 @@ impl EpochChanges where // and here we figure out which of the internal epochs // of a genesis node to use based on their start slot. PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot_number { + if epoch_1.start_slot() <= slot_number { epoch_1.clone() } else { epoch_0.clone() @@ -296,7 +324,7 @@ impl EpochChanges where hash: Hash, number: Number, parent_hash: Hash, - epoch: IncrementedEpoch, + epoch: IncrementedEpoch, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(Some((hash, parent_hash))); @@ -314,47 +342,22 @@ impl EpochChanges where } } - /// Return the inner fork tree, useful for testing purposes. - #[cfg(test)] - pub fn tree(&self) -> &ForkTree { + /// Return the inner fork tree. + pub fn tree(&self) -> &ForkTree> { &self.inner } } /// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor>; +pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; /// A shared epoch changes tree. -#[derive(Clone)] -pub struct SharedEpochChanges { - inner: Arc>>, -} - -impl SharedEpochChanges { - /// Create a new instance of the `SharedEpochChanges`. 
- pub fn new() -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(EpochChanges::<_, _>::new())) - } - } - - /// Lock the shared epoch changes, - pub fn lock(&self) -> MutexGuard> { - self.inner.lock() - } -} - -impl From> for SharedEpochChanges { - fn from(epoch_changes: EpochChangesFor) -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(epoch_changes)) - } - } -} +pub type SharedEpochChanges = Arc>>; #[cfg(test)] mod tests { use super::*; + use super::Epoch as EpochT; #[derive(Debug, PartialEq)] pub struct TestError; @@ -396,6 +399,33 @@ mod tests { } type Hash = [u8; 1]; + type SlotNumber = u64; + + #[derive(Debug, Clone, Eq, PartialEq)] + struct Epoch { + start_slot: SlotNumber, + duration: SlotNumber, + } + + impl EpochT for Epoch { + type NextEpochDescriptor = (); + type SlotNumber = SlotNumber; + + fn increment(&self, _: ()) -> Self { + Epoch { + start_slot: self.start_slot + self.duration, + duration: self.duration, + } + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + } #[test] fn genesis_epoch_is_created_but_not_imported() { @@ -414,11 +444,8 @@ mod tests { }; let make_genesis = |slot| Epoch { - epoch_index: 0, start_slot: slot, duration: 100, - authorities: Vec::new(), - randomness: [0; 32], }; let epoch_changes = EpochChanges::new(); @@ -468,11 +495,8 @@ mod tests { }; let make_genesis = |slot| Epoch { - epoch_index: 0, start_slot: slot, duration: 100, - authorities: Vec::new(), - randomness: [0; 32], }; let mut epoch_changes = EpochChanges::new(); @@ -486,10 +510,7 @@ mod tests { assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); - let import_epoch_1 = genesis_epoch.increment(NextEpochDescriptor { - authorities: Vec::new(), - randomness: [1; 32], - }); + let import_epoch_1 = genesis_epoch.increment(()); let epoch_1 = import_epoch_1.as_ref().clone(); epoch_changes.import( @@ -566,18 +587,12 @@ mod tests { let duration = 100; let make_genesis = |slot| Epoch { - epoch_index: 0, start_slot: slot, duration, - authorities: Vec::new(), - randomness: [0; 32], }; let mut epoch_changes = EpochChanges::new(); - let next_descriptor = NextEpochDescriptor { - authorities: Vec::new(), - randomness: [0; 32], - }; + let next_descriptor = (); // insert genesis epoch for A { diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index ab1822712f942..1578d5c556c55 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -35,8 +35,9 @@ use sp_staking::{ use codec::{Encode, Decode}; use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; use sp_consensus_babe::{ - BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, NextEpochDescriptor, RawBabePreDigest, - SlotNumber, inherents::{INHERENT_IDENTIFIER, BabeInherentData} + BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, SlotNumber, + inherents::{INHERENT_IDENTIFIER, BabeInherentData}, + digests::{NextEpochDescriptor, RawPreDigest}, }; pub use sp_consensus_babe::{AuthorityId, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH}; @@ -205,11 +206,11 @@ impl FindAuthor for Module { { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { - let pre_digest = RawBabePreDigest::decode(&mut data).ok()?; + let pre_digest = RawPreDigest::decode(&mut data).ok()?; return Some(match pre_digest { - RawBabePreDigest::Primary { authority_index, .. } => + RawPreDigest::Primary { authority_index, .. } => authority_index, - RawBabePreDigest::Secondary { authority_index, .. 
} => + RawPreDigest::Secondary { authority_index, .. } => authority_index, }); } @@ -397,7 +398,7 @@ impl Module { .iter() .filter_map(|s| s.as_pre_runtime()) .filter_map(|(id, mut data)| if id == BABE_ENGINE_ID { - RawBabePreDigest::decode(&mut data).ok() + RawPreDigest::decode(&mut data).ok() } else { None }) @@ -424,7 +425,7 @@ impl Module { CurrentSlot::put(digest.slot_number()); - if let RawBabePreDigest::Primary { vrf_output, .. } = digest { + if let RawPreDigest::Primary { vrf_output, .. } = digest { // place the VRF output into the `Initialized` storage item // and it'll be put onto the under-construction randomness // later, once we've decided which epoch this block is in. diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index dbd61238166b0..976a264d7ba7b 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -34,7 +34,7 @@ fn make_pre_digest( vrf_output: [u8; sp_consensus_babe::VRF_OUTPUT_LENGTH], vrf_proof: [u8; sp_consensus_babe::VRF_PROOF_LENGTH], ) -> Digest { - let digest_data = sp_consensus_babe::RawBabePreDigest::Primary { + let digest_data = sp_consensus_babe::digests::RawPreDigest::Primary { authority_index, slot_number, vrf_output, @@ -110,7 +110,7 @@ fn first_block_epoch_zero_start() { let authorities = Babe::authorities(); let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( - sp_consensus_babe::NextEpochDescriptor { + sp_consensus_babe::digests::NextEpochDescriptor { authorities, randomness: Babe::randomness(), } diff --git a/primitives/consensus/babe/src/digest.rs b/primitives/consensus/babe/src/digests.rs similarity index 85% rename from primitives/consensus/babe/src/digest.rs rename to primitives/consensus/babe/src/digests.rs index cca088b92bdd5..7ec0f9b977cc7 100644 --- a/primitives/consensus/babe/src/digest.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -41,7 +41,7 @@ use sp_std::vec::Vec; /// (VRF based) and to a secondary (slot number based). #[cfg(feature = "std")] #[derive(Clone, Debug)] -pub enum BabePreDigest { +pub enum PreDigest { /// A primary VRF-based slot assignment. Primary { /// VRF output @@ -63,20 +63,20 @@ pub enum BabePreDigest { } #[cfg(feature = "std")] -impl BabePreDigest { +impl PreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { match self { - BabePreDigest::Primary { authority_index, .. } => *authority_index, - BabePreDigest::Secondary { authority_index, .. } => *authority_index, + PreDigest::Primary { authority_index, .. } => *authority_index, + PreDigest::Secondary { authority_index, .. } => *authority_index, } } /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - BabePreDigest::Primary { slot_number, .. } => *slot_number, - BabePreDigest::Secondary { slot_number, .. } => *slot_number, + PreDigest::Primary { slot_number, .. } => *slot_number, + PreDigest::Secondary { slot_number, .. } => *slot_number, } } @@ -84,18 +84,15 @@ impl BabePreDigest { /// of the chain. pub fn added_weight(&self) -> crate::BabeBlockWeight { match self { - BabePreDigest::Primary { .. } => 1, - BabePreDigest::Secondary { .. } => 0, + PreDigest::Primary { .. } => 1, + PreDigest::Secondary { .. } => 0, } } } -/// The prefix used by BABE for its VRF keys. -pub const BABE_VRF_PREFIX: &[u8] = b"substrate-babe-vrf"; - /// A raw version of `BabePreDigest`, usable on `no_std`. #[derive(Copy, Clone, Encode, Decode)] -pub enum RawBabePreDigest { +pub enum RawPreDigest { /// A primary VRF-based slot assignment. 
#[codec(index = "1")] Primary { @@ -123,38 +120,38 @@ pub enum RawBabePreDigest { }, } -impl RawBabePreDigest { +impl RawPreDigest { /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - RawBabePreDigest::Primary { slot_number, .. } => *slot_number, - RawBabePreDigest::Secondary { slot_number, .. } => *slot_number, + RawPreDigest::Primary { slot_number, .. } => *slot_number, + RawPreDigest::Secondary { slot_number, .. } => *slot_number, } } } #[cfg(feature = "std")] -impl Encode for BabePreDigest { +impl Encode for PreDigest { fn encode(&self) -> Vec { let raw = match self { - BabePreDigest::Primary { + PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number, } => { - RawBabePreDigest::Primary { + RawPreDigest::Primary { vrf_output: *vrf_output.as_bytes(), vrf_proof: vrf_proof.to_bytes(), authority_index: *authority_index, slot_number: *slot_number, } }, - BabePreDigest::Secondary { + PreDigest::Secondary { authority_index, slot_number, } => { - RawBabePreDigest::Secondary { + RawPreDigest::Secondary { authority_index: *authority_index, slot_number: *slot_number, } @@ -166,26 +163,26 @@ impl Encode for BabePreDigest { } #[cfg(feature = "std")] -impl codec::EncodeLike for BabePreDigest {} +impl codec::EncodeLike for PreDigest {} #[cfg(feature = "std")] -impl Decode for BabePreDigest { +impl Decode for PreDigest { fn decode(i: &mut R) -> Result { let pre_digest = match Decode::decode(i)? { - RawBabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + RawPreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { // Verify (at compile time) that the sizes in babe_primitives are correct let _: [u8; super::VRF_OUTPUT_LENGTH] = vrf_output; let _: [u8; super::VRF_PROOF_LENGTH] = vrf_proof; - BabePreDigest::Primary { + PreDigest::Primary { vrf_proof: VRFProof::from_bytes(&vrf_proof).map_err(convert_error)?, vrf_output: VRFOutput::from_bytes(&vrf_output).map_err(convert_error)?, authority_index, slot_number, } }, - RawBabePreDigest::Secondary { authority_index, slot_number } => { - BabePreDigest::Secondary { authority_index, slot_number } + RawPreDigest::Secondary { authority_index, slot_number } => { + PreDigest::Secondary { authority_index, slot_number } }, }; @@ -208,10 +205,10 @@ pub struct NextEpochDescriptor { #[cfg(feature = "std")] pub trait CompatibleDigestItem: Sized { /// Construct a digest item which contains a BABE pre-digest. - fn babe_pre_digest(seal: BabePreDigest) -> Self; + fn babe_pre_digest(seal: PreDigest) -> Self; /// If this item is an BABE pre-digest, return it. - fn as_babe_pre_digest(&self) -> Option; + fn as_babe_pre_digest(&self) -> Option; /// Construct a digest item which contains a BABE seal. 
fn babe_seal(signature: AuthoritySignature) -> Self; @@ -227,11 +224,11 @@ pub trait CompatibleDigestItem: Sized { impl CompatibleDigestItem for DigestItem where Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static { - fn babe_pre_digest(digest: BabePreDigest) -> Self { + fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) } - fn as_babe_pre_digest(&self) -> Option { + fn as_babe_pre_digest(&self) -> Option { self.try_to(OpaqueDigestItemId::PreRuntime(&BABE_ENGINE_ID)) } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 4cdeb072bd5e9..78c63e5022a3c 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -19,22 +19,22 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -mod digest; +pub mod digests; pub mod inherents; use codec::{Encode, Decode}; use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; - -#[cfg(feature = "std")] -pub use digest::{BabePreDigest, CompatibleDigestItem}; -pub use digest::{BABE_VRF_PREFIX, RawBabePreDigest, NextEpochDescriptor}; +use crate::digests::NextEpochDescriptor; mod app { use sp_application_crypto::{app_crypto, key_types::BABE, sr25519}; app_crypto!(sr25519, BABE); } +/// The prefix used by BABE for its VRF keys. +pub const BABE_VRF_PREFIX: &[u8] = b"substrate-babe-vrf"; + /// A Babe authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Babe module. If that ever changes, then this must, too. #[cfg(feature = "std")] @@ -78,40 +78,6 @@ pub type BabeAuthorityWeight = u64; /// The weight of a BABE block. pub type BabeBlockWeight = u32; -/// BABE epoch information -#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, RuntimeDebug)] -pub struct Epoch { - /// The epoch index - pub epoch_index: u64, - /// The starting slot of the epoch, - pub start_slot: SlotNumber, - /// The duration of this epoch - pub duration: SlotNumber, - /// The authorities and their weights - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// Randomness for this epoch - pub randomness: [u8; VRF_OUTPUT_LENGTH], -} - -impl Epoch { - /// "increment" the epoch, with given descriptor for the next. - pub fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - } - } - - /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, - // i.e. the slots covered by the epoch are `self.start_slot .. self.end_slot()`. - pub fn end_slot(&self) -> SlotNumber { - self.start_slot + self.duration - } -} - /// An consensus log item for BABE. 
#[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { From dec1bb7d30f3afee7ab585c661b3f6100441c220 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 6 Feb 2020 18:52:30 +0100 Subject: [PATCH 19/25] babe: pass epoch data via intermediates (#4807) * babe: pass epoch data via intermediates * Switch to use Box for intermediates * Set intermediate.epoch to be Option * Fix proposer should put out an empty intermediate * Remove unnecessary encode/decode * Add EpochData to block_import_params in slot worker * Fix aura compile * Fix integration test --- Cargo.lock | 1 + bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 28 ++++++++-- client/consensus/aura/src/lib.rs | 3 +- client/consensus/babe/src/lib.rs | 84 ++++++++++++++++++++---------- client/consensus/babe/src/tests.rs | 19 ++++++- client/consensus/slots/src/lib.rs | 4 +- 7 files changed, 106 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4771b78586ca1..0333904bff153 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3391,6 +3391,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-consensus-babe", + "sc-consensus-epochs", "sc-finality-grandpa", "sc-keystore", "sc-network", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index cf666ffdc518e..d5f3f45b313ae 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -92,6 +92,7 @@ browser-utils = { path = "../../../utils/browser", optional = true } [dev-dependencies] sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } sc-consensus-babe = { version = "0.8", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } futures = "0.3.1" tempfile = "3.1.0" diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index c462a60836a22..2c500c6a1c1ed 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -371,8 +371,11 @@ pub fn new_light(config: NodeConfiguration) #[cfg(test)] mod tests { - use std::sync::Arc; - use sc_consensus_babe::CompatibleDigestItem; + use std::{sync::Arc, collections::HashMap, borrow::Cow, any::Any}; + use sc_consensus_babe::{ + CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY + }; + use sc_consensus_epochs::descendent_query; use sp_consensus::{ Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, RecordProof, @@ -384,7 +387,7 @@ mod tests { use sp_core::{crypto::Pair as CryptoPair, H256}; use sp_runtime::{ generic::{BlockId, Era, Digest, SignedPayload}, - traits::Block as BlockT, + traits::{Block as BlockT, Header as HeaderT}, traits::Verify, OpaqueExtrinsic, }; @@ -499,11 +502,21 @@ mod tests { let parent_id = BlockId::number(service.client().chain_info().best_number); let parent_header = service.client().header(&parent_id).unwrap().unwrap(); + let parent_hash = parent_header.hash(); + let parent_number = *parent_header.number(); let mut proposer_factory = sc_basic_authorship::ProposerFactory { client: service.client(), transaction_pool: service.transaction_pool(), }; + let epoch = babe_link.epoch_changes().lock().epoch_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot_num, + |slot| babe_link.config().genesis_epoch(slot) + ).unwrap().unwrap(); + let mut digest = Digest::::default(); // even though there's only one authority some slots might be empty, @@ -555,7 +568,14 @@ mod tests { 
storage_changes: None, finalized: false, auxiliary: Vec::new(), - intermediates: Default::default(), + intermediates: { + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate { epoch }) as Box, + ); + intermediates + }, fork_choice: Some(ForkChoiceStrategy::LongestChain), allow_missing_state: false, import_existing: false, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 4b107d87d5512..434314a85353e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -274,8 +274,9 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW Vec, StorageChanges, B>, Self::Claim, + Self::EpochData, ) -> sp_consensus::BlockImportParams> + Send> { - Box::new(|header, header_hash, body, storage_changes, pair| { + Box::new(|header, header_hash, body, storage_changes, pair, _epoch| { // sign the pre-sealed hash of the block and then // add it to a digest item. let signature = pair.sign(header_hash.as_ref()); diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index dbf61692eb788..f9e3ef98d6735 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -65,7 +65,10 @@ pub use sp_consensus_babe::{ digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, }; pub use sp_consensus::SyncOracle; -use std::{collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}}; +use std::{ + collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, + any::Any, borrow::Cow +}; use sp_consensus_babe; use sp_consensus::{ImportResult, CanAuthorWith}; use sp_consensus::import_queue::{ @@ -103,7 +106,9 @@ use log::{warn, debug, info, trace}; use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, }; -use sc_consensus_epochs::{descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT}; +use sc_consensus_epochs::{ + descendent_query, ViableEpoch, SharedEpochChanges, EpochChangesFor, Epoch as EpochT +}; use sp_blockchain::{ Result as ClientResult, Error as ClientError, HeaderBackend, ProvideCache, HeaderMetadata @@ -196,10 +201,6 @@ enum Error { FetchParentHeader(sp_blockchain::Error), #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] ExpectedEpochChange(B::Hash, u64), - #[display(fmt = "Could not look up epoch: {:?}", _0)] - CouldNotLookUpEpoch(Box>), - #[display(fmt = "Block {} is not valid under any epoch.", _0)] - BlockNotValid(B::Hash), #[display(fmt = "Unexpected epoch change")] UnexpectedEpochChange, #[display(fmt = "Parent block of {} has no associated weight", _0)] @@ -231,6 +232,16 @@ macro_rules! babe_info { }; } + +/// Intermediate value passed to block importer. +pub struct BabeIntermediate { + /// The epoch data, if available. + pub epoch: ViableEpoch, +} + +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; + /// A slot duration. Create with `get_or_compute`. // FIXME: Once Rust has higher-kinded types, the duplication between this // and `super::babe::Config` can be eliminated. 
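Put differently, the epoch computed by the proposer or verifier is no longer re-derived at import time: it travels inside `BlockImportParams::intermediates` as a type-erased `Box<dyn Any>` stored under the `b"babe1"` key and is downcast back by the block importer. The following self-contained sketch illustrates just that mechanism, with a stand-in `DemoIntermediate` type instead of the real `BabeIntermediate`:

use std::{any::Any, borrow::Cow, collections::HashMap};

/// Stand-in for `BabeIntermediate`: some epoch data the importer needs.
struct DemoIntermediate {
    epoch_index: u64,
}

fn main() {
    // The proposer/verifier side: insert the value under the engine's key,
    // exactly as done with `INTERMEDIATE_KEY` above.
    let mut intermediates: HashMap<Cow<'static, [u8]>, Box<dyn Any>> = HashMap::new();
    intermediates.insert(
        Cow::from(&b"babe1"[..]),
        Box::new(DemoIntermediate { epoch_index: 7 }) as Box<dyn Any>,
    );

    // The importer side: take the value back out and downcast it to the
    // concrete type, mirroring the importer's `take_intermediate` call.
    let recovered = intermediates
        .remove(&b"babe1"[..])
        .and_then(|boxed| boxed.downcast::<DemoIntermediate>().ok())
        .expect("the intermediate was inserted above");
    assert_eq!(recovered.epoch_index, 7);
}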
@@ -394,7 +405,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork SO: SyncOracle + Send + Clone, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = Epoch; + type EpochData = ViableEpoch; type Claim = (PreDigest, AuthorityPair); type SyncOracle = SO; type CreateProposer = Pin sc_consensus_slots::SimpleSlotWorker for BabeWork |slot| self.config.genesis_epoch(slot) ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? - .map(|e| e.into_inner()) .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { - epoch_data.authorities.len() + epoch_data.as_ref().authorities.len() } fn claim_slot( &self, _parent_header: &B::Header, slot_number: SlotNumber, - epoch_data: &Epoch, + epoch_data: &ViableEpoch, ) -> Option { debug!(target: "babe", "Attempting to claim slot {}", slot_number); let s = authorship::claim_slot( slot_number, - epoch_data, + epoch_data.as_ref(), &*self.config, &self.keystore, ); @@ -469,8 +479,9 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork Vec, StorageChanges, Self::Claim, + Self::EpochData, ) -> sp_consensus::BlockImportParams + Send> { - Box::new(|header, header_hash, body, storage_changes, (_, pair)| { + Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch| { // sign the pre-sealed hash of the block and then // add it to a digest item. let signature = pair.sign(header_hash.as_ref()); @@ -485,7 +496,14 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork storage_changes: Some(storage_changes), finalized: false, auxiliary: Vec::new(), // block-weight is written in block import. - intermediates: Default::default(), + intermediates: { + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate { epoch }) as Box, + ); + intermediates + }, fork_choice: None, allow_missing_state: false, import_existing: false, @@ -634,6 +652,19 @@ pub struct BabeLink { epoch_changes: SharedEpochChanges, config: Config, } + +impl BabeLink { + /// Get the epoch changes of this link. + pub fn epoch_changes(&self) -> &SharedEpochChanges { + &self.epoch_changes + } + + /// Get the config of this link. + pub fn config(&self) -> &Config { + &self.config + } +} + /// A verifier for Babe blocks. pub struct BabeVerifier { client: Arc>, @@ -830,6 +861,14 @@ impl Verifier for BabeVerifier ?pre_header); + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate { + epoch, + }) as Box, + ); + let block_import_params = BlockImportParams { origin, header: pre_header, @@ -839,7 +878,7 @@ impl Verifier for BabeVerifier BlockImport for BabeBlockImport| ConsensusError::ChainLookup( - babe_err(Error::::CouldNotLookUpEpoch(Box::new(e))).into() - ))? 
- .ok_or_else(|| ConsensusError::ClientImport( - babe_err(Error::::BlockNotValid(hash)).into() - ))?; + let intermediate = block.take_intermediate::( + INTERMEDIATE_KEY + )?; + let epoch = intermediate.epoch; let first_in_epoch = parent_slot < epoch.as_ref().start_slot; (epoch, first_in_epoch, parent_weight) }; diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 701155e7ccad9..687f23e646f66 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -565,8 +565,18 @@ fn propose_and_import_block( ], }; + let parent_hash = parent.hash(); + let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; + let epoch = proposer_factory.epoch_changes.lock().epoch_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot_number, + |slot| proposer_factory.config.genesis_epoch(slot) + ).unwrap().unwrap(); + let seal = { // sign the pre-sealed hash of the block and then // add it to a digest item. @@ -593,7 +603,14 @@ fn propose_and_import_block( storage_changes: None, finalized: false, auxiliary: Vec::new(), - intermediates: Default::default(), + intermediates: { + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate { epoch }) as Box, + ); + intermediates + }, fork_choice: Some(ForkChoiceStrategy::LongestChain), allow_missing_state: false, import_existing: false, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index c6185d5d307b9..8bc2547a49e39 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -81,7 +81,7 @@ pub trait SimpleSlotWorker { type Claim: Send + 'static; /// Epoch data necessary for authoring. - type EpochData; + type EpochData: Send + 'static; /// The logging target to use when logging messages. 
fn logging_target(&self) -> &'static str; @@ -119,6 +119,7 @@ pub trait SimpleSlotWorker { Vec, StorageChanges<>::Transaction, B>, Self::Claim, + Self::EpochData, ) -> sp_consensus::BlockImportParams< B, >::Transaction @@ -280,6 +281,7 @@ pub trait SimpleSlotWorker { body, proposal.storage_changes, claim, + epoch_data, ); info!( From f3742e70b2b74c541cf2e8ff2226be7d8f824ffe Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 6 Feb 2020 23:32:48 +0100 Subject: [PATCH 20/25] easy with threads (#4848) --- client/finality-grandpa/src/tests.rs | 34 ++++++++++++++++------------ 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 0d7cf0541f355..0f4d9dadd023f 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -499,11 +499,17 @@ fn add_forced_change( )); } +fn thread_pool() -> futures::executor::ThreadPool { + futures::executor::ThreadPool::builder().pool_size(2) + .create() + .expect("never fails") +} + #[test] fn finalize_3_voters_no_observers() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -529,7 +535,7 @@ fn finalize_3_voters_no_observers() { #[test] fn finalize_3_voters_1_full_observer() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -635,7 +641,7 @@ fn transition_3_voters_twice_1_full_observer() { let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); net.lock().peer(0).push_blocks(1, false); net.lock().block_until_sync(&mut runtime); @@ -770,7 +776,7 @@ fn transition_3_voters_twice_1_full_observer() { #[test] fn justification_is_emitted_when_consensus_data_changes() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); @@ -789,7 +795,7 @@ fn justification_is_emitted_when_consensus_data_changes() { #[test] fn justification_is_generated_periodically() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); @@ -829,7 +835,7 @@ fn consensus_changes_works() { #[test] fn sync_justifications_on_change_blocks() { let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_b); @@ -884,7 +890,7 @@ fn sync_justifications_on_change_blocks() { fn 
finalizes_multiple_pending_changes_in_order() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; @@ -945,7 +951,7 @@ fn finalizes_multiple_pending_changes_in_order() { fn force_change_to_new_set() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); // two of these guys are offline. let genesis_authorities = &[ Ed25519Keyring::Alice, @@ -1126,7 +1132,7 @@ fn voter_persists_its_votes() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -1383,7 +1389,7 @@ fn voter_persists_its_votes() { fn finalize_3_voters_1_light_observer() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(authorities); @@ -1429,7 +1435,7 @@ fn finalize_3_voters_1_light_observer() { fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { let _ = ::env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); @@ -1461,7 +1467,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ let _ = ::env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); // two of these guys are offline. let genesis_authorities = if FORCE_CHANGE { @@ -1526,7 +1532,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ fn voter_catches_up_to_latest_round_when_behind() { let _ = env_logger::try_init(); let mut runtime = current_thread::Runtime::new().unwrap(); - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); @@ -1646,7 +1652,7 @@ fn grandpa_environment_respects_voting_rules() { use finality_grandpa::Chain; use sc_network_test::TestClient; - let threads_pool = futures::executor::ThreadPool::new().unwrap(); + let threads_pool = thread_pool(); let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); From 18e6ea00188a50d6fc04de254923ab026228ed77 Mon Sep 17 00:00:00 2001 From: Marcio Diaz Date: Fri, 7 Feb 2020 08:12:19 +0100 Subject: [PATCH 21/25] Enable trace timings logs for transaction factory (#4845) * Enable trace timings logs for transaction factory. 
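The point of moving the two tracing flags onto `SharedParams` is that subcommands such as the transaction factory can now enable span timing as well, not only `run`. A hedged sketch of how the pieces fit together, condensed from the wiring this patch adds: once the `ProfilingSubscriber` from `sc-tracing` is installed as the global default, spans emitted from modules selected by `--tracing-targets` are timed; the span-emitting side is plain `tracing` API. `shared` here is assumed to be the `SharedParams` value carrying the new fields, and error handling is simplified.

if let Some(targets) = shared.tracing_targets.as_ref() {
    let subscriber = sc_tracing::ProfilingSubscriber::new(shared.tracing_receiver.into(), targets);
    // `set_global_default` succeeds at most once per process, so this must run
    // before the factory starts emitting spans.
    tracing::subscriber::set_global_default(subscriber)
        .expect("no global tracing subscriber installed yet");
}

// Code paths selected via --tracing-targets are then timed simply by emitting spans:
let span = tracing::span!(tracing::Level::INFO, "factory_create_block");
let _enter = span.enter();
// ... work to be timed; the timing is recorded when the guard is dropped ...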
--- Cargo.lock | 2 ++ bin/node/cli/Cargo.toml | 2 ++ bin/node/cli/src/command.rs | 10 ++++++++++ client/cli/src/lib.rs | 4 ++-- client/cli/src/params.rs | 28 ++++++++++++++-------------- 5 files changed, 30 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0333904bff153..440b3b218e417 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3400,6 +3400,7 @@ dependencies = [ "sc-service", "sc-service-test", "sc-telemetry", + "sc-tracing", "sc-transaction-pool", "serde", "sp-authority-discovery", @@ -3417,6 +3418,7 @@ dependencies = [ "structopt", "substrate-build-script-utils", "tempfile", + "tracing", "vergen", "wasm-bindgen", "wasm-bindgen-futures", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d5f3f45b313ae..dad76ae4bfdad 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -32,6 +32,7 @@ jsonrpc-core = "14.0.3" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } +tracing = "0.1.10" # primitives sp-authority-discovery = { version = "2.0.0", path = "../../../primitives/authority-discovery" } @@ -60,6 +61,7 @@ sc-offchain = { version = "2.0.0", path = "../../../client/offchain" } sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } sc-basic-authorship = { version = "0.8", path = "../../../client/basic-authorship" } sc-service = { version = "0.8", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8", path = "../../../client/authority-discovery" } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index eb18d6d8b33d1..f5d747a14663a 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -56,6 +56,16 @@ where _ => panic!("Factory is only supported for development and local testnet."), } + // Setup tracing. + if let Some(tracing_targets) = cli_args.shared_params.tracing_targets.as_ref() { + let subscriber = sc_tracing::ProfilingSubscriber::new( + cli_args.shared_params.tracing_receiver.into(), tracing_targets + ); + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + panic!("Unable to set global default subscriber {}", e); + } + } + let factory_state = FactoryState::new( cli_args.mode.clone(), cli_args.num, diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 7f726893368a0..0965a79f1537e 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -638,8 +638,8 @@ where config.telemetry_endpoints = Some(TelemetryEndpoints::new(cli.telemetry_endpoints)); } - config.tracing_targets = cli.tracing_targets.into(); - config.tracing_receiver = cli.tracing_receiver.into(); + config.tracing_targets = cli.shared_params.tracing_targets.into(); + config.tracing_receiver = cli.shared_params.tracing_receiver.into(); // Imply forced authoring on --dev config.force_authoring = cli.shared_params.dev || cli.force_authoring; diff --git a/client/cli/src/params.rs b/client/cli/src/params.rs index 3a4aa319c6e8f..1dc6b0567c698 100644 --- a/client/cli/src/params.rs +++ b/client/cli/src/params.rs @@ -113,6 +113,20 @@ pub struct SharedParams { /// Sets a custom logging filter. 
#[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] pub log: Option, + + /// Comma separated list of targets for tracing + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages + #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, } /// Parameters for block import. @@ -579,20 +593,6 @@ pub struct RunCmd { #[structopt(long = "force-authoring")] pub force_authoring: bool, - /// Comma separated list of targets for tracing - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, - /// Specify custom keystore path. #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] pub keystore_path: Option, From 65763cdb2474c664337e3a33bc9c7a548d34ea86 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 7 Feb 2020 11:53:11 +0100 Subject: [PATCH 22/25] Subsystems memory tracking: 1. Transaction pool (#4822) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update sp-runtime * total update * usage informant * update to crates.io version * update Cargo.lock * update dummy update * fix todo * cleanup * avoid custom impl * Update client/transaction-pool/graph/src/future.rs Co-Authored-By: Tomasz Drwięga * remove another custom impl * remove another custom impl * add kb in report * update Cargo.lock * review suggestions * --amend * --amend * bump parity-util-mem to 0.5.0 * bumps * update macro and versions * add to grafana * naming Co-authored-by: Tomasz Drwięga --- Cargo.lock | 45 +++++++++++-------- client/Cargo.toml | 4 +- client/api/Cargo.toml | 2 +- client/cli/Cargo.toml | 1 + client/cli/src/informant.rs | 6 +++ client/db/Cargo.toml | 10 ++--- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 2 + client/transaction-pool/Cargo.toml | 1 + client/transaction-pool/graph/Cargo.toml | 1 + .../transaction-pool/graph/src/base_pool.rs | 31 ++++++++++++- client/transaction-pool/graph/src/future.rs | 30 ++++++++++++- client/transaction-pool/graph/src/pool.rs | 10 +++++ client/transaction-pool/graph/src/ready.rs | 24 ++++++++-- .../graph/src/validated_pool.rs | 11 +++++ client/transaction-pool/src/lib.rs | 12 +++++ client/transaction-pool/src/testing/pool.rs | 11 +++++ frame/system/src/lib.rs | 6 +-- primitives/core/Cargo.toml | 1 + primitives/core/src/changes_trie.rs | 2 +- primitives/runtime/Cargo.toml | 2 + primitives/runtime/src/generic/block.rs | 6 +-- primitives/runtime/src/generic/digest.rs | 5 ++- primitives/runtime/src/generic/header.rs | 22 ++++++++- .../src/generic/unchecked_extrinsic.rs | 12 +++++ primitives/runtime/src/lib.rs | 7 +++ primitives/runtime/src/testing.rs | 13 ++++-- primitives/runtime/src/traits.rs | 26 +++++++---- primitives/test-primitives/Cargo.toml | 1 + primitives/test-primitives/src/lib.rs | 1 + primitives/transaction-pool/src/pool.rs | 4 +- test-utils/runtime/Cargo.toml | 1 + test-utils/runtime/src/lib.rs | 2 + utils/browser/Cargo.toml | 2 +- 34 files changed, 257 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
440b3b218e417..ca7a5d1844f27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2527,31 +2527,31 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8396be0e5561ccd1bf7ff0b2007487cdd7a87a056873fe6ea906b35d4dbf7ed8" +checksum = "03080afe6f42cd996da9f568d6add5d7fb5ee2ea7fb7802d2d2cbd836958fd87" dependencies = [ "parity-bytes", - "parity-util-mem 0.4.1", + "parity-util-mem 0.5.1", "smallvec 1.2.0", ] [[package]] name = "kvdb-memorydb" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25ef14155e418515c4839e9144c839de3506e68946f255a32b7f166095493d" +checksum = "b9355274e5a9e0a7e8ef43916950eae3949024de2a8dffe4d5a6c13974a37c8e" dependencies = [ "kvdb", - "parity-util-mem 0.4.1", - "parking_lot 0.9.0", + "parity-util-mem 0.5.1", + "parking_lot 0.10.0", ] [[package]] name = "kvdb-rocksdb" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af488cc16c3801705c8d681c3a32c8faa8fafc7fb5309dee0f573f3c6a19d395" +checksum = "af36fd66ccd99f3f771ae39b75aaba28b952372b6debfb971134bf1f03466ab2" dependencies = [ "fs-swap", "interleaved-ordered", @@ -2559,8 +2559,8 @@ dependencies = [ "log 0.4.8", "num_cpus", "owning_ref", - "parity-util-mem 0.4.1", - "parking_lot 0.9.0", + "parity-util-mem 0.5.1", + "parking_lot 0.10.0", "regex", "rocksdb", "smallvec 1.2.0", @@ -2568,16 +2568,16 @@ dependencies = [ [[package]] name = "kvdb-web" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a0e36637fb86454de401e7cb88f40eb0ad1b9bcee837d46785e7c451f1ebf4" +checksum = "7a985c47b4c46429e96033ebf6eaed784a81ceccb4e5df13d63f3b9078a4df81" dependencies = [ "futures 0.3.1", "js-sys", "kvdb", "kvdb-memorydb", "log 0.4.8", - "parity-util-mem 0.4.1", + "parity-util-mem 0.5.1", "send_wrapper 0.3.0", "wasm-bindgen", "web-sys", @@ -4620,14 +4620,15 @@ dependencies = [ [[package]] name = "parity-util-mem" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900dd84654b048e5bad420bb341658fc2c4d7fea628c22bcf4621733e54859b4" +checksum = "ef1476e40bf8f5c6776e9600983435821ca86eb9819d74a6207cca69d091406a" dependencies = [ "cfg-if", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot 0.9.0", + "parking_lot 0.10.0", + "primitive-types", "smallvec 1.2.0", "winapi 0.3.8", ] @@ -5629,6 +5630,7 @@ dependencies = [ "lazy_static", "log 0.4.8", "names", + "parity-util-mem 0.5.1", "regex", "rpassword", "sc-client-api", @@ -5729,7 +5731,7 @@ dependencies = [ "linked-hash-map", "log 0.4.8", "parity-scale-codec", - "parity-util-mem 0.4.1", + "parity-util-mem 0.5.1", "parking_lot 0.10.0", "quickcheck", "sc-client", @@ -6308,6 +6310,7 @@ dependencies = [ "log 0.4.8", "parity-multiaddr 0.5.0", "parity-scale-codec", + "parity-util-mem 0.5.1", "parking_lot 0.10.0", "sc-chain-spec", "sc-client", @@ -6422,6 +6425,7 @@ dependencies = [ "futures 0.3.1", "log 0.4.8", "parity-scale-codec", + "parity-util-mem 0.5.1", "parking_lot 0.10.0", "serde", "sp-core", @@ -6439,6 +6443,7 @@ dependencies = [ "futures-diagnose", "log 0.4.8", "parity-scale-codec", + "parity-util-mem 0.5.1", "parking_lot 0.10.0", "sc-client-api", "sc-transaction-graph", @@ -7001,6 +7006,7 @@ dependencies = [ "log 0.4.8", "num-traits", "parity-scale-codec", + "parity-util-mem 0.5.1", "parking_lot 0.10.0", 
"pretty_assertions", "primitive-types", @@ -7146,6 +7152,7 @@ dependencies = [ "impl-trait-for-tuples", "log 0.4.8", "parity-scale-codec", + "parity-util-mem 0.5.1", "paste", "rand 0.7.3", "serde", @@ -7290,6 +7297,7 @@ name = "sp-test-primitives" version = "2.0.0" dependencies = [ "parity-scale-codec", + "parity-util-mem 0.5.1", "serde", "sp-application-crypto", "sp-core", @@ -7571,6 +7579,7 @@ dependencies = [ "pallet-babe", "pallet-timestamp", "parity-scale-codec", + "parity-util-mem 0.5.1", "sc-client", "sc-executor", "serde", diff --git a/client/Cargo.toml b/client/Cargo.toml index 8560408569108..c89fe88145d14 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -19,7 +19,7 @@ hash-db = { version = "0.15.2" } hex-literal = { version = "0.2.1" } sp-inherents = { version = "2.0.0", path = "../primitives/inherents" } sp-keyring = { version = "2.0.0", path = "../primitives/keyring" } -kvdb = "0.3.0" +kvdb = "0.4.0" log = { version = "0.4.8" } parking_lot = "0.10.0" sp-core = { version = "2.0.0", path = "../primitives/core" } @@ -37,5 +37,5 @@ tracing = "0.1.10" env_logger = "0.7.0" tempfile = "3.1.0" substrate-test-runtime-client = { version = "2.0.0", path = "../test-utils/runtime/client" } -kvdb-memorydb = "0.3.0" +kvdb-memorydb = "0.4.0" sp-panic-handler = { version = "2.0.0", path = "../primitives/panic-handler" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 4c2867deb3e83..27a40c4d94cfb 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -18,7 +18,7 @@ sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -kvdb = "0.3.0" +kvdb = "0.4.0" log = { version = "0.4.8" } parking_lot = "0.10.0" sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index abfb53ccb1d20..e302d53d55a61 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -35,6 +35,7 @@ names = "0.11.0" structopt = "0.3.8" sc-tracing = { version = "2.0.0", path = "../tracing" } chrono = "0.4.10" +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] rpassword = "4.0.1" diff --git a/client/cli/src/informant.rs b/client/cli/src/informant.rs index 312e4017d5ff0..9e7c5044e046d 100644 --- a/client/cli/src/informant.rs +++ b/client/cli/src/informant.rs @@ -28,6 +28,7 @@ mod display; /// Creates an informant in the form of a `Future` that must be polled regularly. 
pub fn build(service: &impl AbstractService) -> impl futures::Future { let client = service.client(); + let pool = service.transaction_pool(); let mut display = display::InformantDisplay::new(); @@ -40,6 +41,11 @@ pub fn build(service: &impl AbstractService) -> impl futures::Future info.usage.as_ref().map(|usage| usage.memory.database_cache).unwrap_or(0), "disk_read_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_read).unwrap_or(0), "disk_write_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_written).unwrap_or(0), + "memory_transaction_pool" => parity_util_mem::malloc_size(&*transaction_pool_), ); ready(()) }); + let _ = to_spawn_tx.unbounded_send(( Box::pin(select(tel_task, exit.clone()).map(drop)), From::from("telemetry-periodic-send") diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 1387d1df27a18..524e9a98a0dbf 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -19,6 +19,7 @@ sc-transaction-graph = { version = "2.0.0", path = "./graph" } sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } sc-client-api = { version = "2.0.0", path = "../api" } sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 4f12ab7fcc7e9..2d3172fc91544 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -14,6 +14,7 @@ serde = { version = "1.0.101", features = ["derive"] } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 7b7900c3e92fd..52e00df36395d 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -84,7 +84,7 @@ pub struct PruneStatus { /// Immutable transaction #[cfg_attr(test, derive(Clone))] -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, parity_util_mem::MallocSizeOf)] pub struct Transaction { /// Raw extrinsic representing that transaction. pub data: Extrinsic, @@ -209,7 +209,7 @@ const RECENTLY_PRUNED_TAGS: usize = 2; /// as-is for the second time will fail or produce unwanted results. /// Most likely it is required to revalidate them and recompute set of /// required tags. 
-#[derive(Debug)] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct BasePool { reject_future_transactions: bool, future: FutureTransactions, @@ -846,6 +846,33 @@ mod tests { } } + #[test] + fn can_track_heap_size() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![5u8; 1024], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + propagate: true, + }).expect("import 1 should be ok"); + pool.import(Transaction { + data: vec![3u8; 1024], + bytes: 1, + hash: 7, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![2], vec![7]], + propagate: true, + }).expect("import 2 should be ok"); + + assert!(parity_util_mem::malloc_size(&pool) > 5000); + } + #[test] fn should_remove_invalid_transactions() { // given diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/graph/src/future.rs index 0de50c1a6532c..bda26fe34f949 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -29,6 +29,7 @@ use sp_runtime::transaction_validity::{ use crate::base_pool::Transaction; +#[derive(parity_util_mem::MallocSizeOf)] /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction { /// Transaction details. @@ -109,7 +110,7 @@ impl WaitingTransaction { /// /// Contains transactions that are still awaiting for some other transactions that /// could provide a tag that they require. -#[derive(Debug)] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct FutureTransactions { /// tags that are not yet provided by any transaction and we await for them wanted_tags: HashMap>, @@ -243,3 +244,30 @@ impl FutureTransactions { self.waiting.values().fold(0, |acc, tx| acc + tx.transaction.bytes) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn can_track_heap_size() { + let mut future = FutureTransactions::default(); + future.import(WaitingTransaction { + transaction: Transaction { + data: vec![0u8; 1024], + bytes: 1, + hash: 1, + priority: 1, + valid_till: 2, + requires: vec![vec![1], vec![2]], + provides: vec![vec![3], vec![4]], + propagate: true, + }.into(), + missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), + imported_at: std::time::Instant::now(), + }); + + // data is at least 1024! + assert!(parity_util_mem::malloc_size(&future) > 1024); + } +} diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index 91ce58518a0a0..ab4e3a5a79f11 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -122,6 +122,16 @@ pub struct Pool { validated_pool: Arc>, } +impl parity_util_mem::MallocSizeOf for Pool +where + B::Hash: parity_util_mem::MallocSizeOf, + ExtrinsicFor: parity_util_mem::MallocSizeOf, +{ + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.validated_pool.size_of(ops) + } +} + impl Pool { /// Create a new transaction pool. pub fn new(options: Options, api: Arc) -> Self { diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index ec8d66e6b9885..23f0d49a93071 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -36,7 +36,7 @@ use crate::base_pool::Transaction; /// An in-pool transaction reference. /// /// Should be cheap to clone. 
-#[derive(Debug)] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct TransactionRef { /// The actual transaction data. pub transaction: Arc>, @@ -74,7 +74,7 @@ impl PartialEq for TransactionRef { } impl Eq for TransactionRef {} -#[derive(Debug)] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct ReadyTx { /// A reference to a transaction pub transaction: TransactionRef, @@ -104,7 +104,7 @@ Hence every hash retrieved from `provided_tags` is always present in `ready`; qed "#; -#[derive(Debug)] +#[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct ReadyTransactions { /// Insertion id insertion_id: u64, @@ -676,6 +676,24 @@ mod tests { assert_eq!(it.next(), None); } + #[test] + fn can_report_heap_size() { + let mut ready = ReadyTransactions::default(); + let tx = Transaction { + data: vec![5], + bytes: 1, + hash: 5, + priority: 1, + valid_till: u64::max_value(), // use the max_value() here for testing. + requires: vec![], + provides: vec![], + propagate: true, + }; + import(&mut ready, tx).unwrap(); + + assert!(parity_util_mem::malloc_size(&ready) > 200); + } + #[test] fn should_order_refs() { let mut id = 1; diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index c7e60deb4acba..95242840646c4 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -74,6 +74,17 @@ pub(crate) struct ValidatedPool { rotator: PoolRotator>, } +impl parity_util_mem::MallocSizeOf for ValidatedPool +where + B::Hash: parity_util_mem::MallocSizeOf, + ExtrinsicFor: parity_util_mem::MallocSizeOf, +{ + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // other entries insignificant or non-primary references + self.pool.size_of(ops) + } +} + impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, api: Arc) -> Self { diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 85bf2fd3275ed..7084e1c4a0f5c 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -53,6 +53,18 @@ pub struct BasicPool revalidation_strategy: Arc>>>, } +impl parity_util_mem::MallocSizeOf for BasicPool +where + PoolApi: sc_transaction_graph::ChainApi, + PoolApi::Hash: parity_util_mem::MallocSizeOf, + Block: BlockT, +{ + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // other entries insignificant or non-primary references + self.pool.size_of(ops) + } +} + /// Type of revalidation. pub enum RevalidationType { /// Light revalidation type. diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index d4f3d0ccb410c..fed02067b184a 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -215,3 +215,14 @@ fn should_not_retain_invalid_hashes_from_retracted() { block_on(pool.maintain(&BlockId::number(1), &[retracted_hash])); assert_eq!(pool.status().ready, 0); } + +#[test] +fn can_track_heap_size() { + let pool = maintained_pool(); + block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 210))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 211))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 212))).expect("1. 
Imported"); + + assert!(parity_util_mem::malloc_size(&pool) > 3000); +} \ No newline at end of file diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 4a06dec6ee248..a0f3700c9d18c 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -106,7 +106,7 @@ use sp_runtime::{ traits::{ self, CheckEqual, SimpleArithmetic, Zero, SignedExtension, Lookup, LookupError, SimpleBitOps, Hash, Member, MaybeDisplay, EnsureOrigin, BadOrigin, SaturatedConversion, - MaybeSerialize, MaybeSerializeDeserialize, StaticLookup, One, Bounded, + MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, }, }; @@ -171,12 +171,12 @@ pub trait Trait: 'static + Eq + Clone { /// The block number type used by the runtime. type BlockNumber: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleArithmetic - + Default + Bounded + Copy + sp_std::hash::Hash + sp_std::str::FromStr; + + Default + Bounded + Copy + sp_std::hash::Hash + sp_std::str::FromStr + MaybeMallocSizeOf; /// The output of the `Hashing` function. type Hash: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord - + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]>; + + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaybeMallocSizeOf; /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). type Hashing: Hash; diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 873d607ecdf19..9bd30d1823bca 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -30,6 +30,7 @@ sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } libsecp256k1 = { version = "0.3.2", default-features = false } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } # full crypto ed25519-dalek = { version = "1.0.0-pre.3", default-features = false, features = ["u64_backend", "alloc"], optional = true } diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs index d38761ccf0fd1..cb21ffe13df96 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -22,7 +22,7 @@ use codec::{Encode, Decode}; use num_traits::Zero; /// Substrate changes trie configuration. -#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] #[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] pub struct ChangesTrieConfiguration { /// Interval (in blocks) at which level1-digests are created. 
Digests are not diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 6f50a1a4d9ad4..bf7b2b80a3c5a 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -18,6 +18,7 @@ paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] serde_json = "1.0.41" @@ -37,4 +38,5 @@ std = [ "sp-io/std", "serde", "sp-inherents/std", + "parity-util-mem/std", ] diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 21e65d1fb5233..a46396dce08f4 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -25,7 +25,7 @@ use serde::{Deserialize, Serialize}; use sp_std::prelude::*; use sp_core::RuntimeDebug; use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize}; +use crate::traits::{self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeMallocSizeOf}; use crate::Justification; /// Something to identify a block. @@ -63,7 +63,7 @@ impl fmt::Display for BlockId { /// Abstraction over a substrate block. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct Block { @@ -76,7 +76,7 @@ pub struct Block { impl traits::Block for Block where Header: HeaderT, - Extrinsic: Member + Codec + traits::Extrinsic, + Extrinsic: Member + Codec + traits::Extrinsic + MaybeMallocSizeOf, { type Extrinsic = Extrinsic; type Header = Header; diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index fef02d4f00959..4d09b58793290 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -27,7 +27,7 @@ use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; /// Generic header digest. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] pub struct Digest { /// A list of logs in the digest. pub logs: Vec>, @@ -74,6 +74,7 @@ impl Digest { /// Digest item that is able to encode/decode 'system' digest items and /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] pub enum DigestItem { /// System digest item that contains the root of changes trie at given /// block. It is created for every block iff runtime supports changes @@ -107,7 +108,7 @@ pub enum DigestItem { /// Available changes trie signals. #[derive(PartialEq, Eq, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug))] +#[cfg_attr(feature = "std", derive(Debug, parity_util_mem::MallocSizeOf))] pub enum ChangesTrieSignal { /// New changes trie configuration is enacted, starting from **next block**. 
/// diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 51f31af0781d0..5bc7932feba44 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -22,6 +22,7 @@ use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef use crate::traits::{ self, Member, SimpleArithmetic, SimpleBitOps, Hash as HashT, MaybeSerializeDeserialize, MaybeSerialize, MaybeDisplay, + MaybeMallocSizeOf, }; use crate::generic::Digest; use sp_core::U256; @@ -51,6 +52,22 @@ pub struct Header + TryFrom, Hash: HashT> { pub digest: Digest, } +#[cfg(feature = "std")] +impl parity_util_mem::MallocSizeOf for Header +where + Number: Copy + Into + TryFrom + parity_util_mem::MallocSizeOf, + Hash: HashT, + Hash::Output: parity_util_mem::MallocSizeOf, +{ + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.parent_hash.size_of(ops) + + self.number.size_of(ops) + + self.state_root.size_of(ops) + + self.extrinsics_root.size_of(ops) + + self.digest.size_of(ops) + } +} + #[cfg(feature = "std")] pub fn serialize_number + TryFrom>( val: &T, s: S, @@ -105,10 +122,11 @@ impl codec::EncodeLike for Header where impl traits::Header for Header where Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + MaybeDisplay + - SimpleArithmetic + Codec + Copy + Into + TryFrom + sp_std::str::FromStr, + SimpleArithmetic + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + + MaybeMallocSizeOf, Hash: HashT, Hash::Output: Default + sp_std::hash::Hash + Copy + Member + Ord + - MaybeSerialize + Debug + MaybeDisplay + SimpleBitOps + Codec, + MaybeSerialize + Debug + MaybeDisplay + SimpleBitOps + Codec + MaybeMallocSizeOf, { type Number = Number; type Hash = ::Output; diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 1625d42e9367e..a516bc1f7fa99 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -44,6 +44,18 @@ where pub function: Call, } +#[cfg(feature = "std")] +impl parity_util_mem::MallocSizeOf + for UncheckedExtrinsic +where + Extra: SignedExtension +{ + fn size_of(&self, _ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // Instantiated only in runtime. + 0 + } +} + impl UncheckedExtrinsic { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 46930c35e8e8d..8d8effcc66a73 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -639,6 +639,13 @@ macro_rules! 
assert_eq_error_rate { #[derive(PartialEq, Eq, Clone, Default, Encode, Decode)] pub struct OpaqueExtrinsic(pub Vec); +#[cfg(feature = "std")] +impl parity_util_mem::MallocSizeOf for OpaqueExtrinsic { + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } +} + impl sp_std::fmt::Debug for OpaqueExtrinsic { #[cfg(feature = "std")] fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index c86638b57b310..e3e94c3c9f089 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -148,7 +148,7 @@ pub type DigestItem = generic::DigestItem; pub type Digest = generic::Digest; /// Block Header -#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, Default)] +#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, Default, parity_util_mem::MallocSizeOf)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] pub struct Header { @@ -220,10 +220,12 @@ impl<'a> Deserialize<'a> for Header { } /// An opaque extrinsic wrapper type. -#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Debug, Encode, Decode, parity_util_mem::MallocSizeOf)] pub struct ExtrinsicWrapper(Xt); -impl traits::Extrinsic for ExtrinsicWrapper { +impl traits::Extrinsic for ExtrinsicWrapper +where Xt: parity_util_mem::MallocSizeOf +{ type Call = (); type SignaturePayload = (); @@ -253,7 +255,7 @@ impl Deref for ExtrinsicWrapper { } /// Testing block -#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, parity_util_mem::MallocSizeOf)] pub struct Block { /// Block header pub header: Header, @@ -300,6 +302,9 @@ impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { #[derive(PartialEq, Eq, Clone, Encode, Decode)] pub struct TestXt(pub Option<(u64, Extra)>, pub Call); +// Non-opaque extrinsics always 0. +parity_util_mem::malloc_size_of_is_0!(any: TestXt); + impl Serialize for TestXt where TestXt: Encode { fn serialize(&self, seq: S) -> Result where S: Serializer { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index f8c2ed11a4e81..f07f4dd7ac3bf 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -470,6 +470,9 @@ sp_core::impl_maybe_marker!( /// A type that implements Serialize, DeserializeOwned and Debug when in std environment. trait MaybeSerializeDeserialize: DeserializeOwned, Serialize; + + /// A type that implements MallocSizeOf. + trait MaybeMallocSizeOf: parity_util_mem::MallocSizeOf; ); /// A type that provides a randomness beacon. @@ -503,13 +506,18 @@ pub trait IsMember { /// `parent_hash`, as well as a `digest` and a block `number`. /// /// You can also create a `new` one from those fields. -pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'static { +pub trait Header: + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + + MaybeMallocSizeOf + 'static +{ /// Header number. 
type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + Copy + MaybeDisplay + SimpleArithmetic + Codec + sp_std::str::FromStr; + + Copy + MaybeDisplay + SimpleArithmetic + Codec + sp_std::str::FromStr + + MaybeMallocSizeOf; /// Header hash type type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>; + + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + + AsMut<[u8]> + MaybeMallocSizeOf; /// Hashing algorithm type Hashing: Hash; @@ -557,14 +565,15 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 's /// `Extrinsic` pieces of information as well as a `Header`. /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. -pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'static { +pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { /// Type for extrinsics. - type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize; + type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize + MaybeMallocSizeOf; /// Header type. - type Header: Header; + type Header: Header + MaybeMallocSizeOf; /// Block hash type. type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>; + + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]> + + MaybeMallocSizeOf; /// Returns a reference to the header. fn header(&self) -> &Self::Header; @@ -583,8 +592,9 @@ pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + 'st fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec; } + /// Something that acts like an `Extrinsic`. -pub trait Extrinsic: Sized { +pub trait Extrinsic: Sized + MaybeMallocSizeOf { /// The function call. type Call; diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index ba19eb00f372f..2edd5f05751d9 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -11,6 +11,7 @@ codec = { package = "parity-scale-codec", version = "1.0.0", default-features = sp-core = { version = "2.0.0", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [features] default = [ diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 74115bdb8f46b..302b24fcc1e3e 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -28,6 +28,7 @@ use sp_runtime::traits::{BlakeTwo256, Verify, Extrinsic as ExtrinsicT,}; /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] pub enum Extrinsic { IncludeData(Vec), StorageChange(Vec, Option>), diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index 2a717739963ea..0b23c27f82c51 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -29,7 +29,7 @@ use futures::{ use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Member}, + traits::{Block as BlockT, Member, MaybeMallocSizeOf}, transaction_validity::{ TransactionLongevity, TransactionPriority, TransactionTag, }, @@ -154,7 +154,7 @@ pub trait InPoolTransaction { } /// Transaction pool interface. -pub trait TransactionPool: Send + Sync { +pub trait TransactionPool: Send + Sync + MaybeMallocSizeOf { /// Block type. type Block: BlockT; /// Transaction hash type. diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index e0d8aa77b76c5..684a681554a89 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -37,6 +37,7 @@ sc-client = { version = "0.8", optional = true, path = "../../client" } sp-trie = { version = "2.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.19.2", default-features = false } +parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] sc-executor = { version = "0.8", path = "../../client/executor" } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 96387b1efc304..dac232b25c9f8 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -114,6 +114,8 @@ pub enum Extrinsic { ChangesTrieConfigUpdate(Option), } +parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinisic does not need this + #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 36f21f192b022..3afa49fc25847 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -16,7 +16,7 @@ console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" -kvdb-web = "0.3" +kvdb-web = "0.4" service = { version = "0.8", package = "sc-service", path = "../../client/service", default-features = false } network = { package = "sc-network", path = "../../client/network" } chain-spec = { package = "sc-chain-spec", path = "../../client/chain-spec" } From 4944bd196ccc32f453dd87511e437b18d327e7ba Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 7 Feb 2020 11:58:31 +0100 Subject: [PATCH 23/25] pow: re-add support for algorithms where only linear verification is possible (#4843) * pow: re-add support for algorithms where only linear verification is possible * Remove unused generic parameters * Clone impl for PowBlockImport --- client/consensus/pow/src/lib.rs | 62 ++++++++++++++++++++------------- 1 file changed, 38 insertions(+), 24 deletions(-) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 87f51be216f60..d656f71a15b19 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -63,8 +63,8 @@ pub enum Error { HeaderUnsealed(B::Hash), #[display(fmt = "PoW validation 
error: invalid seal")] InvalidSeal, - #[display(fmt = "PoW validation error: invalid difficulty")] - InvalidDifficulty, + #[display(fmt = "PoW validation error: preliminary verification failed")] + FailedPreliminaryVerify, #[display(fmt = "Rejecting block too far in future")] TooFarInFuture, #[display(fmt = "Fetching best header failed using select chain: {:?}", _0)] @@ -154,18 +154,23 @@ pub trait PowAlgorithm { /// This function will be called twice during the import process, so the implementation /// should be properly cached. fn difficulty(&self, parent: &BlockId) -> Result>; - /// Verify that the seal is valid against given pre hash. - fn verify_seal( + /// Verify that the seal is valid against given pre hash when parent block is not yet imported. + /// + /// None means that preliminary verify is not available for this algorithm. + fn preliminary_verify( &self, - pre_hash: &B::Hash, - seal: &Seal, - ) -> Result>; + _pre_hash: &B::Hash, + _seal: &Seal, + ) -> Result, Error> { + Ok(None) + } /// Verify that the difficulty is valid against given seal. - fn verify_difficulty( + fn verify( &self, - difficulty: Self::Difficulty, parent: &BlockId, + pre_hash: &B::Hash, seal: &Seal, + difficulty: Self::Difficulty, ) -> Result>; /// Mine a seal that satisfies the given difficulty. fn mine( @@ -187,6 +192,19 @@ pub struct PowBlockImport { check_inherents_after: <::Header as HeaderT>::Number, } +impl Clone for PowBlockImport { + fn clone(&self) -> Self { + Self { + algorithm: self.algorithm.clone(), + inner: self.inner.clone(), + select_chain: self.select_chain.clone(), + client: self.client.clone(), + inherent_data_providers: self.inherent_data_providers.clone(), + check_inherents_after: self.check_inherents_after.clone(), + } + } +} + impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -322,12 +340,14 @@ impl BlockImport for PowBlockImport self.algorithm.difficulty(&BlockId::hash(parent_hash))?, }; - if !self.algorithm.verify_difficulty( - difficulty, + let pre_hash = block.header.hash(); + if !self.algorithm.verify( &BlockId::hash(parent_hash), + &pre_hash, &inner_seal, + difficulty, )? { - return Err(Error::::InvalidDifficulty.into()) + return Err(Error::::InvalidSeal.into()) } aux.difficulty = difficulty; @@ -379,11 +399,8 @@ impl PowVerifier { let pre_hash = header.hash(); - if !self.algorithm.verify_seal( - &pre_hash, - &inner_seal, - )? { - return Err(Error::InvalidSeal); + if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { + return Err(Error::FailedPreliminaryVerify); } Ok((header, seal)) @@ -450,20 +467,17 @@ pub fn register_pow_inherent_data_provider( pub type PowImportQueue = BasicQueue; /// Import queue for PoW engine. 
-pub fn import_queue( - block_import: BoxBlockImport>, +pub fn import_queue( + block_import: BoxBlockImport, algorithm: Algorithm, inherent_data_providers: InherentDataProviders, ) -> Result< - PowImportQueue>, + PowImportQueue, sp_consensus::Error > where B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + BlockOf + ProvideCache + AuxStore, - C: Send + Sync + AuxStore + 'static, - C::Api: BlockBuilderApi, + Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, - S: SelectChain + 'static, { register_pow_inherent_data_provider(&inherent_data_providers)?; From 20d81157b831fef52949d6e34e10dd2aea6060df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 7 Feb 2020 13:21:46 +0100 Subject: [PATCH 24/25] Move tracing cli args to `ImportParams` (#4850) --- bin/node-template/node/src/command.rs | 3 +-- bin/node/cli/src/command.rs | 4 ++-- client/cli/src/lib.rs | 7 +++---- client/cli/src/params.rs | 28 +++++++++++++-------------- 4 files changed, 20 insertions(+), 22 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 585b8e1ca8eb9..e7e386703deee 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -21,8 +21,7 @@ use crate::chain_spec; use crate::cli::Cli; /// Parse and run command line arguments -pub fn run(version: VersionInfo) -> error::Result<()> -{ +pub fn run(version: VersionInfo) -> error::Result<()> { let opt = sc_cli::from_args::(&version); let config = sc_service::Configuration::new(&version); diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index f5d747a14663a..0e9b23b73e0e1 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -57,9 +57,9 @@ where } // Setup tracing. - if let Some(tracing_targets) = cli_args.shared_params.tracing_targets.as_ref() { + if let Some(tracing_targets) = cli_args.import_params.tracing_targets.as_ref() { let subscriber = sc_tracing::ProfilingSubscriber::new( - cli_args.shared_params.tracing_receiver.into(), tracing_targets + cli_args.import_params.tracing_receiver.into(), tracing_targets ); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { panic!("Unable to set global default subscriber {}", e); diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 0965a79f1537e..735f8cb27af49 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -270,8 +270,7 @@ where /// 1. set the panic handler /// 2. raise the FD limit /// 3. 
initialize the logger -pub fn init(shared_params: &SharedParams, version: &VersionInfo) -> error::Result<()> -{ +pub fn init(shared_params: &SharedParams, version: &VersionInfo) -> error::Result<()> { let full_version = sc_service::config::full_version_from_strs( version.version, version.commit @@ -638,8 +637,8 @@ where config.telemetry_endpoints = Some(TelemetryEndpoints::new(cli.telemetry_endpoints)); } - config.tracing_targets = cli.shared_params.tracing_targets.into(); - config.tracing_receiver = cli.shared_params.tracing_receiver.into(); + config.tracing_targets = cli.import_params.tracing_targets.into(); + config.tracing_receiver = cli.import_params.tracing_receiver.into(); // Imply forced authoring on --dev config.force_authoring = cli.shared_params.dev || cli.force_authoring; diff --git a/client/cli/src/params.rs b/client/cli/src/params.rs index 1dc6b0567c698..ddded79142344 100644 --- a/client/cli/src/params.rs +++ b/client/cli/src/params.rs @@ -113,20 +113,6 @@ pub struct SharedParams { /// Sets a custom logging filter. #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] pub log: Option, - - /// Comma separated list of targets for tracing - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, } /// Parameters for block import. @@ -169,6 +155,20 @@ pub struct ImportParams { /// Specify the state cache size. #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] pub state_cache_size: usize, + + /// Comma separated list of targets for tracing + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages + #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, } /// Parameters used to create the network configuration. 
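For downstream CLIs the change above is mostly a relocation: the flags themselves (`--tracing-targets`, `--tracing-receiver`) keep their names and defaults, but code that previously read them from `shared_params` must now go through `import_params`. Below is a minimal sketch of the new wiring; the free-standing `init_tracing` helper and the `sc_cli::ImportParams` path are assumptions for illustration, while the body mirrors the node-cli `command.rs` hunk above.

// Sketch only: helper name and parameter path are illustrative, not part of the patch.
fn init_tracing(import_params: sc_cli::ImportParams) {
	// --tracing-targets now lives on ImportParams rather than SharedParams.
	if let Some(tracing_targets) = import_params.tracing_targets.as_ref() {
		// --tracing-receiver moved as well (it still defaults to "Log").
		let subscriber = sc_tracing::ProfilingSubscriber::new(
			import_params.tracing_receiver.into(),
			tracing_targets,
		);
		if let Err(e) = tracing::subscriber::set_global_default(subscriber) {
			panic!("Unable to set global default subscriber {}", e);
		}
	}
}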
From 4c34b6462340b04b0cf0b2c30f512ea5617fd8a6 Mon Sep 17 00:00:00 2001 From: Cecile Tonglet Date: Fri, 7 Feb 2020 13:22:06 +0100 Subject: [PATCH 25/25] Update parity-multiaddr dependency (#4852) --- Cargo.lock | 85 +++++++++------------------------------ client/service/Cargo.toml | 2 +- 2 files changed, 21 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ca7a5d1844f27..9b57b0153f16b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -443,12 +443,6 @@ dependencies = [ "wasm-bindgen-futures", ] -[[package]] -name = "bs58" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95ee6bba9d950218b6cc910cf62bc9e0a171d0f4537e3627b0f54d08549b188" - [[package]] name = "bs58" version = "0.3.0" @@ -2646,8 +2640,8 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "parity-multiaddr 0.7.1", - "parity-multihash 0.2.1", + "parity-multiaddr", + "parity-multihash", "parking_lot 0.10.0", "pin-project", "smallvec 1.2.0", @@ -2661,7 +2655,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbafb2706b8082233f66dc13e196f9cf9b4c229f2cd7c96b2b16617ad6ee330b" dependencies = [ "asn1_der", - "bs58 0.3.0", + "bs58", "ed25519-dalek", "fnv", "futures 0.3.1", @@ -2670,8 +2664,8 @@ dependencies = [ "libsecp256k1", "log 0.4.8", "multistream-select", - "parity-multiaddr 0.7.1", - "parity-multihash 0.2.1", + "parity-multiaddr", + "parity-multihash", "parking_lot 0.10.0", "pin-project", "prost", @@ -2682,7 +2676,7 @@ dependencies = [ "sha2", "smallvec 1.2.0", "thiserror", - "unsigned-varint 0.3.0", + "unsigned-varint", "untrusted", "void", "zeroize 1.1.0", @@ -2726,7 +2720,7 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bdf6fba9272ad47dde94bade89540fdb16e24ae9ff7fb714c1c80a035165f28" dependencies = [ - "bs58 0.3.0", + "bs58", "cuckoofilter", "fnv", "futures 0.3.1", @@ -2745,7 +2739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e6ecd058bf769d27ebec530544b081e08b0a1088e3186da8cc58d59915784d0" dependencies = [ "base64 0.11.0", - "bs58 0.3.0", + "bs58", "byteorder 1.3.2", "bytes 0.5.4", "fnv", @@ -2760,7 +2754,7 @@ dependencies = [ "rand 0.7.3", "sha2", "smallvec 1.2.0", - "unsigned-varint 0.3.0", + "unsigned-varint", "wasm-timer", ] @@ -2795,14 +2789,14 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.8", - "parity-multihash 0.2.1", + "parity-multihash", "prost", "prost-build", "rand 0.7.3", "sha2", "smallvec 1.2.0", "uint", - "unsigned-varint 0.3.0", + "unsigned-varint", "void", "wasm-timer", ] @@ -2842,7 +2836,7 @@ dependencies = [ "libp2p-core", "log 0.4.8", "parking_lot 0.10.0", - "unsigned-varint 0.3.0", + "unsigned-varint", ] [[package]] @@ -2894,7 +2888,7 @@ dependencies = [ "prost", "prost-build", "rw-stream-sink", - "unsigned-varint 0.3.0", + "unsigned-varint", "void", ] @@ -3302,7 +3296,7 @@ dependencies = [ "log 0.4.8", "smallvec 1.2.0", "tokio-io", - "unsigned-varint 0.3.0", + "unsigned-varint", ] [[package]] @@ -4510,24 +4504,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c276d76c5333b8c2579e02d49a06733a55b8282d2d9b13e8d53b6406bd7e30a" -[[package]] -name = "parity-multiaddr" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "045b3c7af871285146300da35b1932bb6e4639b66c7c98e85d06a32cbc4e8fa7" -dependencies = [ - "arrayref", - "bs58 0.2.5", - "byteorder 1.3.2", - "bytes 0.4.12", - 
"data-encoding", - "parity-multihash 0.1.3", - "percent-encoding 1.0.1", - "serde", - "unsigned-varint 0.2.3", - "url 1.7.2", -] - [[package]] name = "parity-multiaddr" version = "0.7.1" @@ -4535,32 +4511,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80878c27f90dd162d3143333d672e80b194d6b080f05c83440e3dfda42e409f2" dependencies = [ "arrayref", - "bs58 0.3.0", + "bs58", "byteorder 1.3.2", "data-encoding", - "parity-multihash 0.2.1", + "parity-multihash", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.3.0", + "unsigned-varint", "url 2.1.1", ] -[[package]] -name = "parity-multihash" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3a17dc27848fd99e4f87eb0f8c9baba6ede0a6d555400c850ca45254ef4ce3" -dependencies = [ - "blake2", - "bytes 0.4.12", - "rand 0.6.5", - "sha-1", - "sha2", - "sha3", - "unsigned-varint 0.2.3", -] - [[package]] name = "parity-multihash" version = "0.2.1" @@ -4573,7 +4534,7 @@ dependencies = [ "sha-1", "sha2", "sha3", - "unsigned-varint 0.3.0", + "unsigned-varint", ] [[package]] @@ -6120,7 +6081,7 @@ dependencies = [ "substrate-test-client", "substrate-test-runtime-client", "tempfile", - "unsigned-varint 0.3.0", + "unsigned-varint", "void", "zeroize 1.1.0", ] @@ -6308,7 +6269,7 @@ dependencies = [ "grafana-data-source", "lazy_static", "log 0.4.8", - "parity-multiaddr 0.5.0", + "parity-multiaddr", "parity-scale-codec", "parity-util-mem 0.5.1", "parking_lot 0.10.0", @@ -8388,12 +8349,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -[[package]] -name = "unsigned-varint" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f0023a96687fe169081e8adce3f65e3874426b7886e9234d490af2dc077959" - [[package]] name = "unsigned-varint" version = "0.3.0" diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 31408a9a96977..bc246db03e85c 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -52,7 +52,7 @@ sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } sc-rpc = { version = "2.0.0", path = "../rpc" } sc-telemetry = { version = "2.0.0", path = "../telemetry" } sc-offchain = { version = "2.0.0", path = "../offchain" } -parity-multiaddr = { package = "parity-multiaddr", version = "0.5.0" } +parity-multiaddr = { package = "parity-multiaddr", version = "0.7.1" } grafana-data-source = { version = "0.8", path = "../../utils/grafana-data-source" } sc-tracing = { version = "2.0.0", path = "../tracing" } tracing = "0.1.10"