From 306c1764eba3c57d567949a79dfe0979a6eabe80 Mon Sep 17 00:00:00 2001 From: Bryant Eisenbach Date: Mon, 1 Jul 2019 14:31:57 +0200 Subject: [PATCH 01/12] depends: Update wordlist to v1.3 (#10823) --- Cargo.lock | 13 ++++++------- accounts/ethkey/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 106b7adddfd..1a5bcda709d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1361,7 +1361,7 @@ dependencies = [ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1379,7 +1379,7 @@ dependencies = [ "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", "panic_hook 0.1.0", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.89 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1398,7 +1398,7 @@ dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3049,12 +3049,11 @@ dependencies = [ [[package]] name = "parity-wordlist" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4952,7 +4951,7 @@ dependencies = [ "checksum parity-tokio-ipc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eb002c2d3539ccd3b82bd915ec060028d4ab350ad203dbffa20028c1e483af5b" "checksum parity-util-mem 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "89e80f22052161e0cb55cb5a8a75890420c525031f95c9d262dbb0434aa85dc1" "checksum parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)" = "511379a8194230c2395d2f5fa627a5a7e108a9f976656ce723ae68fca4097bfc" -"checksum parity-wordlist 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf13102febd98f4ad416a526b42deb82daf482626ba6ab10d0ebf8f45327514c" +"checksum 
parity-wordlist 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "573d08f0d3bc8a6ffcdac1de2725b5daeed8db26345a9c12d91648e2d6457f3e" "checksum parity-ws 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2fec5048fba72a2e01baeb0d08089db79aead4b57e2443df172fb1840075a233" "checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5" "checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" diff --git a/accounts/ethkey/Cargo.toml b/accounts/ethkey/Cargo.toml index 9c01fa74f58..3806b0c3baa 100644 --- a/accounts/ethkey/Cargo.toml +++ b/accounts/ethkey/Cargo.toml @@ -11,7 +11,7 @@ eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } ethereum-types = "0.6.0" lazy_static = "1.0" log = "0.4" -parity-wordlist = "1.2" +parity-wordlist = "1.3" quick-error = "1.2.2" rand = "0.6" rustc-hex = "1.0" From 5dc5be1e58a2585d3d32f6dd4453789c605e3a00 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 1 Jul 2019 14:41:45 +0200 Subject: [PATCH 02/12] Better logging when backfilling ancient blocks fail (#10796) * Better logging when backfilling ancient blocks fail Print total blocks imported, closes #10792 * `finalize()` doesn't need Engine Pull out call to migrated_blocks() from replace_client_db() * More logs * Clarify that the percentage may be misleading * Remove replace_client_db() and replace with a straight call to restore_db() * Include the parent_hash in UnlinkedAncientBlockChain errors * Add a new RestorationStatus varian: Finalizing (as it can take a loooong while) Call abort_restore() when restoration fails * Add missing cases for new variant * typos * Typo and derive Debug * Do not attempt to salvage existing blocks unless they form a complete chain back to genesis * Fix test * Revert "Fix test" This reverts commit f027d4b4cb7b6c23fceec528c1711886ba9cfe4e. * Fix test again * Update comment * Be careful about locks * fix test failure * Do not defer returning an error when the chain is broken * Review feedback * no hex formatting for Option --- ethcore/blockchain/src/best_block.rs | 5 +- ethcore/blockchain/src/blockchain.rs | 5 +- ethcore/db/src/keys.rs | 6 +- ethcore/src/snapshot/consensus/authority.rs | 3 +- ethcore/src/snapshot/consensus/mod.rs | 2 +- ethcore/src/snapshot/consensus/work.rs | 10 +- ethcore/src/snapshot/error.rs | 6 +- ethcore/src/snapshot/service.rs | 101 +++++++++++++------- ethcore/src/snapshot/tests/helpers.rs | 2 +- ethcore/src/snapshot/tests/proof_of_work.rs | 2 +- ethcore/sync/src/chain/handler.rs | 8 +- ethcore/sync/src/chain/mod.rs | 5 +- ethcore/types/src/restoration_status.rs | 5 + parity/informant.rs | 11 ++- parity/snapshot.rs | 1 + 15 files changed, 108 insertions(+), 64 deletions(-) diff --git a/ethcore/blockchain/src/best_block.rs b/ethcore/blockchain/src/best_block.rs index 20f247391dc..cddb798358d 100644 --- a/ethcore/blockchain/src/best_block.rs +++ b/ethcore/blockchain/src/best_block.rs @@ -24,7 +24,8 @@ use common_types::header::Header; /// For GHOST fork-choice rule it would typically describe the block with highest /// combined difficulty (usually the block with the highest block number). /// -/// Sometimes refered as 'latest block'. +/// Sometimes referred as 'latest block'. +#[derive(Debug)] pub struct BestBlock { /// Best block decoded header. 
pub header: Header, @@ -35,7 +36,7 @@ pub struct BestBlock { } /// Best ancient block info. If the blockchain has a gap this keeps track of where it starts. -#[derive(Default)] +#[derive(Debug, Default)] pub struct BestAncientBlock { /// Best block hash. pub hash: H256, diff --git a/ethcore/blockchain/src/blockchain.rs b/ethcore/blockchain/src/blockchain.rs index dbe18f25310..f1350f957f0 100644 --- a/ethcore/blockchain/src/blockchain.rs +++ b/ethcore/blockchain/src/blockchain.rs @@ -652,10 +652,7 @@ impl BlockChain { // and write them if let (Some(hash), Some(number)) = (best_ancient, best_ancient_number) { let mut best_ancient_block = bc.best_ancient_block.write(); - *best_ancient_block = Some(BestAncientBlock { - hash: hash, - number: number, - }); + *best_ancient_block = Some(BestAncientBlock { hash, number }); } } diff --git a/ethcore/db/src/keys.rs b/ethcore/db/src/keys.rs index ceab94211ec..d7db42bf6c1 100644 --- a/ethcore/db/src/keys.rs +++ b/ethcore/db/src/keys.rs @@ -205,7 +205,7 @@ pub struct TransactionAddress { } /// Contains all block receipts. -#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper, MallocSizeOf)] +#[derive(Debug, Clone, RlpEncodableWrapper, RlpDecodableWrapper, MallocSizeOf)] pub struct BlockReceipts { /// Block receipts pub receipts: Vec, @@ -214,9 +214,7 @@ pub struct BlockReceipts { impl BlockReceipts { /// Create new block receipts wrapper. pub fn new(receipts: Vec) -> Self { - BlockReceipts { - receipts: receipts - } + BlockReceipts { receipts } } } diff --git a/ethcore/src/snapshot/consensus/authority.rs b/ethcore/src/snapshot/consensus/authority.rs index 4ff812bdb54..4f65d42909e 100644 --- a/ethcore/src/snapshot/consensus/authority.rs +++ b/ethcore/src/snapshot/consensus/authority.rs @@ -348,7 +348,7 @@ impl Rebuilder for ChunkRebuilder { Ok(()) } - fn finalize(&mut self, _engine: &dyn Engine) -> Result<(), ::error::Error> { + fn finalize(&mut self) -> Result<(), ::error::Error> { if !self.had_genesis { return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); } @@ -358,6 +358,7 @@ impl Rebuilder for ChunkRebuilder { None => return Err(Error::WrongChunkFormat("Warp target block not included.".into()).into()), }; + trace!(target: "snapshot", "rebuilder, finalize: verifying {} unverified first blocks", self.unverified_firsts.len()); // verify the first entries of chunks we couldn't before. // we store all last verifiers, but not all firsts. // match each unverified first epoch with a last epoch verifier. diff --git a/ethcore/src/snapshot/consensus/mod.rs b/ethcore/src/snapshot/consensus/mod.rs index 4262248b1db..d6f317538ba 100644 --- a/ethcore/src/snapshot/consensus/mod.rs +++ b/ethcore/src/snapshot/consensus/mod.rs @@ -92,5 +92,5 @@ pub trait Rebuilder: Send { /// /// This should apply the necessary "glue" between chunks, /// and verify against the restored state. - fn finalize(&mut self, engine: &dyn Engine) -> Result<(), ::error::Error>; + fn finalize(&mut self) -> Result<(), ::error::Error>; } diff --git a/ethcore/src/snapshot/consensus/work.rs b/ethcore/src/snapshot/consensus/work.rs index 4bb0c758a7f..eda200d6671 100644 --- a/ethcore/src/snapshot/consensus/work.rs +++ b/ethcore/src/snapshot/consensus/work.rs @@ -208,15 +208,15 @@ impl PowRebuilder { /// Create a new PowRebuilder. 
fn new(chain: BlockChain, db: Arc, manifest: &ManifestData, snapshot_blocks: u64) -> Result { Ok(PowRebuilder { - chain: chain, - db: db, + chain, + db, rng: OsRng::new().map_err(|e| format!("{}", e))?, disconnected: Vec::new(), best_number: manifest.block_number, best_hash: manifest.block_hash, best_root: manifest.state_root, fed_blocks: 0, - snapshot_blocks: snapshot_blocks, + snapshot_blocks, }) } } @@ -298,9 +298,9 @@ impl Rebuilder for PowRebuilder { } /// Glue together any disconnected chunks and check that the chain is complete. - fn finalize(&mut self, _: &dyn Engine) -> Result<(), ::error::Error> { + fn finalize(&mut self) -> Result<(), ::error::Error> { let mut batch = self.db.transaction(); - + trace!(target: "snapshot", "rebuilder, finalize: inserting {} disconnected chunks", self.disconnected.len()); for (first_num, first_hash) in self.disconnected.drain(..) { let parent_num = first_num - 1; diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index 8381bd4cb9a..68742e2e178 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -68,8 +68,8 @@ pub enum Error { BadEpochProof(u64), /// Wrong chunk format. WrongChunkFormat(String), - /// Unlinked ancient block chain - UnlinkedAncientBlockChain, + /// Unlinked ancient block chain; includes the parent hash where linkage failed + UnlinkedAncientBlockChain(H256), } impl error::Error for Error { @@ -108,7 +108,7 @@ impl fmt::Display for Error { Error::SnapshotAborted => write!(f, "Snapshot was aborted."), Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i), Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg), - Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"), + Error::UnlinkedAncientBlockChain(parent_hash) => write!(f, "Unlinked ancient blocks chain at parent_hash={:#x}", parent_hash), } } } diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 3e914035e90..5e1efe13824 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -43,6 +43,7 @@ use bytes::Bytes; use journaldb::Algorithm; use kvdb::DBTransaction; use snappy; +use snapshot::error::Error::UnlinkedAncientBlockChain; /// Helper for removing directories in case of error. struct Guard(bool, PathBuf); @@ -110,17 +111,17 @@ impl Restoration { let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; - let root = manifest.state_root.clone(); + let final_state_root = manifest.state_root.clone(); Ok(Restoration { - manifest: manifest, + manifest, state_chunks_left: state_chunks, block_chunks_left: block_chunks, state: StateRebuilder::new(raw_db.key_value().clone(), params.pruning), - secondary: secondary, + secondary, writer: params.writer, snappy_buffer: Vec::new(), - final_state_root: root, + final_state_root, guard: params.guard, db: raw_db, }) @@ -170,7 +171,7 @@ impl Restoration { } // finish up restoration. - fn finalize(mut self, engine: &dyn Engine) -> Result<(), Error> { + fn finalize(mut self) -> Result<(), Error> { use trie::TrieError; if !self.is_done() { return Ok(()) } @@ -186,13 +187,14 @@ impl Restoration { self.state.finalize(self.manifest.block_number, self.manifest.block_hash)?; // connect out-of-order chunks and verify chain integrity. 
- self.secondary.finalize(engine)?; + self.secondary.finalize()?; if let Some(writer) = self.writer { writer.finish(self.manifest)?; } self.guard.disarm(); + trace!(target: "snapshot", "restoration finalised correctly"); Ok(()) } @@ -337,16 +339,6 @@ impl Service { dir } - // replace one the client's database with our own. - fn replace_client_db(&self) -> Result<(), Error> { - let migrated_blocks = self.migrate_blocks()?; - info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); - - let rest_db = self.restoration_db(); - self.client.restore_db(&*rest_db.to_string_lossy())?; - Ok(()) - } - // Migrate the blocks in the current DB into the new chain fn migrate_blocks(&self) -> Result { // Count the number of migrated blocks @@ -361,11 +353,27 @@ impl Service { // The old database looks like this: // [genesis, best_ancient_block] ... [first_block, best_block] - // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can assume that the whole range from [genesis, best_block] is imported. - // The new database only contains the tip of the chain ([first_block, best_block]), + // If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can + // assume that the whole range from [genesis, best_block] is imported. + // The new database only contains the tip of the chain ([new_first_block, new_best_block]), // so the useful set of blocks is defined as: // [0 ... min(new.first_block, best_ancient_block or best_block)] + // + // If, for whatever reason, the old db does not have ancient blocks (i.e. + // `best_ancient_block` is `None` AND a non-zero `first_block`), such that the old db looks + // like [old_first_block..old_best_block] (which may or may not partially overlap with + // [new_first_block..new_best_block]) we do the conservative thing and do not migrate the + // old blocks. let find_range = || -> Option<(H256, H256)> { + // In theory, if the current best_block is > new first_block (i.e. ranges overlap) + // we could salvage them but what if there's been a re-org at the boundary and the two + // chains do not match anymore? We'd have to check the existing blocks carefully. + if cur_chain_info.ancient_block_number.is_none() && cur_chain_info.first_block_number.unwrap_or(0) > 0 { + info!(target: "blockchain", "blocks in the current DB do not stretch back to genesis; can't salvage them into the new DB. In current DB, first block: #{:?}/{:?}, best block: #{:?}/{:?}", + cur_chain_info.first_block_number, cur_chain_info.first_block_hash, + cur_chain_info.best_block_number, cur_chain_info.best_block_hash); + return None; + } let next_available_from = next_chain_info.first_block_number?; let cur_available_to = cur_chain_info.ancient_block_number.unwrap_or(cur_chain_info.best_block_number); @@ -375,10 +383,11 @@ impl Service { return None; } - trace!(target: "snapshot", "Trying to import ancient blocks until {}", highest_block_num); + trace!(target: "snapshot", "Trying to import ancient blocks until {}. First block in new chain=#{}, first block in old chain=#{:?}, best block in old chain=#{}", + highest_block_num, next_available_from, cur_chain_info.first_block_number, cur_chain_info.best_block_number); // Here we start from the highest block number and go backward to 0, - // thus starting at `highest_block_num` and targetting `0`. + // thus starting at `highest_block_num` and targeting `0`. 
let target_hash = self.client.block_hash(BlockId::Number(0))?; let start_hash = self.client.block_hash(BlockId::Number(highest_block_num))?; @@ -398,7 +407,10 @@ impl Service { return Ok(count); } - let block = self.client.block(BlockId::Hash(parent_hash)).ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?; + let block = self.client.block(BlockId::Hash(parent_hash)).ok_or_else(|| { + error!(target: "snapshot", "migrate_blocks: did not find block from parent_hash={:#x} (start_hash={:#x})", parent_hash, start_hash); + UnlinkedAncientBlockChain(parent_hash) + })?; parent_hash = block.parent_hash(); let block_number = block.number(); @@ -412,7 +424,14 @@ impl Service { next_chain.insert_unordered_block(&mut batch, block, block_receipts, Some(parent_total_difficulty), false, true); count += 1; }, - _ => break, + _ => { + // We couldn't reach the targeted hash + error!(target: "snapshot", "migrate_blocks: failed to find receipts and parent total difficulty; cannot reach the target_hash ({:#x}). Block #{}, parent_hash={:#x}, parent_total_difficulty={:?}, start_hash={:#x}, ancient_block_number={:?}, best_block_number={:?}", + target_hash, block_number, parent_hash, parent_total_difficulty, + start_hash, cur_chain_info.ancient_block_number, cur_chain_info.best_block_number, + ); + return Err(UnlinkedAncientBlockChain(parent_hash).into()); + }, } // Writing changes to DB and logging every now and then @@ -433,11 +452,6 @@ impl Service { next_chain.commit(); next_db.key_value().flush().expect("DB flush failed."); - // We couldn't reach the targeted hash - if parent_hash != target_hash { - return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into()); - } - // Update best ancient block in the Next Chain next_chain.update_best_ancient_block(&start_hash); Ok(count) @@ -549,6 +563,8 @@ impl Service { *self.status.lock() = RestorationStatus::Initializing { chunks_done: 0, + state_chunks: manifest.state_hashes.len() as u32, + block_chunks: manifest.block_hashes.len() as u32, }; fs::create_dir_all(&rest_dir)?; @@ -563,7 +579,7 @@ impl Service { manifest: manifest.clone(), pruning: self.pruning, db: self.restoration_db_handler.open(&rest_db)?, - writer: writer, + writer, genesis: &self.genesis_block, guard: Guard::new(rest_db), engine: &*self.engine, @@ -654,15 +670,20 @@ impl Service { // lead to deadlock. fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { trace!(target: "snapshot", "finalizing restoration"); + *self.status.lock() = RestorationStatus::Finalizing; let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); // destroy the restoration before replacing databases and snapshot. rest.take() - .map(|r| r.finalize(&*self.engine)) + .map(|r| r.finalize()) .unwrap_or(Ok(()))?; - self.replace_client_db()?; + let migrated_blocks = self.migrate_blocks()?; + info!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks); + + // replace the Client's database with the new one (restart the Client). + self.client.restore_db(&*self.restoration_db().to_string_lossy())?; if recover { let mut reader = self.reader.write(); @@ -690,14 +711,20 @@ impl Service { /// Feed a chunk of either kind (block or state). no-op if no restoration or status is wrong. fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) { // TODO: be able to process block chunks and state chunks at same time? 
- let mut restoration = self.restoration.lock(); - match self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) { + let r = { + let mut restoration = self.restoration.lock(); + self.feed_chunk_with_restoration(&mut restoration, hash, chunk, is_state) + }; + match r { Ok(()) | Err(Error::Snapshot(SnapshotError::RestorationAborted)) => (), Err(e) => { + // TODO: after this we're sometimes deadlocked warn!("Encountered error during snapshot restoration: {}", e); - *self.restoration.lock() = None; - *self.status.lock() = RestorationStatus::Failed; + self.abort_restore(); + if let Some(mut status) = self.status.try_lock_for(std::time::Duration::from_millis(10)) { + *status = RestorationStatus::Failed; + } let _ = fs::remove_dir_all(self.restoration_dir()); } } @@ -707,8 +734,8 @@ impl Service { fn feed_chunk_with_restoration(&self, restoration: &mut Option, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> { let (result, db) = { match self.status() { - RestorationStatus::Inactive | RestorationStatus::Failed => { - trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive or failed", hash); + RestorationStatus::Inactive | RestorationStatus::Failed | RestorationStatus::Finalizing => { + trace!(target: "snapshot", "Tried to restore chunk {:x} while inactive, failed or finalizing", hash); return Ok(()); }, RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. } => { @@ -803,7 +830,7 @@ impl SnapshotService for Service { let mut cur_status = self.status.lock(); match *cur_status { - RestorationStatus::Initializing { ref mut chunks_done } => { + RestorationStatus::Initializing { ref mut chunks_done, .. } => { *chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32 + self.block_chunks.load(Ordering::SeqCst) as u32; } diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 1873d05b7f9..a6e516b1b1a 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -187,5 +187,5 @@ pub fn restore( trace!(target: "snapshot", "finalizing"); state.finalize(manifest.block_number, manifest.block_hash)?; - secondary.finalize(engine) + secondary.finalize() } diff --git a/ethcore/src/snapshot/tests/proof_of_work.rs b/ethcore/src/snapshot/tests/proof_of_work.rs index fb714e667f5..4aa444229a2 100644 --- a/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/ethcore/src/snapshot/tests/proof_of_work.rs @@ -93,7 +93,7 @@ fn chunk_and_restore(amount: u64) { rebuilder.feed(&chunk, engine.as_ref(), &flag).unwrap(); } - rebuilder.finalize(engine.as_ref()).unwrap(); + rebuilder.finalize().unwrap(); drop(rebuilder); // and test it. diff --git a/ethcore/sync/src/chain/handler.rs b/ethcore/sync/src/chain/handler.rs index afd0b4ff227..ada5058dc9c 100644 --- a/ethcore/sync/src/chain/handler.rs +++ b/ethcore/sync/src/chain/handler.rs @@ -256,7 +256,7 @@ impl SyncHandler { return Err(DownloaderImportError::Invalid); } match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { + BlockStatus::InChain => { trace!(target: "sync", "New block hash already in chain {:?}", hash); }, BlockStatus::Queued => { @@ -529,10 +529,14 @@ impl SyncHandler { sync.snapshot.clear(); return Ok(()); }, - RestorationStatus::Initializing { .. } => { + RestorationStatus::Initializing { .. 
} => { trace!(target: "warp", "{}: Snapshot restoration is initializing", peer_id); return Ok(()); } + RestorationStatus::Finalizing => { + trace!(target: "warp", "{}: Snapshot finalizing restoration", peer_id); + return Ok(()); + } RestorationStatus::Ongoing { .. } => { trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); }, diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs index 0c2b03d1bd0..fb655308d98 100644 --- a/ethcore/sync/src/chain/mod.rs +++ b/ethcore/sync/src/chain/mod.rs @@ -1210,7 +1210,7 @@ impl ChainSync { RestorationStatus::Inactive | RestorationStatus::Failed => { self.set_state(SyncState::SnapshotWaiting); }, - RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } => (), + RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } | RestorationStatus::Finalizing => (), }, SyncState::SnapshotWaiting => { match io.snapshot_service().status() { @@ -1221,6 +1221,9 @@ impl ChainSync { RestorationStatus::Initializing { .. } => { trace!(target:"sync", "Snapshot restoration is initializing"); }, + RestorationStatus::Finalizing { .. } => { + trace!(target:"sync", "Snapshot finalizing restoration"); + }, RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } => { if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD { trace!(target:"sync", "Resuming snapshot sync"); diff --git a/ethcore/types/src/restoration_status.rs b/ethcore/types/src/restoration_status.rs index b36ec7ef4a9..f02aa118c80 100644 --- a/ethcore/types/src/restoration_status.rs +++ b/ethcore/types/src/restoration_status.rs @@ -23,6 +23,10 @@ pub enum RestorationStatus { Inactive, /// Restoration is initializing Initializing { + /// Total number of state chunks. + state_chunks: u32, + /// Total number of block chunks. + block_chunks: u32, /// Number of chunks done/imported chunks_done: u32, }, @@ -37,6 +41,7 @@ pub enum RestorationStatus { /// Number of block chunks completed. block_chunks_done: u32, }, + Finalizing, /// Failed restoration. Failed, } diff --git a/parity/informant.rs b/parity/informant.rs index 2855579762c..14400d58f08 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -319,9 +319,16 @@ impl Informant { RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => { format!("Syncing snapshot {}/{}", state_chunks_done + block_chunks_done, state_chunks + block_chunks) }, - RestorationStatus::Initializing { chunks_done } => { - format!("Snapshot initializing ({} chunks restored)", chunks_done) + RestorationStatus::Initializing { chunks_done, state_chunks, block_chunks } => { + let total_chunks = state_chunks + block_chunks; + // Note that the percentage here can be slightly misleading when + // they have chunks already on disk: we'll import the local + // chunks first and then download the rest. + format!("Snapshot initializing ({}/{} chunks restored, {:.0}%)", chunks_done, total_chunks, (chunks_done as f32 / total_chunks as f32) * 100.0) }, + RestorationStatus::Finalizing => { + format!("Snapshot finalization under way") + } _ => String::new(), } ) diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 269965c3355..c1d2a77e3fd 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -123,6 +123,7 @@ fn restore_using(snapshot: Arc, reader: &R, match snapshot.status() { RestorationStatus::Ongoing { .. 
} => Err("Snapshot file is incomplete and missing chunks.".into()), RestorationStatus::Initializing { .. } => Err("Snapshot restoration is still initializing.".into()), + RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()), RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), RestorationStatus::Inactive => { info!("Restoration complete."); From 9d9e2b43f29475aad3e4d29872d72c6610edef22 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Tue, 2 Jul 2019 15:06:27 +0800 Subject: [PATCH 03/12] ethcore does not use byteorder (#10829) --- Cargo.lock | 1 - ethcore/Cargo.toml | 1 - ethcore/src/builtin.rs | 5 +++-- ethcore/src/lib.rs | 1 - ethcore/src/state_db.rs | 17 +++++++++-------- 5 files changed, 12 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a5bcda709d..3bf2f113f45 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -867,7 +867,6 @@ dependencies = [ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "blooms-db 0.1.0", "bn 0.4.4 (git+https://github.com/paritytech/bn)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "common-types 0.1.0", "criterion 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 9296f10b1a6..9c315fffbbc 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -10,7 +10,6 @@ authors = ["Parity Technologies "] ansi_term = "0.11" blooms-db = { path = "../util/blooms-db", optional = true } bn = { git = "https://github.com/paritytech/bn", default-features = false } -byteorder = "1.0" common-types = { path = "types" } crossbeam = "0.4" derive_more = "0.14.0" diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index bae1c75da36..6a52c9cdd42 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -19,7 +19,6 @@ use std::cmp::{max, min}; use std::io::{self, Read}; -use byteorder::{ByteOrder, BigEndian}; use parity_crypto::digest; use num::{BigUint, Zero, One}; @@ -369,7 +368,9 @@ impl Impl for ModexpImpl { // but so would running out of addressable memory! 
let mut read_len = |reader: &mut io::Chain<&[u8], io::Repeat>| { reader.read_exact(&mut buf[..]).expect("reading from zero-extended memory cannot fail; qed"); - BigEndian::read_u64(&buf[24..]) as usize + let mut len_bytes = [0u8; 8]; + len_bytes.copy_from_slice(&buf[24..]); + u64::from_be_bytes(len_bytes) as usize }; let base_len = read_len(&mut reader); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 254b6bed731..a0ee0c2d701 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -55,7 +55,6 @@ extern crate ansi_term; extern crate bn; -extern crate byteorder; extern crate common_types as types; extern crate crossbeam; extern crate ethabi; diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 132677ab5cf..cb00d2132e8 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -21,7 +21,6 @@ use std::io; use std::sync::Arc; use bloom_journal::{Bloom, BloomJournal}; -use byteorder::{LittleEndian, ByteOrder}; use db::COL_ACCOUNT_BLOOM; use ethereum_types::{H256, Address}; use hash::keccak; @@ -169,11 +168,15 @@ impl StateDB { let hash_count = hash_count_bytes[0]; let mut bloom_parts = vec![0u64; ACCOUNT_BLOOM_SPACE / 8]; - let mut key = [0u8; 8]; for i in 0..ACCOUNT_BLOOM_SPACE / 8 { - LittleEndian::write_u64(&mut key, i as u64); + let key: [u8; 8] = (i as u64).to_le_bytes(); bloom_parts[i] = db.get(COL_ACCOUNT_BLOOM, &key).expect("low-level database error") - .and_then(|val| Some(LittleEndian::read_u64(&val[..]))) + .map(|val| { + assert!(val.len() == 8, "low-level database error"); + let mut buff = [0u8; 8]; + buff.copy_from_slice(&*val); + u64::from_le_bytes(buff) + }) .unwrap_or(0u64); } @@ -186,12 +189,10 @@ impl StateDB { pub fn commit_bloom(batch: &mut DBTransaction, journal: BloomJournal) -> io::Result<()> { assert!(journal.hash_functions <= 255); batch.put(COL_ACCOUNT_BLOOM, ACCOUNT_BLOOM_HASHCOUNT_KEY, &[journal.hash_functions as u8]); - let mut key = [0u8; 8]; - let mut val = [0u8; 8]; for (bloom_part_index, bloom_part_value) in journal.entries { - LittleEndian::write_u64(&mut key, bloom_part_index as u64); - LittleEndian::write_u64(&mut val, bloom_part_value); + let key: [u8; 8] = (bloom_part_index as u64).to_le_bytes(); + let val: [u8; 8] = bloom_part_value.to_le_bytes(); batch.put(COL_ACCOUNT_BLOOM, &key, &val); } Ok(()) From 5b30f22011d39a2ad3cc4157d253490218802d3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexandre=20R=2E=20Bald=C3=A9?= Date: Tue, 2 Jul 2019 12:33:02 +0100 Subject: [PATCH 04/12] Fix typo in README.md (#10828) Without this colon I first understood this line to refer to the `make` utility on Windows. Might be useful to fix this. The additional whitespaces at the end are to trigger a line break. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a8489013a3c..50715eaf1c6 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do `clang` is required. It comes with Xcode command line tools or can be installed with homebrew. -- Windows +- Windows: Make sure you have Visual Studio 2015 with C++ support installed. 
Next, download and run the `rustup` installer from https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe, start "VS2015 x64 Native Tools Command Prompt", and use the following command to install and set up the `msvc` toolchain: ```bash From 5f064a907614a85cae50696c5584c6b67f8c09eb Mon Sep 17 00:00:00 2001 From: Anton Gavrilov Date: Tue, 2 Jul 2019 17:52:05 +0200 Subject: [PATCH 05/12] Remove excessive warning (#10831) --- rpc/src/v1/helpers/engine_signer.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/rpc/src/v1/helpers/engine_signer.rs b/rpc/src/v1/helpers/engine_signer.rs index f993d15f230..56cead696cb 100644 --- a/rpc/src/v1/helpers/engine_signer.rs +++ b/rpc/src/v1/helpers/engine_signer.rs @@ -37,10 +37,7 @@ impl ethcore::engines::EngineSigner for EngineSigner { fn sign(&self, message: ethkey::Message) -> Result { match self.accounts.sign(self.address, Some(self.password.clone()), message) { Ok(ok) => Ok(ok), - Err(e) => { - warn!("Unable to sign consensus message: {:?}", e); - Err(ethkey::Error::InvalidSecret) - }, + Err(e) => Err(ethkey::Error::InvalidSecret), } } From b4af8df535aabcc61a39607f33e1093ed53650bf Mon Sep 17 00:00:00 2001 From: David Date: Tue, 2 Jul 2019 21:26:26 +0200 Subject: [PATCH 06/12] When updating the client or when called from RPC, sleep should mean sleep (#10814) Closes https://github.com/paritytech/parity-ethereum/issues/10687 `sleep()` is called from several places but when called from `disable()` or through the `setMode` RPC, we should ignore queue contents and go to sleep. --- ethcore/src/client/client.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 6094f69949f..7678314f0ee 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1108,7 +1108,7 @@ impl Client { let mut ss = self.sleep_state.lock(); if let Some(t) = ss.last_activity { if Instant::now() > t + timeout { - self.sleep(); + self.sleep(false); ss.last_activity = None; } } @@ -1118,7 +1118,7 @@ impl Client { let now = Instant::now(); if let Some(t) = ss.last_activity { if now > t + timeout { - self.sleep(); + self.sleep(false); ss.last_activity = None; ss.last_autosleep = Some(now); } @@ -1217,10 +1217,10 @@ impl Client { } } - fn sleep(&self) { + fn sleep(&self, force: bool) { if self.liveness.load(AtomicOrdering::Relaxed) { // only sleep if the import queue is mostly empty. 
- if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON { + if force || (self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON) { self.liveness.store(false, AtomicOrdering::Relaxed); self.notify(|n| n.stop()); info!(target: "mode", "sleep: Sleeping."); @@ -1733,7 +1733,7 @@ impl BlockChainClient for Client { } match new_mode { Mode::Active => self.wake_up(), - Mode::Off => self.sleep(), + Mode::Off => self.sleep(true), _ => {(*self.sleep_state.lock()).last_activity = Some(Instant::now()); } } } From 895574b774e06592d4ef7a8f5ba26b679188471e Mon Sep 17 00:00:00 2001 From: David Date: Wed, 3 Jul 2019 09:44:32 +0200 Subject: [PATCH 07/12] Allow --nat extip:your.host.here.org (#10830) * Allow --nat extip:your.host.here.org Closes #10604 * Use split instead of Regex --- parity/configuration.rs | 44 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/parity/configuration.rs b/parity/configuration.rs index 541637fd411..e864eabade1 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -16,7 +16,7 @@ use std::time::Duration; use std::io::Read; -use std::net::SocketAddr; +use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::collections::{HashSet, BTreeMap}; use std::iter::FromIterator; @@ -725,9 +725,18 @@ impl Configuration { let port = self.args.arg_ports_shift + self.args.arg_port; let listen_address = SocketAddr::new(self.interface(&self.args.arg_interface).parse().unwrap(), port); let public_address = if self.args.arg_nat.starts_with("extip:") { - let host = &self.args.arg_nat[6..]; - let host = host.parse().map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))?; - Some(SocketAddr::new(host, port)) + let host = self.args.arg_nat[6..].split(':').next().expect("split has at least one part; qed"); + let host = format!("{}:{}", host, port); + match host.to_socket_addrs() { + Ok(mut addr_iter) => { + if let Some(addr) = addr_iter.next() { + Some(addr) + } else { + return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) + } + }, + Err(_) => return Err(format!("Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..])) + } } else { None }; @@ -1844,6 +1853,33 @@ mod tests { assert_eq!(conf1.ipfs_config().port, 5002); } + #[test] + fn should_resolve_external_nat_hosts() { + // Ip works + let conf = parse(&["parity", "--nat", "extip:1.1.1.1"]); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "1.1.1.1"); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Ip with port works, port is discarded + let conf = parse(&["parity", "--nat", "extip:192.168.1.1:123"]); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "192.168.1.1"); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works + let conf = parse(&["parity", "--nat", "extip:ethereum.org"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Hostname works, garbage at the end is discarded + let conf = parse(&["parity", "--nat", "extip:ethereum.org:whatever bla bla 123"]); + assert!(conf.net_addresses().unwrap().1.is_some()); + assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); + + // Garbage is error + let conf = parse(&["parity", "--nat", "extip:blabla"]); + assert!(conf.net_addresses().is_err()); + } + #[test] fn should_expose_all_servers() { // given From 
8d24b4e80445e573e9f1c3420f88633a22372480 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 3 Jul 2019 16:57:50 +0800 Subject: [PATCH 08/12] idiomatic changes to PodState (#10834) --- ethcore/src/pod_account.rs | 16 ++-------------- ethcore/src/pod_state.rs | 31 ++++++++++--------------------- 2 files changed, 12 insertions(+), 35 deletions(-) diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 087577b02c6..196d627b124 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -16,7 +16,6 @@ //! Account system expressed in Plain Old Data. -use std::fmt; use std::collections::BTreeMap; use itertools::Itertools; use hash::{keccak}; @@ -53,7 +52,8 @@ pub struct PodAccount { fn opt_bytes_to_hex(opt_bytes: &Option, serializer: S) -> Result where S: Serializer { - serializer.collect_str(&format_args!("0x{}",opt_bytes.as_ref().map_or("".to_string(), |b|b.to_hex()))) + let readable = opt_bytes.as_ref().map(|b| b.to_hex()).unwrap_or_default(); + serializer.collect_str(&format_args!("0x{}", readable)) } impl PodAccount { @@ -124,18 +124,6 @@ impl From for PodAccount { } } -impl fmt::Display for PodAccount { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(bal={}; nonce={}; code={} bytes, #{}; storage={} items)", - self.balance, - self.nonce, - self.code.as_ref().map_or(0, |c| c.len()), - self.code.as_ref().map_or_else(H256::zero, |c| keccak(c)), - self.storage.len(), - ) - } -} - /// Determine difference between two optionally existant `Account`s. Returns None /// if they are the same. pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option { diff --git a/ethcore/src/pod_state.rs b/ethcore/src/pod_state.rs index 1c5ecc8400e..406bcc05dc4 100644 --- a/ethcore/src/pod_state.rs +++ b/ethcore/src/pod_state.rs @@ -16,9 +16,7 @@ //! State of all accounts in the system expressed in Plain Old Data. -use std::fmt; use std::collections::BTreeMap; -use itertools::Itertools; use ethereum_types::{H256, Address}; use triehash::sec_trie_root; use pod_account::{self, PodAccount}; @@ -30,12 +28,6 @@ use ethjson; pub struct PodState(BTreeMap); impl PodState { - /// Contruct a new object from the `m`. - pub fn new() -> PodState { Default::default() } - - /// Contruct a new object from the `m`. - pub fn from(m: BTreeMap) -> PodState { PodState(m) } - /// Get the underlying map. pub fn get(&self) -> &BTreeMap { &self.0 } @@ -65,21 +57,18 @@ impl From for PodState { } } -impl fmt::Display for PodState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (add, acc) in &self.0 { - writeln!(f, "{} => {}", add, acc)?; - } - Ok(()) +impl From> for PodState { + fn from(s: BTreeMap) -> Self { + PodState(s) } } /// Calculate and return diff between `pre` state and `post` state. 
pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { StateDiff { - raw: pre.get().keys() - .merge(post.get().keys()) - .filter_map(|acc| pod_account::diff_pod(pre.get().get(acc), post.get().get(acc)).map(|d| (acc.clone(), d))) + raw: pre.0.keys() + .chain(post.0.keys()) + .filter_map(|acc| pod_account::diff_pod(pre.0.get(acc), post.0.get(acc)).map(|d| (*acc, d))) .collect() } } @@ -87,9 +76,9 @@ pub fn diff_pod(pre: &PodState, post: &PodState) -> StateDiff { #[cfg(test)] mod test { use std::collections::BTreeMap; - use types::state_diff::*; - use types::account_diff::*; use pod_account::PodAccount; + use types::account_diff::{AccountDiff, Diff}; + use types::state_diff::StateDiff; use super::{PodState, Address}; #[test] @@ -102,7 +91,7 @@ mod test { storage: map![], } ]); - assert_eq!(super::diff_pod(&a, &PodState::new()), StateDiff { raw: map![ + assert_eq!(super::diff_pod(&a, &PodState::default()), StateDiff { raw: map![ Address::from_low_u64_be(1) => AccountDiff{ balance: Diff::Died(69.into()), nonce: Diff::Died(0.into()), @@ -110,7 +99,7 @@ mod test { storage: map![], } ]}); - assert_eq!(super::diff_pod(&PodState::new(), &a), StateDiff{ raw: map![ + assert_eq!(super::diff_pod(&PodState::default(), &a), StateDiff{ raw: map![ Address::from_low_u64_be(1) => AccountDiff{ balance: Diff::Born(69.into()), nonce: Diff::Born(0.into()), From 02e33c4f91864d3bfbd3ccbbbd4591c654c6afbf Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Thu, 4 Jul 2019 00:02:41 +1000 Subject: [PATCH 09/12] refactor: Related #9459 - evmbin: replace untyped json! macro with fully typed serde serialization using Rust structs (#10657) * fix: Replace multirust with rustup wince multirust is deprecated * docs: Update evmbin Rust docs and code comments * WIP: Add Response struct. Initial step using serde to serialize instead of hardcoding with JSON * fix: Update Response struct types to be string after formatting * fix: Fix move out of borrowed content error by cloning informant * refactor: Change from camelcase to snake case to fix linting errors * restore: Restore some code since now covered in separate PR #10658 * restore: Restore original Rustdocs of evmbin * WIP * add Clone type * add newlines to end of json files * remove uml file that was unintentionally commited * rename chain spec to state test JSON fle * remove log. fix indentation * revert: Restore indentation now handled by separate PR #10740 * remove state test json files as moved to PR #10742 * revert changes in info.rs since covered in PR #10742 * revert changes to main.rs since covered in PR #10742 * revert newlines back to master * revert newlines back to master2 * refactor: Rename Response to TraceData * fix: Remove Clone and replace with lifetimes. Update tests since not ordered * refactor: Change all json! to typed serde * docs: Update rustdocs. 
Remove fixme * fix: Add missing semicolons from printf * fix: Change style from unwrap to expect in evmbin/src/display/json.rs Co-Authored-By: Andronik Ordian * fix: Change style from unwrap to expect in evmbin/src/display/std_json.rs Co-Authored-By: Andronik Ordian * revert updating module comments as will be done in separate PR #10742 instead * review-fix: Remove useless reference * Remove unncessary use of format macro * Update evmbin/src/display/json.rs Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/json.rs with serialization in set_gas success Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/json.rs with serialization in set_gas failure Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/std_json.rs with serialization in finish for state root Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/std_json.rs with serialization in before_test Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/std_json.rs with serialization for state root Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/std_json.rs with serialization for finish success Co-Authored-By: Andronik Ordian * refactor: Update evmbin/src/display/std_json.rs with serialization for finish failure Co-Authored-By: Andronik Ordian * refactor: Rename structs and variables. Remove space. Simplify MessageInitial struct * refactor: Captialize expect message * revert to previous struct name TraceDataStateRoot * refactor: Simplify variable for consistency * Update accounts/ethstore/src/json/crypto.rs Co-Authored-By: David --- accounts/ethstore/src/json/crypto.rs | 2 +- evmbin/src/display/json.rs | 102 ++++++++++---- evmbin/src/display/std_json.rs | 201 ++++++++++++++++++--------- evmbin/src/info.rs | 20 +-- 4 files changed, 225 insertions(+), 100 deletions(-) diff --git a/accounts/ethstore/src/json/crypto.rs b/accounts/ethstore/src/json/crypto.rs index 34664f98b0e..a7315d7e5c9 100644 --- a/accounts/ethstore/src/json/crypto.rs +++ b/accounts/ethstore/src/json/crypto.rs @@ -41,7 +41,7 @@ impl str::FromStr for Crypto { impl From for String { fn from(c: Crypto) -> Self { - serde_json::to_string(&c).expect("serialization cannot fail, cause all crypto keys are strings") + serde_json::to_string(&c).expect("Serialization cannot fail, because all crypto keys are strings") } } diff --git a/evmbin/src/display/json.rs b/evmbin/src/display/json.rs index 449938eab56..195e00c7379 100644 --- a/evmbin/src/display/json.rs +++ b/evmbin/src/display/json.rs @@ -47,6 +47,42 @@ pub struct Informant { unmatched: bool, } +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceData<'a> { + pc: usize, + op: u8, + op_name: &'a str, + gas: &'a str, + gas_cost: &'a str, + memory: &'a str, + stack: &'a [U256], + storage: &'a HashMap, + depth: usize, +} + +#[derive(Serialize, Debug)] +pub struct MessageInitial<'a> { + action: &'a str, + test: &'a str, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageSuccess<'a> { + output: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageFailure<'a> { + error: &'a str, + gas_used: &'a str, + time: &'a u64, +} + impl Informant { fn with_informant_in_depth(informant: &mut Informant, depth: usize, f: F) { if depth == 0 { @@ -59,17 +95,21 @@ impl Informant { fn informant_trace(informant: &Informant, gas_used: U256) -> String { let info = 
::evm::Instruction::from_u8(informant.instruction).map(|i| i.info()); - json!({ - "pc": informant.pc, - "op": informant.instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", gas_used.saturating_add(informant.gas_cost)), - "gasCost": format!("{:#x}", informant.gas_cost), - "memory": format!("0x{}", informant.memory.to_hex()), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }).to_string() + let trace_data = + TraceData { + pc: informant.pc, + op: informant.instruction, + op_name: info.map(|i| i.name).unwrap_or(""), + gas: &format!("{:#x}", gas_used.saturating_add(informant.gas_cost)), + gas_cost: &format!("{:#x}", informant.gas_cost), + memory: &format!("0x{}", informant.memory.to_hex()), + stack: &informant.stack, + storage: &informant.storage, + depth: informant.depth, + } + ; + + serde_json::to_string(&trace_data).expect("Serialization cannot fail; qed") } } @@ -77,7 +117,15 @@ impl vm::Informant for Informant { type Sink = (); fn before_test(&mut self, name: &str, action: &str) { - println!("{}", json!({"action": action, "test": name})); + let message_init = + MessageInitial { + action, + test: &name, + } + ; + + let s = serde_json::to_string(&message_init).expect("Serialization cannot fail; qed"); + println!("{}", s); } fn set_gas(&mut self, gas: U256) { @@ -93,26 +141,32 @@ impl vm::Informant for Informant { println!("{}", trace); } - let success_msg = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + let message_success = + MessageSuccess { + output: &format!("0x{}", success.output.to_hex()), + gas_used: &format!("{:#x}", success.gas_used), + time: &display::as_micros(&success.time), + } + ; - println!("{}", success_msg) + let s = serde_json::to_string(&message_success).expect("Serialization cannot fail; qed"); + println!("{}", s); }, Err(failure) => { for trace in failure.traces.unwrap_or_else(Vec::new) { println!("{}", trace); } - let failure_msg = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); + let message_failure = + MessageFailure { + error: &failure.error.to_string(), + gas_used: &format!("{:#x}", failure.gas_used), + time: &display::as_micros(&failure.time), + } + ; - println!("{}", failure_msg) + let s = serde_json::to_string(&message_failure).expect("Serialization cannot fail; qed"); + println!("{}", s); }, } } diff --git a/evmbin/src/display/std_json.rs b/evmbin/src/display/std_json.rs index 74d9e0040a2..1734c305e24 100644 --- a/evmbin/src/display/std_json.rs +++ b/evmbin/src/display/std_json.rs @@ -64,6 +64,52 @@ pub struct Informant { out_sink: Out, } +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceData<'a> { + pc: usize, + op: u8, + op_name: &'a str, + gas: &'a str, + stack: &'a [U256], + storage: &'a HashMap, + depth: usize, +} + +#[derive(Serialize, Debug)] +pub struct MessageInitial<'a> { + action: &'a str, + test: &'a str, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageSuccess<'a> { + output: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct MessageFailure<'a> { + error: &'a str, + gas_used: &'a str, + time: &'a u64, +} + +#[derive(Serialize, Debug)] +pub struct DumpData<'a> { + root: &'a H256, + accounts: &'a pod_state::PodState, 
+} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TraceDataStateRoot<'a> { + state_root: &'a H256, +} + impl Default for Informant { fn default() -> Self { Self::new(io::stderr(), io::stdout()) @@ -95,7 +141,8 @@ impl Informant { storage: Default::default(), subinfos: Default::default(), subdepth: 0, - trace_sink, out_sink + trace_sink, + out_sink, } } @@ -108,12 +155,16 @@ impl Informant { } fn dump_state_into(trace_sink: &mut Trace, root: H256, end_state: &Option) { - if let Some(ref end_state) = end_state { - let dump_data = json!({ - "root": root, - "accounts": end_state, - }); - writeln!(trace_sink, "{}", dump_data).expect("The sink must be writeable."); + if let Some(ref end_state) = end_state { + let dump_data = + DumpData { + root: &root, + accounts: end_state, + } + ; + + let s = serde_json::to_string(&dump_data).expect("Serialization cannot fail; qed"); + writeln!(trace_sink, "{}", s).expect("The sink must be writeable."); } } @@ -124,12 +175,15 @@ impl vm::Informant for Informant { type Sink = (Trace, Out); fn before_test(&mut self, name: &str, action: &str) { - let out_data = json!({ - "action": action, - "test": name, - }); + let message_init = + MessageInitial { + action, + test: &name, + } + ; - writeln!(&mut self.out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = serde_json::to_string(&message_init).expect("Serialization cannot fail; qed"); + writeln!(&mut self.out_sink, "{}", s).expect("The sink must be writeable."); } fn set_gas(&mut self, _gas: U256) {} @@ -137,34 +191,46 @@ impl vm::Informant for Informant { fn clone_sink(&self) -> Self::Sink { (self.trace_sink.clone(), self.out_sink.clone()) } + fn finish(result: vm::RunResult<::Output>, (ref mut trace_sink, ref mut out_sink): &mut Self::Sink) { match result { Ok(success) => { - let trace_data = json!({"stateRoot": success.state_root}); - writeln!(trace_sink, "{}", trace_data) - .expect("The sink must be writeable."); + let state_root_data = + TraceDataStateRoot { + state_root: &success.state_root, + } + ; + + let s = serde_json::to_string(&state_root_data).expect("Serialization cannot fail; qed"); + writeln!(trace_sink, "{}", s).expect("The sink must be writeable."); Self::dump_state_into(trace_sink, success.state_root, &success.end_state); - let out_data = json!({ - "output": format!("0x{}", success.output.to_hex()), - "gasUsed": format!("{:#x}", success.gas_used), - "time": display::as_micros(&success.time), - }); + let message_success = + MessageSuccess { + output: &format!("0x{}", success.output.to_hex()), + gas_used: &format!("{:#x}", success.gas_used), + time: &display::as_micros(&success.time), + } + ; - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = serde_json::to_string(&message_success).expect("Serialization cannot fail; qed"); + writeln!(out_sink, "{}", s).expect("The sink must be writeable."); }, Err(failure) => { - let out_data = json!({ - "error": &failure.error.to_string(), - "gasUsed": format!("{:#x}", failure.gas_used), - "time": display::as_micros(&failure.time), - }); + let message_failure = + MessageFailure { + error: &failure.error.to_string(), + gas_used: &format!("{:#x}", failure.gas_used), + time: &display::as_micros(&failure.time), + } + ; Self::dump_state_into(trace_sink, failure.state_root, &failure.end_state); - writeln!(out_sink, "{}", out_data).expect("The sink must be writeable."); + let s = serde_json::to_string(&message_failure).expect("Serialization cannot fail; qed"); + writeln!(out_sink, 
"{}", s).expect("The sink must be writeable."); }, } } @@ -178,17 +244,22 @@ impl trace::VMTracer for Informant { Self::with_informant_in_depth(self, subdepth, |informant: &mut Informant| { let info = ::evm::Instruction::from_u8(instruction).map(|i| i.info()); informant.instruction = instruction; - let trace_data = json!({ - "pc": pc, - "op": instruction, - "opName": info.map(|i| i.name).unwrap_or(""), - "gas": format!("{:#x}", current_gas), - "stack": informant.stack, - "storage": informant.storage, - "depth": informant.depth, - }); - - writeln!(&mut informant.trace_sink, "{}", trace_data).expect("The sink must be writeable."); + + let trace_data = + TraceData { + pc: pc, + op: instruction, + op_name: info.map(|i| i.name).unwrap_or(""), + gas: &format!("{:#x}", current_gas), + stack: &informant.stack, + storage: &informant.storage, + depth: informant.depth, + } + ; + + let s = serde_json::to_string(&trace_data).expect("Serialization cannot fail; qed"); + + writeln!(&mut informant.trace_sink, "{}", s).expect("The sink must be writeable."); }); true } @@ -279,8 +350,8 @@ pub mod tests { }, "60F8d6", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":96,"opName":"PUSH1","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffc","op":214,"opName":"","pc":2,"stack":["0xf8"],"storage":{}} + r#"{"pc":0,"op":96,"opName":"PUSH1","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":2,"op":214,"opName":"","gas":"0xfffc","stack":["0xf8"],"storage":{},"depth":1} "#, ); @@ -293,7 +364,7 @@ pub mod tests { }, "F8d6", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":248,"opName":"","pc":0,"stack":[],"storage":{}} + r#"{"pc":0,"op":248,"opName":"","gas":"0xffff","stack":[],"storage":{},"depth":1} "#, ); } @@ -309,30 +380,30 @@ pub mod tests { }, "32343434345830f138343438323439f0", 0xffff, - r#"{"depth":1,"gas":"0xffff","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffd","op":52,"opName":"CALLVALUE","pc":1,"stack":["0x0"],"storage":{}} -{"depth":1,"gas":"0xfffb","op":52,"opName":"CALLVALUE","pc":2,"stack":["0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff9","op":52,"opName":"CALLVALUE","pc":3,"stack":["0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff7","op":52,"opName":"CALLVALUE","pc":4,"stack":["0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff5","op":88,"opName":"PC","pc":5,"stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0xfff3","op":48,"opName":"ADDRESS","pc":6,"stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{}} -{"depth":1,"gas":"0xfff1","op":241,"opName":"CALL","pc":7,"stack":["0x0","0x0","0x0","0x0","0x0","0x5","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e21","op":56,"opName":"CODESIZE","pc":8,"stack":["0x1"],"storage":{}} -{"depth":1,"gas":"0x9e1f","op":52,"opName":"CALLVALUE","pc":9,"stack":["0x1","0x10"],"storage":{}} -{"depth":1,"gas":"0x9e1d","op":52,"opName":"CALLVALUE","pc":10,"stack":["0x1","0x10","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e1b","op":56,"opName":"CODESIZE","pc":11,"stack":["0x1","0x10","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e19","op":50,"opName":"ORIGIN","pc":12,"stack":["0x1","0x10","0x0","0x0","0x10"],"storage":{}} -{"depth":1,"gas":"0x9e17","op":52,"opName":"CALLVALUE","pc":13,"stack":["0x1","0x10","0x0","0x0","0x10","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e15","op":57,"opName":"CODECOPY","pc":14,"stack":["0x1","0x10","0x0","0x0","0x10","0x0","0x0"],"storage":{}} -{"depth":1,"gas":"0x9e0c","op":240,"opName":"CREATE","pc":15,"stack":["0x1","0x10","0x0","0x0"],"storage":{}} 
-{"depth":2,"gas":"0x210c","op":50,"opName":"ORIGIN","pc":0,"stack":[],"storage":{}} -{"depth":2,"gas":"0x210a","op":52,"opName":"CALLVALUE","pc":1,"stack":["0x0"],"storage":{}} -{"depth":2,"gas":"0x2108","op":52,"opName":"CALLVALUE","pc":2,"stack":["0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2106","op":52,"opName":"CALLVALUE","pc":3,"stack":["0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2104","op":52,"opName":"CALLVALUE","pc":4,"stack":["0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2102","op":88,"opName":"PC","pc":5,"stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{}} -{"depth":2,"gas":"0x2100","op":48,"opName":"ADDRESS","pc":6,"stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{}} -{"depth":2,"gas":"0x20fe","op":241,"opName":"CALL","pc":7,"stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{}} + r#"{"pc":0,"op":50,"opName":"ORIGIN","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0xfffd","stack":["0x0"],"storage":{},"depth":1} +{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0xfffb","stack":["0x0","0x0"],"storage":{},"depth":1} +{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0xfff9","stack":["0x0","0x0","0x0"],"storage":{},"depth":1} +{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0xfff7","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":1} +{"pc":5,"op":88,"opName":"PC","gas":"0xfff5","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":1} +{"pc":6,"op":48,"opName":"ADDRESS","gas":"0xfff3","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":1} +{"pc":7,"op":241,"opName":"CALL","gas":"0xfff1","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0x0"],"storage":{},"depth":1} +{"pc":8,"op":56,"opName":"CODESIZE","gas":"0x9e21","stack":["0x1"],"storage":{},"depth":1} +{"pc":9,"op":52,"opName":"CALLVALUE","gas":"0x9e1f","stack":["0x1","0x10"],"storage":{},"depth":1} +{"pc":10,"op":52,"opName":"CALLVALUE","gas":"0x9e1d","stack":["0x1","0x10","0x0"],"storage":{},"depth":1} +{"pc":11,"op":56,"opName":"CODESIZE","gas":"0x9e1b","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":12,"op":50,"opName":"ORIGIN","gas":"0x9e19","stack":["0x1","0x10","0x0","0x0","0x10"],"storage":{},"depth":1} +{"pc":13,"op":52,"opName":"CALLVALUE","gas":"0x9e17","stack":["0x1","0x10","0x0","0x0","0x10","0x0"],"storage":{},"depth":1} +{"pc":14,"op":57,"opName":"CODECOPY","gas":"0x9e15","stack":["0x1","0x10","0x0","0x0","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":15,"op":240,"opName":"CREATE","gas":"0x9e0c","stack":["0x1","0x10","0x0","0x0"],"storage":{},"depth":1} +{"pc":0,"op":50,"opName":"ORIGIN","gas":"0x210c","stack":[],"storage":{},"depth":2} +{"pc":1,"op":52,"opName":"CALLVALUE","gas":"0x210a","stack":["0x0"],"storage":{},"depth":2} +{"pc":2,"op":52,"opName":"CALLVALUE","gas":"0x2108","stack":["0x0","0x0"],"storage":{},"depth":2} +{"pc":3,"op":52,"opName":"CALLVALUE","gas":"0x2106","stack":["0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":4,"op":52,"opName":"CALLVALUE","gas":"0x2104","stack":["0x0","0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":5,"op":88,"opName":"PC","gas":"0x2102","stack":["0x0","0x0","0x0","0x0","0x0"],"storage":{},"depth":2} +{"pc":6,"op":48,"opName":"ADDRESS","gas":"0x2100","stack":["0x0","0x0","0x0","0x0","0x0","0x5"],"storage":{},"depth":2} +{"pc":7,"op":241,"opName":"CALL","gas":"0x20fe","stack":["0x0","0x0","0x0","0x0","0x0","0x5","0xbd770416a3345f91e4b34576cb804a576fa48eb1"],"storage":{},"depth":2} "#, ) } diff --git 
a/evmbin/src/info.rs b/evmbin/src/info.rs index 9090c5377e6..ca068c36b0d 100644 --- a/evmbin/src/info.rs +++ b/evmbin/src/info.rs @@ -256,16 +256,16 @@ pub mod tests { assert_eq!( &String::from_utf8_lossy(&**res.lock().unwrap()), -r#"{"depth":1,"gas":"0xffff","op":98,"opName":"PUSH3","pc":0,"stack":[],"storage":{}} -{"depth":1,"gas":"0xfffc","op":96,"opName":"PUSH1","pc":4,"stack":["0xaaaaaa"],"storage":{}} -{"depth":1,"gas":"0xfff9","op":96,"opName":"PUSH1","pc":6,"stack":["0xaaaaaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff6","op":80,"opName":"POP","pc":8,"stack":["0xaaaaaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff4","op":96,"opName":"PUSH1","pc":9,"stack":["0xaaaaaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xfff1","op":96,"opName":"PUSH1","pc":11,"stack":["0xaaaaaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffee","op":96,"opName":"PUSH1","pc":13,"stack":["0xaaaaaa","0xaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffeb","op":96,"opName":"PUSH1","pc":15,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffe8","op":96,"opName":"PUSH1","pc":17,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} -{"depth":1,"gas":"0xffe5","op":96,"opName":"PUSH1","pc":19,"stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{}} +r#"{"pc":0,"op":98,"opName":"PUSH3","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":4,"op":96,"opName":"PUSH1","gas":"0xfffc","stack":["0xaaaaaa"],"storage":{},"depth":1} +{"pc":6,"op":96,"opName":"PUSH1","gas":"0xfff9","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":8,"op":80,"opName":"POP","gas":"0xfff6","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":9,"op":96,"opName":"PUSH1","gas":"0xfff4","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":11,"op":96,"opName":"PUSH1","gas":"0xfff1","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":13,"op":96,"opName":"PUSH1","gas":"0xffee","stack":["0xaaaaaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":15,"op":96,"opName":"PUSH1","gas":"0xffeb","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":17,"op":96,"opName":"PUSH1","gas":"0xffe8","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":19,"op":96,"opName":"PUSH1","gas":"0xffe5","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} "#); } } From 09edb94d5355759a03a1c982b29035f02af5642c Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Thu, 4 Jul 2019 01:44:58 +1000 Subject: [PATCH 10/12] tests: Relates to #10655: Test instructions for Readme (#10835) * tests: Relates to #10655: Test instructions for Readme * Add instructions to run tests * Update instructions to view docs * fix: Fix link to package list * fix: Move link to line about title of package list * Update README.md --- README.md | 47 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 50715eaf1c6..e4aec89927a 100644 --- a/README.md +++ b/README.md @@ -16,11 +16,12 @@ 3.2 [Building from Source Code](#chapter-0032)
3.3 [Simple One-Line Installer for Mac and Linux](#chapter-0033)
3.4 [Starting Parity Ethereum](#chapter-0034) -4. [Documentation](#chapter-004) -5. [Toolchain](#chapter-005) -6. [Community](#chapter-006) -7. [Contributing](#chapter-007) -8. [License](#chapter-008) +4. [Testing](#chapter-004) +5. [Documentation](#chapter-005) +6. [Toolchain](#chapter-006) +7. [Community](#chapter-007) +8. [Contributing](#chapter-008) +9. [License](#chapter-009) ## 1. Description @@ -148,7 +149,25 @@ To start Parity Ethereum as a regular user using `systemd` init: 2. Copy release to bin folder, write `sudo install ./target/release/parity /usr/bin/parity` 3. To configure Parity Ethereum, write a `/etc/parity/config.toml` config file, see [Configuring Parity Ethereum](https://paritytech.github.io/wiki/Configuring-Parity) for details. -## 4. Documentation +## 4. Testing + +You can run tests with the following commands: + +* **All** packages + ``` + cargo test --all + ``` + +* Specific package + ``` + cargo test --package + ``` + +Replace `` with one of the packages from the [package list](#package-list) (e.g. `cargo test --package evmbin`). + +You can show your logs in the test output by passing `--nocapture` (i.e. `cargo test --package evmbin -- --nocapture`) + +## 5. Documentation Official website: https://parity.io @@ -160,16 +179,20 @@ You can generate documentation for Parity Ethereum Rust packages that automatica * **All** packages ``` - cargo doc --open + cargo doc --document-private-items --open ``` * Specific package ``` - cargo doc --package --open + cargo doc --package -- --document-private-items --open ``` +Use`--document-private-items` to also view private documentation and `--no-deps` to exclude building documentation for dependencies. + Replacing `` with one of the following from the details section below (i.e. `cargo doc --package parity-ethereum --open`): + +**Package List**

* Parity Ethereum (EthCore) Client Application @@ -330,7 +353,7 @@ Example (generic documentation comment): /// ``` -## 5. Toolchain +## 6. Toolchain In addition to the Parity Ethereum client, there are additional tools in this repository available: @@ -342,7 +365,7 @@ In addition to the Parity Ethereum client, there are additional tools in this re The following tool is available in a separate repository: - [ethabi](https://github.com/paritytech/ethabi) - Parity Ethereum Encoding of Function Calls. [Docs here](https://crates.io/crates/ethabi) -## 6. Community +## 7. Community ### Join the chat! @@ -355,7 +378,7 @@ Questions? Get in touch with us on Gitter: Alternatively, join our community on Matrix: [![Riot: +Parity](https://img.shields.io/badge/riot-%2Bparity%3Amatrix.parity.io-orange.svg)](https://riot.im/app/#/group/+parity:matrix.parity.io) -## 7. Contributing +## 8. Contributing An introduction has been provided in the ["So You Want to be a Core Developer" presentation slides by Hernando Castano](http://tiny.cc/contrib-to-parity-eth). Additional guidelines are provided in [CONTRIBUTING](./.github/CONTRIBUTING.md). @@ -363,6 +386,6 @@ An introduction has been provided in the ["So You Want to be a Core Developer" p [CODE_OF_CONDUCT](./.github/CODE_OF_CONDUCT.md) -## 8. License +## 9. License [LICENSE](./LICENSE) From 582a4ea33962479524a1eed83e3b67458816cb65 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 4 Jul 2019 13:43:20 +0200 Subject: [PATCH 11/12] Break circular dependency between Client and Engine (part 1) (#10833) * First draft: pass the parent block header to on_close_block * typos and cleanup * whitespace * Store parent in ClosedBlock as well so it can be reopen()'d * Don't pass parent when reopening a block * Remove the unused ancestry param --- ethcore/src/block.rs | 49 ++++++++++------------ ethcore/src/client/client.rs | 2 - ethcore/src/client/test_client.rs | 1 - ethcore/src/engines/authority_round/mod.rs | 49 +++++++++------------- ethcore/src/engines/basic_authority.rs | 2 +- ethcore/src/engines/clique/mod.rs | 7 +++- ethcore/src/engines/clique/tests.rs | 3 +- ethcore/src/engines/instant_seal.rs | 2 +- ethcore/src/engines/mod.rs | 7 +++- ethcore/src/engines/null_engine.rs | 6 ++- ethcore/src/ethereum/ethash.rs | 8 ++-- ethcore/src/miner/miner.rs | 2 +- ethcore/src/test_helpers.rs | 1 - ethcore/src/tests/client.rs | 2 +- ethcore/src/tests/trace.rs | 3 -- 15 files changed, 64 insertions(+), 80 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index d2cb128c48c..7eb81f4238d 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -22,13 +22,13 @@ //! and can be appended to with transactions and uncles. //! //! When ready, `OpenBlock` can be closed and turned into a `ClosedBlock`. A `ClosedBlock` can -//! be reopend again by a miner under certain circumstances. On block close, state commit is +//! be re-opend again by a miner under certain circumstances. On block close, state commit is //! performed. //! //! `LockedBlock` is a version of a `ClosedBlock` that cannot be reopened. It can be sealed //! using an engine. //! -//! `ExecutedBlock` is an underlaying data structure used by all structs above to store block +//! `ExecutedBlock` is an underlying data structure used by all structs above to store block //! related info. 
use std::{cmp, ops}; @@ -52,7 +52,7 @@ use vm::{EnvInfo, LastHashes}; use hash::keccak; use rlp::{RlpStream, Encodable, encode_list}; use types::transaction::{SignedTransaction, Error as TransactionError}; -use types::header::{Header, ExtendedHeader}; +use types::header::Header; use types::receipt::{Receipt, TransactionOutcome}; /// Block that is ready for transactions to be added. @@ -62,6 +62,7 @@ use types::receipt::{Receipt, TransactionOutcome}; pub struct OpenBlock<'x> { block: ExecutedBlock, engine: &'x dyn Engine, + parent: Header, } /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields, @@ -72,6 +73,7 @@ pub struct OpenBlock<'x> { pub struct ClosedBlock { block: ExecutedBlock, unclosed_state: State, + parent: Header, } /// Just like `ClosedBlock` except that we can't reopen it and it's faster. @@ -102,7 +104,7 @@ pub struct ExecutedBlock { pub receipts: Vec, /// Hashes of already executed transactions. pub transactions_set: HashSet, - /// Underlaying state. + /// Underlying state. pub state: State, /// Transaction traces. pub traces: Tracing, @@ -119,13 +121,13 @@ impl ExecutedBlock { uncles: Default::default(), receipts: Default::default(), transactions_set: Default::default(), - state: state, + state, traces: if tracing { Tracing::enabled() } else { Tracing::Disabled }, - last_hashes: last_hashes, + last_hashes, } } @@ -162,7 +164,7 @@ pub trait Drain { impl<'x> OpenBlock<'x> { /// Create a new `OpenBlock` ready for transaction pushing. - pub fn new<'a, I: IntoIterator>( + pub fn new<'a>( engine: &'x dyn Engine, factories: Factories, tracing: bool, @@ -173,14 +175,11 @@ impl<'x> OpenBlock<'x> { gas_range_target: (U256, U256), extra_data: Bytes, is_epoch_begin: bool, - ancestry: I, ) -> Result { let number = parent.number() + 1; let state = State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(number), factories)?; - let mut r = OpenBlock { - block: ExecutedBlock::new(state, last_hashes, tracing), - engine: engine, - }; + + let mut r = OpenBlock { block: ExecutedBlock::new(state, last_hashes, tracing), engine, parent: parent.clone() }; r.block.header.set_parent_hash(parent.hash()); r.block.header.set_number(number); @@ -195,7 +194,7 @@ impl<'x> OpenBlock<'x> { engine.populate_from_parent(&mut r.block.header, parent); engine.machine().on_new_block(&mut r.block)?; - engine.on_new_block(&mut r.block, is_epoch_begin, &mut ancestry.into_iter())?; + engine.on_new_block(&mut r.block, is_epoch_begin)?; Ok(r) } @@ -297,19 +296,20 @@ impl<'x> OpenBlock<'x> { /// Turn this into a `ClosedBlock`. pub fn close(self) -> Result { let unclosed_state = self.block.state.clone(); + let parent = self.parent.clone(); let locked = self.close_and_lock()?; Ok(ClosedBlock { block: locked.block, unclosed_state, + parent, }) } /// Turn this into a `LockedBlock`. pub fn close_and_lock(self) -> Result { let mut s = self; - - s.engine.on_close_block(&mut s.block)?; + s.engine.on_close_block(&mut s.block, &s.parent)?; s.block.state.commit()?; s.block.header.set_transactions_root(ordered_trie_root(s.block.transactions.iter().map(|e| e.rlp_bytes()))); @@ -378,10 +378,8 @@ impl ClosedBlock { // revert rewards (i.e. set state back at last transaction's state). 
let mut block = self.block; block.state = self.unclosed_state; - OpenBlock { - block: block, - engine: engine, - } + let parent = self.parent; + OpenBlock { block, engine, parent } } } @@ -479,7 +477,6 @@ pub(crate) fn enact( last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut dyn Iterator, ) -> Result { // For trace log let trace_state = if log_enabled!(target: "enact", ::log::Level::Trace) { @@ -501,7 +498,6 @@ pub(crate) fn enact( (3141562.into(), 31415620.into()), vec![], is_epoch_begin, - ancestry, )?; if let Some(ref s) = trace_state { @@ -522,7 +518,7 @@ pub(crate) fn enact( b.close_and_lock() } -/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header +/// Enact the block given by `block_bytes` using `engine` on the database `db` with the given `parent` block header pub fn enact_verified( block: PreverifiedBlock, engine: &dyn Engine, @@ -532,7 +528,6 @@ pub fn enact_verified( last_hashes: Arc, factories: Factories, is_epoch_begin: bool, - ancestry: &mut dyn Iterator, ) -> Result { enact( @@ -546,7 +541,6 @@ pub fn enact_verified( last_hashes, factories, is_epoch_begin, - ancestry, ) } @@ -608,7 +602,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, )?; b.populate_from(&header); @@ -643,7 +636,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); let _ = b.seal(&*spec.engine, vec![]); } @@ -657,7 +650,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap() + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap() .close_and_lock().unwrap().seal(engine, vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain().state.drop().1; @@ -682,7 +675,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let mut uncle1_header = Header::new(); uncle1_header.set_extra_data(b"uncle1".to_vec()); let mut uncle2_header = Header::new(); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 7678314f0ee..e9847437c9d 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -407,7 +407,6 @@ impl Importer { last_hashes, 
client.factories.clone(), is_epoch_begin, - &mut chain.ancestry_with_metadata_iter(*header.parent_hash()), ); let mut locked_block = match enact_result { @@ -2361,7 +2360,6 @@ impl PrepareOpenBlock for Client { gas_range_target, extra_data, is_epoch_begin, - chain.ancestry_with_metadata_iter(best_header.hash()), )?; // Add uncles diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 9eb1b14eb8f..7894a665de3 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -417,7 +417,6 @@ impl PrepareOpenBlock for TestBlockChainClient { gas_range_target, extra_data, false, - None, )?; // TODO [todr] Override timestamp for predictability open_block.set_timestamp(*self.latest_block_timestamp.read()); diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs index 97305efad84..cb9b8a49954 100644 --- a/ethcore/src/engines/authority_round/mod.rs +++ b/ethcore/src/engines/authority_round/mod.rs @@ -1211,7 +1211,6 @@ impl Engine for AuthorityRound { &self, block: &mut ExecutedBlock, epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { // with immediate transitions, we don't use the epoch mechanism anyway. // the genesis is always considered an epoch, but we ignore it intentionally. @@ -1236,26 +1235,18 @@ impl Engine for AuthorityRound { } /// Apply the block reward on finalisation of the block. - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + block: &mut ExecutedBlock, + parent: &Header, + ) -> Result<(), Error> { let mut beneficiaries = Vec::new(); if block.header.number() >= self.empty_steps_transition { let empty_steps = if block.header.seal().is_empty() { // this is a new block, calculate rewards based on the empty steps messages we have accumulated - let client = match self.client.read().as_ref().and_then(|weak| weak.upgrade()) { - Some(client) => client, - None => { - debug!(target: "engine", "Unable to close block: missing client ref."); - return Err(EngineError::RequiresClient.into()) - }, - }; - - let parent = client.block_header(::client::BlockId::Hash(*block.header.parent_hash())) - .expect("hash is from parent; parent header must exist; qed") - .decode()?; - - let parent_step = header_step(&parent, self.empty_steps_transition)?; + let parent_step = header_step(parent, self.empty_steps_transition)?; let current_step = self.step.inner.load(); - self.empty_steps(parent_step.into(), current_step.into(), parent.hash()) + self.empty_steps(parent_step, current_step, parent.hash()) } else { // we're verifying a block, extract empty steps from the seal header_empty_steps(&block.header)? 
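The reworked `on_close_block` above receives the parent header as an argument, so AuthorityRound can derive empty-step rewards without upgrading a weak client reference — which is what breaks the Client/Engine cycle this patch targets. Below is a minimal, self-contained sketch of that contract; `Header`, `ExecutedBlock`, `Error`, and the fixed-reward logic are simplified stand-ins for illustration, not the real ethcore types.

```rust
use std::collections::HashMap;

// Simplified stand-ins for the ethcore types involved; illustration only.
#[derive(Default)]
struct Header {
    number: u64,
    author: String,
}

#[derive(Default)]
struct ExecutedBlock {
    header: Header,
    balances: HashMap<String, u64>,
}

type Error = String;

// The reworked hook: the caller passes the parent header in, so an engine
// no longer needs a back-reference to the client to look it up.
trait Engine {
    fn on_close_block(&self, block: &mut ExecutedBlock, parent: &Header) -> Result<(), Error>;
}

// A toy engine that credits a fixed reward to the block author on close.
struct FixedReward(u64);

impl Engine for FixedReward {
    fn on_close_block(&self, block: &mut ExecutedBlock, parent: &Header) -> Result<(), Error> {
        // Everything needed here comes from the arguments, not from a client lookup.
        if block.header.number != parent.number + 1 {
            return Err("block does not extend the given parent".into());
        }
        *block.balances.entry(block.header.author.clone()).or_insert(0) += self.0;
        Ok(())
    }
}

fn main() {
    let parent = Header { number: 41, author: "a".into() };
    let mut block = ExecutedBlock {
        header: Header { number: 42, author: "b".into() },
        ..Default::default()
    };
    FixedReward(5).on_close_block(&mut block, &parent).expect("valid parent");
    assert_eq!(block.balances["b"], 5);
}
```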
@@ -1707,9 +1698,9 @@ mod tests { let db1 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -1741,9 +1732,9 @@ mod tests { let db2 = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes, addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -1991,7 +1982,7 @@ mod tests { engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // the block is empty so we don't seal and instead broadcast an empty step message @@ -2029,7 +2020,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the block is empty it isn't sealed and we generate empty steps @@ -2038,7 +2029,7 @@ mod tests { engine.step(); // step 3 - let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, 
(3141562.into(), 31415620.into()), vec![], false).unwrap(); b2.push_transaction(Transaction { action: Action::Create, nonce: U256::from(0), @@ -2082,7 +2073,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the block is empty it isn't sealed and we generate empty steps @@ -2091,7 +2082,7 @@ mod tests { engine.step(); // step 3 - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr2, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b2 = b2.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr2, "0".into()))); assert_eq!(engine.generate_seal(&b2, &genesis_header), Seal::None); @@ -2099,7 +2090,7 @@ mod tests { // step 4 // the spec sets the maximum_empty_steps to 2 so we will now seal an empty block and include the empty step messages - let b3 = OpenBlock::new(engine, Default::default(), false, db3, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b3 = OpenBlock::new(engine, Default::default(), false, db3, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b3 = b3.close_and_lock().unwrap(); engine.set_signer(Box::new((tap.clone(), addr1, "1".into()))); @@ -2132,7 +2123,7 @@ mod tests { engine.register_client(Arc::downgrade(&client) as _); // step 2 - let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b1 = b1.close_and_lock().unwrap(); // since the block is empty it isn't sealed and we generate empty steps @@ -2142,7 +2133,7 @@ mod tests { // step 3 // the signer of the accumulated empty step message should be rewarded - let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b2 = OpenBlock::new(engine, Default::default(), false, db2, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let addr1_balance = b2.state.balance(&addr1).unwrap(); // after closing the block `addr1` should be reward twice, one for the included empty step message and another for block creation @@ -2242,7 +2233,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); let b1 = b1.close_and_lock().unwrap(); @@ -2264,7 +2254,6 @@ mod tests { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); let addr1_balance = b2.state.balance(&addr1).unwrap(); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 18afc51ce68..b08512b79b5 100644 --- 
a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -268,7 +268,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { assert!(b.try_seal(engine, seal).is_ok()); diff --git a/ethcore/src/engines/clique/mod.rs b/ethcore/src/engines/clique/mod.rs index 9c8e1823feb..89bcef94c2b 100644 --- a/ethcore/src/engines/clique/mod.rs +++ b/ethcore/src/engines/clique/mod.rs @@ -368,13 +368,16 @@ impl Engine for Clique { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { Ok(()) } // Clique has no block reward. - fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + _block: &mut ExecutedBlock, + _parent_header: &Header + ) -> Result<(), Error> { Ok(()) } diff --git a/ethcore/src/engines/clique/tests.rs b/ethcore/src/engines/clique/tests.rs index 76140284f6d..97e218a05ce 100644 --- a/ethcore/src/engines/clique/tests.rs +++ b/ethcore/src/engines/clique/tests.rs @@ -111,7 +111,7 @@ impl CliqueTester { /// Get signers after a certain state // This is generally used to fetch the state after a test has been executed and checked against - // the intial list of signers provided in the test + // the initial list of signers provided in the test pub fn clique_signers(&self, hash: &H256) -> impl Iterator { self.get_state_at_block(hash).signers().clone().into_iter() } @@ -171,7 +171,6 @@ impl CliqueTester { (3141562.into(), 31415620.into()), extra_data, false, - None, ).unwrap(); { diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 27f133424d5..58fbfeed1d4 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -110,7 +110,7 @@ mod tests { let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let genesis_header = spec.genesis_header(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close_and_lock().unwrap(); if let Seal::Regular(seal) = engine.generate_seal(&b, &genesis_header) { assert!(b.try_seal(engine, seal).is_ok()); diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index c550f74c2b0..b0207b075eb 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -304,13 +304,16 @@ pub trait Engine: Sync + Send { &self, _block: &mut ExecutedBlock, _epoch_begin: bool, - _ancestry: &mut dyn Iterator, ) -> Result<(), Error> { Ok(()) } /// Block transformation functions, after the transactions. 
- fn on_close_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + _block: &mut ExecutedBlock, + _parent_header: &Header, + ) -> Result<(), Error> { Ok(()) } diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs index a291969772c..71cb9d459b5 100644 --- a/ethcore/src/engines/null_engine.rs +++ b/ethcore/src/engines/null_engine.rs @@ -61,7 +61,11 @@ impl Engine for NullEngine { fn machine(&self) -> &Machine { &self.machine } - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block( + &self, + block: &mut ExecutedBlock, + _parent_header: &Header + ) -> Result<(), Error> { use std::ops::Shr; let author = *block.header.author(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index c29f960e573..a6540ab7908 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -239,7 +239,7 @@ impl Engine for Arc { /// Apply the block reward on finalisation of the block. /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). - fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_close_block(&self, block: &mut ExecutedBlock, _parent_header: &Header) -> Result<(), Error> { use std::ops::Shr; let author = *block.header.author(); @@ -540,7 +540,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close().unwrap(); assert_eq!(b.state.balance(&Address::zero()).unwrap(), U256::from_str("4563918244f40000").unwrap()); } @@ -589,7 +589,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let mut uncle = Header::new(); let uncle_author = Address::from_str("ef2d6d194084c2de36e0dabfce45d046b37d1106").unwrap(); uncle.set_author(uncle_author); @@ -607,7 +607,7 @@ mod tests { let genesis_header = spec.genesis_header(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false, None).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![], false).unwrap(); let b = b.close().unwrap(); let ubi_contract = Address::from_str("00efdd5883ec628983e9063c7d969fe268bbf310").unwrap(); diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 
c35423d7403..e933d7d58b6 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -548,7 +548,7 @@ impl Miner { } }, // Invalid nonce error can happen only if previous transaction is skipped because of gas limit. - // If there is errornous state of transaction queue it will be fixed when next block is imported. + // If there is erroneous state of transaction queue it will be fixed when next block is imported. Err(Error::Execution(ExecutionError::InvalidNonce { expected, got })) => { debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?} (expected: {:?}, got: {:?})", hash, expected, got); }, diff --git a/ethcore/src/test_helpers.rs b/ethcore/src/test_helpers.rs index 6bdaeadf9bd..5137aade025 100644 --- a/ethcore/src/test_helpers.rs +++ b/ethcore/src/test_helpers.rs @@ -155,7 +155,6 @@ pub fn generate_dummy_client_with_spec_and_data(test_spec: F, block_number: u (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; b.set_timestamp(rolling_timestamp); diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 093c55a9137..343751b0481 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -117,7 +117,7 @@ fn query_none_block() { Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), ).unwrap(); - let non_existant = client.block_header(BlockId::Number(188)); + let non_existant = client.block_header(BlockId::Number(188)); assert!(non_existant.is_none()); } diff --git a/ethcore/src/tests/trace.rs b/ethcore/src/tests/trace.rs index 882fed4436a..b80c7485128 100644 --- a/ethcore/src/tests/trace.rs +++ b/ethcore/src/tests/trace.rs @@ -87,7 +87,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; root_block.set_timestamp(rolling_timestamp); @@ -116,7 +115,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; parent_block.set_timestamp(rolling_timestamp); @@ -144,7 +142,6 @@ fn can_trace_block_and_uncle_reward() { (3141562.into(), 31415620.into()), vec![], false, - None, ).unwrap(); rolling_timestamp += 10; block.set_timestamp(rolling_timestamp); From bacc0f0b9aa2b20db82ea051e68eab8dba1d17be Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Thu, 4 Jul 2019 21:45:56 +1000 Subject: [PATCH 12/12] refactor: whisper: Add type aliases and update rustdocs in message.rs (#10812) * refactor: Add type aliases to Whisper and update rustdocs * remove my question that was answered --- whisper/src/message.rs | 190 +++++++++++++++++++++++--------------- whisper/src/rpc/filter.rs | 10 +- whisper/src/rpc/mod.rs | 7 +- 3 files changed, 123 insertions(+), 84 deletions(-) diff --git a/whisper/src/message.rs b/whisper/src/message.rs index ad50e3d9bf0..cf12d65b3a8 100644 --- a/whisper/src/message.rs +++ b/whisper/src/message.rs @@ -27,11 +27,38 @@ use tiny_keccak::{keccak256, Keccak}; #[cfg(not(time_checked_add))] use time_utils::CheckedSystemTime; +/// Bloom of topics. +type Bloom = H512; +/// Topic data index within a bloom. +type BloomTopicIndex = usize; +/// List of envelope topics. +type EnvelopeTopics = SmallVec<[EnvelopeTopic; 4]>; +/// Envelope topic data. +type EnvelopeTopicData = u8; +/// List of envelope topics data. +type EnvelopeTopicsData = [EnvelopeTopicData; 4]; +/// Expiry timestamp of an envelope. 
+type EnvelopeExpiryTimestamp = u64; +/// Message contained within an envelope +type EnvelopeMessage = Vec; +/// Arbitrary value used to target lower PoW hash. +type EnvelopeNonce = u64; +/// Envelope nonce in bytes. +type EnvelopeNonceBytes = [u8; 8]; +/// Envelope proving work duration in milliseconds. +type EnvelopeProvingWorkDuration = u64; +/// Envelope message uniquely identifying proving hash. +type EnvelopeProvingHash = H256; +/// Envelope work that has been proved by the proving hash. +type EnvelopeProvenWork = f64; +/// Time-to-live of an envelope in seconds. +type EnvelopeTTLDuration = u64; + /// Work-factor proved. Takes 3 parameters: size of message, time to live, /// and hash. /// /// Panics if size or TTL is zero. -pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { +pub fn work_factor_proved(size: u64, ttl: EnvelopeTTLDuration, hash: EnvelopeProvingHash) -> EnvelopeProvenWork { assert!(size != 0 && ttl != 0); let leading_zeros = { @@ -44,51 +71,51 @@ pub fn work_factor_proved(size: u64, ttl: u64, hash: H256) -> f64 { 2.0_f64.powi(leading_zeros as i32) / spacetime } -/// A topic of a message. +/// A topic of a message. The topic is an abridged version of the first four bytes of the original topic's hash. #[derive(Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)] -pub struct Topic(pub [u8; 4]); +pub struct EnvelopeTopic(pub EnvelopeTopicsData); -impl From<[u8; 4]> for Topic { - fn from(x: [u8; 4]) -> Self { - Topic(x) +impl From for EnvelopeTopic { + fn from(x: EnvelopeTopicsData) -> Self { + EnvelopeTopic(x) } } -impl Topic { - /// set up to three bits in the 64-byte bloom passed. +impl EnvelopeTopic { + /// Set up to three bits in the 64-byte bloom passed. /// - /// this takes 3 sets of 9 bits, treating each as an index in the range + /// This takes 3 sets of 9 bits, treating each as an index in the range /// 0..512 into the bloom and setting the corresponding bit in the bloom to 1. - pub fn bloom_into(&self, bloom: &mut H512) { + pub fn bloom_into(&self, bloom: &mut Bloom) { - let data = &self.0; + let topics_data = &self.0; for i in 0..3 { - let mut idx = data[i] as usize; + let mut topic_idx = topics_data[i] as BloomTopicIndex; - if data[3] & (1 << i) != 0 { - idx += 256; + if topics_data[3] & (1 << i) != 0 { + topic_idx += 256; } - debug_assert!(idx <= 511); - bloom.as_bytes_mut()[idx / 8] |= 1 << (7 - idx % 8); + debug_assert!(topic_idx <= 511); + bloom.as_bytes_mut()[topic_idx / 8] |= 1 << (7 - topic_idx % 8); } } /// Get bloom for single topic. - pub fn bloom(&self) -> H512 { + pub fn bloom(&self) -> Bloom { let mut bloom = Default::default(); self.bloom_into(&mut bloom); bloom } } -impl rlp::Encodable for Topic { +impl rlp::Encodable for EnvelopeTopic { fn rlp_append(&self, s: &mut RlpStream) { s.encoder().encode_value(&self.0); } } -impl rlp::Decodable for Topic { +impl rlp::Decodable for EnvelopeTopic { fn decode(rlp: &Rlp) -> Result { use std::cmp; @@ -96,16 +123,16 @@ impl rlp::Decodable for Topic { cmp::Ordering::Less => Err(DecoderError::RlpIsTooShort), cmp::Ordering::Greater => Err(DecoderError::RlpIsTooBig), cmp::Ordering::Equal => { - let mut t = [0u8; 4]; + let mut t: EnvelopeTopicsData = [0u8; 4]; t.copy_from_slice(bytes); - Ok(Topic(t)) + Ok(EnvelopeTopic(t)) } }) } } /// Calculate union of blooms for given topics. 
-pub fn bloom_topics(topics: &[Topic]) -> H512 { +pub fn bloom_topics(topics: &[EnvelopeTopic]) -> Bloom { let mut bloom = H512::default(); for topic in topics { topic.bloom_into(&mut bloom); @@ -143,7 +170,8 @@ impl fmt::Display for Error { } } -fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStream { +/// Append given topic(s) to RLP stream. +fn append_topics<'a>(s: &'a mut RlpStream, topics: &[EnvelopeTopic]) -> &'a mut RlpStream { if topics.len() == 1 { s.append(&topics[0]) } else { @@ -151,27 +179,27 @@ fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStrea } } -fn decode_topics(rlp: Rlp) -> Result, DecoderError> { +fn decode_topics(rlp: Rlp) -> Result { if rlp.is_list() { - rlp.iter().map(|r| r.as_val::()).collect() + rlp.iter().map(|r| r.as_val::()).collect() } else { rlp.as_val().map(|t| SmallVec::from_slice(&[t])) } } -// Raw envelope struct. +/// An `Envelope` instance is contained in each `Message`. #[derive(Clone, Debug, PartialEq, Eq)] pub struct Envelope { - /// Expiry timestamp - pub expiry: u64, - /// Time-to-live in seconds - pub ttl: u64, - /// series of 4-byte topics. - pub topics: SmallVec<[Topic; 4]>, - /// The message contained within. - pub data: Vec, + /// Expiry timestamp. + pub expiry: EnvelopeExpiryTimestamp, + /// Time-to-live in seconds. + pub ttl: EnvelopeTTLDuration, + /// Series of 4-byte topics. + pub topics: EnvelopeTopics, + /// The message contained within an envelope. + pub message_data: EnvelopeMessage, /// Arbitrary value used to target lower PoW hash. - pub nonce: u64, + pub nonce: EnvelopeNonce, } impl Envelope { @@ -180,7 +208,8 @@ impl Envelope { self.topics.len() != 1 } - fn proving_hash(&self) -> H256 { + // Generate the uniquely identifying proving hash for the message. + fn proving_hash(&self) -> EnvelopeProvingHash { use byteorder::{BigEndian, ByteOrder}; let mut buf = [0; 32]; @@ -189,7 +218,7 @@ impl Envelope { stream.append(&self.expiry).append(&self.ttl); append_topics(&mut stream, &self.topics) - .append(&self.data); + .append(&self.message_data); let mut digest = Keccak::new_keccak256(); digest.update(&*stream.drain()); @@ -212,7 +241,7 @@ impl rlp::Encodable for Envelope { .append(&self.ttl); append_topics(s, &self.topics) - .append(&self.data) + .append(&self.message_data) .append(&self.nonce); } } @@ -225,7 +254,7 @@ impl rlp::Decodable for Envelope { expiry: rlp.val_at(0)?, ttl: rlp.val_at(1)?, topics: decode_topics(rlp.at(2)?)?, - data: rlp.val_at(3)?, + message_data: rlp.val_at(3)?, nonce: rlp.val_at(4)?, }) } @@ -234,22 +263,22 @@ impl rlp::Decodable for Envelope { /// Message creation parameters. /// Pass this to `Message::create` to make a message. pub struct CreateParams { - /// time-to-live in seconds. - pub ttl: u64, - /// payload data. - pub payload: Vec, - /// Topics. May not be empty. - pub topics: Vec, + /// Envelope time-to-live in seconds. + pub ttl: EnvelopeTTLDuration, + /// Envelope payload of message data. + pub payload: EnvelopeMessage, + /// Envelope topics. Must not be empty. + pub topics: Vec, /// How many milliseconds to spend proving work. - pub work: u64, + pub work: EnvelopeProvingWorkDuration, } /// A whisper message. This is a checked message carrying around metadata. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct Message { envelope: Envelope, - bloom: H512, - hash: H256, + bloom: Bloom, + hash: EnvelopeProvingHash, encoded_size: usize, } @@ -270,6 +299,7 @@ impl Message { assert!(params.ttl > 0); + // Expiry period since the last epoch rounded up to the nearest second. let expiry = { let since_epoch = SystemTime::now() .checked_add(Duration::from_secs(params.ttl)) @@ -277,10 +307,13 @@ impl Message { .ok_or(Error::TimestampOverflow)? .duration_since(time::UNIX_EPOCH).expect("time after now is after unix epoch; qed"); - // round up the sub-second to next whole second. + // Round up the sub-second to next whole second. since_epoch.as_secs() + if since_epoch.subsec_nanos() == 0 { 0 } else { 1 } }; + // Encrypt an RLP stream into a digest. Create the RLP stream by appending + // to it the envelope topics, envelope payload of message data, + // envelope ttl, and the expiry period since the last epoch. let start_digest = { let mut stream = RlpStream::new_list(4); stream.append(&expiry).append(¶ms.ttl); @@ -291,8 +324,10 @@ impl Message { digest }; + // Find the best nonce based on using updating the digest with + // randomly generated envelope nonce bytes let mut buf = [0; 32]; - let mut try_nonce = move |nonce: &[u8; 8]| { + let mut try_nonce = move |nonce: &EnvelopeNonceBytes| { let mut digest = start_digest.clone(); digest.update(&nonce[..]); digest.finalize(&mut buf[..]); @@ -300,9 +335,12 @@ impl Message { buf.clone() }; - let mut nonce: [u8; 8] = rng.gen(); + let mut nonce: EnvelopeNonceBytes = rng.gen(); let mut best_found = try_nonce(&nonce); + // Start proving work, which involves repeatedly trying to create another + // nonce hash that is better (lower PoW hash) than the latest best nonce, + // to replace it. let start = Instant::now(); while start.elapsed() <= Duration::from_millis(params.work) { @@ -316,10 +354,10 @@ impl Message { } let envelope = Envelope { - expiry: expiry, + expiry, ttl: params.ttl, topics: params.topics.into_iter().collect(), - data: params.payload, + message_data: params.payload, nonce: BigEndian::read_u64(&nonce[..]), }; @@ -344,9 +382,9 @@ impl Message { Message::from_components(envelope, encoded_size, hash, now) } - // create message from envelope, hash, and encoded size. - // does checks for validity. - fn from_components(envelope: Envelope, size: usize, hash: H256, now: SystemTime) + // Create message from envelope, hash, and encoded size. + // Does checks for validity. + fn from_components(envelope: Envelope, size: usize, hash: EnvelopeProvingHash, now: SystemTime) -> Result { const LEEWAY_SECONDS: u64 = 2; @@ -371,9 +409,9 @@ impl Message { let bloom = bloom_topics(&envelope.topics); Ok(Message { - envelope: envelope, - bloom: bloom, - hash: hash, + envelope, + bloom, + hash, encoded_size: size, }) } @@ -388,18 +426,18 @@ impl Message { self.encoded_size } - /// Get a uniquely identifying hash for the message. - pub fn hash(&self) -> &H256 { + /// Get a uniquely identifying proving hash for the message. + pub fn hash(&self) -> &EnvelopeProvingHash { &self.hash } - /// Get the bloom filter of the topics + /// Get the bloom filter of the topics. pub fn bloom(&self) -> &H512 { &self.bloom } /// Get the work proved by the hash. - pub fn work_proved(&self) -> f64 { + pub fn work_proved(&self) -> EnvelopeProvenWork { let proving_hash = self.envelope.proving_hash(); work_factor_proved(self.encoded_size as _, self.envelope.ttl, proving_hash) @@ -411,13 +449,13 @@ impl Message { } /// Get the topics. 
- pub fn topics(&self) -> &[Topic] { + pub fn topics(&self) -> &[EnvelopeTopic] { &self.envelope.topics } /// Get the message data. - pub fn data(&self) -> &[u8] { - &self.envelope.data + pub fn message_data(&self) -> &EnvelopeMessage { + &self.envelope.message_data } } @@ -438,7 +476,7 @@ mod tests { assert!(Message::create(CreateParams { ttl: 100, payload: vec![1, 2, 3, 4], - topics: vec![Topic([1, 2, 1, 2])], + topics: vec![EnvelopeTopic([1, 2, 1, 2])], work: 50, }).is_ok()); } @@ -448,7 +486,7 @@ mod tests { let envelope = Envelope { expiry: 100_000, ttl: 30, - data: vec![9; 256], + message_data: vec![9; 256], topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -464,8 +502,8 @@ mod tests { let envelope = Envelope { expiry: 100_000, ttl: 30, - data: vec![9; 256], - topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]), + message_data: vec![9; 256], + topics: SmallVec::from_slice(&[Default::default(), EnvelopeTopic([1, 2, 3, 4])]), nonce: 1010101, }; @@ -480,7 +518,7 @@ mod tests { let envelope = Envelope { expiry: 100_000, ttl: 30, - data: vec![9; 256], + message_data: vec![9; 256], topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -499,7 +537,7 @@ mod tests { let envelope = Envelope { expiry: 100_000, ttl: 30, - data: vec![9; 256], + message_data: vec![9; 256], topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -516,7 +554,7 @@ mod tests { let envelope = Envelope { expiry: 100_000, ttl: 200_000, - data: vec![9; 256], + message_data: vec![9; 256], topics: SmallVec::from_slice(&[Default::default()]), nonce: 1010101, }; @@ -530,10 +568,10 @@ mod tests { #[test] fn work_factor() { // 256 leading zeros -> 2^256 / 1 - assert_eq!(work_factor_proved(1, 1, H256::zero()), 115792089237316200000000000000000000000000000000000000000000000000000000000000.0); + assert_eq!(work_factor_proved(1, 1, EnvelopeProvingHash::zero()), 115792089237316200000000000000000000000000000000000000000000000000000000000000.0); // 255 leading zeros -> 2^255 / 1 - assert_eq!(work_factor_proved(1, 1, H256::from_low_u64_be(1)), 57896044618658100000000000000000000000000000000000000000000000000000000000000.0); + assert_eq!(work_factor_proved(1, 1, EnvelopeProvingHash::from_low_u64_be(1)), 57896044618658100000000000000000000000000000000000000000000000000000000000000.0); // 0 leading zeros -> 2^0 / 1 - assert_eq!(work_factor_proved(1, 1, serde_json::from_str::("\"0xff00000000000000000000000000000000000000000000000000000000000000\"").unwrap()), 1.0); + assert_eq!(work_factor_proved(1, 1, serde_json::from_str::("\"0xff00000000000000000000000000000000000000000000000000000000000000\"").unwrap()), 1.0); } } diff --git a/whisper/src/rpc/filter.rs b/whisper/src/rpc/filter.rs index a58f16f9c85..84a6445ca51 100644 --- a/whisper/src/rpc/filter.rs +++ b/whisper/src/rpc/filter.rs @@ -24,7 +24,7 @@ use ethkey::Public; use jsonrpc_pubsub::typed::{Subscriber, Sink}; use parking_lot::{Mutex, RwLock}; -use message::{Message, Topic}; +use message::{Message, EnvelopeTopic}; use super::{key_store::KeyStore, types::{self, FilterItem, HexEncode}}; /// Kinds of filters, @@ -198,7 +198,7 @@ impl Drop for Manager { /// Filter incoming messages by critera. 
pub struct Filter { - topics: Vec<(Vec, H512, Topic)>, + topics: Vec<(Vec, H512, EnvelopeTopic)>, from: Option, decrypt_with: Option, } @@ -278,7 +278,7 @@ impl Filter { } }; - let decrypted = match decrypt.decrypt(message.data()) { + let decrypted = match decrypt.decrypt(message.message_data()) { Some(d) => d, None => { trace!(target: "whisper", "Failed to decrypt message with {} matching topics", @@ -317,7 +317,7 @@ impl Filter { #[cfg(test)] mod tests { - use message::{CreateParams, Message, Topic}; + use message::{CreateParams, Message, EnvelopeTopic}; use rpc::types::{FilterRequest, HexEncode}; use rpc::abridge_topic; use super::*; @@ -366,7 +366,7 @@ mod tests { let message = Message::create(CreateParams { ttl: 100, payload: vec![1, 3, 5, 7, 9], - topics: vec![Topic([1, 8, 3, 99])], + topics: vec![EnvelopeTopic([1, 8, 3, 99])], work: 0, }).unwrap(); diff --git a/whisper/src/rpc/mod.rs b/whisper/src/rpc/mod.rs index 57ec4c6c4f7..0612fb5ac33 100644 --- a/whisper/src/rpc/mod.rs +++ b/whisper/src/rpc/mod.rs @@ -19,7 +19,7 @@ //! Manages standard message format decoding, ephemeral identities, signing, //! encryption, and decryption. //! -//! Provides an interface for using whisper to transmit data securely. +//! Provides an interface for using Whisper to transmit data securely. use std::sync::Arc; @@ -35,7 +35,7 @@ use self::filter::Filter; use self::key_store::{Key, KeyStore}; use self::types::HexEncode; -use message::{CreateParams, Message, Topic}; +use message::{CreateParams, Message, EnvelopeTopic}; mod crypto; mod filter; @@ -61,7 +61,7 @@ fn topic_hash(topic: &[u8]) -> H256 { } // abridge topic using first four bytes of hash. -fn abridge_topic(topic: &[u8]) -> Topic { +fn abridge_topic(topic: &[u8]) -> EnvelopeTopic { let mut abridged = [0; 4]; let hash = topic_hash(topic).0; abridged.copy_from_slice(&hash[..4]); @@ -99,6 +99,7 @@ pub trait Whisper { #[rpc(name = "shh_getPrivateKey")] fn get_private(&self, types::Identity) -> Result; + /// Get symmetric key. Succeeds if identity has been stored. #[rpc(name = "shh_getSymKey")] fn get_symmetric(&self, types::Identity) -> Result;
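For readers following the Whisper changes, the proved-work factor used throughout `message.rs` reduces to 2^(number of leading zero bits in the proving hash) divided by a `spacetime` term (message size times TTL, as the parameter list and the unit tests in the patch suggest). The sketch below is a self-contained restatement under that assumption, with a plain `[u8; 32]` standing in for the `H256` proving hash.

```rust
// Count leading zero bits of a 32-byte hash, scanning byte by byte.
fn leading_zero_bits(hash: &[u8; 32]) -> u32 {
    let mut zeros = 0;
    for byte in hash {
        if *byte == 0 {
            zeros += 8;
        } else {
            zeros += byte.leading_zeros();
            break;
        }
    }
    zeros
}

// Work factor: 2^(leading zeros) scaled down by message size * time-to-live.
fn work_factor_proved(size: u64, ttl: u64, hash: &[u8; 32]) -> f64 {
    assert!(size != 0 && ttl != 0);
    let spacetime = size as f64 * ttl as f64;
    2.0_f64.powi(leading_zero_bits(hash) as i32) / spacetime
}

fn main() {
    // Mirrors the tests in the patch: an all-zero hash proves 2^256 work for a
    // 1-byte, 1-second message, and a hash starting with 0xff proves 1.0.
    assert_eq!(work_factor_proved(1, 1, &[0u8; 32]), 2.0_f64.powi(256));
    let mut h = [0u8; 32];
    h[0] = 0xff;
    assert_eq!(work_factor_proved(1, 1, &h), 1.0);
}
```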