add explicit_iter_loop clippy lint (paradigmxyz#8570)
Co-authored-by: Matthias Seitz <[email protected]>
2 people authored and mw2000 committed Jun 5, 2024
1 parent 9ab3c66 commit 7474a51
Showing 47 changed files with 79 additions and 78 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
@@ -161,6 +161,7 @@ doc_markdown = "warn"
unnecessary_struct_initialization = "warn"
string_lit_as_bytes = "warn"
explicit_into_iter_loop = "warn"
+explicit_iter_loop = "warn"
type_repetition_in_bounds = "warn"

# These are nursery lints which have findings. Allow them for now. Some are not
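For context: `clippy::explicit_iter_loop` fires when a `for` loop calls `.iter()` or `.iter_mut()` on a collection that could simply be borrowed, which is the pattern rewritten in every file below. A minimal illustrative sketch (not taken from this commit; `values` is a made-up local):

    fn sketch() {
        let mut values = vec![1, 2, 3];

        // Flagged by clippy::explicit_iter_loop:
        //   for v in values.iter() { ... }
        //   for v in values.iter_mut() { ... }

        // Preferred: `&Vec<T>` and `&mut Vec<T>` implement `IntoIterator`,
        // so these loops are equivalent to the flagged ones.
        for v in &values {
            println!("{}", v);
        }
        for v in &mut values {
            *v += 1;
        }
    }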
2 changes: 1 addition & 1 deletion bin/reth/src/commands/debug_cmd/build_block.rs
@@ -191,7 +191,7 @@ impl Command {
})
.transpose()?;

-for tx_bytes in self.transactions.iter() {
+for tx_bytes in &self.transactions {
debug!(target: "reth::cli", bytes = ?tx_bytes, "Decoding transaction");
let transaction = TransactionSigned::decode(&mut &Bytes::from_str(tx_bytes)?[..])?
.into_ecrecovered()
2 changes: 1 addition & 1 deletion crates/blockchain-tree/src/block_buffer.rs
@@ -167,7 +167,7 @@ impl BlockBuffer {
// get this child blocks children and add them to the remove list.
if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) {
// remove child from buffer
-for child_hash in parent_children.iter() {
+for child_hash in &parent_children {
if let Some(block) = self.remove_block(child_hash) {
removed_blocks.push(block);
}
2 changes: 1 addition & 1 deletion crates/blockchain-tree/src/block_indices.rs
@@ -111,7 +111,7 @@ impl BlockIndices {

/// Insert block to chain and fork child indices of the new chain
pub(crate) fn insert_chain(&mut self, chain_id: BlockchainId, chain: &Chain) {
-for (number, block) in chain.blocks().iter() {
+for (number, block) in chain.blocks() {
// add block -> chain_id index
self.blocks_to_chain.insert(block.hash(), chain_id);
// add number -> block
6 changes: 3 additions & 3 deletions crates/blockchain-tree/src/blockchain_tree.rs
@@ -632,7 +632,7 @@ where
/// in the tree.
fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option<BlockchainId> {
// iterate over all blocks in chain and find any fork blocks that are in tree.
-for (number, block) in chain.blocks().iter() {
+for (number, block) in chain.blocks() {
let hash = block.hash();

// find all chains that fork from this block.
@@ -903,8 +903,8 @@

// check unconnected block buffer for children of the chains
let mut all_chain_blocks = Vec::new();
-for (_, chain) in self.state.chains.iter() {
-for (&number, block) in chain.blocks().iter() {
+for chain in self.state.chains.values() {
+for (&number, block) in chain.blocks() {
all_chain_blocks.push(BlockNumHash { number, hash: block.hash() })
}
}
6 changes: 3 additions & 3 deletions crates/ethereum-forks/src/hardfork.rs
@@ -658,20 +658,20 @@ mod tests {
let op_hardforks =
[Hardfork::Bedrock, Hardfork::Regolith, Hardfork::Canyon, Hardfork::Ecotone];

-for hardfork in pow_hardforks.iter() {
+for hardfork in &pow_hardforks {
assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfWork);
assert!(!hardfork.is_proof_of_stake());
assert!(hardfork.is_proof_of_work());
}

-for hardfork in pos_hardforks.iter() {
+for hardfork in &pos_hardforks {
assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake);
assert!(hardfork.is_proof_of_stake());
assert!(!hardfork.is_proof_of_work());
}

#[cfg(feature = "optimism")]
-for hardfork in op_hardforks.iter() {
+for hardfork in &op_hardforks {
assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake);
assert!(hardfork.is_proof_of_stake());
assert!(!hardfork.is_proof_of_work());
2 changes: 1 addition & 1 deletion crates/exex/src/manager.rs
@@ -327,7 +327,7 @@ impl Future for ExExManager {
self.update_capacity();

// handle incoming exex events
-for exex in self.exex_handles.iter_mut() {
+for exex in &mut self.exex_handles {
while let Poll::Ready(Some(event)) = exex.receiver.poll_recv(cx) {
debug!(exex_id = %exex.id, ?event, "Received event from exex");
exex.metrics.events_sent_total.increment(1);
4 changes: 2 additions & 2 deletions crates/metrics/metrics-derive/src/expand.rs
@@ -299,7 +299,7 @@ fn parse_metric_fields(node: &DeriveInput) -> Result<Vec<MetricField<'_>>> {
};

let mut metrics = Vec::with_capacity(data.fields.len());
-for field in data.fields.iter() {
+for field in &data.fields {
let (mut describe, mut rename, mut skip) = (None, None, false);
if let Some(metric_attr) = parse_single_attr(field, "metric")? {
let parsed =
@@ -404,7 +404,7 @@

fn parse_docs_to_string<T: WithAttrs>(token: &T) -> Result<Option<String>> {
let mut doc_str = None;
-for attr in token.attrs().iter() {
+for attr in token.attrs() {
if let syn::Meta::NameValue(ref meta) = attr.meta {
if let Expr::Lit(ref lit) = meta.value {
if let Lit::Str(ref doc) = lit.lit {
2 changes: 1 addition & 1 deletion crates/net/discv4/src/lib.rs
@@ -552,7 +552,7 @@ impl Discv4Service {
builder.tcp6(local_node_record.tcp_port);
}

-for (key, val) in config.additional_eip868_rlp_pairs.iter() {
+for (key, val) in &config.additional_eip868_rlp_pairs {
builder.add_value_rlp(key, val.clone());
}
builder.build(&secret_key).expect("v4 is set")
2 changes: 1 addition & 1 deletion crates/net/discv5/src/filter.rs
@@ -65,7 +65,7 @@ impl MustNotIncludeKeys {
impl MustNotIncludeKeys {
/// Returns `true` if [`Enr`](discv5::Enr) passes filtering rules.
pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome {
-for key in self.keys.iter() {
+for key in &self.keys {
if matches!(key.filter(enr), FilterOutcome::Ok) {
return FilterOutcome::Ignore {
reason: format!(
4 changes: 2 additions & 2 deletions crates/net/network/src/peers/manager.rs
@@ -2829,12 +2829,12 @@ mod tests {
);

// establish dialed connections
-for peer_id in num_pendingout_states.iter() {
+for peer_id in &num_pendingout_states {
peer_manager.on_active_outgoing_established(*peer_id);
}

// all dialed connections should now be in 'Out' state
-for peer_id in num_pendingout_states.iter() {
+for peer_id in &num_pendingout_states {
assert_eq!(peer_manager.peers.get(peer_id).unwrap().state, PeerConnectionState::Out);
}

2 changes: 1 addition & 1 deletion crates/net/network/src/session/active.rs
@@ -423,7 +423,7 @@ impl ActiveSession {
/// session should be terminated.
#[must_use]
fn check_timed_out_requests(&mut self, now: Instant) -> bool {
-for (id, req) in self.inflight_requests.iter_mut() {
+for (id, req) in &mut self.inflight_requests {
if req.is_timed_out(now) {
if req.is_waiting() {
debug!(target: "net::session", ?id, remote_peer_id=?self.remote_peer_id, "timed out outgoing request");
4 changes: 2 additions & 2 deletions crates/net/network/src/session/mod.rs
@@ -333,14 +333,14 @@ impl SessionManager {
/// It will trigger the disconnect on all the session tasks to gracefully terminate. The result
/// will be picked by the receiver.
pub fn disconnect_all(&self, reason: Option<DisconnectReason>) {
-for (_, session) in self.active_sessions.iter() {
+for session in self.active_sessions.values() {
session.disconnect(reason);
}
}

/// Disconnects all pending sessions.
pub fn disconnect_all_pending(&mut self) {
-for (_, session) in self.pending_sessions.iter_mut() {
+for session in self.pending_sessions.values_mut() {
session.disconnect();
}
}
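Note that loops which destructured map entries and threw the key away, like the two above, are rewritten to `.values()` / `.values_mut()` rather than to a plain `&` borrow; that reads better when only the values matter. A minimal sketch of the equivalence, using made-up types rather than reth's session types:

    use std::collections::HashMap;

    fn sketch(sessions: &mut HashMap<u64, String>) {
        // Before: iterate key/value pairs and ignore the key.
        //   for (_, session) in sessions.iter() { ... }

        // After: ask the map for its values directly.
        for session in sessions.values() {
            println!("{}", session);
        }
        for session in sessions.values_mut() {
            session.push('!');
        }
    }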
4 changes: 2 additions & 2 deletions crates/net/network/src/state.rs
@@ -208,7 +208,7 @@ where
pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) {
let number = msg.block.block.header.number;
let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]);
-for (peer_id, peer) in self.active_peers.iter_mut() {
+for (peer_id, peer) in &mut self.active_peers {
if peer.blocks.contains(&msg.hash) {
// skip peers which already reported the block
continue
@@ -417,7 +417,7 @@ where
let mut received_responses = Vec::new();

// poll all connected peers for responses
-for (id, peer) in self.active_peers.iter_mut() {
+for (id, peer) in &mut self.active_peers {
if let Some(mut response) = peer.pending_response.take() {
match response.poll(cx) {
Poll::Ready(res) => {
2 changes: 1 addition & 1 deletion crates/net/network/src/test_utils/testnet.rs
@@ -260,7 +260,7 @@ where

fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
-for peer in this.peers.iter_mut() {
+for peer in &mut this.peers {
let _ = peer.poll_unpin(cx);
}
Poll::Pending
2 changes: 1 addition & 1 deletion crates/net/network/src/transactions/fetcher.rs
@@ -661,7 +661,7 @@ impl TransactionFetcher {

#[cfg(debug_assertions)]
{
-for hash in new_announced_hashes.iter() {
+for hash in &new_announced_hashes {
if self.hashes_pending_fetch.contains(hash) {
debug!(target: "net::tx", "`{}` should have been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and `@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `{}`: {:?}",
format!("{:?}", new_announced_hashes), // Assuming new_announced_hashes can be debug-printed directly
8 changes: 4 additions & 4 deletions crates/net/network/src/transactions/mod.rs
@@ -429,7 +429,7 @@ where
// Iterate through the transactions to propagate and fill the hashes and full
// transaction lists, before deciding whether or not to send full transactions to the
// peer.
-for tx in to_propagate.iter() {
+for tx in &to_propagate {
if peer.seen_transactions.insert(tx.hash()) {
hashes.push(tx);

@@ -469,7 +469,7 @@ where
} else {
let new_full_transactions = full_transactions.build();

-for tx in new_full_transactions.iter() {
+for tx in &new_full_transactions {
propagated
.0
.entry(tx.hash())
@@ -527,7 +527,7 @@ where
}

let new_full_transactions = full_transactions.build();
-for tx in new_full_transactions.iter() {
+for tx in &new_full_transactions {
propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(peer_id));
}
// send full transactions
@@ -955,7 +955,7 @@ where
// requests (based on received `NewPooledTransactionHashes`) then we already
// recorded the hashes as seen by this peer in `Self::on_new_pooled_transaction_hashes`.
let mut num_already_seen_by_peer = 0;
-for tx in transactions.iter() {
+for tx in &transactions {
if source.is_broadcast() && !peer.seen_transactions.insert(*tx.hash()) {
num_already_seen_by_peer += 1;
}
6 changes: 3 additions & 3 deletions crates/primitives/src/prune/mod.rs
@@ -29,7 +29,7 @@ impl ReceiptsLogPruneConfig {
/// Example:
///
/// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }`
///
///
/// for `tip: 1000`, gets transformed to a map such as:
///
/// `{ 500: [addrB], 872: [addrA, addrC] }`
@@ -46,7 +46,7 @@ impl ReceiptsLogPruneConfig {
let mut map = BTreeMap::new();
let pruned_block = pruned_block.unwrap_or_default();

-for (address, mode) in self.0.iter() {
+for (address, mode) in &self.0 {
// Getting `None`, means that there is nothing to prune yet, so we need it to include in
// the BTreeMap (block = 0), otherwise it will be excluded.
// Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all
@@ -75,7 +75,7 @@ impl ReceiptsLogPruneConfig {
let pruned_block = pruned_block.unwrap_or_default();
let mut lowest = None;

-for (_, mode) in self.0.iter() {
+for mode in self.0.values() {
if let PruneMode::Distance(_) = mode {
if let Some((block, _)) =
mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
2 changes: 1 addition & 1 deletion crates/primitives/src/request.rs
@@ -31,7 +31,7 @@ impl Encodable for Requests {
let mut h = alloy_rlp::Header { list: true, payload_length: 0 };

let mut encoded = Vec::new();
-for req in self.0.iter() {
+for req in &self.0 {
let encoded_req = req.encoded_7685();
h.payload_length += encoded_req.len();
encoded.push(Bytes::from(encoded_req));
2 changes: 1 addition & 1 deletion crates/primitives/src/transaction/pooled.rs
@@ -737,7 +737,7 @@ mod tests {
&hex!("d30102808083c5cd02887dc5cdfd9e64fd9e407c56"),
];

-for hex_data in input_too_short.iter() {
+for hex_data in &input_too_short {
let input_rlp = &mut &hex_data[..];
let res = PooledTransactionsElement::decode(input_rlp);

2 changes: 1 addition & 1 deletion crates/primitives/src/trie/hash_builder/state.rs
@@ -71,7 +71,7 @@ impl Compact for HashBuilderState {

buf.put_u16(self.stack.len() as u16);
len += 2;
-for item in self.stack.iter() {
+for item in &self.stack {
buf.put_u16(item.len() as u16);
buf.put_slice(&item[..]);
len += 2 + item.len();
2 changes: 1 addition & 1 deletion crates/prune/src/segments/headers.rs
@@ -212,7 +212,7 @@ mod tests {

let headers = random_header_range(&mut rng, 0..100, B256::ZERO);
let tx = db.factory.provider_rw().unwrap().into_tx();
-for header in headers.iter() {
+for header in &headers {
TestStageDB::insert_header(None, &tx, header, U256::ZERO).unwrap();
}
tx.commit().unwrap();
2 changes: 1 addition & 1 deletion crates/revm/src/state_change.rs
@@ -238,7 +238,7 @@ pub fn insert_post_block_withdrawals_balance_increments(
// Process withdrawals
if chain_spec.is_shanghai_active_at_timestamp(block_timestamp) {
if let Some(withdrawals) = withdrawals {
-for withdrawal in withdrawals.iter() {
+for withdrawal in withdrawals {
if withdrawal.amount > 0 {
*balance_increments.entry(withdrawal.address).or_default() +=
withdrawal.amount_wei().to::<u128>();
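The withdrawals loop above drops `.iter()` without adding `&`, presumably because the binding is already a shared reference to a collection whose reference type implements `IntoIterator`. A hedged sketch of that shape (hypothetical signature, not reth's):

    fn sketch(withdrawal_amounts: &Vec<u64>) {
        // `withdrawal_amounts` is already a reference, and `&Vec<u64>`
        // implements `IntoIterator`, so neither `.iter()` nor an extra
        // `&` is needed in the loop header.
        for amount in withdrawal_amounts {
            let _wei = u128::from(*amount) * 1_000_000_000;
        }
    }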
2 changes: 1 addition & 1 deletion crates/rpc/rpc/src/eth/api/fees.rs
@@ -131,7 +131,7 @@ where

if let Some(percentiles) = &reward_percentiles {
let mut block_rewards = Vec::with_capacity(percentiles.len());
-for &percentile in percentiles.iter() {
+for &percentile in percentiles {
block_rewards.push(self.approximate_percentile(entry, percentile));
}
rewards.push(block_rewards);
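The `&percentile` in the loop above is a pattern, not a borrow: iterating the collection by reference yields `&f64`, and the `&`-pattern in the binding copies the value out so the body works with a plain `f64`. A small illustrative sketch (names are hypothetical):

    fn sketch(percentiles: &[f64]) {
        // Iterating `&[f64]` yields `&f64`; the `&p` pattern dereferences
        // in the binding, so `p` is an owned `f64` inside the loop body.
        for &p in percentiles {
            let _half = p * 0.5;
        }
    }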
2 changes: 1 addition & 1 deletion crates/rpc/rpc/src/eth/gas_oracle.rs
@@ -233,7 +233,7 @@ where

let mut prices = Vec::with_capacity(limit);

-for tx in block.body.iter() {
+for tx in &block.body {
let mut effective_gas_tip = None;
// ignore transactions with a tip under the configured threshold
if let Some(ignore_under) = self.ignore_price {
4 changes: 2 additions & 2 deletions crates/rpc/rpc/src/eth/logs_utils.rs
@@ -19,7 +19,7 @@ where
let mut log_index: u64 = 0;
// Iterate over transaction hashes and receipts and append matching logs.
for (receipt_idx, (tx_hash, receipt)) in tx_hashes_and_receipts.into_iter().enumerate() {
-for log in receipt.logs.iter() {
+for log in &receipt.logs {
if log_matches_filter(block_num_hash, log, filter) {
let log = Log {
inner: log.clone(),
@@ -64,7 +64,7 @@ pub(crate) fn append_matching_block_logs(
// The transaction hash of the current receipt.
let mut transaction_hash = None;

-for log in receipt.logs.iter() {
+for log in &receipt.logs {
if log_matches_filter(block_num_hash, log, filter) {
let first_tx_num = match loaded_first_tx_num {
Some(num) => num,
2 changes: 1 addition & 1 deletion crates/stages/stages/src/stages/hashing_account.rs
@@ -91,7 +91,7 @@ impl AccountHashingStage {
let mut account_cursor =
provider.tx_ref().cursor_write::<tables::PlainAccountState>()?;
accounts.sort_by(|a, b| a.0.cmp(&b.0));
-for (addr, acc) in accounts.iter() {
+for (addr, acc) in &accounts {
account_cursor.append(*addr, *acc)?;
}

2 changes: 1 addition & 1 deletion crates/storage/db/src/static_file/mod.rs
@@ -66,7 +66,7 @@ pub fn iter_static_files(path: impl AsRef<Path>) -> Result<SortedStaticFiles, Ni
}
}

-for (_, range_list) in static_files.iter_mut() {
+for range_list in static_files.values_mut() {
// Sort by block end range.
range_list.sort_by(|a, b| a.0.end().cmp(&b.0.end()));
}
4 changes: 2 additions & 2 deletions crates/storage/libmdbx-rs/benches/transaction.rs
@@ -78,7 +78,7 @@ fn bench_put_rand(c: &mut Criterion) {
c.bench_function("bench_put_rand", |b| {
b.iter(|| {
let txn = env.begin_rw_txn().unwrap();
-for (key, data) in items.iter() {
+for (key, data) in &items {
txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap();
}
})
@@ -104,7 +104,7 @@ fn bench_put_rand_raw(c: &mut Criterion) {
mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut());

let mut i: ::libc::c_int = 0;
-for (key, data) in items.iter() {
+for (key, data) in &items {
key_val.iov_len = key.len() as size_t;
key_val.iov_base = key.as_bytes().as_ptr() as *mut _;
data_val.iov_len = data.len() as size_t;
2 changes: 1 addition & 1 deletion crates/storage/libmdbx-rs/src/txn_manager.rs
@@ -211,7 +211,7 @@ mod read_transactions {

// Iterate through active read transactions and time out those that's open for
// longer than `self.max_duration`.
-for entry in self.active.iter() {
+for entry in &self.active {
let (tx, start) = entry.value();
let duration = now - *start;

@@ -873,7 +873,7 @@ mod tests {

// insert initial state to the database
db.update(|tx| {
-for (address, (account, storage)) in prestate.iter() {
+for (address, (account, storage)) in &prestate {
let hashed_address = keccak256(address);
tx.put::<tables::HashedAccounts>(hashed_address, *account).unwrap();
for (slot, value) in storage {