diff --git a/Cargo.toml b/Cargo.toml
index 703788583..2a10b59cc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -175,6 +175,7 @@ equatable_if_let = "warn"
 explicit_into_iter_loop = "warn"
 explicit_iter_loop = "warn"
 flat_map_option = "warn"
+if_not_else = "warn"
 imprecise_flops = "warn"
 iter_on_empty_collections = "warn"
 iter_on_single_items = "warn"
diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs
index 0ff33d26b..544a4c564 100644
--- a/crates/consensus/beacon/src/engine/hooks/controller.rs
+++ b/crates/consensus/beacon/src/engine/hooks/controller.rs
@@ -68,10 +68,10 @@ impl EngineHooksController {
             "Polled running hook with db write access"
         );
 
-        if !result.event.is_finished() {
-            self.active_db_write_hook = Some(hook);
-        } else {
+        if result.event.is_finished() {
             self.hooks.push_back(hook);
+        } else {
+            self.active_db_write_hook = Some(hook);
         }
 
         return Poll::Ready(Ok(result))
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index c7074e366..71d93f3a6 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -652,13 +652,7 @@ where
             return Ok(TreeOutcome::new(status))
         }
 
-        let status = if !self.backfill_sync_state.is_idle() {
-            if let Err(error) = self.buffer_block_without_senders(block) {
-                self.on_insert_block_error(error)?
-            } else {
-                PayloadStatus::from_status(PayloadStatusEnum::Syncing)
-            }
-        } else {
+        let status = if self.backfill_sync_state.is_idle() {
             let mut latest_valid_hash = None;
             let num_hash = block.num_hash();
             match self.insert_block_without_senders(block) {
@@ -684,6 +678,10 @@ where
                 }
                 Err(error) => self.on_insert_block_error(error)?,
             }
+        } else if let Err(error) = self.buffer_block_without_senders(block) {
+            self.on_insert_block_error(error)?
+        } else {
+            PayloadStatus::from_status(PayloadStatusEnum::Syncing)
         };
 
         let mut outcome = TreeOutcome::new(status);
@@ -862,12 +860,12 @@ where
     fn advance_persistence(&mut self) -> Result<(), TryRecvError> {
         if self.should_persist() && !self.persistence_state.in_progress() {
             let blocks_to_persist = self.get_canonical_blocks_to_persist();
-            if !blocks_to_persist.is_empty() {
+            if blocks_to_persist.is_empty() {
+                debug!(target: "engine", "Returned empty set of blocks to persist");
+            } else {
                 let (tx, rx) = oneshot::channel();
                 let _ = self.persistence.save_blocks(blocks_to_persist, tx);
                 self.persistence_state.start(rx);
-            } else {
-                debug!(target: "engine", "Returned empty set of blocks to persist");
             }
         }
 
diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs
index 79817dd3c..16b5d4785 100644
--- a/crates/net/network/src/transactions/mod.rs
+++ b/crates/net/network/src/transactions/mod.rs
@@ -1024,13 +1024,7 @@ where
                     entry.get_mut().insert(peer_id);
                 }
                 Entry::Vacant(entry) => {
-                    if !self.bad_imports.contains(tx.hash()) {
-                        // this is a new transaction that should be imported into the pool
-                        let pool_transaction = Pool::Transaction::from_pooled(tx);
-                        new_txs.push(pool_transaction);
-
-                        entry.insert(HashSet::from([peer_id]));
-                    } else {
+                    if self.bad_imports.contains(tx.hash()) {
                         trace!(target: "net::tx",
                             peer_id=format!("{peer_id:#}"),
                             hash=%tx.hash(),
@@ -1038,6 +1032,12 @@ where
                             "received a known bad transaction from peer"
                         );
                         has_bad_transactions = true;
+                    } else {
+                        // this is a new transaction that should be imported into the pool
+                        let pool_transaction = Pool::Transaction::from_pooled(tx);
+                        new_txs.push(pool_transaction);
+
+                        entry.insert(HashSet::from([peer_id]));
                     }
                 }
             }
diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs
index f16d914fb..6f44d5485 100644
--- a/crates/net/network/src/transactions/validation.rs
+++ b/crates/net/network/src/transactions/validation.rs
@@ -88,10 +88,10 @@ pub trait PartiallyFilterMessage {
         let partially_valid_data = msg.dedup();
 
         (
-            if partially_valid_data.len() != original_len {
-                FilterOutcome::ReportPeer
-            } else {
+            if partially_valid_data.len() == original_len {
                 FilterOutcome::Ok
+            } else {
+                FilterOutcome::ReportPeer
             },
             partially_valid_data,
         )
diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs
index 44b980f6e..148dc0955 100644
--- a/crates/net/p2p/src/full_block.rs
+++ b/crates/net/p2p/src/full_block.rs
@@ -184,12 +184,12 @@ where
                 let (peer, maybe_header) = maybe_header.map(|h| h.map(|h| h.seal_slow())).split();
 
                 if let Some(header) = maybe_header {
-                    if header.hash() != this.hash {
+                    if header.hash() == this.hash {
+                        this.header = Some(header);
+                    } else {
                         debug!(target: "downloaders", expected=?this.hash, received=?header.hash(), "Received wrong header");
                         // received a different header than requested
                         this.client.report_bad_message(peer)
-                    } else {
-                        this.header = Some(header);
                     }
                 }
             }
@@ -491,10 +491,7 @@ where
         headers_falling.sort_unstable_by_key(|h| Reverse(h.number));
 
         // check the starting hash
-        if headers_falling[0].hash() != self.start_hash {
-            // received a different header than requested
-            self.client.report_bad_message(peer);
-        } else {
+        if headers_falling[0].hash() == self.start_hash {
             let headers_rising = headers_falling.iter().rev().cloned().collect::<Vec<_>>();
             // check if the downloaded headers are valid
             if let Err(err) = self.consensus.validate_header_range(&headers_rising) {
@@ -516,6 +513,9 @@ where
 
             // set the headers response
             self.headers = Some(headers_falling);
+        } else {
+            // received a different header than requested
+            self.client.report_bad_message(peer);
         }
     }
 }
diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs
index 523fb326f..705f24eaf 100644
--- a/crates/optimism/rpc/src/eth/transaction.rs
+++ b/crates/optimism/rpc/src/eth/transaction.rs
@@ -1,4 +1,4 @@
-//! Loads and formats OP transaction RPC response.
+//! Loads and formats OP transaction RPC response.
 
 use std::sync::Arc;
 
@@ -93,7 +93,9 @@ where
     ) -> Result<OptimismTxMeta, Self::Error> {
         let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) };
 
-        let (l1_fee, l1_data_gas) = if !tx.is_deposit() {
+        let (l1_fee, l1_data_gas) = if tx.is_deposit() {
+            (None, None)
+        } else {
             let envelope_buf = tx.envelope_encoded();
 
             let inner_l1_fee = l1_block_info
@@ -106,8 +108,6 @@ where
                 Some(inner_l1_fee.saturating_to::<u128>()),
                 Some(inner_l1_data_gas.saturating_to::<u128>()),
             )
-        } else {
-            (None, None)
         };
 
         Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas))
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index 4b36e8c53..cbc975d39 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -1459,7 +1459,14 @@ impl Decodable for TransactionSigned {
         let remaining_len = buf.len();
 
         // if the transaction is encoded as a string then it is a typed transaction
-        if !header.list {
+        if header.list {
+            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
+
+            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
+            // buffer
+            *buf = original_encoding;
+            Ok(tx)
+        } else {
             let tx = Self::decode_enveloped_typed_transaction(buf)?;
 
             let bytes_consumed = remaining_len - buf.len();
@@ -1470,13 +1477,6 @@ impl Decodable for TransactionSigned {
                 return Err(RlpError::UnexpectedLength)
             }
 
-            Ok(tx)
-        } else {
-            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
-
-            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
-            // buffer
-            *buf = original_encoding;
             Ok(tx)
         }
     }
@@ -1493,7 +1493,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned {
 
         if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction {
             tx_eip_4844.placeholder =
-                if tx_eip_4844.to != Address::default() { Some(()) } else { None };
+                if tx_eip_4844.to == Address::default() { None } else { Some(()) };
         }
 
         #[cfg(feature = "optimism")]
diff --git a/crates/prune/prune/src/segments/user/history.rs b/crates/prune/prune/src/segments/user/history.rs
index ff477a39f..3551deeed 100644
--- a/crates/prune/prune/src/segments/user/history.rs
+++ b/crates/prune/prune/src/segments/user/history.rs
@@ -107,54 +107,54 @@ where
         // If there were blocks less than or equal to the target one
         // (so the shard has changed), update the shard.
-        if blocks.len() as usize != higher_blocks.len() {
-            // If there will be no more blocks in the shard after pruning blocks below target
-            // block, we need to remove it, as empty shards are not allowed.
-            if higher_blocks.is_empty() {
-                if key.as_ref().highest_block_number == u64::MAX {
-                    let prev_row = cursor
-                        .prev()?
-                        .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
-                        .transpose()?;
-                    match prev_row {
-                        // If current shard is the last shard for the sharded key that
-                        // has previous shards, replace it with the previous shard.
-                        Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
-                            cursor.delete_current()?;
-                            // Upsert will replace the last shard for this sharded key with
-                            // the previous value.
-                            cursor.upsert(RawKey::new(key), prev_value)?;
-                            Ok(PruneShardOutcome::Updated)
-                        }
-                        // If there's no previous shard for this sharded key,
-                        // just delete last shard completely.
-                        _ => {
-                            // If we successfully moved the cursor to a previous row,
-                            // jump to the original last shard.
-                            if prev_row.is_some() {
-                                cursor.next()?;
-                            }
-                            // Delete shard.
-                            cursor.delete_current()?;
-                            Ok(PruneShardOutcome::Deleted)
+        if blocks.len() as usize == higher_blocks.len() {
+            return Ok(PruneShardOutcome::Unchanged);
+        }
+
+        // If there will be no more blocks in the shard after pruning blocks below target
+        // block, we need to remove it, as empty shards are not allowed.
+        if higher_blocks.is_empty() {
+            if key.as_ref().highest_block_number == u64::MAX {
+                let prev_row = cursor
+                    .prev()?
+                    .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
+                    .transpose()?;
+                match prev_row {
+                    // If current shard is the last shard for the sharded key that
+                    // has previous shards, replace it with the previous shard.
+                    Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
+                        cursor.delete_current()?;
+                        // Upsert will replace the last shard for this sharded key with
+                        // the previous value.
+                        cursor.upsert(RawKey::new(key), prev_value)?;
+                        Ok(PruneShardOutcome::Updated)
+                    }
+                    // If there's no previous shard for this sharded key,
+                    // just delete last shard completely.
+                    _ => {
+                        // If we successfully moved the cursor to a previous row,
+                        // jump to the original last shard.
+                        if prev_row.is_some() {
+                            cursor.next()?;
                         }
+                        // Delete shard.
+                        cursor.delete_current()?;
+                        Ok(PruneShardOutcome::Deleted)
                     }
                 }
-                // If current shard is not the last shard for this sharded key,
-                // just delete it.
-                else {
-                    cursor.delete_current()?;
-                    Ok(PruneShardOutcome::Deleted)
-                }
-            } else {
-                cursor.upsert(
-                    RawKey::new(key),
-                    RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
-                )?;
-                Ok(PruneShardOutcome::Updated)
+            }
+            // If current shard is not the last shard for this sharded key,
+            // just delete it.
+            else {
+                cursor.delete_current()?;
+                Ok(PruneShardOutcome::Deleted)
             }
         } else {
-            Ok(PruneShardOutcome::Unchanged)
+            cursor.upsert(
+                RawKey::new(key),
+                RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
+            )?;
+            Ok(PruneShardOutcome::Updated)
         }
     }
 }
 
diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs
index 10bb2a035..5e7497cb9 100644
--- a/crates/rpc/ipc/src/server/connection.rs
+++ b/crates/rpc/ipc/src/server/connection.rs
@@ -119,7 +119,9 @@ where
 
             'inner: loop {
                 // drain all calls that are ready and put them in the output item queue
-                let drained = if !this.pending_calls.is_empty() {
+                let drained = if this.pending_calls.is_empty() {
+                    false
+                } else {
                     if let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) {
                         let item = match res {
                             Ok(Some(resp)) => resp,
@@ -130,8 +132,6 @@ where
                             continue 'outer;
                         }
                         true
-                    } else {
-                        false
                     };
 
                     // read from the stream
diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs
index f89dc01ff..5075a6d4a 100644
--- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs
+++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs
@@ -176,13 +176,13 @@ where
         }
 
         // sort results then take the configured percentile result
-        let mut price = if !results.is_empty() {
+        let mut price = if results.is_empty() {
+            inner.last_price.price
+        } else {
             results.sort_unstable();
             *results.get((results.len() - 1) * self.oracle_config.percentile as usize / 100).expect(
                 "gas price index is a percent of nonzero array length, so a value always exists",
             )
-        } else {
-            inner.last_price.price
         };
 
         // constrain to the max price
diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs
index 46130d760..3e376f672 100644
--- a/crates/stages/stages/src/stages/headers.rs
+++ b/crates/stages/stages/src/stages/headers.rs
@@ -507,12 +507,12 @@ mod tests {
         async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> {
             self.client.extend(headers.iter().map(|h| h.clone().unseal())).await;
 
-            let tip = if !headers.is_empty() {
-                headers.last().unwrap().hash()
-            } else {
+            let tip = if headers.is_empty() {
                 let tip = random_header(&mut generators::rng(), 0, None);
                 self.db.insert_headers(std::iter::once(&tip))?;
                 tip.hash()
+            } else {
+                headers.last().unwrap().hash()
             };
             self.send_tip(tip);
             Ok(())
         }
diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs
index d527cd7d3..6e2dd118c 100644
--- a/crates/stages/stages/src/stages/merkle.rs
+++ b/crates/stages/stages/src/stages/merkle.rs
@@ -316,7 +316,9 @@ impl Stage for MerkleStage {
         }
 
         // Unwind trie only if there are transitions
-        if !range.is_empty() {
+        if range.is_empty() {
+            info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
+        } else {
             let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
                 .map_err(|e| StageError::Fatal(Box::new(e)))?;
 
@@ -330,8 +332,6 @@ impl Stage for MerkleStage {
             provider.write_trie_updates(&updates)?;
 
             // TODO(alexey): update entities checkpoint
-        } else {
-            info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
         }
 
         Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
diff --git a/crates/storage/codecs/derive/src/compact/enums.rs b/crates/storage/codecs/derive/src/compact/enums.rs
index 4d387e9d7..b60d9feda 100644
--- a/crates/storage/codecs/derive/src/compact/enums.rs
+++ b/crates/storage/codecs/derive/src/compact/enums.rs
@@ -57,10 +57,10 @@ impl<'a> EnumHandler<'a> {
             FieldTypes::EnumUnnamedField((next_ftype, use_alt_impl)) => {
                 // This variant is of the type `EnumVariant(UnnamedField)`
                 let field_type = format_ident!("{next_ftype}");
-                let from_compact_ident = if !use_alt_impl {
-                    format_ident!("from_compact")
-                } else {
+                let from_compact_ident = if *use_alt_impl {
                     format_ident!("specialized_from_compact")
+                } else {
+                    format_ident!("from_compact")
                 };
 
                 // Unnamed type
@@ -98,10 +98,10 @@ impl<'a> EnumHandler<'a> {
         if let Some(next_field) = self.fields_iterator.peek() {
             match next_field {
                 FieldTypes::EnumUnnamedField((_, use_alt_impl)) => {
-                    let to_compact_ident = if !use_alt_impl {
-                        format_ident!("to_compact")
-                    } else {
+                    let to_compact_ident = if *use_alt_impl {
                         format_ident!("specialized_to_compact")
+                    } else {
+                        format_ident!("to_compact")
                     };
 
                     // Unnamed type
diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs
index 5a0f33b32..34ab2edba 100644
--- a/crates/storage/codecs/derive/src/compact/flags.rs
+++ b/crates/storage/codecs/derive/src/compact/flags.rs
@@ -155,15 +155,15 @@ fn build_struct_field_flags(
 /// Returns the total number of bytes used by the flags struct and how many unused bits.
 fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec<TokenStream2>) -> (u8, u8) {
     let remaining = 8 - total_bits % 8;
-    if remaining != 8 {
+    if remaining == 8 {
+        (total_bits / 8, 0)
+    } else {
         let bsize = format_ident!("B{remaining}");
         field_flags.push(quote! {
             #[skip]
             unused: #bsize ,
         });
 
         ((total_bits + remaining) / 8, remaining)
-    } else {
-        (total_bits / 8, 0)
     }
 }
diff --git a/crates/storage/codecs/derive/src/compact/structs.rs b/crates/storage/codecs/derive/src/compact/structs.rs
index b8882448b..07d7a3803 100644
--- a/crates/storage/codecs/derive/src/compact/structs.rs
+++ b/crates/storage/codecs/derive/src/compact/structs.rs
@@ -46,10 +46,10 @@ impl<'a> StructHandler<'a> {
     fn to(&mut self, field_descriptor: &StructFieldDescriptor) {
         let (name, ftype, is_compact, use_alt_impl) = field_descriptor;
 
-        let to_compact_ident = if !use_alt_impl {
-            format_ident!("to_compact")
-        } else {
+        let to_compact_ident = if *use_alt_impl {
             format_ident!("specialized_to_compact")
+        } else {
+            format_ident!("to_compact")
         };
 
         // Should only happen on wrapper structs like `Struct(pub Field)`
@@ -108,10 +108,10 @@ impl<'a> StructHandler<'a> {
             (format_ident!("{name}"), format_ident!("{name}_len"))
         };
 
-        let from_compact_ident = if !use_alt_impl {
-            format_ident!("from_compact")
-        } else {
+        let from_compact_ident = if *use_alt_impl {
             format_ident!("specialized_from_compact")
+        } else {
+            format_ident!("from_compact")
         };
 
         // ! Be careful before changing the following assert ! Especially if the type does not
diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs
index 435aa6d38..b43653f18 100644
--- a/crates/storage/db-common/src/init.rs
+++ b/crates/storage/db-common/src/init.rs
@@ -336,7 +336,12 @@ pub fn init_from_state_dump(
 
     // compute and compare state root. this advances the stage checkpoints.
     let computed_state_root = compute_state_root(&provider_rw)?;
-    if computed_state_root != expected_state_root {
+    if computed_state_root == expected_state_root {
+        info!(target: "reth::cli",
+            ?computed_state_root,
+            "Computed state root matches state root in state dump"
+        );
+    } else {
         error!(target: "reth::cli",
             ?computed_state_root,
             ?expected_state_root,
@@ -344,11 +349,6 @@ pub fn init_from_state_dump(
             "Computed state root does not match state root in state dump"
        );
 
        Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })?
-    } else {
-        info!(target: "reth::cli",
-            ?computed_state_root,
-            "Computed state root matches state root in state dump"
-        );
     }
 
     // insert sync stages for stages that require state
diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs
index 36da31caa..74a704c0d 100644
--- a/crates/storage/libmdbx-rs/src/cursor.rs
+++ b/crates/storage/libmdbx-rs/src/cursor.rs
@@ -101,10 +101,10 @@ where
             assert_ne!(data_ptr, data_val.iov_base);
             let key_out = {
                 // MDBX wrote in new key
-                if key_ptr != key_val.iov_base {
-                    Some(Key::decode_val::<K>(txn, key_val)?)
-                } else {
+                if key_ptr == key_val.iov_base {
                     None
+                } else {
+                    Some(Key::decode_val::<K>(txn, key_val)?)
                 }
             };
             let data_out = Value::decode_val::<K>(txn, data_val)?;
diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs
index faf41f3ae..15e920178 100644
--- a/crates/storage/provider/src/providers/database/provider.rs
+++ b/crates/storage/provider/src/providers/database/provider.rs
@@ -645,10 +645,10 @@ impl DatabaseProvider {
 
         let recovered = match (tx, sender) {
             (Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
-                if tx_id != sender_tx_id {
-                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                } else {
+                if tx_id == sender_tx_id {
                     Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
+                } else {
+                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
                 }
             }
             (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
@@ -1291,10 +1291,10 @@ impl DatabaseProvider {
 
         let recovered = match (tx, sender) {
             (Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
-                if tx_id != sender_tx_id {
-                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                } else {
+                if tx_id == sender_tx_id {
                     Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
+                } else {
+                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
                 }
             }
             (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index 3d05aa09a..db2467419 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -304,13 +304,13 @@ where
     ) -> Result<(), UnifiedStorageWriterError> {
         match &self.static_file {
             Some(writer) => {
-                if writer.user_header().segment() != segment {
+                if writer.user_header().segment() == segment {
+                    Ok(())
+                } else {
                     Err(UnifiedStorageWriterError::IncorrectStaticFileWriter(
                         writer.user_header().segment(),
                         segment,
                     ))
-                } else {
-                    Ok(())
                 }
             }
             None => Err(UnifiedStorageWriterError::MissingStaticFileWriter),
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 8e35dbaf8..4c2d269d9 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -275,14 +275,14 @@ where
     let mut old_entries: Vec<_> = new_entries
         .into_iter()
        .filter_map(|entry| {
-            let old = if !entry.value.is_zero() {
-                storage.insert(entry.key, entry.value)
-            } else {
+            let old = if entry.value.is_zero() {
                 let old = storage.remove(&entry.key);
                 if matches!(old, Some(U256::ZERO)) {
                     return None
                 }
                 old
+            } else {
+                storage.insert(entry.key, entry.value)
             };
             Some(StorageEntry { value: old.unwrap_or(U256::ZERO), ..entry })
         })
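Note on the pattern: this patch enables `clippy::if_not_else` in Cargo.toml and then fixes every warning by stating each condition positively and swapping the branches (the history.rs hunk additionally turns the outer check into an early return). As a minimal, illustrative sketch of what the lint flags — the `is_ready` flag below is hypothetical and not taken from the codebase:

fn label(is_ready: bool) -> &'static str {
    // Before: `if !is_ready { "pending" } else { "ready" }` — a negated
    // condition with an else branch, which clippy::if_not_else warns about.
    // After: the condition is positive and the branches are swapped; the
    // behavior is identical.
    if is_ready {
        "ready"
    } else {
        "pending"
    }
}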