chore: make clippy happy (#12594)

Author: Matthias Seitz
Date: 2024-11-16 06:04:39 +01:00
Committed by: GitHub
Parent: 2dc9a06321
Commit: fc97a0cbaf

24 changed files with 54 additions and 56 deletions
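The changes below are mechanical: every `Option::map_or(false, f)` becomes `is_some_and(f)` and every `Result::map_or(false, f)` becomes `is_ok_and(f)`, both stable since Rust 1.70. Clippy flags the `map_or(false, …)` form because the explicit `false` default obscures the question being asked. A minimal standalone sketch of the equivalence (not taken from the diff):

fn main() {
    let len: Option<usize> = Some(3);

    // Before: the reader has to notice that `false` is the `None` case.
    let old = len.map_or(false, |n| n > 2);

    // After: "is Some and the value satisfies the predicate" reads directly.
    let new = len.is_some_and(|n| n > 2);

    assert_eq!(old, new);
    assert!(!None::<usize>.is_some_and(|n| n > 2));
}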


@@ -111,7 +111,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Environmen
        db: Arc<DatabaseEnv>,
        static_file_provider: StaticFileProvider<N::Primitives>,
    ) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
-        let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
+        let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning());
        let prune_modes =
            config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default();
        let factory = ProviderFactory::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::new(


@@ -47,19 +47,19 @@ impl ForkchoiceStateTracker {
    /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`]
    #[allow(dead_code)]
    pub(crate) fn is_latest_valid(&self) -> bool {
-        self.latest_status().map_or(false, |s| s.is_valid())
+        self.latest_status().is_some_and(|s| s.is_valid())
    }

    /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`]
    #[allow(dead_code)]
    pub(crate) fn is_latest_syncing(&self) -> bool {
-        self.latest_status().map_or(false, |s| s.is_syncing())
+        self.latest_status().is_some_and(|s| s.is_syncing())
    }

    /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`]
    #[allow(dead_code)]
    pub fn is_latest_invalid(&self) -> bool {
-        self.latest_status().map_or(false, |s| s.is_invalid())
+        self.latest_status().is_some_and(|s| s.is_invalid())
    }

    /// Returns the last valid head hash.


@@ -12,10 +12,7 @@ use test_fuzz::test_fuzz;
#[test_fuzz]
fn roundtrip_pooled_transactions(hex_data: Vec<u8>) -> Result<(), alloy_rlp::Error> {
    let input_rlp = &mut &hex_data[..];
-    let txs: PooledTransactions = match PooledTransactions::decode(input_rlp) {
-        Ok(txs) => txs,
-        Err(e) => return Err(e),
-    };
+    let txs: PooledTransactions = PooledTransactions::decode(input_rlp)?;

    // get the amount of bytes decoded in `decode` by subtracting the length of the original buf,
    // from the length of the remaining bytes

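This hunk applies a different clippy suggestion: a `match` that forwards the `Ok` value and early-returns the `Err` is exactly what the `?` operator desugars to. A standalone sketch with a hypothetical function, not from the diff:

fn double_parsed(input: &str) -> Result<i32, std::num::ParseIntError> {
    // Equivalent to the removed four-line `match` that returned `Err(e)` early.
    let n = input.parse::<i32>()?;
    Ok(n * 2)
}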

@@ -416,7 +416,7 @@ where
            .with_static_files_metrics();

        let has_receipt_pruning =
-            self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
+            self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning());

        // Check for consistency between database and static files. If it fails, it unwinds to
        // the first block that's consistent between database and static files.


@@ -48,7 +48,7 @@ where
        request: TransactionRequest,
    ) -> Result<TxEnv, Self::Error> {
        // Ensure that if versioned hashes are set, they're not empty
-        if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) {
+        if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
            return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err())
        }


@@ -91,7 +91,7 @@ impl<Provider: StaticFileProviderFactory + DBProvider<Tx: DbTxMut>> Segment<Prov
            pruned += entries_pruned;
        }

-        let done = last_pruned_block.map_or(false, |block| block == block_range_end);
+        let done = last_pruned_block == Some(block_range_end);
        let progress = PruneProgress::new(done, &limiter);

        Ok(SegmentOutput {

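This is the one spot where the fix is not `is_some_and`: when the closure is a pure equality test, comparing against `Some(value)` is simpler, and `PartialEq` on `Option` already treats `None` as unequal. Sketch with illustrative names:

fn is_done(last_pruned_block: Option<u64>, block_range_end: u64) -> bool {
    // `None == Some(block_range_end)` is `false`, so no explicit default is needed.
    last_pruned_block == Some(block_range_end)
}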

@@ -78,7 +78,7 @@ impl PruneLimiter {
    /// Returns `true` if the limit on the number of deleted entries (rows in the database) is
    /// reached.
    pub fn is_deleted_entries_limit_reached(&self) -> bool {
-        self.deleted_entries_limit.as_ref().map_or(false, |limit| limit.is_limit_reached())
+        self.deleted_entries_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
    }

    /// Increments the number of deleted entries by the given number.
@@ -112,7 +112,7 @@ impl PruneLimiter {
    /// Returns `true` if time limit is reached.
    pub fn is_time_limit_reached(&self) -> bool {
-        self.time_limit.as_ref().map_or(false, |limit| limit.is_limit_reached())
+        self.time_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
    }

    /// Returns `true` if any limit is reached.


@@ -106,11 +106,11 @@ impl<T> BlockBatchRecord<T> {
            !self
                .prune_modes
                .account_history
-                .map_or(false, |mode| mode.should_prune(block_number, tip)) &&
+                .is_some_and(|mode| mode.should_prune(block_number, tip)) &&
            !self
                .prune_modes
                .storage_history
-                .map_or(false, |mode| mode.should_prune(block_number, tip))
+                .is_some_and(|mode| mode.should_prune(block_number, tip))
        }) {
            BundleRetention::Reverts
        } else {
@@ -143,7 +143,7 @@ impl<T> BlockBatchRecord<T> {
        // Block receipts should not be retained
        if self.prune_modes.receipts == Some(PruneMode::Full) ||
            // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`]
-            self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip))
+            self.prune_modes.receipts.is_some_and(|mode| mode.should_prune(block_number, tip))
        {
            receipts.clear();
            return Ok(())


@@ -1941,17 +1941,17 @@ impl TransportRpcModuleConfig {
    /// Returns true if the given module is configured for the http transport.
    pub fn contains_http(&self, module: &RethRpcModule) -> bool {
-        self.http.as_ref().map_or(false, |http| http.contains(module))
+        self.http.as_ref().is_some_and(|http| http.contains(module))
    }

    /// Returns true if the given module is configured for the ws transport.
    pub fn contains_ws(&self, module: &RethRpcModule) -> bool {
-        self.ws.as_ref().map_or(false, |ws| ws.contains(module))
+        self.ws.as_ref().is_some_and(|ws| ws.contains(module))
    }

    /// Returns true if the given module is configured for the ipc transport.
    pub fn contains_ipc(&self, module: &RethRpcModule) -> bool {
-        self.ipc.as_ref().map_or(false, |ipc| ipc.contains(module))
+        self.ipc.as_ref().is_some_and(|ipc| ipc.contains(module))
    }

    /// Ensures that both http and ws are configured and that they are configured to use the same


@@ -690,7 +690,7 @@ pub trait Call: LoadState<Evm: ConfigureEvm<Header = Header>> + SpawnBlocking {
        request: TransactionRequest,
    ) -> Result<TxEnv, Self::Error> {
        // Ensure that if versioned hashes are set, they're not empty
-        if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) {
+        if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
            return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err())
        }


@@ -177,7 +177,7 @@ impl<N: ProviderNodeTypes> Pipeline<N> {
        self.progress
            .minimum_block_number
            .zip(self.max_block)
-            .map_or(false, |(progress, target)| progress >= target)
+            .is_some_and(|(progress, target)| progress >= target)
        {
            trace!(
                target: "sync::pipeline",
@@ -393,7 +393,7 @@ impl<N: ProviderNodeTypes> Pipeline<N> {
        let stage_reached_max_block = prev_checkpoint
            .zip(self.max_block)
-            .map_or(false, |(prev_progress, target)| prev_progress.block_number >= target);
+            .is_some_and(|(prev_progress, target)| prev_progress.block_number >= target);
        if stage_reached_max_block {
            warn!(
                target: "sync::pipeline",

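The pipeline checks combine two `Option`s with `zip` first; `zip` yields `Some` only when both sides are, so the `is_some_and` predicate runs only with both values in hand. Standalone sketch with illustrative names, not reth's types:

fn reached_target(progress: Option<u64>, max_block: Option<u64>) -> bool {
    // `zip` produces `Some((p, t))` only if both inputs are `Some`.
    progress.zip(max_block).is_some_and(|(p, t)| p >= t)
}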

@@ -347,7 +347,7 @@ impl OperationMetrics {
        // Record duration only for large values to prevent the performance hit of clock syscall
        // on small operations
-        if value_size.map_or(false, |size| size > LARGE_VALUE_THRESHOLD_BYTES) {
+        if value_size.is_some_and(|size| size > LARGE_VALUE_THRESHOLD_BYTES) {
            let start = Instant::now();
            let result = f();
            self.large_value_duration_seconds.record(start.elapsed());


@@ -38,7 +38,7 @@ pub fn iter_static_files(path: impl AsRef<Path>) -> Result<SortedStaticFiles, Ni
        .collect::<Vec<_>>();

    for entry in entries {
-        if entry.metadata().map_or(false, |metadata| metadata.is_file()) {
+        if entry.metadata().is_ok_and(|metadata| metadata.is_file()) {
            if let Some((segment, _)) =
                StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy())
            {

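Here the receiver is a `Result` rather than an `Option` (`DirEntry::metadata` returns `io::Result<Metadata>`), so the replacement is `is_ok_and`, which treats `Err` the way `is_some_and` treats `None`. A minimal sketch:

use std::{fs, io};

fn count_files(dir: &std::path::Path) -> io::Result<usize> {
    let mut count = 0;
    for entry in fs::read_dir(dir)? {
        // Before: entry.metadata().map_or(false, |m| m.is_file())
        if entry?.metadata().is_ok_and(|m| m.is_file()) {
            count += 1;
        }
    }
    Ok(count)
}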

@@ -1107,7 +1107,7 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
        };

        if static_file_upper_bound
-            .map_or(false, |static_file_upper_bound| static_file_upper_bound >= number)
+            .is_some_and(|static_file_upper_bound| static_file_upper_bound >= number)
        {
            return fetch_from_static_file(self)
        }


@@ -318,7 +318,7 @@ impl<T: TransactionOrdering> TxPool<T> {
        // blob pool that are valid with the lower blob fee
        if best_transactions_attributes
            .blob_fee
-            .map_or(false, |fee| fee < self.all_transactions.pending_fees.blob_fee as u64)
+            .is_some_and(|fee| fee < self.all_transactions.pending_fees.blob_fee as u64)
        {
            let unlocked_by_blob_fee =
                self.blob_pool.satisfy_attributes(best_transactions_attributes);
@@ -1446,7 +1446,7 @@ impl<T: PoolTransaction> AllTransactions<T> {
    fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction<T>) -> bool {
        self.txs_iter(tx.transaction_id.sender)
            .next()
-            .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction))
+            .is_some_and(|(_, existing)| tx.tx_type_conflicts_with(&existing.transaction))
    }

    /// Additional checks for a new transaction.

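`Iterator::next()` also returns an `Option`, so "does the first yielded item satisfy a predicate" chains the same way. Sketch:

fn first_is_even(values: &[u32]) -> bool {
    values.iter().next().is_some_and(|n| n % 2 == 0)
}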

@@ -32,7 +32,7 @@ impl SparseStateTrie {
    /// Returns `true` if storage slot for account was already revealed.
    pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool {
-        self.revealed.get(account).map_or(false, |slots| slots.contains(slot))
+        self.revealed.get(account).is_some_and(|slots| slots.contains(slot))
    }

    /// Reveal unknown trie paths from provided leaf path and its proof for the account.


@@ -529,7 +529,7 @@ impl RevealedSparseTrie {
        let unset_branch_nibble = self
            .nodes
            .get(&child_path)
-            .map_or(false, move |node| match node {
+            .is_some_and(move |node| match node {
                SparseNode::Leaf { key, .. } => {
                    // Get full path of the leaf node
                    child_path.extend_from_slice_unchecked(key);
@@ -665,7 +665,7 @@ impl RevealedSparseTrie {
            child_path.extend_from_slice_unchecked(key);
            if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) {
                RlpNode::word_rlp(&hash)
-            } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) {
+            } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) {
                let (_, child) = buffers.rlp_node_stack.pop().unwrap();
                self.rlp_buf.clear();
                let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf);
@@ -699,7 +699,7 @@ impl RevealedSparseTrie {
                .resize(buffers.branch_child_buf.len(), Default::default());
            let mut added_children = false;
            for (i, child_path) in buffers.branch_child_buf.iter().enumerate() {
-                if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) {
+                if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) {
                    let (_, child) = buffers.rlp_node_stack.pop().unwrap();
                    // Insert children in the resulting buffer in a normal order, because
                    // initially we iterated in reverse.


@@ -30,7 +30,7 @@ where
    /// exhausted. Returns the first entry for which `comparator` returns `false` or `None`.
    fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> {
        let mut entry = self.entries.get(self.index);
-        while entry.map_or(false, |entry| comparator(&entry.0)) {
+        while entry.is_some_and(|entry| comparator(&entry.0)) {
            self.index += 1;
            entry = self.entries.get(self.index);
        }

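`is_some_and` also reads well as a `while` condition for cursor-style scans: the loop ends either when the predicate fails or when the source runs out. Reduced sketch with a plain slice standing in for the in-memory cursor:

fn skip_while<K: Copy>(entries: &[K], mut index: usize, skip: impl Fn(&K) -> bool) -> Option<K> {
    let mut entry = entries.get(index);
    while entry.is_some_and(|e| skip(e)) {
        index += 1;
        entry = entries.get(index);
    }
    entry.copied()
}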

@@ -82,14 +82,14 @@ where
        // It's an exact match, return the account from post state without looking up in the
        // database.
-        if post_state_entry.map_or(false, |entry| entry.0 == key) {
+        if post_state_entry.is_some_and(|entry| entry.0 == key) {
            return Ok(post_state_entry)
        }

        // It's not an exact match, reposition to the first greater or equal account that wasn't
        // cleared.
        let mut db_entry = self.cursor.seek(key)?;
-        while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) {
+        while db_entry.as_ref().is_some_and(|(address, _)| self.is_account_cleared(address)) {
            db_entry = self.cursor.next()?;
        }
@@ -103,7 +103,7 @@ where
        // If post state was given precedence or account was cleared, move the cursor forward.
        let mut db_entry = self.cursor.seek(last_account)?;
-        while db_entry.as_ref().map_or(false, |(address, _)| {
+        while db_entry.as_ref().is_some_and(|(address, _)| {
            address <= &last_account || self.is_account_cleared(address)
        }) {
            db_entry = self.cursor.next()?;
@@ -200,14 +200,14 @@ where
        let post_state_cursor =
            post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots));
        let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots);
-        let storage_wiped = post_state_storage.map_or(false, |s| s.wiped);
+        let storage_wiped = post_state_storage.is_some_and(|s| s.wiped);
        Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None }
    }

    /// Check if the slot was zeroed out in the post state.
    /// The database is not checked since it already has no zero-valued slots.
    fn is_slot_zero_valued(&self, slot: &B256) -> bool {
-        self.cleared_slots.map_or(false, |s| s.contains(slot))
+        self.cleared_slots.is_some_and(|s| s.contains(slot))
    }

    /// Find the storage entry in post state or database that's greater or equal to provided subkey.
@@ -217,14 +217,14 @@ where
        // If database storage was wiped or it's an exact match,
        // return the storage slot from post state without looking up in the database.
-        if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) {
+        if self.storage_wiped || post_state_entry.is_some_and(|entry| entry.0 == subkey) {
            return Ok(post_state_entry)
        }

        // It's not an exact match and storage was not wiped,
        // reposition to the first greater or equal account.
        let mut db_entry = self.cursor.seek(subkey)?;
-        while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) {
+        while db_entry.as_ref().is_some_and(|entry| self.is_slot_zero_valued(&entry.0)) {
            db_entry = self.cursor.next()?;
        }
@@ -248,7 +248,7 @@ where
        let mut db_entry = self.cursor.seek(last_slot)?;
        while db_entry
            .as_ref()
-            .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0))
+            .is_some_and(|entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0))
        {
            db_entry = self.cursor.next()?;
        }


@@ -106,7 +106,7 @@ where
        if let Some((hashed_key, value)) = self.current_hashed_entry.take() {
            // If the walker's key is less than the unpacked hashed key,
            // reset the checked status and continue
-            if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) {
+            if self.walker.key().is_some_and(|key| key < &Nibbles::unpack(hashed_key)) {
                self.current_walker_key_checked = false;
                continue
            }


@@ -79,13 +79,13 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> {
        exact: bool,
    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
        let in_memory = self.in_memory_cursor.seek(&key);
-        if exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key) {
+        if exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key) {
            return Ok(in_memory)
        }

        // Reposition the cursor to the first greater or equal node that wasn't removed.
        let mut db_entry = self.cursor.seek(key.clone())?;
-        while db_entry.as_ref().map_or(false, |entry| self.removed_nodes.contains(&entry.0)) {
+        while db_entry.as_ref().is_some_and(|entry| self.removed_nodes.contains(&entry.0)) {
            db_entry = self.cursor.next()?;
        }
@@ -105,7 +105,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> {
        let mut db_entry = self.cursor.seek(last.clone())?;
        while db_entry
            .as_ref()
-            .map_or(false, |entry| entry.0 < last || self.removed_nodes.contains(&entry.0))
+            .is_some_and(|entry| entry.0 < last || self.removed_nodes.contains(&entry.0))
        {
            db_entry = self.cursor.next()?;
        }
@@ -184,7 +184,7 @@ impl<'a, C> InMemoryStorageTrieCursor<'a, C> {
    ) -> Self {
        let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes));
        let removed_nodes = updates.map(|u| &u.removed_nodes);
-        let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted);
+        let storage_trie_cleared = updates.is_some_and(|u| u.is_deleted);
        Self {
            hashed_address,
            cursor,
@@ -204,16 +204,17 @@ impl<C: TrieCursor> InMemoryStorageTrieCursor<'_, C> {
    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
        let in_memory = self.in_memory_cursor.as_mut().and_then(|c| c.seek(&key));
        if self.storage_trie_cleared ||
-            (exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key))
+            (exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key))
        {
            return Ok(in_memory.filter(|(nibbles, _)| !exact || nibbles == &key))
        }

        // Reposition the cursor to the first greater or equal node that wasn't removed.
        let mut db_entry = self.cursor.seek(key.clone())?;
-        while db_entry.as_ref().map_or(false, |entry| {
-            self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0))
-        }) {
+        while db_entry
+            .as_ref()
+            .is_some_and(|entry| self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0)))
+        {
            db_entry = self.cursor.next()?;
        }
@@ -234,8 +235,8 @@ impl<C: TrieCursor> InMemoryStorageTrieCursor<'_, C> {
        // Reposition the cursor to the first greater or equal node that wasn't removed.
        let mut db_entry = self.cursor.seek(last.clone())?;
-        while db_entry.as_ref().map_or(false, |entry| {
-            entry.0 < last || self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0))
+        while db_entry.as_ref().is_some_and(|entry| {
+            entry.0 < last || self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0))
        }) {
            db_entry = self.cursor.next()?;
        }

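When the option is nested, both layers collapse: the outer `is_some_and` guards the DB entry and the inner one guards the optional removed-set, each defaulting to `false` on `None`. Reduced sketch with hypothetical types in place of the trie cursor's internals:

use std::collections::HashSet;

fn entry_was_removed(entry: Option<&(u64, Vec<u8>)>, removed: Option<&HashSet<u64>>) -> bool {
    entry.is_some_and(|(key, _)| removed.is_some_and(|r| r.contains(key)))
}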

@@ -89,7 +89,7 @@ impl CursorSubNode {
    /// Returns `true` if the current nibble has a root hash.
    pub fn hash_flag(&self) -> bool {
-        self.node.as_ref().map_or(false, |node| match self.nibble {
+        self.node.as_ref().is_some_and(|node| match self.nibble {
            // This guy has it
            -1 => node.root_hash.is_some(),
            // Or get it from the children

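The closure handed to `is_some_and` can carry a full `match`, as in `hash_flag` above; the refactor only changes how the `None` case is expressed. Reduced sketch with a simplified stand-in for the node type:

enum Node {
    Branch { root_hash: Option<u64> },
    Leaf,
}

fn hash_flag(node: Option<&Node>) -> bool {
    node.is_some_and(|n| match n {
        Node::Branch { root_hash } => root_hash.is_some(),
        Node::Leaf => false,
    })
}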

@@ -88,7 +88,7 @@ impl<C> TrieWalker<C> {
    /// Indicates whether the children of the current node are present in the trie.
    pub fn children_are_in_trie(&self) -> bool {
-        self.stack.last().map_or(false, |n| n.tree_flag())
+        self.stack.last().is_some_and(|n| n.tree_flag())
    }

    /// Returns the next unprocessed key in the trie.
@@ -112,7 +112,7 @@ impl<C> TrieWalker<C> {
        self.can_skip_current_node = self
            .stack
            .last()
-            .map_or(false, |node| !self.changes.contains(node.full_key()) && node.hash_flag());
+            .is_some_and(|node| !self.changes.contains(node.full_key()) && node.hash_flag());
    }
}


@@ -292,7 +292,7 @@ where
        let mut keys = trie_nodes.keys().peekable();
        let mut ignored = HashSet::<Nibbles>::default();
        while let Some(key) = keys.next() {
-            if keys.peek().map_or(false, |next| next.starts_with(key)) {
+            if keys.peek().is_some_and(|next| next.starts_with(key)) {
                ignored.insert(key.clone());
            }
        }
@@ -306,7 +306,7 @@ where
            if hash_builder.key.starts_with(&parent_branch_path) ||
                trie_nodes
                    .peek()
-                    .map_or(false, |next| next.0.starts_with(&parent_branch_path))
+                    .is_some_and(|next| next.0.starts_with(&parent_branch_path))
            {
                hash_builder.add_branch(path, branch_hash, false);
            } else {