clippy: add if_not_else clippy lint (#10524)

Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
Author: Thomas Coratger
Date: 2024-08-26 03:58:06 -07:00
Committed by: GitHub
Parent: 1b1e835fb1
Commit: 709e7b73ce
21 changed files with 133 additions and 134 deletions
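
The clippy `if_not_else` lint fires on an `if` whose condition is a negation (`!x` or `x != y`) when an `else` branch follows: inverting the condition and swapping the branches puts the positive case first. Every hunk below is that same mechanical rewrite. A minimal sketch of the pattern (hypothetical example, not taken from the diff):

    // Flagged by clippy::if_not_else: negated condition with an else branch.
    fn describe(n: u32) -> &'static str {
        if n != 0 {
            "nonzero"
        } else {
            "zero"
        }
    }

    // The suggested shape: invert the condition and swap the branches.
    fn describe_fixed(n: u32) -> &'static str {
        if n == 0 {
            "zero"
        } else {
            "nonzero"
        }
    }

    fn main() {
        // The rewrite is behavior-preserving.
        assert_eq!(describe(0), describe_fixed(0));
        assert_eq!(describe(3), describe_fixed(3));
    }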

@@ -175,6 +175,7 @@ equatable_if_let = "warn"
 explicit_into_iter_loop = "warn"
 explicit_iter_loop = "warn"
 flat_map_option = "warn"
+if_not_else = "warn"
 imprecise_flops = "warn"
 iter_on_empty_collections = "warn"
 iter_on_single_items = "warn"
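
This entry lands in the workspace lint table, so it applies to every member crate. Where a negated condition genuinely reads better, the alternative to restructuring is a local opt-out; a sketch, assuming the standard clippy attribute syntax (hypothetical function):

    // Keep the negated form deliberately and silence the workspace-level
    // warn for this one item.
    #[allow(clippy::if_not_else)]
    fn status(ready: bool) -> &'static str {
        if !ready {
            "waiting"
        } else {
            "ready"
        }
    }

    fn main() {
        assert_eq!(status(false), "waiting");
    }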

@@ -68,10 +68,10 @@ impl EngineHooksController {
                 "Polled running hook with db write access"
             );
-            if !result.event.is_finished() {
-                self.active_db_write_hook = Some(hook);
-            } else {
+            if result.event.is_finished() {
                 self.hooks.push_back(hook);
+            } else {
+                self.active_db_write_hook = Some(hook);
             }
             return Poll::Ready(Ok(result))

@@ -652,13 +652,7 @@ where
             return Ok(TreeOutcome::new(status))
         }
-        let status = if !self.backfill_sync_state.is_idle() {
-            if let Err(error) = self.buffer_block_without_senders(block) {
-                self.on_insert_block_error(error)?
-            } else {
-                PayloadStatus::from_status(PayloadStatusEnum::Syncing)
-            }
-        } else {
+        let status = if self.backfill_sync_state.is_idle() {
             let mut latest_valid_hash = None;
             let num_hash = block.num_hash();
             match self.insert_block_without_senders(block) {
@@ -684,6 +678,10 @@ where
                 }
                 Err(error) => self.on_insert_block_error(error)?,
             }
+        } else if let Err(error) = self.buffer_block_without_senders(block) {
+            self.on_insert_block_error(error)?
+        } else {
+            PayloadStatus::from_status(PayloadStatusEnum::Syncing)
         };
         let mut outcome = TreeOutcome::new(status);
@@ -862,12 +860,12 @@ where
     fn advance_persistence(&mut self) -> Result<(), TryRecvError> {
         if self.should_persist() && !self.persistence_state.in_progress() {
             let blocks_to_persist = self.get_canonical_blocks_to_persist();
-            if !blocks_to_persist.is_empty() {
+            if blocks_to_persist.is_empty() {
+                debug!(target: "engine", "Returned empty set of blocks to persist");
+            } else {
                 let (tx, rx) = oneshot::channel();
                 let _ = self.persistence.save_blocks(blocks_to_persist, tx);
                 self.persistence_state.start(rx);
-            } else {
-                debug!(target: "engine", "Returned empty set of blocks to persist");
             }
         }
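
The persist path above hands a oneshot sender to the persistence service and stores the receiver so completion can be polled later. A standalone sketch of that fire-then-poll pattern with tokio's oneshot channel (hypothetical names, not the actual persistence API):

    use tokio::sync::oneshot;

    // Kick off background persistence and hand back the receiver; the caller
    // stashes it and checks for completion later (cf. `persistence_state.start(rx)`).
    fn save_blocks_in_background(blocks: Vec<u64>) -> oneshot::Receiver<u64> {
        let (tx, rx) = oneshot::channel();
        tokio::spawn(async move {
            // ... persist `blocks` here, then report the last persisted block number.
            let _ = tx.send(blocks.last().copied().unwrap_or_default());
        });
        rx
    }

    #[tokio::main]
    async fn main() {
        let rx = save_blocks_in_background(vec![1, 2, 3]);
        assert_eq!(rx.await.unwrap(), 3);
    }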

@@ -1024,13 +1024,7 @@ where
                     entry.get_mut().insert(peer_id);
                 }
                 Entry::Vacant(entry) => {
-                    if !self.bad_imports.contains(tx.hash()) {
-                        // this is a new transaction that should be imported into the pool
-                        let pool_transaction = Pool::Transaction::from_pooled(tx);
-                        new_txs.push(pool_transaction);
-                        entry.insert(HashSet::from([peer_id]));
-                    } else {
+                    if self.bad_imports.contains(tx.hash()) {
                         trace!(target: "net::tx",
                             peer_id=format!("{peer_id:#}"),
                             hash=%tx.hash(),
@@ -1038,6 +1032,12 @@ where
                             "received a known bad transaction from peer"
                         );
                         has_bad_transactions = true;
+                    } else {
+                        // this is a new transaction that should be imported into the pool
+                        let pool_transaction = Pool::Transaction::from_pooled(tx);
+                        new_txs.push(pool_transaction);
+                        entry.insert(HashSet::from([peer_id]));
                     }
                 }
             }

@@ -88,10 +88,10 @@ pub trait PartiallyFilterMessage {
         let partially_valid_data = msg.dedup();
         (
-            if partially_valid_data.len() != original_len {
-                FilterOutcome::ReportPeer
-            } else {
+            if partially_valid_data.len() == original_len {
                 FilterOutcome::Ok
+            } else {
+                FilterOutcome::ReportPeer
             },
             partially_valid_data,
         )

@@ -184,12 +184,12 @@ where
                 let (peer, maybe_header) =
                     maybe_header.map(|h| h.map(|h| h.seal_slow())).split();
                 if let Some(header) = maybe_header {
-                    if header.hash() != this.hash {
+                    if header.hash() == this.hash {
+                        this.header = Some(header);
+                    } else {
                         debug!(target: "downloaders", expected=?this.hash, received=?header.hash(), "Received wrong header");
                         // received a different header than requested
                         this.client.report_bad_message(peer)
-                    } else {
-                        this.header = Some(header);
                     }
                 }
@@ -491,10 +491,7 @@ where
                 headers_falling.sort_unstable_by_key(|h| Reverse(h.number));
                 // check the starting hash
-                if headers_falling[0].hash() != self.start_hash {
-                    // received a different header than requested
-                    self.client.report_bad_message(peer);
-                } else {
+                if headers_falling[0].hash() == self.start_hash {
                     let headers_rising = headers_falling.iter().rev().cloned().collect::<Vec<_>>();
                     // check if the downloaded headers are valid
                     if let Err(err) = self.consensus.validate_header_range(&headers_rising) {
@@ -516,6 +513,9 @@ where
                     // set the headers response
                     self.headers = Some(headers_falling);
+                } else {
+                    // received a different header than requested
+                    self.client.report_bad_message(peer);
                 }
             }
         }

@@ -1,4 +1,4 @@
-//! Loads and formats OP transaction RPC response.
+//! Loads and formats OP transaction RPC response.
 use std::sync::Arc;
@@ -93,7 +93,9 @@ where
     ) -> Result<OptimismTxMeta, <Self as EthApiTypes>::Error> {
         let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) };
-        let (l1_fee, l1_data_gas) = if !tx.is_deposit() {
+        let (l1_fee, l1_data_gas) = if tx.is_deposit() {
+            (None, None)
+        } else {
             let envelope_buf = tx.envelope_encoded();
             let inner_l1_fee = l1_block_info
@@ -106,8 +108,6 @@ where
                 Some(inner_l1_fee.saturating_to::<u128>()),
                 Some(inner_l1_data_gas.saturating_to::<u128>()),
             )
-        } else {
-            (None, None)
         };
         Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas))

@@ -1459,7 +1459,14 @@ impl Decodable for TransactionSigned {
         let remaining_len = buf.len();
         // if the transaction is encoded as a string then it is a typed transaction
-        if !header.list {
+        if header.list {
+            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
+            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
+            // buffer
+            *buf = original_encoding;
+            Ok(tx)
+        } else {
             let tx = Self::decode_enveloped_typed_transaction(buf)?;
             let bytes_consumed = remaining_len - buf.len();
@@ -1470,13 +1477,6 @@ impl Decodable for TransactionSigned {
                 return Err(RlpError::UnexpectedLength)
             }
             Ok(tx)
-        } else {
-            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
-            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
-            // buffer
-            *buf = original_encoding;
-            Ok(tx)
-        }
     }
 }
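
The branch order above hinges on the EIP-2718 envelope rule named in the comment: a legacy transaction is an RLP list, while a typed transaction is encoded as an RLP string whose payload starts with the type byte. A standalone sketch of that discrimination on the first byte (simplified; the real code inspects alloy-rlp's decoded Header, not raw bytes):

    // RLP list headers occupy 0xc0..=0xff; string headers sit below 0xc0.
    fn is_legacy_tx(first_byte: u8) -> bool {
        first_byte >= 0xc0
    }

    fn main() {
        assert!(is_legacy_tx(0xf8)); // legacy: long-list header
        assert!(!is_legacy_tx(0xb9)); // typed: long-string header wrapping the envelope
    }
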
@@ -1493,7 +1493,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned {
         if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction {
             tx_eip_4844.placeholder =
-                if tx_eip_4844.to != Address::default() { Some(()) } else { None };
+                if tx_eip_4844.to == Address::default() { None } else { Some(()) };
         }
         #[cfg(feature = "optimism")]

@@ -107,54 +107,54 @@ where
         // If there were blocks less than or equal to the target one
         // (so the shard has changed), update the shard.
-        if blocks.len() as usize != higher_blocks.len() {
-            // If there will be no more blocks in the shard after pruning blocks below target
-            // block, we need to remove it, as empty shards are not allowed.
-            if higher_blocks.is_empty() {
-                if key.as_ref().highest_block_number == u64::MAX {
-                    let prev_row = cursor
-                        .prev()?
-                        .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
-                        .transpose()?;
-                    match prev_row {
-                        // If current shard is the last shard for the sharded key that
-                        // has previous shards, replace it with the previous shard.
-                        Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
-                            cursor.delete_current()?;
-                            // Upsert will replace the last shard for this sharded key with
-                            // the previous value.
-                            cursor.upsert(RawKey::new(key), prev_value)?;
-                            Ok(PruneShardOutcome::Updated)
-                        }
-                        // If there's no previous shard for this sharded key,
-                        // just delete last shard completely.
-                        _ => {
-                            // If we successfully moved the cursor to a previous row,
-                            // jump to the original last shard.
-                            if prev_row.is_some() {
-                                cursor.next()?;
-                            }
-                            // Delete shard.
-                            cursor.delete_current()?;
-                            Ok(PruneShardOutcome::Deleted)
-                        }
-                    }
-                }
-                // If current shard is not the last shard for this sharded key,
-                // just delete it.
-                else {
-                    cursor.delete_current()?;
-                    Ok(PruneShardOutcome::Deleted)
-                }
-            } else {
-                cursor.upsert(
-                    RawKey::new(key),
-                    RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
-                )?;
-                Ok(PruneShardOutcome::Updated)
-            }
-        } else {
-            Ok(PruneShardOutcome::Unchanged)
-        }
+        if blocks.len() as usize == higher_blocks.len() {
+            return Ok(PruneShardOutcome::Unchanged);
+        }
+        // If there will be no more blocks in the shard after pruning blocks below target
+        // block, we need to remove it, as empty shards are not allowed.
+        if higher_blocks.is_empty() {
+            if key.as_ref().highest_block_number == u64::MAX {
+                let prev_row = cursor
+                    .prev()?
+                    .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
+                    .transpose()?;
+                match prev_row {
+                    // If current shard is the last shard for the sharded key that
+                    // has previous shards, replace it with the previous shard.
+                    Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
+                        cursor.delete_current()?;
+                        // Upsert will replace the last shard for this sharded key with
+                        // the previous value.
+                        cursor.upsert(RawKey::new(key), prev_value)?;
+                        Ok(PruneShardOutcome::Updated)
+                    }
+                    // If there's no previous shard for this sharded key,
+                    // just delete last shard completely.
+                    _ => {
+                        // If we successfully moved the cursor to a previous row,
+                        // jump to the original last shard.
+                        if prev_row.is_some() {
+                            cursor.next()?;
+                        }
+                        // Delete shard.
+                        cursor.delete_current()?;
+                        Ok(PruneShardOutcome::Deleted)
+                    }
+                }
+            }
+            // If current shard is not the last shard for this sharded key,
+            // just delete it.
+            else {
+                cursor.delete_current()?;
+                Ok(PruneShardOutcome::Deleted)
+            }
+        } else {
+            cursor.upsert(
+                RawKey::new(key),
+                RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
+            )?;
+            Ok(PruneShardOutcome::Updated)
+        }
     }
 }

@@ -119,7 +119,9 @@ where
         'inner: loop {
             // drain all calls that are ready and put them in the output item queue
-            let drained = if !this.pending_calls.is_empty() {
+            let drained = if this.pending_calls.is_empty() {
+                false
+            } else {
                 if let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) {
                     let item = match res {
                         Ok(Some(resp)) => resp,
@@ -130,8 +132,6 @@ where
                         continue 'outer;
                     }
                     true
-            } else {
-                false
             };
             // read from the stream

@@ -176,13 +176,13 @@ where
         }
         // sort results then take the configured percentile result
-        let mut price = if !results.is_empty() {
+        let mut price = if results.is_empty() {
+            inner.last_price.price
+        } else {
             results.sort_unstable();
             *results.get((results.len() - 1) * self.oracle_config.percentile as usize / 100).expect(
                 "gas price index is a percent of nonzero array length, so a value always exists",
             )
-        } else {
-            inner.last_price.price
         };
         // constrain to the max price
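
The percentile lookup indexes the sorted samples at (len - 1) * percentile / 100, which can never exceed len - 1, so the `expect` cannot fire on a nonzero-length array. A quick standalone check of that arithmetic (hypothetical helper, not the oracle's API):

    // Index of the configured percentile in a sorted, non-empty sample set.
    fn percentile_index(len: usize, percentile: usize) -> usize {
        (len - 1) * percentile / 100
    }

    fn main() {
        assert_eq!(percentile_index(10, 60), 5); // default 60th percentile of 10 samples
        assert_eq!(percentile_index(10, 100), 9); // worst case stays within bounds
    }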

@@ -507,12 +507,12 @@ mod tests {
         async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> {
             self.client.extend(headers.iter().map(|h| h.clone().unseal())).await;
-            let tip = if !headers.is_empty() {
-                headers.last().unwrap().hash()
-            } else {
+            let tip = if headers.is_empty() {
                 let tip = random_header(&mut generators::rng(), 0, None);
                 self.db.insert_headers(std::iter::once(&tip))?;
                 tip.hash()
+            } else {
+                headers.last().unwrap().hash()
             };
             self.send_tip(tip);
             Ok(())

@@ -316,7 +316,9 @@ impl<DB: Database> Stage<DB> for MerkleStage {
         }
         // Unwind trie only if there are transitions
-        if !range.is_empty() {
+        if range.is_empty() {
+            info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
+        } else {
             let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
                 .map_err(|e| StageError::Fatal(Box::new(e)))?;
@@ -330,8 +332,6 @@ impl<DB: Database> Stage<DB> for MerkleStage {
             provider.write_trie_updates(&updates)?;
             // TODO(alexey): update entities checkpoint
-        } else {
-            info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
         }
         Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })

@@ -57,10 +57,10 @@ impl<'a> EnumHandler<'a> {
                 FieldTypes::EnumUnnamedField((next_ftype, use_alt_impl)) => {
                     // This variant is of the type `EnumVariant(UnnamedField)`
                     let field_type = format_ident!("{next_ftype}");
-                    let from_compact_ident = if !use_alt_impl {
-                        format_ident!("from_compact")
-                    } else {
+                    let from_compact_ident = if *use_alt_impl {
                         format_ident!("specialized_from_compact")
+                    } else {
+                        format_ident!("from_compact")
                     };
                     // Unnamed type
@@ -98,10 +98,10 @@ impl<'a> EnumHandler<'a> {
         if let Some(next_field) = self.fields_iterator.peek() {
             match next_field {
                 FieldTypes::EnumUnnamedField((_, use_alt_impl)) => {
-                    let to_compact_ident = if !use_alt_impl {
-                        format_ident!("to_compact")
-                    } else {
+                    let to_compact_ident = if *use_alt_impl {
                         format_ident!("specialized_to_compact")
+                    } else {
+                        format_ident!("to_compact")
                     };
                     // Unnamed type

@@ -155,15 +155,15 @@ fn build_struct_field_flags(
 /// Returns the total number of bytes used by the flags struct and how many unused bits.
 fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec<TokenStream2>) -> (u8, u8) {
     let remaining = 8 - total_bits % 8;
-    if remaining != 8 {
+    if remaining == 8 {
+        (total_bits / 8, 0)
+    } else {
         let bsize = format_ident!("B{remaining}");
         field_flags.push(quote! {
             #[skip]
             unused: #bsize ,
         });
         ((total_bits + remaining) / 8, remaining)
-    } else {
-        (total_bits / 8, 0)
     }
 }
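
The arithmetic rounds the accumulated flag bits up to a whole byte and reports the filler: with 13 used bits, `remaining` is 3, so a `B3` skip field is emitted and the flags struct occupies 2 bytes. A standalone sketch of just the arithmetic (hypothetical `pad` helper, minus the TokenStream side effect):

    fn pad(total_bits: u8) -> (u8, u8) {
        let remaining = 8 - total_bits % 8;
        if remaining == 8 {
            (total_bits / 8, 0) // already byte-aligned: no unused bits
        } else {
            ((total_bits + remaining) / 8, remaining) // round up to a whole byte
        }
    }

    fn main() {
        assert_eq!(pad(13), (2, 3)); // 13 flag bits -> 2 bytes, 3 skipped bits
        assert_eq!(pad(16), (2, 0)); // exact multiple of 8 -> no padding field
    }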

@@ -46,10 +46,10 @@ impl<'a> StructHandler<'a> {
     fn to(&mut self, field_descriptor: &StructFieldDescriptor) {
         let (name, ftype, is_compact, use_alt_impl) = field_descriptor;
-        let to_compact_ident = if !use_alt_impl {
-            format_ident!("to_compact")
-        } else {
+        let to_compact_ident = if *use_alt_impl {
             format_ident!("specialized_to_compact")
+        } else {
+            format_ident!("to_compact")
         };
         // Should only happen on wrapper structs like `Struct(pub Field)`
@@ -108,10 +108,10 @@ impl<'a> StructHandler<'a> {
             (format_ident!("{name}"), format_ident!("{name}_len"))
         };
-        let from_compact_ident = if !use_alt_impl {
-            format_ident!("from_compact")
-        } else {
+        let from_compact_ident = if *use_alt_impl {
             format_ident!("specialized_from_compact")
+        } else {
+            format_ident!("from_compact")
         };
         // ! Be careful before changing the following assert ! Especially if the type does not

@@ -336,7 +336,12 @@ pub fn init_from_state_dump<DB: Database>(
     // compute and compare state root. this advances the stage checkpoints.
     let computed_state_root = compute_state_root(&provider_rw)?;
-    if computed_state_root != expected_state_root {
+    if computed_state_root == expected_state_root {
+        info!(target: "reth::cli",
+            ?computed_state_root,
+            "Computed state root matches state root in state dump"
+        );
+    } else {
         error!(target: "reth::cli",
             ?computed_state_root,
             ?expected_state_root,
@@ -344,11 +349,6 @@ pub fn init_from_state_dump<DB: Database>(
         );
         Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })?
-    } else {
-        info!(target: "reth::cli",
-            ?computed_state_root,
-            "Computed state root matches state root in state dump"
-        );
     }
     // insert sync stages for stages that require state

@@ -101,10 +101,10 @@ where
         assert_ne!(data_ptr, data_val.iov_base);
         let key_out = {
             // MDBX wrote in new key
-            if key_ptr != key_val.iov_base {
-                Some(Key::decode_val::<K>(txn, key_val)?)
-            } else {
+            if key_ptr == key_val.iov_base {
                 None
+            } else {
+                Some(Key::decode_val::<K>(txn, key_val)?)
             }
         };
         let data_out = Value::decode_val::<K>(txn, data_val)?;

@@ -645,10 +645,10 @@ impl<TX: DbTx> DatabaseProvider<TX> {
         let recovered = match (tx, sender) {
             (Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
-                if tx_id != sender_tx_id {
-                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                } else {
+                if tx_id == sender_tx_id {
                     Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
+                } else {
+                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
                 }
             }
             (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
@@ -1291,10 +1291,10 @@ impl<TX: DbTxMut + DbTx> DatabaseProvider<TX> {
         let recovered = match (tx, sender) {
             (Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
-                if tx_id != sender_tx_id {
-                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
-                } else {
+                if tx_id == sender_tx_id {
                     Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
+                } else {
+                    Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
                 }
             }
             (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {

@@ -304,13 +304,13 @@ where
     ) -> Result<(), UnifiedStorageWriterError> {
         match &self.static_file {
             Some(writer) => {
-                if writer.user_header().segment() != segment {
+                if writer.user_header().segment() == segment {
+                    Ok(())
+                } else {
                     Err(UnifiedStorageWriterError::IncorrectStaticFileWriter(
                         writer.user_header().segment(),
                         segment,
                     ))
-                } else {
-                    Ok(())
                 }
             }
             None => Err(UnifiedStorageWriterError::MissingStaticFileWriter),

@@ -275,14 +275,14 @@ where
         let mut old_entries: Vec<_> = new_entries
             .into_iter()
             .filter_map(|entry| {
-                let old = if !entry.value.is_zero() {
-                    storage.insert(entry.key, entry.value)
-                } else {
+                let old = if entry.value.is_zero() {
                     let old = storage.remove(&entry.key);
                     if matches!(old, Some(U256::ZERO)) {
                         return None
                     }
                     old
+                } else {
+                    storage.insert(entry.key, entry.value)
                 };
                 Some(StorageEntry { value: old.unwrap_or(U256::ZERO), ..entry })
             })
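
Branch order aside, the key detail here is that both `insert` and `remove` return the previous value, which becomes the changeset entry, and a zero write is treated as a delete. A standalone sketch of that idiom with a plain HashMap (hypothetical values):

    use std::collections::HashMap;

    fn main() {
        let mut storage: HashMap<u64, u64> = HashMap::from([(1, 7)]);
        // Nonzero new value: insert, keeping the old value for the changeset.
        let old = storage.insert(1, 9);
        assert_eq!(old, Some(7));
        // Zero new value: the slot is deleted instead, again yielding the old value.
        let old = storage.remove(&1);
        assert_eq!(old, Some(9));
    }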