Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
clippy: add if_not_else clippy lint (#10524)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
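The `if_not_else` lint flags `if !cond { .. } else { .. }` expressions whose branches could simply be swapped so the condition reads positively. Every hunk below applies that one mechanical rewrite; behavior is unchanged in each case. A minimal sketch of the pattern (illustrative names, not taken from this diff):

```rust
// Before: clippy::if_not_else warns on the negated condition.
// fn step(is_idle: bool) -> &'static str {
//     if !is_idle { "sync" } else { "advance" }
// }

// After: positive condition first, branches swapped; behavior is identical.
fn step(is_idle: bool) -> &'static str {
    if is_idle {
        "advance"
    } else {
        "sync"
    }
}

fn main() {
    assert_eq!(step(true), "advance");
    assert_eq!(step(false), "sync");
}
```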
@ -175,6 +175,7 @@ equatable_if_let = "warn"
|
|||||||
explicit_into_iter_loop = "warn"
|
explicit_into_iter_loop = "warn"
|
||||||
explicit_iter_loop = "warn"
|
explicit_iter_loop = "warn"
|
||||||
flat_map_option = "warn"
|
flat_map_option = "warn"
|
||||||
|
if_not_else = "warn"
|
||||||
imprecise_flops = "warn"
|
imprecise_flops = "warn"
|
||||||
iter_on_empty_collections = "warn"
|
iter_on_empty_collections = "warn"
|
||||||
iter_on_single_items = "warn"
|
iter_on_single_items = "warn"
|
||||||
|
|||||||
@@ -68,10 +68,10 @@ impl EngineHooksController
                 "Polled running hook with db write access"
             );

-            if !result.event.is_finished() {
-                self.active_db_write_hook = Some(hook);
-            } else {
+            if result.event.is_finished() {
                 self.hooks.push_back(hook);
+            } else {
+                self.active_db_write_hook = Some(hook);
             }

             return Poll::Ready(Ok(result))
@@ -652,13 +652,7 @@ where
             return Ok(TreeOutcome::new(status))
         }

-        let status = if !self.backfill_sync_state.is_idle() {
-            if let Err(error) = self.buffer_block_without_senders(block) {
-                self.on_insert_block_error(error)?
-            } else {
-                PayloadStatus::from_status(PayloadStatusEnum::Syncing)
-            }
-        } else {
+        let status = if self.backfill_sync_state.is_idle() {
             let mut latest_valid_hash = None;
             let num_hash = block.num_hash();
             match self.insert_block_without_senders(block) {
@@ -684,6 +678,10 @@ where
                 }
                 Err(error) => self.on_insert_block_error(error)?,
             }
+        } else if let Err(error) = self.buffer_block_without_senders(block) {
+            self.on_insert_block_error(error)?
+        } else {
+            PayloadStatus::from_status(PayloadStatusEnum::Syncing)
         };

         let mut outcome = TreeOutcome::new(status);
@@ -862,12 +860,12 @@ where
     fn advance_persistence(&mut self) -> Result<(), TryRecvError> {
         if self.should_persist() && !self.persistence_state.in_progress() {
             let blocks_to_persist = self.get_canonical_blocks_to_persist();
-            if !blocks_to_persist.is_empty() {
+            if blocks_to_persist.is_empty() {
+                debug!(target: "engine", "Returned empty set of blocks to persist");
+            } else {
                 let (tx, rx) = oneshot::channel();
                 let _ = self.persistence.save_blocks(blocks_to_persist, tx);
                 self.persistence_state.start(rx);
-            } else {
-                debug!(target: "engine", "Returned empty set of blocks to persist");
             }
         }

@@ -1024,13 +1024,7 @@ where
                     entry.get_mut().insert(peer_id);
                 }
                 Entry::Vacant(entry) => {
-                    if !self.bad_imports.contains(tx.hash()) {
-                        // this is a new transaction that should be imported into the pool
-                        let pool_transaction = Pool::Transaction::from_pooled(tx);
-                        new_txs.push(pool_transaction);
-
-                        entry.insert(HashSet::from([peer_id]));
-                    } else {
+                    if self.bad_imports.contains(tx.hash()) {
                         trace!(target: "net::tx",
                             peer_id=format!("{peer_id:#}"),
                             hash=%tx.hash(),
@@ -1038,6 +1032,12 @@ where
                             "received a known bad transaction from peer"
                         );
                         has_bad_transactions = true;
+                    } else {
+                        // this is a new transaction that should be imported into the pool
+                        let pool_transaction = Pool::Transaction::from_pooled(tx);
+                        new_txs.push(pool_transaction);
+
+                        entry.insert(HashSet::from([peer_id]));
                     }
                 }
             }
@@ -88,10 +88,10 @@ pub trait PartiallyFilterMessage {
         let partially_valid_data = msg.dedup();

         (
-            if partially_valid_data.len() != original_len {
-                FilterOutcome::ReportPeer
-            } else {
+            if partially_valid_data.len() == original_len {
                 FilterOutcome::Ok
+            } else {
+                FilterOutcome::ReportPeer
             },
             partially_valid_data,
         )
@@ -184,12 +184,12 @@ where
                 let (peer, maybe_header) =
                     maybe_header.map(|h| h.map(|h| h.seal_slow())).split();
                 if let Some(header) = maybe_header {
-                    if header.hash() != this.hash {
+                    if header.hash() == this.hash {
+                        this.header = Some(header);
+                    } else {
                         debug!(target: "downloaders", expected=?this.hash, received=?header.hash(), "Received wrong header");
                         // received a different header than requested
                         this.client.report_bad_message(peer)
-                    } else {
-                        this.header = Some(header);
                     }
                 }
             }
@@ -491,10 +491,7 @@ where
         headers_falling.sort_unstable_by_key(|h| Reverse(h.number));

         // check the starting hash
-        if headers_falling[0].hash() != self.start_hash {
-            // received a different header than requested
-            self.client.report_bad_message(peer);
-        } else {
+        if headers_falling[0].hash() == self.start_hash {
             let headers_rising = headers_falling.iter().rev().cloned().collect::<Vec<_>>();
             // check if the downloaded headers are valid
             if let Err(err) = self.consensus.validate_header_range(&headers_rising) {
@@ -516,6 +513,9 @@ where

             // set the headers response
             self.headers = Some(headers_falling);
+        } else {
+            // received a different header than requested
+            self.client.report_bad_message(peer);
         }
     }
 }
@@ -1,4 +1,4 @@
 //! Loads and formats OP transaction RPC response.

 use std::sync::Arc;

@@ -93,7 +93,9 @@ where
     ) -> Result<OptimismTxMeta, <Self as EthApiTypes>::Error> {
         let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) };

-        let (l1_fee, l1_data_gas) = if !tx.is_deposit() {
+        let (l1_fee, l1_data_gas) = if tx.is_deposit() {
+            (None, None)
+        } else {
             let envelope_buf = tx.envelope_encoded();

             let inner_l1_fee = l1_block_info
@@ -106,8 +108,6 @@ where
                 Some(inner_l1_fee.saturating_to::<u128>()),
                 Some(inner_l1_data_gas.saturating_to::<u128>()),
             )
-        } else {
-            (None, None)
         };

         Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas))
@@ -1459,7 +1459,14 @@ impl Decodable for TransactionSigned {
         let remaining_len = buf.len();

         // if the transaction is encoded as a string then it is a typed transaction
-        if !header.list {
+        if header.list {
+            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
+
+            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
+            // buffer
+            *buf = original_encoding;
+            Ok(tx)
+        } else {
             let tx = Self::decode_enveloped_typed_transaction(buf)?;

             let bytes_consumed = remaining_len - buf.len();
@@ -1470,13 +1477,6 @@ impl Decodable for TransactionSigned {
                 return Err(RlpError::UnexpectedLength)
             }

-            Ok(tx)
-        } else {
-            let tx = Self::decode_rlp_legacy_transaction(&mut original_encoding)?;
-
-            // advance the buffer based on how far `decode_rlp_legacy_transaction` advanced the
-            // buffer
-            *buf = original_encoding;
             Ok(tx)
         }
     }
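The two decode hunks above hinge on the RLP framing rule the comment states: a legacy transaction is encoded as an RLP list, while a typed (EIP-2718) envelope is encoded as an RLP string. A small sketch of that dispatch using `alloy_rlp` (an assumed dependency here; `is_legacy` is a hypothetical helper, not reth's API):

```rust
use alloy_rlp::Header;

// Peeks at the RLP header to classify the payload: list framing means a
// legacy transaction, string framing means a typed (EIP-2718) envelope.
fn is_legacy(mut buf: &[u8]) -> alloy_rlp::Result<bool> {
    let header = Header::decode(&mut buf)?;
    Ok(header.list)
}

fn main() -> alloy_rlp::Result<()> {
    assert!(is_legacy(&[0xc0])?);  // 0xc0 = empty list -> legacy framing
    assert!(!is_legacy(&[0x80])?); // 0x80 = empty string -> typed envelope
    Ok(())
}
```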
@ -1493,7 +1493,7 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned {
|
|||||||
|
|
||||||
if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction {
|
if let Transaction::Eip4844(ref mut tx_eip_4844) = transaction {
|
||||||
tx_eip_4844.placeholder =
|
tx_eip_4844.placeholder =
|
||||||
if tx_eip_4844.to != Address::default() { Some(()) } else { None };
|
if tx_eip_4844.to == Address::default() { None } else { Some(()) };
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "optimism")]
|
#[cfg(feature = "optimism")]
|
||||||
|
|||||||
@@ -107,54 +107,54 @@ where

         // If there were blocks less than or equal to the target one
         // (so the shard has changed), update the shard.
-        if blocks.len() as usize != higher_blocks.len() {
-            // If there will be no more blocks in the shard after pruning blocks below target
-            // block, we need to remove it, as empty shards are not allowed.
-            if higher_blocks.is_empty() {
-                if key.as_ref().highest_block_number == u64::MAX {
-                    let prev_row = cursor
-                        .prev()?
-                        .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
-                        .transpose()?;
-                    match prev_row {
-                        // If current shard is the last shard for the sharded key that
-                        // has previous shards, replace it with the previous shard.
-                        Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
-                            cursor.delete_current()?;
-                            // Upsert will replace the last shard for this sharded key with
-                            // the previous value.
-                            cursor.upsert(RawKey::new(key), prev_value)?;
-                            Ok(PruneShardOutcome::Updated)
-                        }
-                        // If there's no previous shard for this sharded key,
-                        // just delete last shard completely.
-                        _ => {
-                            // If we successfully moved the cursor to a previous row,
-                            // jump to the original last shard.
-                            if prev_row.is_some() {
-                                cursor.next()?;
-                            }
-                            // Delete shard.
-                            cursor.delete_current()?;
-                            Ok(PruneShardOutcome::Deleted)
+        if blocks.len() as usize == higher_blocks.len() {
+            return Ok(PruneShardOutcome::Unchanged);
+        }
+
+        // If there will be no more blocks in the shard after pruning blocks below target
+        // block, we need to remove it, as empty shards are not allowed.
+        if higher_blocks.is_empty() {
+            if key.as_ref().highest_block_number == u64::MAX {
+                let prev_row = cursor
+                    .prev()?
+                    .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v)))
+                    .transpose()?;
+                match prev_row {
+                    // If current shard is the last shard for the sharded key that
+                    // has previous shards, replace it with the previous shard.
+                    Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => {
+                        cursor.delete_current()?;
+                        // Upsert will replace the last shard for this sharded key with
+                        // the previous value.
+                        cursor.upsert(RawKey::new(key), prev_value)?;
+                        Ok(PruneShardOutcome::Updated)
+                    }
+                    // If there's no previous shard for this sharded key,
+                    // just delete last shard completely.
+                    _ => {
+                        // If we successfully moved the cursor to a previous row,
+                        // jump to the original last shard.
+                        if prev_row.is_some() {
+                            cursor.next()?;
                         }
+
+                        // Delete shard.
+                        cursor.delete_current()?;
+                        Ok(PruneShardOutcome::Deleted)
                     }
                 }
-                // If current shard is not the last shard for this sharded key,
-                // just delete it.
-                else {
-                    cursor.delete_current()?;
-                    Ok(PruneShardOutcome::Deleted)
-                }
-            } else {
-                cursor.upsert(
-                    RawKey::new(key),
-                    RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
-                )?;
-                Ok(PruneShardOutcome::Updated)
+            }
+            // If current shard is not the last shard for this sharded key,
+            // just delete it.
+            else {
+                cursor.delete_current()?;
+                Ok(PruneShardOutcome::Deleted)
             }
-        } else {
-            Ok(PruneShardOutcome::Unchanged)
+        } else {
+            cursor.upsert(
+                RawKey::new(key),
+                RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)),
+            )?;
+            Ok(PruneShardOutcome::Updated)
         }
     }
 }
@@ -119,7 +119,9 @@ where

         'inner: loop {
             // drain all calls that are ready and put them in the output item queue
-            let drained = if !this.pending_calls.is_empty() {
+            let drained = if this.pending_calls.is_empty() {
+                false
+            } else {
                 if let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) {
                     let item = match res {
                         Ok(Some(resp)) => resp,
@@ -130,8 +132,6 @@ where
                         continue 'outer;
                     }
                     true
-                } else {
-                    false
                 }
             };

             // read from the stream
@@ -176,13 +176,13 @@ where
         }

         // sort results then take the configured percentile result
-        let mut price = if !results.is_empty() {
+        let mut price = if results.is_empty() {
+            inner.last_price.price
+        } else {
             results.sort_unstable();
             *results.get((results.len() - 1) * self.oracle_config.percentile as usize / 100).expect(
                 "gas price index is a percent of nonzero array length, so a value always exists",
             )
-        } else {
-            inner.last_price.price
         };

         // constrain to the max price
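The index expression in the oracle hunk above, `(results.len() - 1) * percentile / 100`, picks a percentile element from the sorted rewards. A worked check of just that arithmetic (a standalone sketch, not the oracle's API):

```rust
// Standalone check of the percentile index used by the gas price oracle hunk.
fn percentile_index(len: usize, percentile: usize) -> usize {
    (len - 1) * percentile / 100
}

fn main() {
    let results = [1u64, 2, 3, 5, 8, 13, 21, 34, 55, 89]; // already sorted
    let idx = percentile_index(results.len(), 60);
    assert_eq!(idx, 5); // (10 - 1) * 60 / 100 = 5
    assert_eq!(results[idx], 13);
}
```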
@@ -507,12 +507,12 @@ mod tests {

     async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> {
         self.client.extend(headers.iter().map(|h| h.clone().unseal())).await;
-        let tip = if !headers.is_empty() {
-            headers.last().unwrap().hash()
-        } else {
+        let tip = if headers.is_empty() {
             let tip = random_header(&mut generators::rng(), 0, None);
             self.db.insert_headers(std::iter::once(&tip))?;
             tip.hash()
+        } else {
+            headers.last().unwrap().hash()
         };
         self.send_tip(tip);
         Ok(())
@ -316,7 +316,9 @@ impl<DB: Database> Stage<DB> for MerkleStage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Unwind trie only if there are transitions
|
// Unwind trie only if there are transitions
|
||||||
if !range.is_empty() {
|
if range.is_empty() {
|
||||||
|
info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
|
||||||
|
} else {
|
||||||
let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
|
let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
|
||||||
.map_err(|e| StageError::Fatal(Box::new(e)))?;
|
.map_err(|e| StageError::Fatal(Box::new(e)))?;
|
||||||
|
|
||||||
@ -330,8 +332,6 @@ impl<DB: Database> Stage<DB> for MerkleStage {
|
|||||||
provider.write_trie_updates(&updates)?;
|
provider.write_trie_updates(&updates)?;
|
||||||
|
|
||||||
// TODO(alexey): update entities checkpoint
|
// TODO(alexey): update entities checkpoint
|
||||||
} else {
|
|
||||||
info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
|
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
|
||||||
|
|||||||
@ -57,10 +57,10 @@ impl<'a> EnumHandler<'a> {
|
|||||||
FieldTypes::EnumUnnamedField((next_ftype, use_alt_impl)) => {
|
FieldTypes::EnumUnnamedField((next_ftype, use_alt_impl)) => {
|
||||||
// This variant is of the type `EnumVariant(UnnamedField)`
|
// This variant is of the type `EnumVariant(UnnamedField)`
|
||||||
let field_type = format_ident!("{next_ftype}");
|
let field_type = format_ident!("{next_ftype}");
|
||||||
let from_compact_ident = if !use_alt_impl {
|
let from_compact_ident = if *use_alt_impl {
|
||||||
format_ident!("from_compact")
|
|
||||||
} else {
|
|
||||||
format_ident!("specialized_from_compact")
|
format_ident!("specialized_from_compact")
|
||||||
|
} else {
|
||||||
|
format_ident!("from_compact")
|
||||||
};
|
};
|
||||||
|
|
||||||
// Unnamed type
|
// Unnamed type
|
||||||
@ -98,10 +98,10 @@ impl<'a> EnumHandler<'a> {
|
|||||||
if let Some(next_field) = self.fields_iterator.peek() {
|
if let Some(next_field) = self.fields_iterator.peek() {
|
||||||
match next_field {
|
match next_field {
|
||||||
FieldTypes::EnumUnnamedField((_, use_alt_impl)) => {
|
FieldTypes::EnumUnnamedField((_, use_alt_impl)) => {
|
||||||
let to_compact_ident = if !use_alt_impl {
|
let to_compact_ident = if *use_alt_impl {
|
||||||
format_ident!("to_compact")
|
|
||||||
} else {
|
|
||||||
format_ident!("specialized_to_compact")
|
format_ident!("specialized_to_compact")
|
||||||
|
} else {
|
||||||
|
format_ident!("to_compact")
|
||||||
};
|
};
|
||||||
|
|
||||||
// Unnamed type
|
// Unnamed type
|
||||||
|
|||||||
@@ -155,15 +155,15 @@ fn build_struct_field_flags(
 /// Returns the total number of bytes used by the flags struct and how many unused bits.
 fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec<TokenStream2>) -> (u8, u8) {
     let remaining = 8 - total_bits % 8;
-    if remaining != 8 {
+    if remaining == 8 {
+        (total_bits / 8, 0)
+    } else {
         let bsize = format_ident!("B{remaining}");
         field_flags.push(quote! {
             #[skip]
             unused: #bsize ,
         });
         ((total_bits + remaining) / 8, remaining)
-    } else {
-        (total_bits / 8, 0)
     }
 }

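The flipped condition in `pad_flag_struct` is easiest to see with the bit math alone: when `total_bits` is already a multiple of 8, `8 - total_bits % 8` evaluates to 8 and no padding field is needed. A sketch with the token-stream side stubbed out:

```rust
// Only the arithmetic from pad_flag_struct; the quote!/format_ident! parts
// that emit the #[skip] padding field are omitted.
fn pad(total_bits: u8) -> (u8, u8) {
    let remaining = 8 - total_bits % 8;
    if remaining == 8 {
        (total_bits / 8, 0) // already byte-aligned: no unused bits
    } else {
        ((total_bits + remaining) / 8, remaining) // round up, report padding
    }
}

fn main() {
    assert_eq!(pad(16), (2, 0)); // 16 bits fill two bytes exactly
    assert_eq!(pad(13), (2, 3)); // 13 bits need 2 bytes with 3 skipped bits
}
```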
@ -46,10 +46,10 @@ impl<'a> StructHandler<'a> {
|
|||||||
fn to(&mut self, field_descriptor: &StructFieldDescriptor) {
|
fn to(&mut self, field_descriptor: &StructFieldDescriptor) {
|
||||||
let (name, ftype, is_compact, use_alt_impl) = field_descriptor;
|
let (name, ftype, is_compact, use_alt_impl) = field_descriptor;
|
||||||
|
|
||||||
let to_compact_ident = if !use_alt_impl {
|
let to_compact_ident = if *use_alt_impl {
|
||||||
format_ident!("to_compact")
|
|
||||||
} else {
|
|
||||||
format_ident!("specialized_to_compact")
|
format_ident!("specialized_to_compact")
|
||||||
|
} else {
|
||||||
|
format_ident!("to_compact")
|
||||||
};
|
};
|
||||||
|
|
||||||
// Should only happen on wrapper structs like `Struct(pub Field)`
|
// Should only happen on wrapper structs like `Struct(pub Field)`
|
||||||
@ -108,10 +108,10 @@ impl<'a> StructHandler<'a> {
|
|||||||
(format_ident!("{name}"), format_ident!("{name}_len"))
|
(format_ident!("{name}"), format_ident!("{name}_len"))
|
||||||
};
|
};
|
||||||
|
|
||||||
let from_compact_ident = if !use_alt_impl {
|
let from_compact_ident = if *use_alt_impl {
|
||||||
format_ident!("from_compact")
|
|
||||||
} else {
|
|
||||||
format_ident!("specialized_from_compact")
|
format_ident!("specialized_from_compact")
|
||||||
|
} else {
|
||||||
|
format_ident!("from_compact")
|
||||||
};
|
};
|
||||||
|
|
||||||
// ! Be careful before changing the following assert ! Especially if the type does not
|
// ! Be careful before changing the following assert ! Especially if the type does not
|
||||||
|
|||||||
@ -336,7 +336,12 @@ pub fn init_from_state_dump<DB: Database>(
|
|||||||
|
|
||||||
// compute and compare state root. this advances the stage checkpoints.
|
// compute and compare state root. this advances the stage checkpoints.
|
||||||
let computed_state_root = compute_state_root(&provider_rw)?;
|
let computed_state_root = compute_state_root(&provider_rw)?;
|
||||||
if computed_state_root != expected_state_root {
|
if computed_state_root == expected_state_root {
|
||||||
|
info!(target: "reth::cli",
|
||||||
|
?computed_state_root,
|
||||||
|
"Computed state root matches state root in state dump"
|
||||||
|
);
|
||||||
|
} else {
|
||||||
error!(target: "reth::cli",
|
error!(target: "reth::cli",
|
||||||
?computed_state_root,
|
?computed_state_root,
|
||||||
?expected_state_root,
|
?expected_state_root,
|
||||||
@@ -344,11 +349,6 @@ pub fn init_from_state_dump<DB: Database>(
         );

         Err(InitDatabaseError::StateRootMismatch { expected_state_root, computed_state_root })?
-    } else {
-        info!(target: "reth::cli",
-            ?computed_state_root,
-            "Computed state root matches state root in state dump"
-        );
     }

     // insert sync stages for stages that require state
@@ -101,10 +101,10 @@ where
     assert_ne!(data_ptr, data_val.iov_base);
     let key_out = {
         // MDBX wrote in new key
-        if key_ptr != key_val.iov_base {
-            Some(Key::decode_val::<K>(txn, key_val)?)
-        } else {
+        if key_ptr == key_val.iov_base {
             None
+        } else {
+            Some(Key::decode_val::<K>(txn, key_val)?)
         }
     };
     let data_out = Value::decode_val::<K>(txn, data_val)?;
@ -645,10 +645,10 @@ impl<TX: DbTx> DatabaseProvider<TX> {
|
|||||||
|
|
||||||
let recovered = match (tx, sender) {
|
let recovered = match (tx, sender) {
|
||||||
(Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
|
(Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
|
||||||
if tx_id != sender_tx_id {
|
if tx_id == sender_tx_id {
|
||||||
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
|
|
||||||
} else {
|
|
||||||
Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
|
Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
|
||||||
|
} else {
|
||||||
|
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
|
(Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
|
||||||
@ -1291,10 +1291,10 @@ impl<TX: DbTxMut + DbTx> DatabaseProvider<TX> {
|
|||||||
|
|
||||||
let recovered = match (tx, sender) {
|
let recovered = match (tx, sender) {
|
||||||
(Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
|
(Some((tx_id, tx)), Some((sender_tx_id, sender))) => {
|
||||||
if tx_id != sender_tx_id {
|
if tx_id == sender_tx_id {
|
||||||
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
|
|
||||||
} else {
|
|
||||||
Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
|
Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender))
|
||||||
|
} else {
|
||||||
|
Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
|
(Some((tx_id, _)), _) | (_, Some((tx_id, _))) => {
|
||||||
|
|||||||
@@ -304,13 +304,13 @@ where
     ) -> Result<(), UnifiedStorageWriterError> {
         match &self.static_file {
             Some(writer) => {
-                if writer.user_header().segment() != segment {
+                if writer.user_header().segment() == segment {
+                    Ok(())
+                } else {
                     Err(UnifiedStorageWriterError::IncorrectStaticFileWriter(
                         writer.user_header().segment(),
                         segment,
                     ))
-                } else {
-                    Ok(())
                 }
             }
             None => Err(UnifiedStorageWriterError::MissingStaticFileWriter),
@@ -275,14 +275,14 @@ where
         let mut old_entries: Vec<_> = new_entries
             .into_iter()
             .filter_map(|entry| {
-                let old = if !entry.value.is_zero() {
-                    storage.insert(entry.key, entry.value)
-                } else {
+                let old = if entry.value.is_zero() {
                     let old = storage.remove(&entry.key);
                     if matches!(old, Some(U256::ZERO)) {
                         return None
                     }
                     old
+                } else {
+                    storage.insert(entry.key, entry.value)
                 };
                 Some(StorageEntry { value: old.unwrap_or(U256::ZERO), ..entry })
             })
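The last hunk relies on both halves of the branch returning the previous value: `insert` and `remove` on the plain-state map each hand back what was stored before. A sketch of that bookkeeping with a `HashMap` standing in for the writer's storage (an assumption; the real storage type isn't shown in this diff):

```rust
use std::collections::HashMap;

fn main() {
    // HashMap stands in for the plain-state storage map (hypothetical here).
    let mut storage: HashMap<&str, u64> = HashMap::new();

    // Non-zero new value: insert, which returns the previous value (if any).
    let old = storage.insert("slot", 7);
    assert_eq!(old, None);

    // Zero new value: remove, which also returns the previous value.
    let old = storage.remove("slot");
    assert_eq!(old, Some(7));
}
```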