fix(ChainTree): set first block on forked chain (#6821)

rakita
2024-02-27 13:52:30 +01:00
committed by GitHub
parent 8d258bb7e7
commit 9a949e905b
8 changed files with 26 additions and 21 deletions

View File

@@ -1756,11 +1756,15 @@ mod tests {
 ]))
 .assert(&tree);
 // chain 0 has two blocks so receipts and reverts len is 2
-assert_eq!(tree.state.chains.get(&0.into()).unwrap().state().receipts().len(), 2);
-assert_eq!(tree.state.chains.get(&0.into()).unwrap().state().state().reverts.len(), 2);
+let chain0 = tree.state.chains.get(&0.into()).unwrap().state();
+assert_eq!(chain0.receipts().len(), 2);
+assert_eq!(chain0.state().reverts.len(), 2);
+assert_eq!(chain0.first_block(), block1.number);
 // chain 1 has one block so receipts and reverts len is 1
-assert_eq!(tree.state.chains.get(&1.into()).unwrap().state().receipts().len(), 1);
-assert_eq!(tree.state.chains.get(&1.into()).unwrap().state().state().reverts.len(), 1);
+let chain1 = tree.state.chains.get(&1.into()).unwrap().state();
+assert_eq!(chain1.receipts().len(), 1);
+assert_eq!(chain1.state().reverts.len(), 1);
+assert_eq!(chain1.first_block(), block2.number);
 }
 #[test]

View File

@@ -149,6 +149,7 @@ impl AppendableChain {
 let size = state.receipts().len();
 state.receipts_mut().drain(0..size - 1);
 state.state_mut().take_n_reverts(size - 1);
+state.set_first_block(block.number);
 // If all is okay, return new chain back. Present chain is not modified.
 Ok(Self { chain: Chain::from_block(block, state, None) })
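Why the one added line matters: the per-chain state holds receipts and reverts as one entry per block, indexed relative to its first block, so a fork that keeps only the split block's entries must also rebase that marker. A minimal, self-contained sketch of the idea (hypothetical simplified types, not reth's actual state API):

// Hypothetical, simplified model of a chain's execution state: receipts and
// reverts hold one entry per block, starting at `first_block`.
#[derive(Debug)]
struct ChainState {
    first_block: u64,
    receipts: Vec<Vec<&'static str>>,
    reverts: Vec<Vec<&'static str>>,
}

impl ChainState {
    /// Block number that a given receipts/reverts index refers to.
    fn block_at(&self, idx: usize) -> u64 {
        self.first_block + idx as u64
    }

    /// Fork off a single-block chain for `block_number`, mirroring the shape of
    /// the diff: keep only the last block's entries and rebase `first_block`.
    fn fork_at_tip(mut self, block_number: u64) -> Self {
        let size = self.receipts.len(); // assumes receipts and reverts have the same length
        self.receipts.drain(0..size - 1);
        self.reverts.drain(0..size - 1);
        // Without this line (the bug being fixed), `first_block` would still point
        // at the parent chain's first block and `block_at` lookups would be off.
        self.first_block = block_number;
        self
    }
}

fn main() {
    let chain = ChainState {
        first_block: 1,
        receipts: vec![vec!["receipt of block 1"], vec!["receipt of block 2"]],
        reverts: vec![vec!["revert of block 1"], vec!["revert of block 2"]],
    };
    let fork = chain.fork_at_tip(2);
    assert_eq!(fork.receipts.len(), 1);
    assert_eq!(fork.first_block, 2);
    assert_eq!(fork.block_at(0), 2);
    println!("forked chain starts at block {}", fork.first_block);
}

The test hunk above asserts the same thing at the tree level: the forked chain 1 now reports block2.number as its first block.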

View File

@@ -206,7 +206,7 @@ impl TransactionFetcher {
 // tx is really big, pack request with single tx
 if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request {
-return hashes_from_announcement_iter.collect::<RequestTxHashes>();
+return hashes_from_announcement_iter.collect::<RequestTxHashes>()
 } else {
 acc_size_response = size;
 }
@@ -332,7 +332,7 @@ impl TransactionFetcher {
 );
 max_retried_and_evicted_hashes.push(hash);
-continue;
+continue
 }
 *retries += 1;
 }
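The second hunk's context shows the retry bookkeeping around that `continue`: hashes that have exhausted their retry budget are pushed to `max_retried_and_evicted_hashes` and skipped, everything else gets its counter bumped. A generic, self-contained sketch of that pattern (hypothetical names and limits, not the actual `TransactionFetcher` logic):

// Hypothetical sketch of retry-budget bookkeeping: hashes that already hit the
// retry limit are set aside instead of being re-requested; the rest get their
// retry counter bumped.
use std::collections::HashMap;

const MAX_RETRIES: u8 = 2;

fn select_for_rerequest(
    candidates: Vec<u64>,
    retries: &mut HashMap<u64, u8>,
) -> (Vec<u64>, Vec<u64>) {
    let mut rerequest = Vec::new();
    let mut max_retried = Vec::new();
    for hash in candidates {
        let count = retries.entry(hash).or_insert(0);
        if *count >= MAX_RETRIES {
            // analogous to `max_retried_and_evicted_hashes.push(hash); continue`
            max_retried.push(hash);
            continue
        }
        *count += 1;
        rerequest.push(hash);
    }
    (rerequest, max_retried)
}

fn main() {
    let mut retries = HashMap::from([(1u64, 2u8), (2, 0)]);
    let (rerequest, evicted) = select_for_rerequest(vec![1, 2, 3], &mut retries);
    assert_eq!(rerequest, vec![2, 3]);
    assert_eq!(evicted, vec![1]);
    println!("re-request {rerequest:?}, evicted {evicted:?}");
}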

View File

@@ -159,7 +159,7 @@ impl PayloadAttributes for OptimismPayloadAttributes {
 if self.gas_limit.is_none() && chain_spec.is_optimism() {
 return Err(AttributesValidationError::InvalidParams(
 "MissingGasLimitInPayloadAttributes".to_string().into(),
-));
+))
 }
 Ok(())

View File

@@ -142,7 +142,7 @@ where
 if filter.block > best_number {
 // no new blocks since the last poll
-return Ok(FilterChanges::Empty);
+return Ok(FilterChanges::Empty)
 }
 // update filter
@@ -211,7 +211,7 @@ where
 *filter.clone()
 } else {
 // Not a log filter
-return Err(FilterError::FilterNotFound(id));
+return Err(FilterError::FilterNotFound(id))
 }
 };
@@ -420,7 +420,7 @@ where
 let best_number = chain_info.best_number;
 if to_block - from_block > self.max_blocks_per_filter {
-return Err(FilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter));
+return Err(FilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter))
 }
 let mut all_logs = Vec::new();
@@ -440,7 +440,7 @@ where
 false,
 )?;
 }
-return Ok(all_logs);
+return Ok(all_logs)
 }
 // derive bloom filters from filter input, so we can check headers for matching logs
@@ -485,7 +485,7 @@ where
 if is_multi_block_range && all_logs.len() > self.max_logs_per_response {
 return Err(FilterError::QueryExceedsMaxResults(
 self.max_logs_per_response,
-));
+))
 }
 }
 }
@@ -725,7 +725,7 @@ impl Iterator for BlockRangeInclusiveIter {
 let start = self.iter.next()?;
 let end = (start + self.step).min(self.end);
 if start > end {
-return None;
+return None
 }
 Some((start, end))
 }
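The final hunk lives in `BlockRangeInclusiveIter`, which walks a block range in fixed-size inclusive windows. A self-contained sketch of the same chunking idea (hypothetical type, assuming the iterator's behavior only from the context lines shown here):

/// Hypothetical stand-in for a range chunking iterator: yields inclusive
/// (start, end) windows of at most `step + 1` blocks over `start..=end`.
struct BlockRangeChunks {
    iter: std::ops::StepBy<std::ops::RangeInclusive<u64>>,
    step: u64,
    end: u64,
}

impl BlockRangeChunks {
    fn new(start: u64, end: u64, step: u64) -> Self {
        Self { iter: (start..=end).step_by(step as usize + 1), step, end }
    }
}

impl Iterator for BlockRangeChunks {
    type Item = (u64, u64);

    fn next(&mut self) -> Option<Self::Item> {
        // Same shape as the diff context: take the next window start, clamp the
        // window end to the overall range end, and stop once start passes end.
        let start = self.iter.next()?;
        let end = (start + self.step).min(self.end);
        if start > end {
            return None
        }
        Some((start, end))
    }
}

fn main() {
    // 0..=10 in windows of up to 4 blocks: (0,3), (4,7), (8,10).
    let chunks: Vec<_> = BlockRangeChunks::new(0, 10, 3).collect();
    assert_eq!(chunks, vec![(0, 3), (4, 7), (8, 10)]);
    println!("{chunks:?}");
}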

View File

@@ -96,7 +96,7 @@ impl ExecInput {
 if all_tx_cnt == 0 {
 // if there is no more transaction return back.
-return Ok((first_tx_num..first_tx_num, start_block..=target_block, true));
+return Ok((first_tx_num..first_tx_num, start_block..=target_block, true))
 }
 // get block of this tx
// get block of this tx

View File

@@ -107,14 +107,14 @@ impl BlobStore for DiskFileBlobStore {
 txs: Vec<B256>,
 ) -> Result<Vec<(B256, BlobTransactionSidecar)>, BlobStoreError> {
 if txs.is_empty() {
-return Ok(Vec::new());
+return Ok(Vec::new())
 }
 self.inner.get_all(txs)
 }
 fn get_exact(&self, txs: Vec<B256>) -> Result<Vec<BlobTransactionSidecar>, BlobStoreError> {
 if txs.is_empty() {
-return Ok(Vec::new());
+return Ok(Vec::new())
 }
 self.inner.get_exact(txs)
 }
@@ -215,7 +215,7 @@ impl DiskFileBlobStoreInner {
 /// Returns true if the blob for the given transaction hash is in the blob cache or on disk.
 fn contains(&self, tx: B256) -> Result<bool, BlobStoreError> {
 if self.blob_cache.lock().get(&tx).is_some() {
-return Ok(true);
+return Ok(true)
 }
 // we only check if the file exists and assume it's valid
 Ok(self.blob_disk_file(tx).is_file())
@@ -224,7 +224,7 @@ impl DiskFileBlobStoreInner {
 /// Retrieves the blob for the given transaction hash from the blob cache or disk.
 fn get_one(&self, tx: B256) -> Result<Option<BlobTransactionSidecar>, BlobStoreError> {
 if let Some(blob) = self.blob_cache.lock().get(&tx) {
-return Ok(Some(blob.clone()));
+return Ok(Some(blob.clone()))
 }
 let blob = self.read_one(tx)?;
 if let Some(blob) = &blob {
@@ -323,11 +323,11 @@ impl DiskFileBlobStoreInner {
 }
 }
 if cache_miss.is_empty() {
-return Ok(res);
+return Ok(res)
 }
 let from_disk = self.read_many_decoded(cache_miss);
 if from_disk.is_empty() {
-return Ok(res);
+return Ok(res)
 }
 let mut cache = self.blob_cache.lock();
 for (tx, data) in from_disk {
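
The `contains` / `get_one` / cache-miss hunks all follow the same read-through shape: check the in-memory blob cache first, fall back to disk, and cache what was read. A minimal sketch of that pattern with plain `HashMap`s standing in for reth's LRU cache and on-disk blob files (hypothetical store, not the `DiskFileBlobStore` API):

use std::collections::HashMap;

/// Hypothetical read-through store: an in-memory cache in front of a slower
/// "disk" lookup (modeled here as another map).
struct ReadThroughStore {
    cache: HashMap<u64, String>,
    disk: HashMap<u64, String>,
}

impl ReadThroughStore {
    /// Mirrors the shape of the diff's `get_one`: serve from cache if present,
    /// otherwise read from disk and remember the result for next time.
    fn get_one(&mut self, key: u64) -> Option<String> {
        if let Some(value) = self.cache.get(&key) {
            return Some(value.clone())
        }
        let value = self.disk.get(&key).cloned()?;
        self.cache.insert(key, value.clone());
        Some(value)
    }
}

fn main() {
    let mut store = ReadThroughStore {
        cache: HashMap::new(),
        disk: HashMap::from([(7, "sidecar".to_string())]),
    };
    assert_eq!(store.get_one(7).as_deref(), Some("sidecar")); // miss, then cached
    assert!(store.cache.contains_key(&7));
    assert_eq!(store.get_one(1), None); // not on disk either
    println!("cache now holds {} entries", store.cache.len());
}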

View File

@@ -58,7 +58,7 @@ impl<T: TransactionOrdering> Iterator for BestTransactionsWithFees<T> {
 max_fee_per_blob_gas < self.base_fee_per_blob_gas as u128
 }) {
 crate::traits::BestTransactions::mark_invalid(self, &best);
-continue;
+continue
 };
 return Some(best)
 }
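
This hunk sits in the `next` implementation of `BestTransactionsWithFees`, which skips otherwise-best transactions whose fee caps no longer meet the current fee requirements, marking them invalid via `mark_invalid` before continuing. A simplified, self-contained sketch of that skip-and-continue pattern (hypothetical types, not reth's pool API):

/// Hypothetical simplified transaction: only the fields the fee check needs.
#[derive(Debug)]
struct Tx {
    max_fee_per_gas: u128,
    max_fee_per_blob_gas: Option<u128>,
}

/// Wraps an inner "best first" iterator and skips entries that cannot pay the
/// current base fee (and blob base fee, if they carry blobs).
struct BestWithFees<I> {
    inner: I,
    base_fee: u64,
    base_fee_per_blob_gas: u64,
}

impl<I: Iterator<Item = Tx>> Iterator for BestWithFees<I> {
    type Item = Tx;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let best = self.inner.next()?;
            let underpays = best.max_fee_per_gas < self.base_fee as u128
                || best
                    .max_fee_per_blob_gas
                    .is_some_and(|max| max < self.base_fee_per_blob_gas as u128);
            if underpays {
                // The real iterator marks the tx invalid in the pool here;
                // this sketch simply skips it and keeps iterating.
                continue
            }
            return Some(best)
        }
    }
}

fn main() {
    let txs = vec![
        Tx { max_fee_per_gas: 5, max_fee_per_blob_gas: None },
        Tx { max_fee_per_gas: 50, max_fee_per_blob_gas: Some(1) },
        Tx { max_fee_per_gas: 50, max_fee_per_blob_gas: Some(10) },
    ];
    let mut best = BestWithFees { inner: txs.into_iter(), base_fee: 10, base_fee_per_blob_gas: 2 };
    // The first tx underpays the base fee, the second underpays the blob fee.
    assert_eq!(best.next().unwrap().max_fee_per_gas, 50);
    assert!(best.next().is_none());
    println!("done");
}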