test: add a test for devnet failure (#14288)

Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
This commit is contained in:
Arsenii Kulikov
2025-02-08 21:06:42 +04:00
committed by GitHub
parent 7d0f5b757f
commit 46d3b6a32e
5 changed files with 82 additions and 6 deletions

View File

@ -266,8 +266,6 @@ where
/// Sends FCU and waits for the node to sync to the given block.
pub async fn sync_to(&self, block: BlockHash) -> eyre::Result<()> {
self.engine_api.update_forkchoice(block, block).await?;
let start = std::time::Instant::now();
while self
@ -277,6 +275,7 @@ where
.is_none_or(|h| h.hash() != block)
{
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
self.engine_api.update_forkchoice(block, block).await?;
assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out");
}

View File

@ -135,3 +135,46 @@ async fn test_long_reorg() -> eyre::Result<()> {
Ok(())
}
// Regression test: a node that built an unfinalized fork must be able to reorg
// back onto the finalized canonical chain. Per the test name, the second sync is
// expected to be deep enough to go through backfill sync — TODO confirm the
// 80-block gap actually exceeds the live-sync threshold in this setup.
#[tokio::test]
async fn test_reorg_through_backfill() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Random seed, printed so a failing run can be reproduced deterministically.
let seed: [u8; 32] = rand::thread_rng().gen();
let mut rng = StdRng::from_seed(seed);
println!("Seed: {:?}", seed);
// Mainnet-flavored chain spec with Cancun and Prague active from genesis.
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.cancun_activated()
.prague_activated()
.build(),
);
// Two connected nodes sharing the same chain spec.
let (mut nodes, _tasks, _) =
setup_engine::<EthereumNode>(2, chain_spec.clone(), false, eth_payload_attributes).await?;
let mut first_node = nodes.pop().unwrap();
let mut second_node = nodes.pop().unwrap();
// RPC client pointed at the first node; used to fetch canonical block hashes.
let first_provider = ProviderBuilder::new().on_http(first_node.rpc_url());
// Advance first node 100 blocks and finalize the chain.
advance_with_random_transactions(&mut first_node, 100, &mut rng, true).await?;
// Sync second node to 20th block.
let head = first_provider.get_block_by_number(20.into(), false.into()).await?.unwrap();
second_node.sync_to(head.header.hash).await?;
// Produce an unfinalized fork chain with 5 blocks
// Reuse block 20's timestamp so the fork payloads build on top of it rather
// than on the second node's wall-clock time.
second_node.payload.timestamp = head.header.timestamp;
advance_with_random_transactions(&mut second_node, 5, &mut rng, false).await?;
// Now reorg second node to the finalized canonical head
// (block 100 on the first node); sync_to asserts this completes within its timeout.
let head = first_provider.get_block_by_number(100.into(), false.into()).await?.unwrap();
second_node.sync_to(head.header.hash).await?;
Ok(())
}

View File

@ -31,7 +31,7 @@ use std::{
task::{ready, Context, Poll},
};
use thiserror::Error;
use tracing::{error, trace};
use tracing::{debug, error, trace};
/// A heuristic that is used to determine the number of requests that should be prepared for a peer.
/// This should ensure that there are always requests lined up for peers to handle while the
@ -203,6 +203,16 @@ where
self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref())
}
/// Clears the sync target and zeroes both request trackers.
///
/// After a reset the downloader can start over as soon as a fresh sync target is set.
fn reset(&mut self) {
debug!(target: "downloaders::headers", "Resetting headers downloader");
self.sync_target = None;
self.next_chain_tip_block_number = 0;
self.next_request_block_number = 0;
}
/// Validate that the received header matches the expected sync target.
fn validate_sync_target(
&self,
@ -294,11 +304,23 @@ where
// If the header is valid on its own, but not against its parent, we return it as
// detached head error.
// In stage sync this will trigger an unwind because this means that the local head
// is not part of the chain the sync target is on. In other words, the downloader was
// unable to connect the sync target with the local head because the sync target and
// the local head are on different chains.
if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) {
let local_head = head.clone();
// Replace the last header with a detached variant
error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain");
// Reset trackers so that we can start over the next time the sync target is
// updated.
// The expected event flow when that happens is that the node will unwind the local
// chain and restart the downloader.
self.reset();
return Err(HeadersDownloaderError::DetachedHead {
local_head: Box::new(head.clone()),
local_head: Box::new(local_head),
header: Box::new(last_header.clone()),
error: Box::new(error),
}
@ -674,6 +696,11 @@ where
// headers are sorted high to low
self.queued_validated_headers.pop();
}
trace!(
target: "downloaders::headers",
head=?head.num_hash(),
"Updating local head"
);
// update the local head
self.local_head = Some(head);
}
@ -681,6 +708,12 @@ where
/// If the given target is different from the current target, we need to update the sync target
fn update_sync_target(&mut self, target: SyncTarget) {
let current_tip = self.sync_target.as_ref().and_then(|t| t.hash());
trace!(
target: "downloaders::headers",
sync_target=?target,
current_tip=?current_tip,
"Updating sync target"
);
match target {
SyncTarget::Tip(tip) => {
if Some(tip) != current_tip {

View File

@ -176,6 +176,7 @@ impl<T: HeaderDownloader> Future for SpawnedDownloader<T> {
}
/// Commands delegated to the spawned [`HeaderDownloader`]
#[derive(Debug)]
enum DownloaderUpdates<H> {
UpdateSyncGap(SealedHeader<H>, SyncTarget),
UpdateLocalHead(SealedHeader<H>),

View File

@ -23,7 +23,7 @@ pub trait HeaderDownloader:
/// The header type being downloaded.
type Header: Sealable + Debug + Send + Sync + Unpin + 'static;
/// Updates the gap to sync which ranges from local head to the sync target
/// Updates the gap to sync which ranges from local head to the sync target.
///
/// See also [`HeaderDownloader::update_sync_target`] and
/// [`HeaderDownloader::update_local_head`]
@ -35,7 +35,7 @@ pub trait HeaderDownloader:
/// Updates the block number of the local database
fn update_local_head(&mut self, head: SealedHeader<Self::Header>);
/// Updates the target we want to sync to
/// Updates the target we want to sync to.
fn update_sync_target(&mut self, target: SyncTarget);
/// Sets the headers batch size that the Stream should return.