Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
feat: add SealedBlock in reth-primitives-traits (#13735)
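In short: the bodies downloader stack becomes generic over a single block type `B: Block` instead of separate header and body parameters, `BodyDownloader` exposes `type Block` in place of `type Header`/`type Body`, and sealed blocks are assembled with `SealedBlock::from_sealed_parts` rather than `SealedBlock::new`. The sketch below is illustrative only and mirrors the doc example updated in the task-downloader hunks of this diff; the function name `wire_up` is hypothetical and not part of the commit.

// Illustrative sketch of the new generic surface; `wire_up` is a hypothetical
// caller, all other names appear in the diff below.
use std::sync::Arc;

use reth_consensus::{Consensus, ConsensusError};
use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader};
use reth_network_p2p::bodies::client::BodiesClient;
use reth_primitives_traits::Block;
use reth_storage_api::HeaderProvider;

fn wire_up<B, C, Provider>(
    client: Arc<C>,
    consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
    provider: Provider,
) where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    // One `Block` parameter now ties header, body and consensus together;
    // the concrete block type is chosen at the `build` call site.
    let downloader =
        BodiesDownloaderBuilder::default().build::<B, _, _>(client, consensus, provider);
    // The task wrapper is likewise generic over the block type only.
    let _task = TaskDownloader::spawn(downloader);
}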
@@ -15,7 +15,7 @@ use reth_network_p2p::{
     error::{DownloadError, DownloadResult},
 };
 use reth_primitives::SealedHeader;
-use reth_primitives_traits::size::InMemorySize;
+use reth_primitives_traits::{size::InMemorySize, Block};
 use reth_storage_api::HeaderProvider;
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use std::{
@@ -35,11 +35,15 @@ use tracing::info;
 /// All blocks in a batch are fetched at the same time.
 #[must_use = "Stream does nothing unless polled"]
 #[derive(Debug)]
-pub struct BodiesDownloader<B: BodiesClient, Provider: HeaderProvider> {
+pub struct BodiesDownloader<
+    B: Block,
+    C: BodiesClient<Body = B::Body>,
+    Provider: HeaderProvider<Header = B::Header>,
+> {
     /// The bodies client
-    client: Arc<B>,
+    client: Arc<C>,
     /// The consensus client
-    consensus: Arc<dyn Consensus<Provider::Header, B::Body, Error = ConsensusError>>,
+    consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
     /// The database handle
     provider: Provider,
     /// The maximum number of non-empty blocks per one request
@@ -57,19 +61,20 @@ pub struct BodiesDownloader<B: BodiesClient, Provider: HeaderProvider> {
     /// The latest block number returned.
     latest_queued_block_number: Option<BlockNumber>,
     /// Requests in progress
-    in_progress_queue: BodiesRequestQueue<Provider::Header, B>,
+    in_progress_queue: BodiesRequestQueue<B, C>,
     /// Buffered responses
-    buffered_responses: BinaryHeap<OrderedBodiesResponse<Provider::Header, B::Body>>,
+    buffered_responses: BinaryHeap<OrderedBodiesResponse<B>>,
     /// Queued body responses that can be returned for insertion into the database.
-    queued_bodies: Vec<BlockResponse<Provider::Header, B::Body>>,
+    queued_bodies: Vec<BlockResponse<B>>,
     /// The bodies downloader metrics.
     metrics: BodyDownloaderMetrics,
 }

-impl<B, Provider> BodiesDownloader<B, Provider>
+impl<B, C, Provider> BodiesDownloader<B, C, Provider>
 where
-    B: BodiesClient<Body: InMemorySize> + 'static,
-    Provider: HeaderProvider<Header: BlockHeader> + Unpin + 'static,
+    B: Block,
+    C: BodiesClient<Body = B::Body> + 'static,
+    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
 {
     /// Returns the next contiguous request.
     fn next_headers_request(&self) -> DownloadResult<Option<Vec<SealedHeader<Provider::Header>>>> {
@@ -97,7 +102,7 @@ where
         &self,
         range: RangeInclusive<BlockNumber>,
         max_non_empty: u64,
-    ) -> DownloadResult<Option<Vec<SealedHeader<Provider::Header>>>> {
+    ) -> DownloadResult<Option<Vec<SealedHeader<B::Header>>>> {
         if range.is_empty() || max_non_empty == 0 {
             return Ok(None)
         }
@@ -193,16 +198,14 @@ where
     }

     /// Queues bodies and sets the latest queued block number
-    fn queue_bodies(&mut self, bodies: Vec<BlockResponse<Provider::Header, B::Body>>) {
+    fn queue_bodies(&mut self, bodies: Vec<BlockResponse<B>>) {
         self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number());
         self.queued_bodies.extend(bodies);
         self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
     }

     /// Removes the next response from the buffer.
-    fn pop_buffered_response(
-        &mut self,
-    ) -> Option<OrderedBodiesResponse<Provider::Header, B::Body>> {
+    fn pop_buffered_response(&mut self) -> Option<OrderedBodiesResponse<B>> {
         let resp = self.buffered_responses.pop()?;
         self.metrics.buffered_responses.decrement(1.);
         self.buffered_blocks_size_bytes -= resp.size();
@@ -212,10 +215,10 @@ where
     }

     /// Adds a new response to the internal buffer
-    fn buffer_bodies_response(&mut self, response: Vec<BlockResponse<Provider::Header, B::Body>>) {
+    fn buffer_bodies_response(&mut self, response: Vec<BlockResponse<B>>) {
         // take into account capacity
         let size = response.iter().map(BlockResponse::size).sum::<usize>() +
-            response.capacity() * mem::size_of::<BlockResponse<Provider::Header, B::Body>>();
+            response.capacity() * mem::size_of::<BlockResponse<B>>();

         let response = OrderedBodiesResponse { resp: response, size };
         let response_len = response.len();
@@ -229,7 +232,7 @@ where
     }

     /// Returns a response if it's first block number matches the next expected.
-    fn try_next_buffered(&mut self) -> Option<Vec<BlockResponse<Provider::Header, B::Body>>> {
+    fn try_next_buffered(&mut self) -> Option<Vec<BlockResponse<B>>> {
         if let Some(next) = self.buffered_responses.peek() {
             let expected = self.next_expected_block_number();
             let next_block_range = next.block_range();
@@ -255,7 +258,7 @@ where

     /// Returns the next batch of block bodies that can be returned if we have enough buffered
     /// bodies
-    fn try_split_next_batch(&mut self) -> Option<Vec<BlockResponse<Provider::Header, B::Body>>> {
+    fn try_split_next_batch(&mut self) -> Option<Vec<BlockResponse<B>>> {
         if self.queued_bodies.len() >= self.stream_batch_size {
             let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::<Vec<_>>();
             self.queued_bodies.shrink_to_fit();
@@ -280,24 +283,19 @@ where
     }
 }

-impl<B, Provider> BodiesDownloader<B, Provider>
+impl<B, C, Provider> BodiesDownloader<B, C, Provider>
 where
-    B: BodiesClient + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
-    Self: BodyDownloader + 'static,
+    B: Block + 'static,
+    C: BodiesClient<Body = B::Body> + 'static,
+    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
 {
     /// Spawns the downloader task via [`tokio::task::spawn`]
-    pub fn into_task(
-        self,
-    ) -> TaskDownloader<<Self as BodyDownloader>::Header, <Self as BodyDownloader>::Body> {
+    pub fn into_task(self) -> TaskDownloader<B> {
         self.into_task_with(&TokioTaskExecutor::default())
     }

     /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner.
-    pub fn into_task_with<S>(
-        self,
-        spawner: &S,
-    ) -> TaskDownloader<<Self as BodyDownloader>::Header, <Self as BodyDownloader>::Body>
+    pub fn into_task_with<S>(self, spawner: &S) -> TaskDownloader<B>
     where
         S: TaskSpawner,
     {
@@ -305,13 +303,13 @@ where
     }
 }

-impl<B, Provider> BodyDownloader for BodiesDownloader<B, Provider>
+impl<B, C, Provider> BodyDownloader for BodiesDownloader<B, C, Provider>
 where
-    B: BodiesClient<Body: Debug + InMemorySize> + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
+    B: Block + 'static,
+    C: BodiesClient<Body = B::Body> + 'static,
+    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
 {
-    type Header = Provider::Header;
-    type Body = B::Body;
+    type Block = B;

     /// Set a new download range (exclusive).
     ///
@@ -356,12 +354,13 @@ where
     }
 }

-impl<B, Provider> Stream for BodiesDownloader<B, Provider>
+impl<B, C, Provider> Stream for BodiesDownloader<B, C, Provider>
 where
-    B: BodiesClient<Body: InMemorySize> + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
+    B: Block + 'static,
+    C: BodiesClient<Body = B::Body> + 'static,
+    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
 {
-    type Item = BodyDownloaderResult<Provider::Header, B::Body>;
+    type Item = BodyDownloaderResult<B>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let this = self.get_mut();
@@ -443,13 +442,13 @@ where
 }

 #[derive(Debug)]
-struct OrderedBodiesResponse<H, B> {
-    resp: Vec<BlockResponse<H, B>>,
+struct OrderedBodiesResponse<B: Block> {
+    resp: Vec<BlockResponse<B>>,
     /// The total size of the response in bytes
     size: usize,
 }

-impl<H, B> OrderedBodiesResponse<H, B> {
+impl<B: Block> OrderedBodiesResponse<B> {
     #[inline]
     fn len(&self) -> usize {
         self.resp.len()
@@ -464,10 +463,7 @@ impl<H, B> OrderedBodiesResponse<H, B> {
     }
 }

-impl<H, B> OrderedBodiesResponse<H, B>
-where
-    H: BlockHeader,
-{
+impl<B: Block> OrderedBodiesResponse<B> {
     /// Returns the block number of the first element
     ///
     /// # Panics
@@ -485,21 +481,21 @@ where
     }
 }

-impl<H: BlockHeader, B> PartialEq for OrderedBodiesResponse<H, B> {
+impl<B: Block> PartialEq for OrderedBodiesResponse<B> {
     fn eq(&self, other: &Self) -> bool {
         self.first_block_number() == other.first_block_number()
     }
 }

-impl<H: BlockHeader, B> Eq for OrderedBodiesResponse<H, B> {}
+impl<B: Block> Eq for OrderedBodiesResponse<B> {}

-impl<H: BlockHeader, B> PartialOrd for OrderedBodiesResponse<H, B> {
+impl<B: Block> PartialOrd for OrderedBodiesResponse<B> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(other))
     }
 }

-impl<H: BlockHeader, B> Ord for OrderedBodiesResponse<H, B> {
+impl<B: Block> Ord for OrderedBodiesResponse<B> {
     fn cmp(&self, other: &Self) -> Ordering {
         self.first_block_number().cmp(&other.first_block_number()).reverse()
     }
@@ -576,15 +572,16 @@ impl BodiesDownloaderBuilder {
     }

     /// Consume self and return the concurrent downloader.
-    pub fn build<B, Provider>(
+    pub fn build<B, C, Provider>(
         self,
-        client: B,
-        consensus: Arc<dyn Consensus<Provider::Header, B::Body, Error = ConsensusError>>,
+        client: C,
+        consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
         provider: Provider,
-    ) -> BodiesDownloader<B, Provider>
+    ) -> BodiesDownloader<B, C, Provider>
     where
-        B: BodiesClient + 'static,
-        Provider: HeaderProvider,
+        B: Block,
+        C: BodiesClient<Body = B::Body> + 'static,
+        Provider: HeaderProvider<Header = B::Header>,
     {
         let Self {
             request_limit,
@@ -646,15 +643,16 @@ mod tests {
         );
         let (_static_dir, static_dir_path) = create_test_static_files_dir();

-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            ProviderFactory::<MockNodeTypesWithDB>::new(
-                db,
-                MAINNET.clone(),
-                StaticFileProvider::read_write(static_dir_path).unwrap(),
-            ),
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::<reth_primitives::Block, _, _>(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                ProviderFactory::<MockNodeTypesWithDB>::new(
+                    db,
+                    MAINNET.clone(),
+                    StaticFileProvider::read_write(static_dir_path).unwrap(),
+                ),
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");

         assert_matches!(
@@ -689,16 +687,17 @@ mod tests {
         let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
         let (_static_dir, static_dir_path) = create_test_static_files_dir();

-        let mut downloader =
-            BodiesDownloaderBuilder::default().with_request_limit(request_limit).build(
-                client.clone(),
-                Arc::new(TestConsensus::default()),
-                ProviderFactory::<MockNodeTypesWithDB>::new(
-                    db,
-                    MAINNET.clone(),
-                    StaticFileProvider::read_write(static_dir_path).unwrap(),
-                ),
-            );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .with_request_limit(request_limit)
+            .build::<reth_primitives::Block, _, _>(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                ProviderFactory::<MockNodeTypesWithDB>::new(
+                    db,
+                    MAINNET.clone(),
+                    StaticFileProvider::read_write(static_dir_path).unwrap(),
+                ),
+            );
         downloader.set_download_range(0..=199).expect("failed to set download range");

         let _ = downloader.collect::<Vec<_>>().await;
@@ -724,7 +723,7 @@ mod tests {
         let mut downloader = BodiesDownloaderBuilder::default()
             .with_stream_batch_size(stream_batch_size)
             .with_request_limit(request_limit)
-            .build(
+            .build::<reth_primitives::Block, _, _>(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::<MockNodeTypesWithDB>::new(
@@ -760,7 +759,9 @@ mod tests {
         let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
         let (_static_dir, static_dir_path) = create_test_static_files_dir();

-        let mut downloader = BodiesDownloaderBuilder::default().with_stream_batch_size(100).build(
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .with_stream_batch_size(100)
+            .build::<reth_primitives::Block, _, _>(
             client.clone(),
             Arc::new(TestConsensus::default()),
             ProviderFactory::<MockNodeTypesWithDB>::new(
@@ -806,7 +807,7 @@ mod tests {
             .with_stream_batch_size(10)
             .with_request_limit(1)
             .with_max_buffered_blocks_size_bytes(1)
-            .build(
+            .build::<reth_primitives::Block, _, _>(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::<MockNodeTypesWithDB>::new(
@@ -843,7 +844,7 @@ mod tests {
         let mut downloader = BodiesDownloaderBuilder::default()
             .with_request_limit(3)
             .with_stream_batch_size(100)
-            .build(
+            .build::<reth_primitives::Block, _, _>(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::<MockNodeTypesWithDB>::new(

@@ -4,29 +4,26 @@ use reth_network_p2p::{
     bodies::{downloader::BodyDownloader, response::BlockResponse},
     error::{DownloadError, DownloadResult},
 };
+use reth_primitives_traits::Block;
 use std::{fmt::Debug, ops::RangeInclusive};

 /// A [`BodyDownloader`] implementation that does nothing.
 #[derive(Debug, Default)]
 #[non_exhaustive]
-pub struct NoopBodiesDownloader<H, B> {
-    _header: std::marker::PhantomData<H>,
-    _body: std::marker::PhantomData<B>,
+pub struct NoopBodiesDownloader<B> {
+    _block: std::marker::PhantomData<B>,
 }

-impl<H: Debug + Send + Sync + Unpin + 'static, B: Debug + Send + Sync + Unpin + 'static>
-    BodyDownloader for NoopBodiesDownloader<H, B>
-{
-    type Body = B;
-    type Header = H;
+impl<B: Block + 'static> BodyDownloader for NoopBodiesDownloader<B> {
+    type Block = B;

     fn set_download_range(&mut self, _: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
         Ok(())
     }
 }

-impl<H, B> Stream for NoopBodiesDownloader<H, B> {
-    type Item = Result<Vec<BlockResponse<H, B>>, DownloadError>;
+impl<B: Block + 'static> Stream for NoopBodiesDownloader<B> {
+    type Item = Result<Vec<BlockResponse<B>>, DownloadError>;

     fn poll_next(
         self: std::pin::Pin<&mut Self>,

@@ -10,7 +10,7 @@ use reth_network_p2p::{
     error::DownloadResult,
 };
 use reth_primitives::SealedHeader;
-use reth_primitives_traits::InMemorySize;
+use reth_primitives_traits::Block;
 use std::{
     pin::Pin,
     sync::Arc,
@@ -20,19 +20,19 @@ use std::{
 /// The wrapper around [`FuturesUnordered`] that keeps information
 /// about the blocks currently being requested.
 #[derive(Debug)]
-pub(crate) struct BodiesRequestQueue<H, B: BodiesClient> {
+pub(crate) struct BodiesRequestQueue<B: Block, C: BodiesClient<Body = B::Body>> {
     /// Inner body request queue.
-    inner: FuturesUnordered<BodiesRequestFuture<H, B>>,
+    inner: FuturesUnordered<BodiesRequestFuture<B, C>>,
     /// The downloader metrics.
     metrics: BodyDownloaderMetrics,
     /// Last requested block number.
     pub(crate) last_requested_block_number: Option<BlockNumber>,
 }

-impl<H, B> BodiesRequestQueue<H, B>
+impl<B, C> BodiesRequestQueue<B, C>
 where
-    B: BodiesClient + 'static,
-    H: BlockHeader,
+    B: Block,
+    C: BodiesClient<Body = B::Body> + 'static,
 {
     /// Create new instance of request queue.
     pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self {
@@ -58,9 +58,9 @@ where
     /// Expects a sorted list of headers.
     pub(crate) fn push_new_request(
         &mut self,
-        client: Arc<B>,
-        consensus: Arc<dyn Consensus<H, B::Body, Error = ConsensusError>>,
-        request: Vec<SealedHeader<H>>,
+        client: Arc<C>,
+        consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
+        request: Vec<SealedHeader<B::Header>>,
     ) {
         // Set last max requested block number
         self.last_requested_block_number = request
@@ -78,12 +78,12 @@ where
     }
 }

-impl<H, B> Stream for BodiesRequestQueue<H, B>
+impl<B, C> Stream for BodiesRequestQueue<B, C>
 where
-    H: BlockHeader + Send + Sync + Unpin + 'static,
-    B: BodiesClient<Body: InMemorySize> + 'static,
+    B: Block + 'static,
+    C: BodiesClient<Body = B::Body> + 'static,
 {
-    type Item = DownloadResult<Vec<BlockResponse<H, B::Body>>>;
+    type Item = DownloadResult<Vec<BlockResponse<B>>>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         self.get_mut().inner.poll_next_unpin(cx)

@@ -10,7 +10,7 @@ use reth_network_p2p::{
 };
 use reth_network_peers::{PeerId, WithPeerId};
 use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader};
-use reth_primitives_traits::InMemorySize;
+use reth_primitives_traits::{Block, InMemorySize};
 use std::{
     collections::VecDeque,
     mem,
@@ -38,31 +38,31 @@ use std::{
 /// All errors regarding the response cause the peer to get penalized, meaning that adversaries
 /// that try to give us bodies that do not match the requested order are going to be penalized
 /// and eventually disconnected.
-pub(crate) struct BodiesRequestFuture<H, B: BodiesClient> {
-    client: Arc<B>,
-    consensus: Arc<dyn Consensus<H, B::Body, Error = ConsensusError>>,
+pub(crate) struct BodiesRequestFuture<B: Block, C: BodiesClient<Body = B::Body>> {
+    client: Arc<C>,
+    consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
     metrics: BodyDownloaderMetrics,
     /// Metrics for individual responses. This can be used to observe how the size (in bytes) of
     /// responses change while bodies are being downloaded.
     response_metrics: ResponseMetrics,
     // Headers to download. The collection is shrunk as responses are buffered.
-    pending_headers: VecDeque<SealedHeader<H>>,
+    pending_headers: VecDeque<SealedHeader<B::Header>>,
     /// Internal buffer for all blocks
-    buffer: Vec<BlockResponse<H, B::Body>>,
-    fut: Option<B::Output>,
+    buffer: Vec<BlockResponse<B>>,
+    fut: Option<C::Output>,
     /// Tracks how many bodies we requested in the last request.
     last_request_len: Option<usize>,
 }

-impl<H, B> BodiesRequestFuture<H, B>
+impl<B, C> BodiesRequestFuture<B, C>
 where
-    H: BlockHeader,
-    B: BodiesClient + 'static,
+    B: Block,
+    C: BodiesClient<Body = B::Body> + 'static,
 {
     /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request.
     pub(crate) fn new(
-        client: Arc<B>,
-        consensus: Arc<dyn Consensus<H, B::Body, Error = ConsensusError>>,
+        client: Arc<C>,
+        consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
         metrics: BodyDownloaderMetrics,
     ) -> Self {
         Self {
@@ -77,7 +77,7 @@ where
         }
     }

-    pub(crate) fn with_headers(mut self, headers: Vec<SealedHeader<H>>) -> Self {
+    pub(crate) fn with_headers(mut self, headers: Vec<SealedHeader<B::Header>>) -> Self {
         self.buffer.reserve_exact(headers.len());
         self.pending_headers = VecDeque::from(headers);
         // Submit the request only if there are any headers to download.
@@ -163,9 +163,9 @@ where
     ///
     /// This method removes headers from the internal collection.
     /// If the response fails validation, then the header will be put back.
-    fn try_buffer_blocks(&mut self, bodies: Vec<B::Body>) -> DownloadResult<()>
+    fn try_buffer_blocks(&mut self, bodies: Vec<C::Body>) -> DownloadResult<()>
     where
-        B::Body: InMemorySize,
+        C::Body: InMemorySize,
     {
         let bodies_capacity = bodies.capacity();
         let bodies_len = bodies.len();
@@ -180,7 +180,7 @@ where

             if next_header.is_empty() {
                 // increment empty block body metric
-                total_size += mem::size_of::<BlockBody>();
+                total_size += mem::size_of::<C::Body>();
                 self.buffer.push(BlockResponse::Empty(next_header));
             } else {
                 let next_body = bodies.next().unwrap();
@@ -188,7 +188,7 @@ where
                 // increment full block body metric
                 total_size += next_body.size();

-                let block = SealedBlock::new(next_header, next_body);
+                let block = SealedBlock::from_sealed_parts(next_header, next_body);

                 if let Err(error) = self.consensus.validate_block_pre_execution(&block) {
                     // Body is invalid, put the header back and return an error
@@ -214,12 +214,12 @@ where
     }
 }

-impl<H, B> Future for BodiesRequestFuture<H, B>
+impl<B, C> Future for BodiesRequestFuture<B, C>
 where
-    H: BlockHeader + Unpin + Send + Sync + 'static,
-    B: BodiesClient<Body: InMemorySize> + 'static,
+    B: Block + 'static,
+    C: BodiesClient<Body = B::Body> + 'static,
 {
-    type Output = DownloadResult<Vec<BlockResponse<H, B::Body>>>;
+    type Output = DownloadResult<Vec<BlockResponse<B>>>;

     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         let this = self.get_mut();
@@ -275,7 +275,7 @@ mod tests {
         let headers = random_header_range(&mut rng, 0..20, B256::ZERO);

         let client = Arc::new(TestBodiesClient::default());
-        let fut = BodiesRequestFuture::new(
+        let fut = BodiesRequestFuture::<reth_primitives::Block, _>::new(
             client.clone(),
             Arc::new(TestConsensus::default()),
             BodyDownloaderMetrics::default(),
@@ -299,7 +299,7 @@ mod tests {
         let client = Arc::new(
             TestBodiesClient::default().with_bodies(bodies.clone()).with_max_batch_size(batch_size),
         );
-        let fut = BodiesRequestFuture::new(
+        let fut = BodiesRequestFuture::<reth_primitives::Block, _>::new(
             client.clone(),
             Arc::new(TestConsensus::default()),
             BodyDownloaderMetrics::default(),

@@ -6,6 +6,7 @@ use reth_network_p2p::{
     bodies::downloader::{BodyDownloader, BodyDownloaderResult},
     error::DownloadResult,
 };
+use reth_primitives_traits::Block;
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use std::{
     fmt::Debug,
@@ -24,15 +25,13 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4;
 /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task.
 #[derive(Debug)]
 #[pin_project]
-pub struct TaskDownloader<H, B> {
+pub struct TaskDownloader<B: Block> {
     #[pin]
-    from_downloader: ReceiverStream<BodyDownloaderResult<H, B>>,
+    from_downloader: ReceiverStream<BodyDownloaderResult<B>>,
     to_downloader: UnboundedSender<RangeInclusive<BlockNumber>>,
 }

 // === impl TaskDownloader ===

-impl<H: Send + Sync + Unpin + 'static, B: Send + Sync + Unpin + 'static> TaskDownloader<H, B> {
+impl<B: Block + 'static> TaskDownloader<B> {
     /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's
     /// connected to that task.
     ///
@@ -46,25 +45,27 @@ impl<H: Send + Sync + Unpin + 'static, B: Send + Sync + Unpin + 'static> TaskDow
     /// use reth_consensus::{Consensus, ConsensusError};
     /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader};
     /// use reth_network_p2p::bodies::client::BodiesClient;
-    /// use reth_primitives_traits::InMemorySize;
+    /// use reth_primitives_traits::{Block, InMemorySize};
     /// use reth_storage_api::HeaderProvider;
     /// use std::{fmt::Debug, sync::Arc};
     ///
     /// fn t<
-    ///     B: BodiesClient<Body: Debug + InMemorySize> + 'static,
-    ///     Provider: HeaderProvider<Header = alloy_consensus::Header> + Unpin + 'static,
+    ///     B: Block + 'static,
+    ///     C: BodiesClient<Body = B::Body> + 'static,
+    ///     Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
     /// >(
-    ///     client: Arc<B>,
-    ///     consensus: Arc<dyn Consensus<Provider::Header, B::Body, Error = ConsensusError>>,
+    ///     client: Arc<C>,
+    ///     consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
     ///     provider: Provider,
     /// ) {
-    ///     let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider);
+    ///     let downloader =
+    ///         BodiesDownloaderBuilder::default().build::<B, _, _>(client, consensus, provider);
     ///     let downloader = TaskDownloader::spawn(downloader);
     /// }
     /// ```
     pub fn spawn<T>(downloader: T) -> Self
     where
-        T: BodyDownloader<Header = H, Body = B> + 'static,
+        T: BodyDownloader<Block = B> + 'static,
     {
         Self::spawn_with(downloader, &TokioTaskExecutor::default())
     }
@@ -73,7 +74,7 @@ impl<H: Send + Sync + Unpin + 'static, B: Send + Sync + Unpin + 'static> TaskDow
     /// that's connected to that task.
     pub fn spawn_with<T, S>(downloader: T, spawner: &S) -> Self
     where
-        T: BodyDownloader<Header = H, Body = B> + 'static,
+        T: BodyDownloader<Block = B> + 'static,
         S: TaskSpawner,
     {
         let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE);
@@ -91,11 +92,8 @@ impl<H: Send + Sync + Unpin + 'static, B: Send + Sync + Unpin + 'static> TaskDow
     }
 }

-impl<H: Debug + Send + Sync + Unpin + 'static, B: Debug + Send + Sync + Unpin + 'static>
-    BodyDownloader for TaskDownloader<H, B>
-{
-    type Header = H;
-    type Body = B;
+impl<B: Block + 'static> BodyDownloader for TaskDownloader<B> {
+    type Block = B;

     fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
         let _ = self.to_downloader.send(range);
@@ -103,8 +101,8 @@ impl<H: Debug + Send + Sync + Unpin + 'static, B: Debug + Send + Sync + Unpin +
     }
 }

-impl<H, B> Stream for TaskDownloader<H, B> {
-    type Item = BodyDownloaderResult<H, B>;
+impl<B: Block + 'static> Stream for TaskDownloader<B> {
+    type Item = BodyDownloaderResult<B>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         self.project().from_downloader.poll_next(cx)
@@ -114,7 +112,7 @@ impl<H, B> Stream for TaskDownloader<H, B> {
 /// A [`BodyDownloader`] that runs on its own task
 struct SpawnedDownloader<T: BodyDownloader> {
     updates: UnboundedReceiverStream<RangeInclusive<BlockNumber>>,
-    bodies_tx: PollSender<BodyDownloaderResult<T::Header, T::Body>>,
+    bodies_tx: PollSender<BodyDownloaderResult<T::Block>>,
     downloader: T,
 }

@@ -197,7 +195,7 @@ mod tests {
         let client = Arc::new(
             TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
         );
-        let downloader = BodiesDownloaderBuilder::default().build(
+        let downloader = BodiesDownloaderBuilder::default().build::<reth_primitives::Block, _, _>(
             client.clone(),
             Arc::new(TestConsensus::default()),
             factory,
@@ -219,7 +217,7 @@ mod tests {
         reth_tracing::init_test_tracing();
         let factory = create_test_provider_factory();

-        let downloader = BodiesDownloaderBuilder::default().build(
+        let downloader = BodiesDownloaderBuilder::default().build::<reth_primitives::Block, _, _>(
             Arc::new(TestBodiesClient::default()),
             Arc::new(TestConsensus::default()),
             factory,

@@ -7,13 +7,14 @@ use alloy_primitives::B256;
 use reth_db::{tables, DatabaseEnv};
 use reth_db_api::{database::Database, transaction::DbTxMut};
 use reth_network_p2p::bodies::response::BlockResponse;
-use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader};
+use reth_primitives::{BlockBody, SealedBlock, SealedHeader};
+use reth_primitives_traits::Block;
 use std::collections::HashMap;

-pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
-    headers: impl Iterator<Item = &'a SealedHeader<H>>,
-    bodies: &mut HashMap<B256, B>,
-) -> Vec<BlockResponse<H, B>> {
+pub(crate) fn zip_blocks<'a, B: Block>(
+    headers: impl Iterator<Item = &'a SealedHeader<B::Header>>,
+    bodies: &mut HashMap<B256, B::Body>,
+) -> Vec<BlockResponse<B>> {
     headers
         .into_iter()
         .map(|header| {
@@ -21,7 +22,7 @@ pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
             if header.is_empty() {
                 BlockResponse::Empty(header.clone())
             } else {
-                BlockResponse::Full(SealedBlock::new(header.clone(), body))
+                BlockResponse::Full(SealedBlock::from_sealed_parts(header.clone(), body))
             }
         })
         .collect()
@@ -30,7 +31,7 @@ pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
 pub(crate) fn create_raw_bodies(
     headers: impl IntoIterator<Item = SealedHeader>,
     bodies: &mut HashMap<B256, BlockBody>,
-) -> Vec<Block> {
+) -> Vec<reth_primitives::Block> {
     headers
         .into_iter()
         .map(|header| {
@@ -45,7 +46,7 @@ pub(crate) fn insert_headers(db: &DatabaseEnv, headers: &[SealedHeader]) {
     db.update(|tx| {
         for header in headers {
             tx.put::<tables::CanonicalHeaders>(header.number, header.hash()).unwrap();
-            tx.put::<tables::Headers>(header.number, header.clone().unseal()).unwrap();
+            tx.put::<tables::Headers>(header.number, header.clone_header()).unwrap();
         }
     })
     .expect("failed to commit")

@@ -11,6 +11,7 @@ use reth_network_p2p::{
     error::RequestError,
     headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest},
     priority::Priority,
+    BlockClient,
 };
 use reth_network_peers::PeerId;
 use reth_primitives::SealedHeader;
@@ -40,7 +41,7 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000;
 /// transactions in memory for use in the bodies stage.
 ///
 /// This reads the entire file into memory, so it is not suitable for large files.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct FileClient<B: Block = reth_primitives::Block> {
     /// The buffered headers retrieved when fetching new bodies.
     headers: HashMap<BlockNumber, B::Header>,
@@ -116,7 +117,7 @@ impl<B: FullBlock> FileClient<B> {
     /// Clones and returns the highest header of this client has or `None` if empty. Seals header
     /// before returning.
     pub fn tip_header(&self) -> Option<SealedHeader<B::Header>> {
-        self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone()))
+        self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal_slow(h.clone()))
     }

     /// Returns true if all blocks are canonical (no gaps)
@@ -350,6 +351,10 @@ impl<B: FullBlock> DownloadClient for FileClient<B> {
     }
 }

+impl<B: FullBlock> BlockClient for FileClient<B> {
+    type Block = B;
+}
+
 /// Chunks file into several [`FileClient`]s.
 #[derive(Debug)]
 pub struct ChunkedFileReader {
@@ -546,11 +551,12 @@ mod tests {

         let client: Arc<FileClient> =
             Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone()));
-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            factory,
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::<reth_primitives::Block, _, _>(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                factory,
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");

         assert_matches!(
@@ -571,10 +577,10 @@ mod tests {
         let file = tempfile::tempfile().unwrap();
         let client: Arc<FileClient> = Arc::new(
             FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([
-                (0u64, p0.clone().unseal()),
-                (1, p1.clone().unseal()),
-                (2, p2.clone().unseal()),
-                (3, p3.clone().unseal()),
+                (0u64, p0.clone_header()),
+                (1, p1.clone_header()),
+                (2, p2.clone_header()),
+                (3, p3.clone_header()),
             ])),
         );

@@ -628,11 +634,12 @@ mod tests {
         // insert headers in db for the bodies downloader
         insert_headers(factory.db_ref().db(), &headers);

-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            factory,
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::<reth_primitives::Block, _, _>(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                factory,
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");

         assert_matches!(

@@ -1,3 +1,4 @@
+use alloy_primitives::Sealable;
 use futures::Stream;
 use reth_network_p2p::headers::{
     downloader::{HeaderDownloader, SyncTarget},
@@ -11,7 +12,9 @@ use std::fmt::Debug;
 #[non_exhaustive]
 pub struct NoopHeaderDownloader<H>(std::marker::PhantomData<H>);

-impl<H: Debug + Send + Sync + Unpin + 'static> HeaderDownloader for NoopHeaderDownloader<H> {
+impl<H: Sealable + Debug + Send + Sync + Unpin + 'static> HeaderDownloader
+    for NoopHeaderDownloader<H>
+{
     type Header = H;

     fn update_local_head(&mut self, _: SealedHeader<H>) {}
@@ -21,7 +24,7 @@ impl<H: Debug + Send + Sync + Unpin + 'static> HeaderDownloader for NoopHeaderDo
     fn set_batch_size(&mut self, _: usize) {}
 }

-impl<H> Stream for NoopHeaderDownloader<H> {
+impl<H: Sealable> Stream for NoopHeaderDownloader<H> {
     type Item = Result<Vec<SealedHeader<H>>, HeadersDownloaderError<H>>;

     fn poll_next(

@@ -4,7 +4,7 @@ use super::task::TaskDownloader;
 use crate::metrics::HeaderDownloaderMetrics;
 use alloy_consensus::BlockHeader;
 use alloy_eips::BlockHashOrNumber;
-use alloy_primitives::{BlockNumber, B256};
+use alloy_primitives::{BlockNumber, Sealable, B256};
 use futures::{stream::Stream, FutureExt};
 use futures_util::{stream::FuturesUnordered, StreamExt};
 use rayon::prelude::*;
@@ -40,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5;

 /// Wrapper for internal downloader errors.
 #[derive(Error, Debug)]
-enum ReverseHeadersDownloaderError<H> {
+enum ReverseHeadersDownloaderError<H: Sealable> {
     #[error(transparent)]
     Downloader(#[from] HeadersDownloaderError<H>),
     #[error(transparent)]
     Response(#[from] Box<HeadersResponseError>),
 }

-impl<H> From<HeadersResponseError> for ReverseHeadersDownloaderError<H> {
+impl<H: Sealable> From<HeadersResponseError> for ReverseHeadersDownloaderError<H> {
     fn from(value: HeadersResponseError) -> Self {
         Self::Response(Box::new(value))
     }
@@ -251,7 +251,8 @@ where
     ) -> Result<(), ReverseHeadersDownloaderError<H::Header>> {
         let mut validated = Vec::with_capacity(headers.len());

-        let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::<Vec<_>>();
+        let sealed_headers =
+            headers.into_par_iter().map(SealedHeader::seal_slow).collect::<Vec<_>>();
         for parent in sealed_headers {
             // Validate that the header is the parent header of the last validated header.
             if let Some(validated_header) =
@@ -378,7 +379,7 @@ where
         }

         let header = headers.swap_remove(0);
-        let target = SealedHeader::seal(header);
+        let target = SealedHeader::seal_slow(header);

         match sync_target {
             SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => {

@@ -1,3 +1,4 @@
+use alloy_primitives::Sealable;
 use futures::{FutureExt, Stream};
 use futures_util::StreamExt;
 use pin_project::pin_project;
@@ -23,7 +24,7 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8;
 /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task.
 #[derive(Debug)]
 #[pin_project]
-pub struct TaskDownloader<H> {
+pub struct TaskDownloader<H: Sealable> {
     #[pin]
     from_downloader: ReceiverStream<HeadersDownloaderResult<Vec<SealedHeader<H>>, H>>,
     to_downloader: UnboundedSender<DownloaderUpdates<H>>,
@@ -31,7 +32,7 @@ pub struct TaskDownloader<H> {

 // === impl TaskDownloader ===

-impl<H: Send + Sync + Unpin + 'static> TaskDownloader<H> {
+impl<H: Sealable + Send + Sync + Unpin + 'static> TaskDownloader<H> {
     /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`]
     /// that's connected to that task.
     ///
@@ -83,7 +84,7 @@ impl<H: Send + Sync + Unpin + 'static> TaskDownloader<H> {
     }
 }

-impl<H: Debug + Send + Sync + Unpin + 'static> HeaderDownloader for TaskDownloader<H> {
+impl<H: Sealable + Debug + Send + Sync + Unpin + 'static> HeaderDownloader for TaskDownloader<H> {
     type Header = H;

     fn update_sync_gap(&mut self, head: SealedHeader<H>, target: SyncTarget) {
@@ -103,7 +104,7 @@ impl<H: Debug + Send + Sync + Unpin + 'static> HeaderDownloader for TaskDownload
     }
 }

-impl<H> Stream for TaskDownloader<H> {
+impl<H: Sealable> Stream for TaskDownloader<H> {
     type Item = HeadersDownloaderResult<Vec<SealedHeader<H>>, H>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {

@@ -9,5 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader {
     let mut child = parent.as_ref().clone();
     child.number += 1;
     child.parent_hash = parent.hash_slow();
-    SealedHeader::seal(child)
+    SealedHeader::seal_slow(child)
 }
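For reference, a minimal sketch of the response-assembly side under the new single-parameter generics. The helper name `pair_up` is hypothetical; the types and the `SealedBlock::from_sealed_parts` call are taken from the hunks above that replace `SealedBlock::new`.

// Hypothetical helper mirroring how the request future now assembles responses:
// one `B: Block` parameter covers both the sealed header and the body.
use reth_network_p2p::bodies::response::BlockResponse;
use reth_primitives::{SealedBlock, SealedHeader};
use reth_primitives_traits::Block;

fn pair_up<B: Block>(header: SealedHeader<B::Header>, body: Option<B::Body>) -> BlockResponse<B> {
    match body {
        // Empty blocks carry only the sealed header.
        None => BlockResponse::Empty(header),
        // Full blocks are built with `from_sealed_parts` instead of `SealedBlock::new`.
        Some(body) => BlockResponse::Full(SealedBlock::from_sealed_parts(header, body)),
    }
}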