mirror of https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
debug
@@ -159,15 +159,15 @@ impl BlockIngest {
             return Some(block);
         }

-        if let Some(hlfs) = &self.hlfs {
-            //info!("!! HEIGHT [{height}] :: HEAD [{head}]");
-            if hlfs.try_fetch_one(height).await.ok().flatten().is_some() {
-                if let Some(block) = self.try_collect_local_block(height).await {
-                    info!("Returning HLFS-fetched block @[{height}]");
-                    return Some(block);
-                }
-            }
-        }
+        // if let Some(hlfs) = &self.hlfs {
+        //     //info!("!! HEIGHT [{height}] :: HEAD [{head}]");
+        //     if hlfs.try_fetch_one(height).await.ok().flatten().is_some() {
+        //         if let Some(block) = self.try_collect_local_block(height).await {
+        //             info!("Returning HLFS-fetched block @[{height}]");
+        //             return Some(block);
+        //         }
+        //     }
+        // }

         self.try_collect_s3_block(height)
     }
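Net effect of this hunk: the HLFS fast path (probe peers via `hlfs.try_fetch_one`, then read the block from the local archive) is commented out, so the surrounding `BlockIngest` method now always falls through to `try_collect_s3_block(height)`.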
@@ -1,14 +1,16 @@
 use clap::Args;
-use reth_hlfs::{Backfiller, Client, Server, OP_REQ_MAX_BLOCK, OP_RES_MAX_BLOCK, PeerRecord};
+use reth_hlfs::{Backfiller, Client, PeerRecord, Server, OP_REQ_MAX_BLOCK, OP_RES_MAX_BLOCK};
 use reth_network_api::{events::NetworkEvent, FullNetwork};
 use std::{
     collections::HashSet,
     net::{IpAddr, SocketAddr},
     path::PathBuf,
     sync::Arc,
-    time::Duration,
 };
-use tokio::{task::JoinHandle, time::timeout};
+use tokio::{
+    task::JoinHandle,
+    time::{sleep, timeout, Duration},
+};
 use tracing::{debug, info, warn};

 // use futures_util::StreamExt;
@@ -27,7 +29,6 @@ pub(crate) struct ShareBlocksArgs {
 }

 pub(crate) struct ShareBlocks {
-    pub(crate) _backfiller: Backfiller,
     _server: JoinHandle<()>,
     _autodetect: JoinHandle<()>,
 }
@@ -53,37 +54,54 @@ impl ShareBlocks {
             }
         });

-        let client = Client::new(&args.archive_dir, Vec::new()).with_timeout(Duration::from_secs(5));
-        let bf = Backfiller::new(client, &args.archive_dir);
-
-        let _autodetect = spawn_autodetect(network, host, args.share_blocks_port, bf.clone());
+        let _autodetect = spawn_autodetect(network, host, args.share_blocks_port, args.archive_dir.clone());

         info!(%bind, dir=%args.archive_dir.display(), "hlfs: enabled (reth peers)");
-        Ok(Self { _backfiller: bf, _server, _autodetect })
+        Ok(Self { _server, _autodetect })
     }

-    #[allow(dead_code)]
-    pub(crate) async fn try_fetch_one(&self, block: u64) -> eyre::Result<Option<usize>> {
-        let rr = block as usize;
-        self._backfiller.fetch_if_missing(block, rr).await.map_err(|e| eyre::eyre!(e))
-        // <- fix: HlfsError -> eyre::Report
-    }
+    // #[allow(dead_code)]
+    // pub(crate) async fn try_fetch_one(&self, block: u64) -> eyre::Result<Option<usize>> {
+    //     self._backfiller.fetch_if_missing(block).await.map_err(|e| eyre::eyre!(e))
+    // }
 }

 fn spawn_autodetect<Net>(
     network: Net,
     self_ip: IpAddr,
     hlfs_port: u16,
-    backfiller: Backfiller,
+    archive_dir: PathBuf,
 ) -> JoinHandle<()>
 where
     Net: FullNetwork + Clone + 'static,
 {
-    let good: Arc<tokio::sync::Mutex<HashSet<PeerRecord>>> = Arc::new(tokio::sync::Mutex::new(HashSet::new()));
+    let client = Client::new(&archive_dir, Vec::new()).with_timeout(Duration::from_secs(5));
+    let backfiller = Arc::new(tokio::sync::Mutex::new(Backfiller::new(client, &archive_dir)));
+    let good: Arc<tokio::sync::Mutex<HashSet<PeerRecord>>> =
+        Arc::new(tokio::sync::Mutex::new(HashSet::new()));

-    tokio::spawn(async move {
+    tokio::spawn({
+        let backfiller = backfiller.clone();
+        async move {
+            loop {
+                let mut bf = backfiller.lock().await;
+                warn!("hlfs: backfiller started");
+                if bf.client.max_block < bf.max_block_seen {
+                    let block = bf.client.max_block + 1;
+                    let _ = bf.fetch_if_missing(block).await;
+                }
+
+                sleep(Duration::from_secs(1)).await;
+            }
+        }
+    });
+
+    tokio::spawn({
+        let backfiller = backfiller.clone();
+        async move {
         let mut events = network.event_listener();
         loop {
+            let mut bf = backfiller.lock().await;
             match events.next().await {
                 Some(NetworkEvent::ActivePeerSession { info, .. }) => {
                     let ip = info.remote_addr.ip();
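The reworked spawn_autodetect above shares one Backfiller between two tasks through an Arc<tokio::sync::Mutex<_>>: a polling task that tries to fetch the next missing block once per second, and the network-event task that records newly discovered peers. A minimal sketch of that shared-state pattern, using a hypothetical `State` struct instead of the crate's real `Backfiller` (assumes tokio with the "sync" and "time" features; must be called from inside a Tokio runtime):

// Sketch only: `State` stands in for the shared backfiller state.
use std::sync::Arc;
use tokio::{
    sync::Mutex,
    time::{sleep, Duration},
};

struct State {
    max_block: u64,      // highest block already stored locally
    max_block_seen: u64, // highest block any peer has advertised
}

fn spawn_tasks(state: Arc<Mutex<State>>) {
    // Task 1: poll once per second and fetch the next missing block.
    tokio::spawn({
        let state = state.clone();
        async move {
            loop {
                {
                    let mut s = state.lock().await;
                    if s.max_block < s.max_block_seen {
                        s.max_block += 1; // placeholder for a fetch of the next block
                    }
                } // guard dropped here, so the lock is free during the sleep
                sleep(Duration::from_secs(1)).await;
            }
        }
    });

    // Task 2: the peer-discovery loop would lock the same state and raise
    // max_block_seen whenever a peer advertises a higher tip.
    tokio::spawn(async move {
        let mut s = state.lock().await;
        s.max_block_seen = s.max_block_seen.max(123); // stand-in for a peer report
    });
}

The sketch drops the guard in an inner scope before sleeping, so the lock is not held across the one-second await; whether that distinction matters here depends on how long the discovery task can afford to wait for the mutex.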
@@ -101,7 +119,7 @@ where
                         let mut g = good.lock().await;
                         if g.insert(PeerRecord { addr, max_block }) {
                             let v: Vec<_> = g.iter().copied().collect();
-                            backfiller.set_peers(v.clone());
+                            bf.set_peers(v.clone());
                             info!(%addr, %max_block, total=v.len(), "hlfs: peer added");
                         }
                     } else {
@@ -115,6 +133,7 @@ where
                 }
             }
         }
+        }
     })
 }

@@ -83,9 +83,9 @@ impl Hash for PeerRecord {
 #[derive(Clone)]
 pub struct Client {
     root: PathBuf,
-    peers: Arc<Mutex<Vec<PeerRecord>>>,
+    pub peers: Arc<Mutex<Vec<PeerRecord>>>,
     timeout: Duration,
-    max_block: u64,
+    pub max_block: u64,
 }
 impl Client {
     pub fn new(root: impl Into<PathBuf>, peers: Vec<PeerRecord>) -> Self {
@@ -310,21 +310,28 @@ async fn handle_conn(
 /// Backfiller: ask client per missing block; rotate peers every block.
 #[derive(Clone)]
 pub struct Backfiller {
-    client: Client,
+    pub client: Client,
     root: PathBuf,
+    pub max_block_seen: u64,
 }
 impl Backfiller {
     pub fn new(client: Client, root: impl Into<PathBuf>) -> Self {
-        Self { client, root: root.into() }
+        Self { client, root: root.into(), max_block_seen: 0 }
     }
-    pub fn set_peers(&self, peers: Vec<PeerRecord>) {
+    pub fn set_peers(&mut self, peers: Vec<PeerRecord>) {
         self.client.update_peers(peers);
+        let _peers = self.client.peers.lock().clone();
+        for p in _peers {
+            if p.max_block > self.max_block_seen {
+                self.max_block_seen = p.max_block
+            }
+        }
     }
     pub async fn fetch_if_missing(
-        &self,
+        &mut self,
         number: u64,
-        rr_index: usize,
     ) -> Result<Option<usize>, HlfsError> {
+        let rr_index = number as usize;
         let n = number.saturating_sub(1); // 0 -> 0, others -> number-1
         let f = (n / 1_000_000) * 1_000_000;
         let s = (n / 1_000) * 1_000;
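For reference, the `f` and `s` values computed at the end of fetch_if_missing are million- and thousand-sized buckets derived from the block number; how they map onto the archive's on-disk layout is outside this hunk. A small illustration of the arithmetic (`buckets` is a hypothetical helper, not an API of reth_hlfs):

// Reproduces the bucket arithmetic shown above.
fn buckets(number: u64) -> (u64, u64, u64) {
    let n = number.saturating_sub(1); // 0 -> 0, others -> number - 1
    let f = (n / 1_000_000) * 1_000_000; // million-sized bucket
    let s = (n / 1_000) * 1_000; // thousand-sized bucket
    (n, f, s)
}

fn main() {
    assert_eq!(buckets(1), (0, 0, 0));
    assert_eq!(buckets(1_234_567), (1_234_566, 1_000_000, 1_234_000));
}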