mirror of
https://github.com/hl-archive-node/nanoreth.git
synced 2025-12-06 10:59:55 +00:00
Compare commits
36 Commits
nb-2025082...4989808852
| SHA1 |
|---|
| 4989808852 |
| c32b837212 |
| 0a4a0026db |
| 66dd70d258 |
| 4615ba53c4 |
| 0328b040f7 |
| f05e6b6f6e |
| ca650a09e7 |
| 88bdda8c9a |
| 6212b9dc9e |
| c23b12ac0c |
| 52b17dac1a |
| cb4359ec20 |
| 9a9118ecd8 |
| 6e0cdfcbc1 |
| 68bc908adb |
| b352197e20 |
| c7ed9fc8f1 |
| 5bdde70351 |
| d06e7ad7b0 |
| 166814b2be |
| 03f86c3a8d |
| beb8f0b8c7 |
| 772ff250ce |
| 5ee9053286 |
| 29e6972d58 |
| e87b9232cc |
| b004263f82 |
| 74e27b5ee2 |
| 09fcf0751f |
| 8f2eca4754 |
| 707b4fb709 |
| 62dd5a71b5 |
| 412c38a8cd |
| 796ea518bd |
| dd2c925af2 |
.github/workflows/docker.yml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
# Publishes the Docker image.

name: docker

on:
  push:
    tags:
      - v*

env:
  IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
  CARGO_TERM_COLOR: always
  DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
  DOCKER_USERNAME: ${{ github.actor }}

jobs:
  build:
    name: build and push as latest
    runs-on: ubuntu-24.04
    permissions:
      packages: write
      contents: read
    steps:
      - uses: actions/checkout@v5
      - uses: rui314/setup-mold@v1
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true
      - name: Log in to Docker
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
      - name: Set up Docker builder
        run: |
          docker buildx create --use --name builder
      - name: Build and push nanoreth image
        run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest
Makefile (52 lines changed)
@@ -1,6 +1,8 @@
# Modifed from reth Makefile
.DEFAULT_GOAL := help

GIT_SHA ?= $(shell git rev-parse HEAD)
GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
BIN_DIR = "dist/bin"

# List of features to use when building. Can be overridden via the environment.
@@ -17,6 +19,9 @@ PROFILE ?= release
# Extra flags for Cargo
CARGO_INSTALL_EXTRA_FLAGS ?=

# The docker image name
DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth

##@ Help

.PHONY: help
@@ -207,3 +212,50 @@ check-features:
	--package reth-primitives-traits \
	--package reth-primitives \
	--feature-powerset

##@ Docker

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push
docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
	$(call docker_build_push,$(GIT_TAG),$(GIT_TAG))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-git-sha
docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
	$(call docker_build_push,$(GIT_SHA),$(GIT_SHA))

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --driver docker-container --name cross-builder`
.PHONY: docker-build-push-latest
docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
	$(call docker_build_push,$(GIT_TAG),latest)

# Note: This requires a buildx builder with emulation support. For example:
#
# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
# `docker buildx create --use --name cross-builder`
.PHONY: docker-build-push-nightly
docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
	$(call docker_build_push,nightly,nightly)

# Create a Docker image using the main Dockerfile
define docker_build_push
	docker buildx build --file ./Dockerfile . \
		--platform linux/amd64 \
		--tag $(DOCKER_IMAGE_NAME):$(1) \
		--tag $(DOCKER_IMAGE_NAME):$(2) \
		--build-arg BUILD_PROFILE="$(PROFILE)" \
		--build-arg FEATURES="jemalloc,asm-keccak" \
		--build-arg RUSTFLAGS="-C target-cpu=native" \
		--provenance=false \
		--push
endef
@@ -7,17 +7,18 @@
//! For non-system transactions, we can just return the log as is, and the client will
//! adjust the transaction index accordingly.

use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
use alloy_consensus::{transaction::TransactionMeta, BlockHeader, TxReceipt};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{
    pubsub::{Params, SubscriptionKind},
    BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
    TransactionInfo,
};
use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_core::{async_trait, RpcResult};
use jsonrpsee_types::ErrorObject;
use jsonrpsee_types::{error::INTERNAL_ERROR_CODE, ErrorObject};
use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
use reth_primitives_traits::{BlockBody as _, SignedTransaction};
use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
@@ -71,6 +72,217 @@ impl<T> EthWrapper for T where
{
}

#[rpc(server, namespace = "eth")]
#[async_trait]
pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
    #[method(name = "getEvmSystemTxsByBlockHash")]
    async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;

    #[method(name = "getEvmSystemTxsByBlockNumber")]
    async fn get_evm_system_txs_by_block_number(
        &self,
        block_id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<T>>>;

    #[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
    async fn get_evm_system_txs_receipts_by_block_hash(
        &self,
        hash: B256,
    ) -> RpcResult<Option<Vec<R>>>;

    #[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
    async fn get_evm_system_txs_receipts_by_block_number(
        &self,
        block_id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<R>>>;
}

pub struct HlSystemTransactionExt<Eth: EthWrapper> {
    eth_api: Eth,
    _marker: PhantomData<Eth>,
}

impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
    pub fn new(eth_api: Eth) -> Self {
        Self { eth_api, _marker: PhantomData }
    }

    async fn get_system_txs_by_block_id(
        &self,
        block_id: BlockId,
    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
    where
        jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
    {
        if let Some(block) = self.eth_api.recovered_block(block_id).await? {
            let block_hash = block.hash();
            let block_number = block.number();
            let base_fee_per_gas = block.base_fee_per_gas();
            let system_txs = block
                .transactions_with_sender()
                .enumerate()
                .filter_map(|(index, (signer, tx))| {
                    if tx.is_system_transaction() {
                        let tx_info = TransactionInfo {
                            hash: Some(*tx.tx_hash()),
                            block_hash: Some(block_hash),
                            block_number: Some(block_number),
                            base_fee: base_fee_per_gas,
                            index: Some(index as u64),
                        };
                        self.eth_api
                            .tx_resp_builder()
                            .fill(tx.clone().with_signer(*signer), tx_info)
                            .ok()
                    } else {
                        None
                    }
                })
                .collect();
            Ok(Some(system_txs))
        } else {
            Ok(None)
        }
    }

    async fn get_system_txs_receipts_by_block_id(
        &self,
        block_id: BlockId,
    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
    where
        jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
    {
        if let Some((block, receipts)) =
            EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
        {
            let block_number = block.number;
            let base_fee = block.base_fee_per_gas;
            let block_hash = block.hash();
            let excess_blob_gas = block.excess_blob_gas;
            let timestamp = block.timestamp;
            let mut gas_used = 0;
            let mut next_log_index = 0;

            let mut inputs = Vec::new();
            for (idx, (tx, receipt)) in
                block.transactions_recovered().zip(receipts.iter()).enumerate()
            {
                if receipt.cumulative_gas_used() != 0 {
                    break;
                }

                let meta = TransactionMeta {
                    tx_hash: *tx.tx_hash(),
                    index: idx as u64,
                    block_hash,
                    block_number,
                    base_fee,
                    excess_blob_gas,
                    timestamp,
                };

                let input = ConvertReceiptInput {
                    receipt: Cow::Borrowed(receipt),
                    tx,
                    gas_used: receipt.cumulative_gas_used() - gas_used,
                    next_log_index,
                    meta,
                };

                gas_used = receipt.cumulative_gas_used();
                next_log_index += receipt.logs().len();

                inputs.push(input);
            }

            let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
            Ok(Some(receipts))
        } else {
            Ok(None)
        }
    }
}

#[async_trait]
impl<Eth: EthWrapper>
    EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
    for HlSystemTransactionExt<Eth>
where
    jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
    /// Returns the system transactions for a given block hash.
    /// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
    /// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
    ///
    /// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
    /// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
    async fn get_evm_system_txs_by_block_hash(
        &self,
        hash: B256,
    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
        trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
        match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
            Ok(txs) => Ok(txs),
            // hl-node returns none if the block is not found
            Err(_) => Ok(None),
        }
    }

    /// Returns the system transactions for a given block number, or the latest block if no block
    /// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
    /// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
    ///
    /// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
    /// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
    async fn get_evm_system_txs_by_block_number(
        &self,
        id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
        trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
        match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
            Some(txs) => Ok(Some(txs)),
            None => {
                // hl-node returns an error if the block is not found
                Err(ErrorObject::owned(
                    INTERNAL_ERROR_CODE,
                    format!("invalid block height: {id:?}"),
                    Some(()),
                ))
            }
        }
    }

    /// Returns the receipts for the system transactions for a given block hash.
    async fn get_evm_system_txs_receipts_by_block_hash(
        &self,
        hash: B256,
    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
        trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
        match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
            Ok(receipts) => Ok(receipts),
            // hl-node returns none if the block is not found
            Err(_) => Ok(None),
        }
    }

    /// Returns the receipts for the system transactions for a given block number, or the latest
    /// block if no block number is provided.
    async fn get_evm_system_txs_receipts_by_block_number(
        &self,
        block_id: Option<BlockId>,
    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
        trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
        match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
            Some(receipts) => Ok(Some(receipts)),
            None => Err(ErrorObject::owned(
                INTERNAL_ERROR_CODE,
                format!("invalid block height: {block_id:?}"),
                Some(()),
            )),
        }
    }
}

pub struct HlNodeFilterHttp<Eth: EthWrapper> {
    filter: Arc<EthFilter<Eth>>,
    provider: Arc<Eth::Provider>,
@@ -146,8 +358,9 @@ impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
}

#[async_trait]
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
    for HlNodeFilterWs<Eth>
impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
where
    jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
{
    async fn subscribe(
        &self,
@@ -446,7 +659,7 @@ where
}

pub fn install_hl_node_compliance<Node, EthApi>(
    ctx: RpcContext<Node, EthApi>,
    ctx: &mut RpcContext<Node, EthApi>,
) -> Result<(), eyre::Error>
where
    Node: FullNodeComponents,
@@ -473,5 +686,9 @@ where
    ctx.modules.replace_configured(
        HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
    )?;

    ctx.modules
        .merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;

    Ok(())
}
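For readers who want to exercise the new `eth_getEvmSystemTxs*` endpoints added above, here is a minimal client-side sketch. It is not part of the diff: the endpoint URL (127.0.0.1:8545), the `"latest"` parameter, the use of `serde_json::Value` for the response shape, and the tokio/serde_json/jsonrpsee-http-client dependencies are all illustrative assumptions.

```rust
// Hypothetical usage sketch (not part of the diff): query the system-transaction
// endpoints exposed by EthSystemTransactionApi over plain JSON-RPC using
// jsonrpsee's generic HTTP client.
use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee::rpc_params;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed local RPC endpoint; adjust to your node's --http address.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:8545")?;

    // System transactions of the latest block; the server defaults to the latest
    // block when the parameter is omitted, here "latest" is passed explicitly.
    let txs: Option<Vec<serde_json::Value>> = client
        .request("eth_getEvmSystemTxsByBlockNumber", rpc_params!["latest"])
        .await?;
    println!("system txs: {txs:?}");

    // Receipts for the same block's system transactions.
    let receipts: Option<Vec<serde_json::Value>> = client
        .request("eth_getEvmSystemTxsReceiptsByBlockNumber", rpc_params!["latest"])
        .await?;
    println!("system tx receipts: {receipts:?}");
    Ok(())
}
```

Note, per the doc comments above, the by-hash variants return `null` for an unknown block, while the by-number variants return an internal error ("invalid block height"), mirroring hl-node behavior.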
@@ -39,7 +39,7 @@ fn main() -> eyre::Result<()> {
    let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
    let NodeHandle { node, node_exit_future: exit_future } = builder
        .node(node)
        .extend_rpc_modules(move |ctx| {
        .extend_rpc_modules(move |mut ctx| {
            let upstream_rpc_url =
                ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());

@@ -60,10 +60,15 @@ fn main() -> eyre::Result<()> {
            }

            if ext.hl_node_compliant {
                install_hl_node_compliance(ctx)?;
                install_hl_node_compliance(&mut ctx)?;
                info!("hl-node compliant mode enabled");
            }

            if !ext.experimental_eth_get_proof {
                ctx.modules.remove_method_from_configured("eth_getProof");
                info!("eth_getProof is disabled by default");
            }

            Ok(())
        })
        .apply(|builder| {
@@ -55,6 +55,24 @@ pub struct HlNodeArgs {
    /// This is useful when read precompile is needed for gas estimation.
    #[arg(long, env = "FORWARD_CALL")]
    pub forward_call: bool,

    /// Experimental: enables the eth_getProof RPC method.
    ///
    /// Note: Due to the state root difference, trie updates* may not function correctly in all
    /// scenarios. For example, incremental root updates are not possible, which can cause
    /// eth_getProof to malfunction in some cases.
    ///
    /// This limitation does not impact normal node functionality, except for state root (which is
    /// unused) and eth_getProof. The archival state is maintained by block order, not by trie
    /// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
    /// potential issues.
    ///
    /// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
    /// working as intended. Enabling this by default will be tracked in #15.
    ///
    /// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
    #[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
    pub experimental_eth_get_proof: bool,
}

/// The main reth_hl cli interface.
@@ -12,7 +12,7 @@ pub mod utils;

use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::info;
use tracing::{error, info};

pub use cli::*;
pub use config::*;
@@ -78,8 +78,11 @@ pub async fn start_pseudo_peer(
        _ = transaction_rx.recv() => {}

        Some(eth_req) = eth_rx.recv() => {
            service.process_eth_request(eth_req).await?;
            info!("Processed eth request");
            if let Err(e) = service.process_eth_request(eth_req).await {
                error!("Error processing eth request: {e:?}");
            } else {
                info!("Processed eth request");
            }
        }
    }
}
@@ -152,13 +152,14 @@ impl<BS: BlockSource> PseudoPeer<BS> {
    async fn collect_blocks(
        &self,
        block_numbers: impl IntoIterator<Item = u64>,
    ) -> Vec<BlockAndReceipts> {
    ) -> eyre::Result<Vec<BlockAndReceipts>> {
        let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
        futures::stream::iter(block_numbers)
            .map(async |number| self.collect_block(number).await.unwrap())
        let res = futures::stream::iter(block_numbers)
            .map(async |number| self.collect_block(number).await)
            .buffered(self.block_source.recommended_chunk_size() as usize)
            .collect::<Vec<_>>()
            .await
            .await;
        res.into_iter().collect()
    }

    pub async fn process_eth_request(
@@ -185,7 +186,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                    HeadersDirection::Falling => {
                        self.collect_blocks((number + 1 - limit..number + 1).rev()).await
                    }
                }
            }?
            .into_par_iter()
            .map(|block| block.to_reth_block(chain_id).header.clone())
            .collect::<Vec<_>>();
@@ -203,7 +204,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {

                let block_bodies = self
                    .collect_blocks(numbers)
                    .await
                    .await?
                    .into_iter()
                    .map(|block| block.to_reth_block(chain_id).body)
                    .collect::<Vec<_>>();
@@ -340,7 +341,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {

        debug!("Backfilling from {start_number} to {end_number}");
        // Collect blocks and cache them
        let blocks = self.collect_blocks(uncached_block_numbers).await;
        let blocks = self.collect_blocks(uncached_block_numbers).await?;
        let block_map: HashMap<B256, u64> =
            blocks.into_iter().map(|block| (block.hash(), block.number())).collect();
        let maybe_block_number = block_map.get(&target_hash).copied();
@@ -17,12 +17,12 @@ impl FileOperations {
                files.extend(
                    subentries
                        .filter_map(|f| f.ok().map(|f| f.path()))
                        .filter(|p| TimeUtils::datetime_from_path(p).is_some()),
                        .filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
                );
            }
        }
        files.sort();
        Some(files)
        Some(files.into_iter().map(|(_, p)| p).collect())
    }

    pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
@@ -193,3 +193,22 @@ async fn test_update_last_fetch_fallback() -> eyre::Result<()> {

    Ok(())
}

#[test]
fn test_hourly_files_sort() -> eyre::Result<()> {
    let temp_dir = tempfile::tempdir()?;
    // create 20250826/9, 20250826/14
    let targets = [("20250826", "9"), ("20250826", "14")];
    for (date, hour) in targets {
        let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
        let parent = hourly_file.parent().unwrap();
        std::fs::create_dir_all(parent)?;
        std::fs::File::create(hourly_file)?;
    }
    let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
    let file_names: Vec<_> =
        files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();

    assert_eq!(file_names, ["9", "14"]);
    Ok(())
}