Mirror of https://github.com/hl-archive-node/nanoreth.git
Synced 2025-12-06 10:59:55 +00:00

Compare commits: 57 commits, nb-2025082...32c4f92aec
| SHA1 |
|---|
| 32c4f92aec |
| 3f08b0a4e6 |
| d7992ab8ff |
| b37a30fb37 |
| f6432498d8 |
| c32b837212 |
| 0a4a0026db |
| 66dd70d258 |
| 4615ba53c4 |
| 0328b040f7 |
| f05e6b6f6e |
| ca650a09e7 |
| 88bdda8c9a |
| 6212b9dc9e |
| c23b12ac0c |
| 52b17dac1a |
| cb4359ec20 |
| 9a9118ecd8 |
| 6e0cdfcbc1 |
| 68bc908adb |
| b352197e20 |
| c7ed9fc8f1 |
| 5bdde70351 |
| d06e7ad7b0 |
| 166814b2be |
| 03f86c3a8d |
| beb8f0b8c7 |
| 772ff250ce |
| 5ee9053286 |
| 29e6972d58 |
| e87b9232cc |
| b004263f82 |
| 74e27b5ee2 |
| 09fcf0751f |
| 8f2eca4754 |
| 707b4fb709 |
| 62dd5a71b5 |
| 412c38a8cd |
| 796ea518bd |
| dd2c925af2 |
| 3ffd7bb351 |
| 52909eea3f |
| 0f9c2c5897 |
| ad4a8cd365 |
| 80506a7a43 |
| 2af312b628 |
| 1908e9f414 |
| 65cdc27b51 |
| 4f430487d6 |
| 19f35a6b54 |
| d61020e996 |
| 657df240f4 |
| 73a34a4bc1 |
| d8eef6305b |
| bae68ef8db |
| f576dddfa6 |
| 894ebcbfa5 |
.github/workflows/docker.yml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
+# Publishes the Docker image.
+
+name: docker
+
+on:
+  push:
+    tags:
+      - v*
+
+env:
+  IMAGE_NAME: ${{ github.repository_owner }}/nanoreth
+  CARGO_TERM_COLOR: always
+  DOCKER_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/nanoreth
+  DOCKER_USERNAME: ${{ github.actor }}
+
+jobs:
+  build:
+    name: build and push as latest
+    runs-on: ubuntu-24.04
+    permissions:
+      packages: write
+      contents: read
+    steps:
+      - uses: actions/checkout@v5
+      - uses: rui314/setup-mold@v1
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Log in to Docker
+        run: |
+          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin
+      - name: Set up Docker builder
+        run: |
+          docker buildx create --use --name builder
+      - name: Build and push nanoreth image
+        run: make IMAGE_NAME=$IMAGE_NAME DOCKER_IMAGE_NAME=$DOCKER_IMAGE_NAME PROFILE=maxperf docker-build-push-latest
Makefile (52 lines changed)
@@ -1,6 +1,8 @@
 # Modifed from reth Makefile
 .DEFAULT_GOAL := help
 
+GIT_SHA ?= $(shell git rev-parse HEAD)
+GIT_TAG ?= $(shell git describe --tags --abbrev=0 2>/dev/null)
 BIN_DIR = "dist/bin"
 
 # List of features to use when building. Can be overridden via the environment.

@@ -17,6 +19,9 @@ PROFILE ?= release
 # Extra flags for Cargo
 CARGO_INSTALL_EXTRA_FLAGS ?=
 
+# The docker image name
+DOCKER_IMAGE_NAME ?= ghcr.io/hl-archive-node/nanoreth
+
 ##@ Help
 
 .PHONY: help

@@ -207,3 +212,50 @@ check-features:
     --package reth-primitives-traits \
     --package reth-primitives \
     --feature-powerset
+
+##@ Docker
+
+# Note: This requires a buildx builder with emulation support. For example:
+#
+# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
+# `docker buildx create --use --driver docker-container --name cross-builder`
+.PHONY: docker-build-push
+docker-build-push: ## Build and push a cross-arch Docker image tagged with the latest git tag.
+    $(call docker_build_push,$(GIT_TAG),$(GIT_TAG))
+
+# Note: This requires a buildx builder with emulation support. For example:
+#
+# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
+# `docker buildx create --use --driver docker-container --name cross-builder`
+.PHONY: docker-build-push-git-sha
+docker-build-push-git-sha: ## Build and push a cross-arch Docker image tagged with the latest git sha.
+    $(call docker_build_push,$(GIT_SHA),$(GIT_SHA))
+
+# Note: This requires a buildx builder with emulation support. For example:
+#
+# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
+# `docker buildx create --use --driver docker-container --name cross-builder`
+.PHONY: docker-build-push-latest
+docker-build-push-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`.
+    $(call docker_build_push,$(GIT_TAG),latest)
+
+# Note: This requires a buildx builder with emulation support. For example:
+#
+# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64`
+# `docker buildx create --use --name cross-builder`
+.PHONY: docker-build-push-nightly
+docker-build-push-nightly: ## Build and push cross-arch Docker image tagged with the latest git tag with a `-nightly` suffix, and `latest-nightly`.
+    $(call docker_build_push,nightly,nightly)
+
+# Create a Docker image using the main Dockerfile
+define docker_build_push
+    docker buildx build --file ./Dockerfile . \
+        --platform linux/amd64 \
+        --tag $(DOCKER_IMAGE_NAME):$(1) \
+        --tag $(DOCKER_IMAGE_NAME):$(2) \
+        --build-arg BUILD_PROFILE="$(PROFILE)" \
+        --build-arg FEATURES="jemalloc,asm-keccak" \
+        --build-arg RUSTFLAGS="-C target-cpu=native" \
+        --provenance=false \
+        --push
+endef
@@ -1,21 +1,28 @@
-use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
+//! Overrides for RPC methods to post-filter system transactions and logs.
+//!
+//! System transactions are always at the beginning of the block,
+//! so we can use the transaction index to determine if the log is from a system transaction,
+//! and if it is, we can exclude it.
+//!
+//! For non-system transactions, we can just return the log as is, and the client will
+//! adjust the transaction index accordingly.
+
+use alloy_consensus::{transaction::TransactionMeta, BlockHeader, TxReceipt};
 use alloy_eips::{BlockId, BlockNumberOrTag};
+use alloy_json_rpc::RpcObject;
 use alloy_primitives::{B256, U256};
 use alloy_rpc_types::{
     pubsub::{Params, SubscriptionKind},
     BlockTransactions, Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind,
     TransactionInfo,
 };
 use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
 use jsonrpsee_core::{async_trait, RpcResult};
-use jsonrpsee_types::ErrorObject;
-use reth::{
-    api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
-    tasks::TaskSpawner,
-};
+use jsonrpsee_types::{error::INTERNAL_ERROR_CODE, ErrorObject};
+use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
 use reth_primitives_traits::{BlockBody as _, SignedTransaction};
 use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
-use reth_rpc::{EthFilter, EthPubSub};
+use reth_rpc::{eth::pubsub::SubscriptionSerializeError, EthFilter, EthPubSub, RpcTypes};
 use reth_rpc_eth_api::{
     helpers::{EthBlocks, EthTransactions, LoadReceipt},
     transaction::ConvertReceiptInput,

@@ -25,12 +32,9 @@ use reth_rpc_eth_api::{
 use serde::Serialize;
 use std::{borrow::Cow, marker::PhantomData, sync::Arc};
 use tokio_stream::{Stream, StreamExt};
-use tracing::{info, trace, Instrument};
+use tracing::{trace, Instrument};
 
-use crate::{
-    node::primitives::{HlPrimitives, TransactionSigned},
-    HlBlock,
-};
+use crate::{node::primitives::HlPrimitives, HlBlock};
 
 pub trait EthWrapper:
     EthApiServer<

@@ -39,8 +43,10 @@ pub trait EthWrapper:
         RpcBlock<Self::NetworkTypes>,
         RpcReceipt<Self::NetworkTypes>,
         RpcHeader<Self::NetworkTypes>,
-    > + FullEthApiTypes<Primitives = HlPrimitives>
-    + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+    > + FullEthApiTypes<
+        Primitives = HlPrimitives,
+        NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
+    > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
     + EthBlocks
     + EthTransactions
     + LoadReceipt

@@ -48,23 +54,235 @@ pub trait EthWrapper:
 {
 }
 
-impl<
-        T: EthApiServer<
-            RpcTxReq<Self::NetworkTypes>,
-            RpcTransaction<Self::NetworkTypes>,
-            RpcBlock<Self::NetworkTypes>,
-            RpcReceipt<Self::NetworkTypes>,
-            RpcHeader<Self::NetworkTypes>,
-        > + FullEthApiTypes<Primitives = HlPrimitives>
-        + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
-        + EthBlocks
-        + EthTransactions
-        + LoadReceipt
-        + 'static,
-    > EthWrapper for T
+impl<T> EthWrapper for T where
+    T: EthApiServer<
+            RpcTxReq<Self::NetworkTypes>,
+            RpcTransaction<Self::NetworkTypes>,
+            RpcBlock<Self::NetworkTypes>,
+            RpcReceipt<Self::NetworkTypes>,
+            RpcHeader<Self::NetworkTypes>,
+        > + FullEthApiTypes<
+            Primitives = HlPrimitives,
+            NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
+        > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+        + EthBlocks
+        + EthTransactions
+        + LoadReceipt
+        + 'static
 {
 }
 
+#[rpc(server, namespace = "eth")]
+#[async_trait]
+pub trait EthSystemTransactionApi<T: RpcObject, R: RpcObject> {
+    #[method(name = "getEvmSystemTxsByBlockHash")]
+    async fn get_evm_system_txs_by_block_hash(&self, hash: B256) -> RpcResult<Option<Vec<T>>>;
+
+    #[method(name = "getEvmSystemTxsByBlockNumber")]
+    async fn get_evm_system_txs_by_block_number(
+        &self,
+        block_id: Option<BlockId>,
+    ) -> RpcResult<Option<Vec<T>>>;
+
+    #[method(name = "getEvmSystemTxsReceiptsByBlockHash")]
+    async fn get_evm_system_txs_receipts_by_block_hash(
+        &self,
+        hash: B256,
+    ) -> RpcResult<Option<Vec<R>>>;
+
+    #[method(name = "getEvmSystemTxsReceiptsByBlockNumber")]
+    async fn get_evm_system_txs_receipts_by_block_number(
+        &self,
+        block_id: Option<BlockId>,
+    ) -> RpcResult<Option<Vec<R>>>;
+}
+
+pub struct HlSystemTransactionExt<Eth: EthWrapper> {
+    eth_api: Eth,
+    _marker: PhantomData<Eth>,
+}
+
+impl<Eth: EthWrapper> HlSystemTransactionExt<Eth> {
+    pub fn new(eth_api: Eth) -> Self {
+        Self { eth_api, _marker: PhantomData }
+    }
+
+    async fn get_system_txs_by_block_id(
+        &self,
+        block_id: BlockId,
+    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>>
+    where
+        jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
+    {
+        if let Some(block) = self.eth_api.recovered_block(block_id).await? {
+            let block_hash = block.hash();
+            let block_number = block.number();
+            let base_fee_per_gas = block.base_fee_per_gas();
+            let system_txs = block
+                .transactions_with_sender()
+                .enumerate()
+                .filter_map(|(index, (signer, tx))| {
+                    if tx.is_system_transaction() {
+                        let tx_info = TransactionInfo {
+                            hash: Some(*tx.tx_hash()),
+                            block_hash: Some(block_hash),
+                            block_number: Some(block_number),
+                            base_fee: base_fee_per_gas,
+                            index: Some(index as u64),
+                        };
+                        self.eth_api
+                            .tx_resp_builder()
+                            .fill(tx.clone().with_signer(*signer), tx_info)
+                            .ok()
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+            Ok(Some(system_txs))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn get_system_txs_receipts_by_block_id(
+        &self,
+        block_id: BlockId,
+    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>>
+    where
+        jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
+    {
+        if let Some((block, receipts)) =
+            EthBlocks::load_block_and_receipts(&self.eth_api, block_id).await?
+        {
+            let block_number = block.number;
+            let base_fee = block.base_fee_per_gas;
+            let block_hash = block.hash();
+            let excess_blob_gas = block.excess_blob_gas;
+            let timestamp = block.timestamp;
+            let mut gas_used = 0;
+            let mut next_log_index = 0;
+
+            let mut inputs = Vec::new();
+            for (idx, (tx, receipt)) in
+                block.transactions_recovered().zip(receipts.iter()).enumerate()
+            {
+                if receipt.cumulative_gas_used() != 0 {
+                    break;
+                }
+
+                let meta = TransactionMeta {
+                    tx_hash: *tx.tx_hash(),
+                    index: idx as u64,
+                    block_hash,
+                    block_number,
+                    base_fee,
+                    excess_blob_gas,
+                    timestamp,
+                };
+
+                let input = ConvertReceiptInput {
+                    receipt: Cow::Borrowed(receipt),
+                    tx,
+                    gas_used: receipt.cumulative_gas_used() - gas_used,
+                    next_log_index,
+                    meta,
+                };
+
+                gas_used = receipt.cumulative_gas_used();
+                next_log_index += receipt.logs().len();
+
+                inputs.push(input);
+            }
+
+            let receipts = self.eth_api.tx_resp_builder().convert_receipts(inputs)?;
+            Ok(Some(receipts))
+        } else {
+            Ok(None)
+        }
+    }
+}
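The receipts helper above leans on one invariant spelled out in the module docs: system transactions sit at the front of a block and report `cumulative_gas_used == 0`, so the system-receipt prefix ends at the first receipt with non-zero cumulative gas. A minimal, self-contained sketch of that partition rule (hypothetical `system_receipt_prefix` helper, plain integers standing in for receipts):

```rust
// Sketch of the partition rule used by `get_system_txs_receipts_by_block_id`:
// the system-transaction prefix is exactly the leading run of receipts whose
// cumulative gas used is zero.
fn system_receipt_prefix(cumulative_gas_used: &[u64]) -> usize {
    cumulative_gas_used.iter().take_while(|&&gas| gas == 0).count()
}

fn main() {
    // Two system receipts followed by two regular ones.
    let receipts = [0, 0, 21_000, 63_000];
    assert_eq!(system_receipt_prefix(&receipts), 2);
}
```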
+
+#[async_trait]
+impl<Eth: EthWrapper>
+    EthSystemTransactionApiServer<RpcTransaction<Eth::NetworkTypes>, RpcReceipt<Eth::NetworkTypes>>
+    for HlSystemTransactionExt<Eth>
+where
+    jsonrpsee_types::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
+{
+    /// Returns the system transactions for a given block hash.
+    /// Semi-compliance with the `eth_getSystemTxsByBlockHash` RPC method introduced by hl-node.
+    /// https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
+    ///
+    /// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
+    /// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
+    async fn get_evm_system_txs_by_block_hash(
+        &self,
+        hash: B256,
+    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
+        trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsByBlockHash");
+        match self.get_system_txs_by_block_id(BlockId::Hash(hash.into())).await {
+            Ok(txs) => Ok(txs),
+            // hl-node returns none if the block is not found
+            Err(_) => Ok(None),
+        }
+    }
+
+    /// Returns the system transactions for a given block number, or the latest block if no block
+    /// number is provided. Semi-compliance with the `eth_getSystemTxsByBlockNumber` RPC method
+    /// introduced by hl-node. https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/hyperevm/json-rpc
+    ///
+    /// NOTE: Method name differs from hl-node because we retrieve transaction data from EVM
+    /// (signature recovery for 'from' address, EVM hash calculation) rather than HyperCore.
+    async fn get_evm_system_txs_by_block_number(
+        &self,
+        id: Option<BlockId>,
+    ) -> RpcResult<Option<Vec<RpcTransaction<Eth::NetworkTypes>>>> {
+        trace!(target: "rpc::eth", ?id, "Serving eth_getEvmSystemTxsByBlockNumber");
+        match self.get_system_txs_by_block_id(id.unwrap_or_default()).await? {
+            Some(txs) => Ok(Some(txs)),
+            None => {
+                // hl-node returns an error if the block is not found
+                Err(ErrorObject::owned(
+                    INTERNAL_ERROR_CODE,
+                    format!("invalid block height: {id:?}"),
+                    Some(()),
+                ))
+            }
+        }
+    }
+
+    /// Returns the receipts for the system transactions for a given block hash.
+    async fn get_evm_system_txs_receipts_by_block_hash(
+        &self,
+        hash: B256,
+    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
+        trace!(target: "rpc::eth", ?hash, "Serving eth_getEvmSystemTxsReceiptsByBlockHash");
+        match self.get_system_txs_receipts_by_block_id(BlockId::Hash(hash.into())).await {
+            Ok(receipts) => Ok(receipts),
+            // hl-node returns none if the block is not found
+            Err(_) => Ok(None),
+        }
+    }
+
+    /// Returns the receipts for the system transactions for a given block number, or the latest
+    /// block if no block
+    async fn get_evm_system_txs_receipts_by_block_number(
+        &self,
+        block_id: Option<BlockId>,
+    ) -> RpcResult<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
+        trace!(target: "rpc::eth", ?block_id, "Serving eth_getEvmSystemTxsReceiptsByBlockNumber");
+        match self.get_system_txs_receipts_by_block_id(block_id.unwrap_or_default()).await? {
+            Some(receipts) => Ok(Some(receipts)),
+            None => Err(ErrorObject::owned(
+                INTERNAL_ERROR_CODE,
+                format!("invalid block height: {block_id:?}"),
+                Some(()),
+            )),
+        }
+    }
+}
 
 pub struct HlNodeFilterHttp<Eth: EthWrapper> {
     filter: Arc<EthFilter<Eth>>,
     provider: Arc<Eth::Provider>,

@@ -80,19 +298,16 @@ impl<Eth: EthWrapper> HlNodeFilterHttp<Eth> {
 impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
     for HlNodeFilterHttp<Eth>
 {
     /// Handler for `eth_newFilter`
     async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
         trace!(target: "rpc::eth", "Serving eth_newFilter");
         self.filter.new_filter(filter).await
     }
 
     /// Handler for `eth_newBlockFilter`
     async fn new_block_filter(&self) -> RpcResult<FilterId> {
         trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
         self.filter.new_block_filter().await
     }
 
     /// Handler for `eth_newPendingTransactionFilter`
     async fn new_pending_transaction_filter(
         &self,
         kind: Option<PendingTransactionFilterKind>,

@@ -101,7 +316,6 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
         self.filter.new_pending_transaction_filter(kind).await
     }
 
     /// Handler for `eth_getFilterChanges`
     async fn filter_changes(
         &self,
         id: FilterId,

@@ -110,31 +324,20 @@ impl<Eth: EthWrapper> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>>
         self.filter.filter_changes(id).await.map_err(ErrorObject::from)
     }
 
-    /// Returns an array of all logs matching filter with given id.
-    ///
-    /// Returns an error if no matching log filter exists.
-    ///
     /// Handler for `eth_getFilterLogs`
     async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
         trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
         self.filter.filter_logs(id).await.map_err(ErrorObject::from)
     }
 
     /// Handler for `eth_uninstallFilter`
     async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
         trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
         self.filter.uninstall_filter(id).await
     }
 
-    /// Returns logs matching given filter object.
-    ///
     /// Handler for `eth_getLogs`
     async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
         trace!(target: "rpc::eth", "Serving eth_getLogs");
         let logs = EthFilterApiServer::logs(&*self.filter, filter).await?;
-        let provider = self.provider.clone();
-
-        Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &provider)).collect())
+        Ok(logs.into_iter().filter_map(|log| adjust_log::<Eth>(log, &self.provider)).collect())
     }
 }

@@ -155,10 +358,10 @@ impl<Eth: EthWrapper> HlNodeFilterWs<Eth> {
 }
 
 #[async_trait]
-impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
-    for HlNodeFilterWs<Eth>
+impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for HlNodeFilterWs<Eth>
 where
     jsonrpsee_types::error::ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
 {
     /// Handler for `eth_subscribe`
     async fn subscribe(
         &self,
         pending: PendingSubscriptionSink,

@@ -166,16 +369,12 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
         params: Option<Params>,
     ) -> jsonrpsee::core::SubscriptionResult {
         let sink = pending.accept().await?;
-        let pubsub = self.pubsub.clone();
-        let provider = self.provider.clone();
+        let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
         self.subscription_task_spawner.spawn(Box::pin(async move {
             if kind == SubscriptionKind::Logs {
                 // if no params are provided, used default filter params
                 let filter = match params {
-                    Some(Params::Logs(filter)) => *filter,
-                    Some(Params::Bool(_)) => {
-                        return;
-                    }
+                    Some(Params::Logs(f)) => *f,
+                    Some(Params::Bool(_)) => return,
                     _ => Default::default(),
                 };
                 let _ = pipe_from_stream(

@@ -185,93 +384,42 @@ impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>>
                 .await;
             } else {
                 let _ = pubsub.handle_accepted(sink, kind, params).await;
-            };
+            }
         }));
 
         Ok(())
     }
 }
 
 fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option<Log> {
-    let transaction_index = log.transaction_index?;
-    let log_index = log.log_index?;
-
+    let (tx_idx, log_idx) = (log.transaction_index?, log.log_index?);
     let receipts = provider.receipts_by_block(log.block_number?.into()).unwrap()?;
 
-    // System transactions are always at the beginning of the block,
-    // so we can use the transaction index to determine if the log is from a system transaction,
-    // and if it is, we can exclude it.
-    //
-    // For non-system transactions, we can just return the log as is, and the client will
-    // adjust the transaction index accordingly.
-    let mut system_tx_count = 0u64;
-    let mut system_tx_logs_count = 0u64;
-
+    let (mut sys_tx_count, mut sys_log_count) = (0u64, 0u64);
     for receipt in receipts {
-        let is_system_tx = receipt.cumulative_gas_used() == 0;
-        if is_system_tx {
-            system_tx_count += 1;
-            system_tx_logs_count += receipt.logs().len() as u64;
+        if receipt.cumulative_gas_used() == 0 {
+            sys_tx_count += 1;
+            sys_log_count += receipt.logs().len() as u64;
         }
     }
 
-    if system_tx_count > transaction_index {
+    if sys_tx_count > tx_idx {
         return None;
     }
 
-    log.transaction_index = Some(transaction_index - system_tx_count);
-    log.log_index = Some(log_index - system_tx_logs_count);
+    log.transaction_index = Some(tx_idx - sys_tx_count);
+    log.log_index = Some(log_idx - sys_log_count);
     Some(log)
 }
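The renamed `adjust_log` above is simple arithmetic: count the system transactions (and their logs) in the block, drop any log whose transaction index falls inside that prefix, and shift the indices of the rest down. A self-contained sketch of the same arithmetic (hypothetical `adjust_indices` helper; a `(tx_idx, log_idx)` tuple stands in for an RPC `Log`):

```rust
/// Sketch of the index adjustment performed by `adjust_log`.
fn adjust_indices(
    tx_idx: u64,
    log_idx: u64,
    sys_tx_count: u64,
    sys_log_count: u64,
) -> Option<(u64, u64)> {
    // Logs emitted by the system-transaction prefix are dropped entirely.
    if sys_tx_count > tx_idx {
        return None;
    }
    // Remaining logs shift down by the size of the prefix.
    Some((tx_idx - sys_tx_count, log_idx - sys_log_count))
}

fn main() {
    // Two system txs emitting three logs: the first user tx's log
    // (block index 2, log index 3) becomes (0, 0) after adjustment.
    assert_eq!(adjust_indices(2, 3, 2, 3), Some((0, 0)));
    // A log from inside the system prefix is filtered out.
    assert_eq!(adjust_indices(1, 2, 2, 3), None);
}
```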
-/// Helper to convert a serde error into an [`ErrorObject`]
-#[derive(Debug, thiserror::Error)]
-#[error("Failed to serialize subscription item: {0}")]
-pub struct SubscriptionSerializeError(#[from] serde_json::Error);
-
-impl SubscriptionSerializeError {
-    const fn new(err: serde_json::Error) -> Self {
-        Self(err)
-    }
-}
-
-impl From<SubscriptionSerializeError> for ErrorObject<'static> {
-    fn from(value: SubscriptionSerializeError) -> Self {
-        internal_rpc_err(value.to_string())
-    }
-}
-
-async fn pipe_from_stream<T, St>(
+async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
     sink: SubscriptionSink,
     mut stream: St,
-) -> Result<(), ErrorObject<'static>>
-where
-    St: Stream<Item = T> + Unpin,
-    T: Serialize,
-{
+) -> Result<(), ErrorObject<'static>> {
     loop {
         tokio::select! {
-            _ = sink.closed() => {
-                // connection dropped
-                break Ok(())
-            },
+            _ = sink.closed() => break Ok(()),
             maybe_item = stream.next() => {
-                let item = match maybe_item {
-                    Some(item) => item,
-                    None => {
-                        // stream ended
-                        break Ok(())
-                    },
-                };
-                let msg = SubscriptionMessage::new(
-                    sink.method_name(),
-                    sink.subscription_id(),
-                    &item
-                ).map_err(SubscriptionSerializeError::new)?;
-
-                if sink.send(msg).await.is_err() {
-                    break Ok(());
-                }
+                let Some(item) = maybe_item else { break Ok(()) };
+                let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
+                    .map_err(SubscriptionSerializeError::from)?;
+                if sink.send(msg).await.is_err() { break Ok(()); }
             }
         }
     }
 }

@@ -321,10 +469,6 @@ macro_rules! engine_span {
     };
 }
 
-fn is_system_tx(tx: &TransactionSigned) -> bool {
-    tx.is_system_transaction()
-}
-
 fn adjust_block<Eth: EthWrapper>(
     recovered_block: &RpcBlock<Eth::NetworkTypes>,
     eth_api: &Eth,

@@ -335,6 +479,11 @@ fn adjust_block<Eth: EthWrapper>(
     new_block.transactions = match new_block.transactions {
         BlockTransactions::Full(mut transactions) => {
             transactions.drain(..system_tx_count);
+            transactions.iter_mut().for_each(|tx| {
+                if let Some(idx) = &mut tx.transaction_index {
+                    *idx -= system_tx_count as u64;
+                }
+            });
             BlockTransactions::Full(transactions)
         }
         BlockTransactions::Hashes(mut hashes) => {

@@ -410,8 +559,8 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
 ) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>, Eth::Error> {
     match eth_api.load_transaction_and_receipt(tx_hash).await? {
         Some((_, meta, _)) => {
-            // LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again doesn't hurt performance much
-            info!("block hash: {:?}", meta.block_hash);
+            // LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again
+            // doesn't hurt performance much
            let Some((system_tx_count, block_receipts)) =
                 adjust_block_receipts(meta.block_hash.into(), eth_api).await?
             else {

@@ -423,10 +572,12 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
     }
 }
 
+// This function assumes that `block_id` is already validated by the caller.
 fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
     let provider = eth_api.provider();
     let block = provider.block_by_id(block_id).unwrap().unwrap();
-    let system_tx_count = block.body.transactions().iter().filter(|tx| is_system_tx(tx)).count();
+    let system_tx_count =
+        block.body.transactions().iter().filter(|tx| tx.is_system_transaction()).count();
     system_tx_count
 }

@@ -464,8 +615,9 @@ where
     let res =
         self.eth_api.block_transaction_count_by_hash(hash).instrument(engine_span!()).await?;
     Ok(res.map(|count| {
-        count
-            - U256::from(system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into())))
+        let sys_tx_count =
+            system_tx_count_for_block(&*self.eth_api, BlockId::Hash(hash.into()));
+        count - U256::from(sys_tx_count)
     }))
 }

@@ -507,7 +659,7 @@ where
 }
 
 pub fn install_hl_node_compliance<Node, EthApi>(
-    ctx: RpcContext<Node, EthApi>,
+    ctx: &mut RpcContext<Node, EthApi>,
 ) -> Result<(), eyre::Error>
 where
     Node: FullNodeComponents,

@@ -534,5 +686,9 @@ where
     ctx.modules.replace_configured(
         HlNodeBlockFilterHttp::new(Arc::new(ctx.registry.eth_api().clone())).into_rpc(),
     )?;
+
+    ctx.modules
+        .merge_configured(HlSystemTransactionExt::new(ctx.registry.eth_api().clone()).into_rpc())?;
 
     Ok(())
 }
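For readers who want to poke at the newly merged `eth_getEvmSystemTxs*` namespace, here is a hedged sketch of a jsonrpsee HTTP client call. The endpoint URL is an assumption (a local nanoreth with default RPC settings), and the response is deserialized loosely into JSON values rather than the node's concrete transaction type; any Ethereum JSON-RPC client would work the same way:

```rust
use jsonrpsee::core::client::ClientT;
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee::rpc_params;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical local nanoreth endpoint; adjust to your node.
    let client = HttpClientBuilder::default().build("http://127.0.0.1:8545")?;

    // Fetch system transactions for the latest block. Per the hl-node
    // semantics quoted in the diff, a missing block yields an error for
    // the by-number variant and `null` for the by-hash variant.
    let txs: Option<Vec<serde_json::Value>> = client
        .request("eth_getEvmSystemTxsByBlockNumber", rpc_params!["latest"])
        .await?;

    println!("system txs: {txs:?}");
    Ok(())
}
```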
src/addons/mod.rs (new file, 3 lines)

@@ -0,0 +1,3 @@
+pub mod call_forwarder;
+pub mod hl_node_compliance;
+pub mod tx_forwarder;
@@ -37,7 +37,7 @@ impl EthForwarderExt {
         Self { client }
     }
 
-    fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject {
+    fn from_client_error(e: ClientError, internal_error_prefix: &str) -> ErrorObject<'static> {
         match e {
             ClientError::Call(e) => e,
             _ => ErrorObject::owned(
@@ -7,7 +7,6 @@ use std::sync::LazyLock;
 static GENESIS_HASH: B256 =
     b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
 
-/// Dev hardforks
 pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
     ChainHardforks::new(vec![
         (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
@@ -1,8 +1,7 @@
 //! Chain specification for HyperEVM.
 pub mod hl;
 pub mod parser;
 
-use crate::hardforks::{hl::HlHardfork, HlHardforks};
+use crate::hardforks::HlHardforks;
 use alloy_consensus::Header;
 use alloy_eips::eip7840::BlobParams;
 use alloy_genesis::Genesis;

@@ -13,15 +12,13 @@ use reth_chainspec::{
 };
 use reth_discv4::NodeRecord;
 use reth_evm::eth::spec::EthExecutorSpec;
-use std::{fmt::Display, sync::Arc};
+use std::fmt::Display;
 
 pub const MAINNET_CHAIN_ID: u64 = 999;
 pub const TESTNET_CHAIN_ID: u64 = 998;
 
 /// Hl chain spec type.
 #[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub struct HlChainSpec {
     /// [`ChainSpec`].
     pub inner: ChainSpec,
 }

@@ -75,10 +72,6 @@ impl EthChainSpec for HlChainSpec {
     fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
         self.inner.bootnodes()
     }
-
-    fn is_optimism(&self) -> bool {
-        false
-    }
 }
 
 impl Hardforks for HlChainSpec {

@@ -105,23 +98,13 @@ impl Hardforks for HlChainSpec {
     }
 }
 
-impl From<ChainSpec> for HlChainSpec {
-    fn from(value: ChainSpec) -> Self {
-        Self { inner: value }
-    }
-}
-
 impl EthereumHardforks for HlChainSpec {
     fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
         self.inner.ethereum_fork_activation(fork)
     }
 }
 
-impl HlHardforks for HlChainSpec {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.fork(fork)
-    }
-}
+impl HlHardforks for HlChainSpec {}
 
 impl EthExecutorSpec for HlChainSpec {
     fn deposit_contract_address(&self) -> Option<Address> {

@@ -129,18 +112,6 @@ impl EthExecutorSpec for HlChainSpec {
     }
 }
 
-impl From<HlChainSpec> for ChainSpec {
-    fn from(value: HlChainSpec) -> Self {
-        value.inner
-    }
-}
-
-impl HlHardforks for Arc<HlChainSpec> {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.as_ref().hl_fork_activation(fork)
-    }
-}
-
 impl HlChainSpec {
     pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
     pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";
@@ -1 +0,0 @@
-
@@ -1,4 +1,3 @@
 pub mod api;
-mod handler;
 pub mod spec;
 pub mod transaction;
@@ -1,20 +1,15 @@
 use revm::primitives::hardfork::SpecId;
-use std::str::FromStr;
 
 #[repr(u8)]
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub enum HlSpecId {
     /// Placeholder for evm cancun fork
     #[default]
-    V1, // V1
+    V1,
 }
 
 impl HlSpecId {
     pub const fn is_enabled_in(self, other: HlSpecId) -> bool {
         other as u8 <= self as u8
     }
 
     /// Converts the [`HlSpecId`] into a [`SpecId`].
     pub const fn into_eth_spec(self) -> SpecId {
         match self {
             Self::V1 => SpecId::CANCUN,

@@ -23,31 +18,8 @@ impl HlSpecId {
 }
 
 impl From<HlSpecId> for SpecId {
-    /// Converts the [`HlSpecId`] into a [`SpecId`].
     fn from(spec: HlSpecId) -> Self {
         spec.into_eth_spec()
     }
 }
-
-/// String identifiers for HL hardforks
-pub mod name {
-    pub const V1: &str = "V1";
-}
-
-impl FromStr for HlSpecId {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(match s {
-            name::V1 => Self::V1,
-            _ => return Err(format!("Unknown HL spec: {s}")),
-        })
-    }
-}
-
-impl From<HlSpecId> for &'static str {
-    fn from(spec_id: HlSpecId) -> Self {
-        match spec_id {
-            HlSpecId::V1 => name::V1,
-        }
-    }
-}
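The `is_enabled_in` check kept above is just an ordering comparison on the `repr(u8)` discriminants: a spec enables every spec at or below it. A minimal standalone sketch of the same pattern (illustrative two-variant enum, not the crate's single-variant `HlSpecId`):

```rust
// Standalone sketch of the `repr(u8)` ordering trick behind
// `HlSpecId::is_enabled_in`; this `SpecId` is illustrative only.
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq)]
enum SpecId {
    V1,
    V2,
}

impl SpecId {
    const fn is_enabled_in(self, other: SpecId) -> bool {
        // A spec "contains" every spec with an equal or lower discriminant.
        other as u8 <= self as u8
    }
}

fn main() {
    assert!(SpecId::V2.is_enabled_in(SpecId::V1)); // V2 includes V1 rules
    assert!(!SpecId::V1.is_enabled_in(SpecId::V2)); // V1 predates V2
}
```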
@@ -124,12 +124,13 @@ impl FromRecoveredTx<TransactionSigned> for HlTxEnv<TxEnv> {
 
 impl FromTxWithEncoded<TransactionSigned> for HlTxEnv<TxEnv> {
     fn from_encoded_tx(tx: &TransactionSigned, sender: Address, _encoded: Bytes) -> Self {
+        use reth_primitives::Transaction;
         let base = match tx.clone().into_inner().into_typed_transaction() {
-            reth_primitives::Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
-            reth_primitives::Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Legacy(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip2930(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip1559(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip4844(tx) => TxEnv::from_recovered_tx(&tx, sender),
+            Transaction::Eip7702(tx) => TxEnv::from_recovered_tx(&tx, sender),
         };
 
         Self { base }
@@ -13,88 +13,5 @@ hardfork!(
     HlHardfork {
         /// Initial version
         V1,
-        /// block.number bugfix
-        V2,
-        /// gas mismatch bugfix
-        V3,
     }
 );
-
-impl HlHardfork {
-    /// Retrieves the activation block for the specified hardfork on the given chain.
-    pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        if chain == Chain::from_named(NamedChain::Hyperliquid) {
-            return Self::hl_mainnet_activation_block(fork);
-        }
-
-        None
-    }
-
-    /// Retrieves the activation timestamp for the specified hardfork on the given chain.
-    pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        None
-    }
-
-    /// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
-    pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
-        match_hardfork(
-            fork,
-            |fork| match fork {
-                EthereumHardfork::Frontier |
-                EthereumHardfork::Homestead |
-                EthereumHardfork::Tangerine |
-                EthereumHardfork::SpuriousDragon |
-                EthereumHardfork::Byzantium |
-                EthereumHardfork::Constantinople |
-                EthereumHardfork::Petersburg |
-                EthereumHardfork::Istanbul |
-                EthereumHardfork::MuirGlacier |
-                EthereumHardfork::Berlin |
-                EthereumHardfork::London |
-                EthereumHardfork::Shanghai |
-                EthereumHardfork::Cancun => Some(0),
-                _ => None,
-            },
-            |fork| match fork {
-                Self::V1 | Self::V2 | Self::V3 => Some(0),
-                _ => None,
-            },
-        )
-    }
-
-    /// Hl mainnet list of hardforks.
-    pub fn hl_mainnet() -> ChainHardforks {
-        ChainHardforks::new(vec![
-            (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
-            (Self::V1.boxed(), ForkCondition::Block(0)),
-            (Self::V2.boxed(), ForkCondition::Block(0)),
-            (Self::V3.boxed(), ForkCondition::Block(0)),
-        ])
-    }
-}
-
-/// Match helper method since it's not possible to match on `dyn Hardfork`
-fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
-where
-    H: Hardfork,
-    HF: Fn(&EthereumHardfork) -> Option<u64>,
-    HHF: Fn(&HlHardfork) -> Option<u64>,
-{
-    let fork: &dyn Any = &fork;
-    if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
-        return hardfork_fn(fork);
-    }
-    fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
-}
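The removed `match_hardfork` helper worked around the fact that you cannot `match` on a `dyn Hardfork`: it casts the value to `&dyn Any`, tries a downcast per concrete fork type, and dispatches to the matching closure. A self-contained sketch of that dispatch pattern (toy enums standing in for the reth types):

```rust
use std::any::Any;

// Toy stand-ins for `EthereumHardfork` / `HlHardfork`.
enum EthFork { Cancun }
enum LocalFork { V1 }

// Dispatch on the concrete type behind a generic value, the same way the
// removed `match_hardfork` helper did via `Any::downcast_ref`.
fn activation_block<F: Any>(fork: F) -> Option<u64> {
    let fork: &dyn Any = &fork;
    if let Some(EthFork::Cancun) = fork.downcast_ref::<EthFork>() {
        return Some(0);
    }
    fork.downcast_ref::<LocalFork>().map(|f| match f {
        LocalFork::V1 => 0,
    })
}

fn main() {
    assert_eq!(activation_block(EthFork::Cancun), Some(0));
    assert_eq!(activation_block(LocalFork::V1), Some(0));
    assert_eq!(activation_block("not a fork"), None);
}
```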
@@ -1,13 +1,14 @@
-//! Hard forks of hl protocol.
+//! Hard forks of HyperEVM.
+#![allow(unused)]
-use hl::HlHardfork;
-use reth_chainspec::{EthereumHardforks, ForkCondition};
 
 pub mod hl;
 
+use hl::HlHardfork;
+use reth_chainspec::{EthereumHardforks, ForkCondition};
+use std::sync::Arc;
+
 /// Extends [`EthereumHardforks`] with hl helper methods.
-pub trait HlHardforks: EthereumHardforks {
-    /// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
-    /// [`ForkCondition::Never`].
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
-}
+///
+/// Currently a placeholder for future use.
+pub trait HlHardforks: EthereumHardforks {}
 
+impl<T: HlHardforks> HlHardforks for Arc<T> {}
@@ -1,11 +1,9 @@
-pub mod call_forwarder;
+pub mod addons;
 pub mod chainspec;
 pub mod consensus;
 mod evm;
 mod hardforks;
-pub mod hl_node_compliance;
 pub mod node;
 pub mod pseudo_peer;
-pub mod tx_forwarder;
 
 pub use node::primitives::{HlBlock, HlBlockBody, HlPrimitives};
src/main.rs (17 lines changed)
@@ -4,15 +4,17 @@ use clap::Parser;
 use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
 use reth_db::DatabaseEnv;
 use reth_hl::{
-    call_forwarder::{self, CallForwarderApiServer},
+    addons::{
+        call_forwarder::{self, CallForwarderApiServer},
+        hl_node_compliance::install_hl_node_compliance,
+        tx_forwarder::{self, EthForwarderApiServer},
+    },
     chainspec::{parser::HlChainSpecParser, HlChainSpec},
-    hl_node_compliance::install_hl_node_compliance,
     node::{
         cli::{Cli, HlNodeArgs},
         storage::tables::Tables,
         HlNode,
     },
-    tx_forwarder::{self, EthForwarderApiServer},
 };
 use tracing::info;

@@ -37,7 +39,7 @@ fn main() -> eyre::Result<()> {
         let (node, engine_handle_tx) = HlNode::new(ext.block_source_args.parse().await?);
         let NodeHandle { node, node_exit_future: exit_future } = builder
             .node(node)
-            .extend_rpc_modules(move |ctx| {
+            .extend_rpc_modules(move |mut ctx| {
                 let upstream_rpc_url =
                     ext.upstream_rpc_url.unwrap_or_else(|| default_upstream_rpc_url.to_owned());

@@ -58,10 +60,15 @@ fn main() -> eyre::Result<()> {
                 }
 
                 if ext.hl_node_compliant {
-                    install_hl_node_compliance(ctx)?;
+                    install_hl_node_compliance(&mut ctx)?;
                     info!("hl-node compliant mode enabled");
                 }
 
+                if !ext.experimental_eth_get_proof {
+                    ctx.modules.remove_method_from_configured("eth_getProof");
+                    info!("eth_getProof is disabled by default");
+                }
+
                 Ok(())
             })
             .apply(|builder| {
@@ -55,6 +55,24 @@ pub struct HlNodeArgs {
     /// This is useful when read precompile is needed for gas estimation.
     #[arg(long, env = "FORWARD_CALL")]
     pub forward_call: bool,
+
+    /// Experimental: enables the eth_getProof RPC method.
+    ///
+    /// Note: Due to the state root difference, trie updates* may not function correctly in all
+    /// scenarios. For example, incremental root updates are not possible, which can cause
+    /// eth_getProof to malfunction in some cases.
+    ///
+    /// This limitation does not impact normal node functionality, except for state root (which is
+    /// unused) and eth_getProof. The archival state is maintained by block order, not by trie
+    /// updates. As a precaution, nanoreth disables eth_getProof by default to prevent
+    /// potential issues.
+    ///
+    /// Use --experimental-eth-get-proof to forcibly enable eth_getProof, assuming trie updates are
+    /// working as intended. Enabling this by default will be tracked in #15.
+    ///
+    /// * Refers to the Merkle trie used for eth_getProof and state root, not actual state values.
+    #[arg(long, env = "EXPERIMENTAL_ETH_GET_PROOF")]
+    pub experimental_eth_get_proof: bool,
 }
 
 /// The main reth_hl cli interface.
@@ -1,22 +1,9 @@
-use std::sync::Arc;
-
-use crate::{
-    node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
-    HlBlock, HlPrimitives,
-};
+use crate::{HlBlock, HlPrimitives};
 use alloy_eips::eip7685::Requests;
 use alloy_primitives::U256;
-use reth::{
-    api::FullNodeTypes,
-    builder::{components::PayloadServiceBuilder, BuilderContext},
-    payload::{PayloadBuilderHandle, PayloadServiceCommand},
-    transaction_pool::TransactionPool,
-};
-use reth_evm::ConfigureEvm;
 use reth_payload_primitives::BuiltPayload;
 use reth_primitives::SealedBlock;
-use tokio::sync::{broadcast, mpsc};
-use tracing::warn;
+use std::sync::Arc;
 
 /// Built payload for Hl. This is similar to [`EthBuiltPayload`] but without sidecars as those
 /// included into [`HlBlock`].

@@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
         self.requests.clone()
     }
 }
-
-#[derive(Debug, Clone, Copy, Default)]
-#[non_exhaustive]
-pub struct HlPayloadServiceBuilder;
-
-impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
-where
-    Node: FullNodeTypes<Types = HlNode>,
-    Pool: TransactionPool,
-    Evm: ConfigureEvm,
-{
-    async fn spawn_payload_builder_service(
-        self,
-        ctx: &BuilderContext<Node>,
-        _pool: Pool,
-        _evm_config: Evm,
-    ) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
-        let (tx, mut rx) = mpsc::unbounded_channel();
-
-        ctx.task_executor().spawn_critical("payload builder", async move {
-            let mut subscriptions = Vec::new();
-
-            while let Some(message) = rx.recv().await {
-                match message {
-                    PayloadServiceCommand::Subscribe(tx) => {
-                        let (events_tx, events_rx) = broadcast::channel(100);
-                        // Retain senders to make sure that channels are not getting closed
-                        subscriptions.push(events_tx);
-                        let _ = tx.send(events_rx);
-                    }
-                    message => warn!(?message, "Noop payload service received a message"),
-                }
-            }
-        });
-
-        Ok(PayloadBuilderHandle::new(tx))
-    }
-}
-
-// impl From<EthBuiltPayload> for HlBuiltPayload {
-//     fn from(value: EthBuiltPayload) -> Self {
-//         let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
-//         HlBuiltPayload {
-//             id,
-//             block: block.into(),
-//             fees,
-//             requests,
-//         }
-//     }
-// }
-
-// pub struct HlPayloadBuilder<Inner> {
-//     inner: Inner,
-// }
-
-// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
-// where
-//     Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
-// {
-//     type Attributes = Inner::Attributes;
-//     type BuiltPayload = HlBuiltPayload;
-//     type Error = Inner::Error;
-
-//     fn try_build(
-//         &self,
-//         args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
-//     ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
-//         let outcome = self.inner.try_build(args)?;
-//     }
-// }
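One detail worth noting in the removed noop payload service: it pushed every broadcast `Sender` into a `Vec` purely to keep the channels alive, so subscribers' `Receiver`s would not observe a closed channel. A minimal sketch of that retention pattern with tokio's broadcast channel:

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // Retain senders so the channels are not closed when they would
    // otherwise be dropped, mirroring `subscriptions.push(events_tx)`
    // in the removed builder.
    let mut subscriptions: Vec<broadcast::Sender<u32>> = Vec::new();

    let (events_tx, mut events_rx) = broadcast::channel(100);
    subscriptions.push(events_tx);

    // The receiver stays usable because the sender is still alive in the Vec.
    subscriptions[0].send(7).unwrap();
    assert_eq!(events_rx.recv().await.unwrap(), 7);
}
```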
@@ -71,10 +71,10 @@ where
     let timestamp = evm_env.block_env.timestamp.saturating_to();
 
     // Filter out system tx receipts
-    let transactions_for_root: Vec<TransactionSigned> =
-        transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect::<Vec<_>>();
-    let receipts_for_root: Vec<Receipt> =
-        receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect::<Vec<_>>();
+    let transactions_for_root: Vec<_> =
+        transactions.iter().filter(|t| !is_system_transaction(t)).cloned().collect();
+    let receipts_for_root: Vec<_> =
+        receipts.iter().filter(|r| r.cumulative_gas_used() != 0).cloned().collect();
 
     let transactions_root = proofs::calculate_transaction_root(&transactions_for_root);
     let receipts_root = Receipt::calculate_receipt_root_no_memo(&receipts_for_root);

@@ -295,7 +295,6 @@ where
         // configure evm env based on parent block
         let mut cfg_env =
             CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
-
         if let Some(blob_params) = &blob_params {
             cfg_env.set_max_blobs_per_tx(blob_params.max_blobs_per_tx);
         }

@@ -376,10 +375,6 @@ where
         block: &'a SealedBlock<BlockTy<Self::Primitives>>,
     ) -> ExecutionCtxFor<'a, Self> {
         let block_body = block.body();
-        let extras = HlExtras {
-            read_precompile_calls: block_body.read_precompile_calls.clone(),
-            highest_precompile_address: block_body.highest_precompile_address,
-        };
         HlBlockExecutionCtx {
             ctx: EthBlockExecutionCtx {
                 parent_hash: block.header().parent_hash,

@@ -387,7 +382,10 @@ where
                 ommers: &block.body().ommers,
                 withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
             },
-            extras,
+            extras: HlExtras {
+                read_precompile_calls: block_body.read_precompile_calls.clone(),
+                highest_precompile_address: block_body.highest_precompile_address,
+            },
         }
     }

@@ -403,8 +401,7 @@ where
                 ommers: &[],
                 withdrawals: attributes.withdrawals.map(Cow::Owned),
             },
-            // TODO: hacky, double check if this is correct
-            extras: HlExtras::default(),
+            extras: HlExtras::default(), // TODO: hacky, double check if this is correct
         }
     }
 }

@@ -416,10 +413,6 @@ impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
 
     fn context_for_payload<'a>(&self, payload: &'a HlExecutionData) -> ExecutionCtxFor<'a, Self> {
         let block = &payload.0;
-        let extras = HlExtras {
-            read_precompile_calls: block.body.read_precompile_calls.clone(),
-            highest_precompile_address: block.body.highest_precompile_address,
-        };
         HlBlockExecutionCtx {
             ctx: EthBlockExecutionCtx {
                 parent_hash: block.header.parent_hash,

@@ -427,7 +420,10 @@ impl ConfigureEngineEvm<HlExecutionData> for HlEvmConfig {
                 ommers: &block.body.ommers,
                 withdrawals: block.body.withdrawals.as_ref().map(Cow::Borrowed),
             },
-            extras,
+            extras: HlExtras {
+                read_precompile_calls: block.body.read_precompile_calls.clone(),
+                highest_precompile_address: block.body.highest_precompile_address,
+            },
         }
     }
@@ -4,7 +4,7 @@ use crate::{
     hardforks::HlHardforks,
     node::{
         primitives::TransactionSigned,
-        types::{ReadPrecompileInput, ReadPrecompileResult},
+        types::{HlExtras, ReadPrecompileInput, ReadPrecompileResult},
     },
 };
 use alloy_consensus::{Transaction, TxReceipt};

@@ -102,7 +102,7 @@ where
 {
     /// Creates a new HlBlockExecutor.
     pub fn new(mut evm: EVM, ctx: HlBlockExecutionCtx<'a>, spec: Spec, receipt_builder: R) -> Self {
-        apply_precompiles(&mut evm, &ctx);
+        apply_precompiles(&mut evm, &ctx.extras);
         Self { spec, evm, gas_used: 0, receipts: vec![], receipt_builder, ctx }
     }

@@ -155,7 +155,7 @@ where
     type Evm = E;
 
     fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
-        apply_precompiles(&mut self.evm, &self.ctx);
+        apply_precompiles(&mut self.evm, &self.ctx.extras);
         self.deploy_corewriter_contract()?;
 
         Ok(())

@@ -240,10 +240,9 @@ where
     }
 }
 
-fn apply_precompiles<'a, DB, EVM>(evm: &mut EVM, ctx: &HlBlockExecutionCtx<'a>)
+pub fn apply_precompiles<EVM>(evm: &mut EVM, extras: &HlExtras)
 where
-    EVM: Evm<DB = &'a mut State<DB>, Precompiles = PrecompilesMap>,
-    DB: Database + 'a,
+    EVM: Evm<Precompiles = PrecompilesMap>,
 {
     let block_number = evm.block().number;
     let precompiles_mut = evm.precompiles_mut();

@@ -255,9 +254,7 @@ where
             precompiles_mut.apply_precompile(&address, |_| None);
         }
     }
-    for (address, precompile) in
-        ctx.extras.read_precompile_calls.clone().unwrap_or_default().0.iter()
-    {
+    for (address, precompile) in extras.read_precompile_calls.clone().unwrap_or_default().0.iter() {
         let precompile = precompile.clone();
         precompiles_mut.apply_precompile(address, |_| {
             let precompiles_map: HashMap<ReadPrecompileInput, ReadPrecompileResult> =

@@ -271,7 +268,7 @@ where
     // NOTE: This is adapted from hyperliquid-dex/hyper-evm-sync#5
     const WARM_PRECOMPILES_BLOCK_NUMBER: u64 = 8_197_684;
     if block_number >= U256::from(WARM_PRECOMPILES_BLOCK_NUMBER) {
-        fill_all_precompiles(ctx, precompiles_mut);
+        fill_all_precompiles(extras, precompiles_mut);
     }
 }

@@ -279,9 +276,9 @@ fn address_to_u64(address: Address) -> u64 {
     address.into_u256().try_into().unwrap()
 }
 
-fn fill_all_precompiles<'a>(ctx: &HlBlockExecutionCtx<'a>, precompiles_mut: &mut PrecompilesMap) {
+fn fill_all_precompiles(extras: &HlExtras, precompiles_mut: &mut PrecompilesMap) {
     let lowest_address = 0x800;
-    let highest_address = ctx.extras.highest_precompile_address.map_or(0x80D, address_to_u64);
+    let highest_address = extras.highest_precompile_address.map_or(0x80D, address_to_u64);
     for address in lowest_address..=highest_address {
         let address = Address::from(U160::from(address));
         precompiles_mut.apply_precompile(&address, |f| {
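`fill_all_precompiles` walks the HyperEVM read-precompile address range (0x800 up to the highest advertised address, defaulting to 0x80D) by building each `Address` from a `U160`. A small sketch of that address arithmetic using alloy-primitives, with the 0x800/0x80D bounds quoted from the diff above:

```rust
use alloy_primitives::{Address, U160};

fn main() {
    // Enumerate the read-precompile slots the executor warms, mirroring the
    // `lowest_address..=highest_address` loop in `fill_all_precompiles`.
    let lowest_address: u64 = 0x800;
    let highest_address: u64 = 0x80D; // default when no higher address is advertised

    for addr in lowest_address..=highest_address {
        let address = Address::from(U160::from(addr));
        println!("{address}");
    }
    // The first printed address ends in ...0800, the last in ...080d.
}
```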
@@ -32,6 +32,8 @@ mod factory;
 mod patch;
 pub mod receipt_builder;
 
+pub use executor::apply_precompiles;
+
 /// HL EVM implementation.
 ///
 /// This is a wrapper type around the `revm` evm with optional [`Inspector`] (tracing)

@@ -165,7 +167,6 @@ where
     type EVM = HlEvmConfig;
 
     async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
-        let evm_config = HlEvmConfig::hl(ctx.chain_spec());
-        Ok(evm_config)
+        Ok(HlEvmConfig::hl(ctx.chain_spec()))
     }
 }
@@ -15,12 +15,15 @@ use crate::{
     pseudo_peer::BlockSourceConfig,
 };
 use consensus::HlConsensusBuilder;
-use engine::HlPayloadServiceBuilder;
 use evm::HlExecutorBuilder;
 use network::HlNetworkBuilder;
 use reth::{
     api::{FullNodeTypes, NodeTypes},
-    builder::{components::ComponentsBuilder, rpc::RpcAddOns, Node, NodeAdapter},
+    builder::{
+        components::{ComponentsBuilder, NoopPayloadServiceBuilder},
+        rpc::RpcAddOns,
+        Node, NodeAdapter,
+    },
 };
 use reth_engine_primitives::ConsensusEngineHandle;
 use std::{marker::PhantomData, sync::Arc};

@@ -65,7 +68,7 @@ impl HlNode {
     ) -> ComponentsBuilder<
         Node,
         HlPoolBuilder,
-        HlPayloadServiceBuilder,
+        NoopPayloadServiceBuilder,
         HlNetworkBuilder,
         HlExecutorBuilder,
         HlConsensusBuilder,

@@ -77,7 +80,7 @@ impl HlNode {
             .node_types::<Node>()
             .pool(HlPoolBuilder)
             .executor(HlExecutorBuilder::default())
-            .payload(HlPayloadServiceBuilder::default())
+            .payload(NoopPayloadServiceBuilder::default())
             .network(HlNetworkBuilder {
                 engine_handle_rx: self.engine_handle_rx.clone(),
                 block_source_config: self.block_source_config.clone(),

@@ -100,7 +103,7 @@ where
     type ComponentsBuilder = ComponentsBuilder<
         N,
         HlPoolBuilder,
-        HlPayloadServiceBuilder,
+        NoopPayloadServiceBuilder,
         HlNetworkBuilder,
         HlExecutorBuilder,
         HlConsensusBuilder,
@ -89,7 +89,6 @@ where
|
||||
/// Process a new payload and return the outcome
|
||||
fn new_payload(&self, block: BlockMsg, peer_id: PeerId) -> ImportFut {
|
||||
let engine = self.engine.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
let sealed_block = block.block.0.block.clone().seal();
|
||||
let payload = HlPayloadTypes::block_to_payload(sealed_block);
|
||||
@ -107,7 +106,7 @@ where
|
||||
.into(),
|
||||
_ => None,
|
||||
},
|
||||
Err(err) => None,
|
||||
Err(_) => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -117,15 +116,10 @@ where
|
||||
let engine = self.engine.clone();
|
||||
let consensus = self.consensus.clone();
|
||||
let sealed_block = block.block.0.block.clone().seal();
|
||||
let hash = sealed_block.hash();
|
||||
let number = sealed_block.number();
|
||||
let (hash, number) = (sealed_block.hash(), sealed_block.number());
|
||||
|
||||
Box::pin(async move {
|
||||
let (head_block_hash, current_hash) = match consensus.canonical_head(hash, number) {
|
||||
Ok(hash) => hash,
|
||||
Err(_) => return None,
|
||||
};
|
||||
|
||||
let (head_block_hash, _) = consensus.canonical_head(hash, number).ok()?;
|
||||
let state = ForkchoiceState {
|
||||
head_block_hash,
|
||||
safe_block_hash: head_block_hash,
|
||||
@ -146,18 +140,15 @@ where
|
||||
.into(),
|
||||
_ => None,
|
||||
},
|
||||
Err(err) => None,
|
||||
Err(_) => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Add a new block import task to the pending imports
|
||||
fn on_new_block(&mut self, block: BlockMsg, peer_id: PeerId) {
|
||||
let payload_fut = self.new_payload(block.clone(), peer_id);
|
||||
self.pending_imports.push(payload_fut);
|
||||
|
||||
let fcu_fut = self.update_fork_choice(block, peer_id);
|
||||
self.pending_imports.push(fcu_fut);
|
||||
self.pending_imports.push(self.new_payload(block.clone(), peer_id));
|
||||
self.pending_imports.push(self.update_fork_choice(block, peer_id));
|
||||
}
|
||||
}
|
||||
|
||||
@ -176,11 +167,9 @@ where
|
||||
}
|
||||
|
||||
// Process completed imports and send events to network
|
||||
while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
|
||||
if let Some(outcome) = outcome {
|
||||
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
|
||||
return Poll::Ready(Err(Box::new(e)));
|
||||
}
|
||||
while let Poll::Ready(Some(Some(outcome))) = this.pending_imports.poll_next_unpin(cx) {
|
||||
if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
|
||||
return Poll::Ready(Err(Box::new(e)));
|
||||
}
|
||||
}
|
||||
|
||||
@ -188,22 +177,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn collect_block(height: u64) -> Option<BlockAndReceipts> {
|
||||
let ingest_dir = "/home/user/personal/evm-blocks";
|
||||
let f = ((height - 1) / 1_000_000) * 1_000_000;
|
||||
let s = ((height - 1) / 1_000) * 1_000;
|
||||
let path = format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4");
|
||||
if std::path::Path::new(&path).exists() {
|
||||
let file = std::fs::File::open(path).unwrap();
|
||||
let file = std::io::BufReader::new(file);
|
||||
let mut decoder = lz4_flex::frame::FrameDecoder::new(file);
|
||||
let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder).unwrap();
|
||||
Some(blocks[0].clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
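The deleted debug helper above also documents the on-disk layout of the raw EVM block archive: heights are grouped into million-block and thousand-block directory buckets, with one lz4-compressed MessagePack file per block. A minimal sketch of just the path computation (the `/data/evm-blocks` root below is a placeholder, not a path from the repo):

    // Sketch of the {millions}/{thousands}/{height}.rmp.lz4 layout used above.
    fn block_path(ingest_dir: &str, height: u64) -> String {
        let f = ((height - 1) / 1_000_000) * 1_000_000; // million-block bucket
        let s = ((height - 1) / 1_000) * 1_000; // thousand-block bucket
        format!("{ingest_dir}/{f}/{s}/{height}.rmp.lz4")
    }

    // block_path("/data/evm-blocks", 1_234_567)
    //     == "/data/evm-blocks/1000000/1234000/1234567.rmp.lz4"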

#[cfg(test)]
mod tests {
    use crate::chainspec::hl::hl_mainnet;
@@ -277,15 +250,12 @@ mod tests {
        fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
            unimplemented!()
        }

        fn best_block_number(&self) -> Result<u64, ProviderError> {
            Ok(0)
        }

        fn last_block_number(&self) -> Result<u64, ProviderError> {
            Ok(0)
        }

        fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
            Ok(None)
        }
@@ -295,7 +265,6 @@ mod tests {
        fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
            Ok(Some(B256::ZERO))
        }

        fn canonical_hashes_range(
            &self,
            _start: u64,
@@ -315,14 +284,12 @@ mod tests {
        fn both_valid() -> Self {
            Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
        }

        fn invalid_new_payload() -> Self {
            Self {
                new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
                fcu: PayloadStatusEnum::Valid,
            }
        }

        fn invalid_fcu() -> Self {
            Self {
                new_payload: PayloadStatusEnum::Valid,
@@ -342,19 +309,15 @@ mod tests {
        let consensus = Arc::new(HlConsensus { provider: MockProvider });
        let (to_engine, from_engine) = mpsc::unbounded_channel();
        let engine_handle = ConsensusEngineHandle::new(to_engine);

        handle_engine_msg(from_engine, responses).await;

        let (to_import, from_network) = mpsc::unbounded_channel();
        let (to_network, import_outcome) = mpsc::unbounded_channel();

        let handle = ImportHandle::new(to_import, import_outcome);

        let service = ImportService::new(consensus, engine_handle, from_network, to_network);
        tokio::spawn(Box::pin(async move {
            service.await.unwrap();
        }));

        Self { handle }
    }


@@ -12,7 +12,6 @@ use crate::{
    HlBlock,
};
use alloy_rlp::{Decodable, Encodable};
// use handshake::HlHandshake;
use reth::{
    api::{FullNodeTypes, TxTy},
    builder::{components::NetworkBuilder, BuilderContext},
@@ -69,32 +68,22 @@ mod rlp {

    impl<'a> From<&'a HlNewBlock> for HlNewBlockHelper<'a> {
        fn from(value: &'a HlNewBlock) -> Self {
            let HlNewBlock(NewBlock {
                block:
                    HlBlock {
                        header,
                        body:
                            HlBlockBody {
                                inner: BlockBody { transactions, ommers, withdrawals },
                                sidecars,
                                read_precompile_calls,
                                highest_precompile_address,
                            },
                    },
                td,
            }) = value;

            let b = &value.0.block;
            Self {
                block: BlockHelper {
                    header: Cow::Borrowed(header),
                    transactions: Cow::Borrowed(transactions),
                    ommers: Cow::Borrowed(ommers),
                    withdrawals: withdrawals.as_ref().map(Cow::Borrowed),
                    header: Cow::Borrowed(&b.header),
                    transactions: Cow::Borrowed(&b.body.inner.transactions),
                    ommers: Cow::Borrowed(&b.body.inner.ommers),
                    withdrawals: b.body.inner.withdrawals.as_ref().map(Cow::Borrowed),
                },
                td: *td,
                sidecars: sidecars.as_ref().map(Cow::Borrowed),
                read_precompile_calls: read_precompile_calls.as_ref().map(Cow::Borrowed),
                highest_precompile_address: highest_precompile_address.as_ref().map(Cow::Borrowed),
                td: value.0.td,
                sidecars: b.body.sidecars.as_ref().map(Cow::Borrowed),
                read_precompile_calls: b.body.read_precompile_calls.as_ref().map(Cow::Borrowed),
                highest_precompile_address: b
                    .body
                    .highest_precompile_address
                    .as_ref()
                    .map(Cow::Borrowed),
            }
        }
    }
@@ -111,30 +100,24 @@ mod rlp {

    impl Decodable for HlNewBlock {
        fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
            let HlNewBlockHelper {
                block: BlockHelper { header, transactions, ommers, withdrawals },
                td,
                sidecars,
                read_precompile_calls,
                highest_precompile_address,
            } = HlNewBlockHelper::decode(buf)?;

            let h = HlNewBlockHelper::decode(buf)?;
            Ok(HlNewBlock(NewBlock {
                block: HlBlock {
                    header: header.into_owned(),
                    header: h.block.header.into_owned(),
                    body: HlBlockBody {
                        inner: BlockBody {
                            transactions: transactions.into_owned(),
                            ommers: ommers.into_owned(),
                            withdrawals: withdrawals.map(|w| w.into_owned()),
                            transactions: h.block.transactions.into_owned(),
                            ommers: h.block.ommers.into_owned(),
                            withdrawals: h.block.withdrawals.map(|w| w.into_owned()),
                        },
                        sidecars: sidecars.map(|s| s.into_owned()),
                        read_precompile_calls: read_precompile_calls.map(|s| s.into_owned()),
                        highest_precompile_address: highest_precompile_address
                        sidecars: h.sidecars.map(|s| s.into_owned()),
                        read_precompile_calls: h.read_precompile_calls.map(|s| s.into_owned()),
                        highest_precompile_address: h
                            .highest_precompile_address
                            .map(|s| s.into_owned()),
                    },
                },
                td,
                td: h.td,
            }))
        }
    }
@@ -172,41 +155,32 @@ impl HlNetworkBuilder {
    where
        Node: FullNodeTypes<Types = HlNode>,
    {
        let Self { engine_handle_rx, .. } = self;

        let network_builder = ctx.network_config_builder()?;

        let (to_import, from_network) = mpsc::unbounded_channel();
        let (to_network, import_outcome) = mpsc::unbounded_channel();

        let handle = ImportHandle::new(to_import, import_outcome);
        let consensus = Arc::new(HlConsensus { provider: ctx.provider().clone() });

        ctx.task_executor().spawn_critical("block import", async move {
            let handle = engine_handle_rx
            let handle = self
                .engine_handle_rx
                .lock()
                .await
                .take()
                .expect("node should only be launched once")
                .await
                .unwrap();

            ImportService::new(consensus, handle, from_network, to_network).await.unwrap();
        });

        let network_builder = network_builder
            .disable_dns_discovery()
            .disable_nat()
            .boot_nodes(boot_nodes())
            .set_head(ctx.head())
            .with_pow()
            .block_import(Box::new(HlBlockImport::new(handle)));
        // .discovery(discv4)
        // .eth_rlpx_handshake(Arc::new(HlHandshake::default()));

        let network_config = ctx.build_network_config(network_builder);

        Ok(network_config)
        Ok(ctx.build_network_config(
            ctx.network_config_builder()?
                .disable_dns_discovery()
                .disable_nat()
                .boot_nodes(boot_nodes())
                .set_head(ctx.head())
                .with_pow()
                .block_import(Box::new(HlBlockImport::new(handle))),
        ))
    }
}

@@ -229,11 +203,9 @@ where
        pool: Pool,
    ) -> eyre::Result<Self::Network> {
        let block_source_config = self.block_source_config.clone();
        let network_config = self.network_config(ctx)?;
        let network = NetworkManager::builder(network_config).await?;
        let handle = ctx.start_network(network, pool);
        let handle =
            ctx.start_network(NetworkManager::builder(self.network_config(ctx)?).await?, pool);
        let local_node_record = handle.local_node_record();
        let chain_spec = ctx.chain_spec();
        info!(target: "reth::cli", enode=%local_node_record, "P2P networking initialized");

        let next_block_number = ctx
@@ -243,12 +215,17 @@ where
            .block_number +
            1;

        let chain_spec = ctx.chain_spec();
        ctx.task_executor().spawn_critical("pseudo peer", async move {
            let block_source =
                block_source_config.create_cached_block_source((&*chain_spec).clone(), next_block_number).await;
            start_pseudo_peer(chain_spec, local_node_record.to_string(), block_source)
                .await
                .unwrap();
            start_pseudo_peer(
                chain_spec.clone(),
                local_node_record.to_string(),
                block_source_config
                    .create_cached_block_source((*chain_spec).clone(), next_block_number)
                    .await,
            )
            .await
            .unwrap();
        });

        Ok(handle)

@@ -68,19 +68,15 @@ impl BlockBodyTrait for HlBlockBody {
    fn transactions(&self) -> &[Self::Transaction] {
        BlockBodyTrait::transactions(&self.inner)
    }

    fn into_ethereum_body(self) -> BlockBody {
        self.inner
    }

    fn into_transactions(self) -> Vec<Self::Transaction> {
        self.inner.into_transactions()
    }

    fn withdrawals(&self) -> Option<&alloy_rpc_types::Withdrawals> {
        self.inner.withdrawals()
    }

    fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
        self.inner.ommers()
    }
@@ -116,15 +112,12 @@ impl Block for HlBlock {
    fn new(header: Self::Header, body: Self::Body) -> Self {
        Self { header, body }
    }

    fn header(&self) -> &Self::Header {
        &self.header
    }

    fn body(&self) -> &Self::Body {
        &self.body
    }

    fn split(self) -> (Self::Header, Self::Body) {
        (self.header, self.body)
    }
@@ -179,7 +172,6 @@ mod rlp {
            read_precompile_calls,
            highest_precompile_address,
        } = value;

        Self {
            transactions: Cow::Borrowed(transactions),
            ommers: Cow::Borrowed(ommers),
@@ -203,7 +195,6 @@ mod rlp {
                highest_precompile_address,
            },
        } = value;

        Self {
            header: Cow::Borrowed(header),
            transactions: Cow::Borrowed(transactions),
@@ -220,7 +211,6 @@ mod rlp {
        fn encode(&self, out: &mut dyn bytes::BufMut) {
            BlockBodyHelper::from(self).encode(out);
        }

        fn length(&self) -> usize {
            BlockBodyHelper::from(self).length()
        }
@@ -253,7 +243,6 @@ mod rlp {
        fn encode(&self, out: &mut dyn bytes::BufMut) {
            BlockHelper::from(self).encode(out);
        }

        fn length(&self) -> usize {
            BlockHelper::from(self).length()
        }

@@ -114,11 +114,6 @@ impl reth_codecs::Compact for TransactionSigned {
    }
}

pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
    let (tx, signer) = value.into_parts();
    Recovered::new_unchecked(tx.into_inner(), signer)
}

impl FromRecoveredTx<TransactionSigned> for TxEnv {
    fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
        TxEnv::from_recovered_tx(&tx.inner(), sender)
@@ -192,20 +187,6 @@ impl SerdeBincodeCompat for TransactionSigned {

pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;

impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
    fn from(value: TransactionSigned) -> Self {
        value.into_inner()
    }
}

impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
    type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;

    fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
        value.into_inner().try_into()
    }
}

impl TryFrom<TransactionSigned>
    for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
{
@@ -231,10 +212,6 @@ impl From<EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>
impl Compress for TransactionSigned {
    type Compressed = Vec<u8>;

    fn compress(self) -> Self::Compressed {
        self.into_inner().compress()
    }

    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        self.inner().compress_to_buf(buf);
    }

@@ -1,4 +1,4 @@
use crate::node::rpc::HlEthApi;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use reth::rpc::server_types::eth::{
    builder::config::PendingBlockKind, error::FromEvmError, EthApiError, PendingBlock,
};
@@ -6,12 +6,12 @@ use reth_rpc_eth_api::{
    helpers::{
        pending_block::PendingEnvBuilder, EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt,
    },
    RpcConvert, RpcNodeCore,
    RpcConvert,
};

impl<N, Rpc> EthBlocks for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -19,7 +19,7 @@ where

impl<N, Rpc> LoadBlock for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -27,7 +27,7 @@ where

impl<N, Rpc> LoadPendingBlock for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -49,7 +49,7 @@ where

impl<N, Rpc> LoadReceipt for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{

@@ -1,14 +1,23 @@
use super::HlEthApi;
use super::{HlEthApi, HlRpcNodeCore};
use crate::{node::evm::apply_precompiles, HlBlock};
use alloy_evm::Evm;
use alloy_primitives::B256;
use reth::rpc::server_types::eth::EthApiError;
use reth_evm::TxEnvFor;
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, TxEnvFor};
use reth_primitives::{NodePrimitives, Recovered};
use reth_primitives_traits::SignedTransaction;
use reth_provider::{ProviderError, ProviderTx};
use reth_rpc_eth_api::{
    helpers::{estimate::EstimateCall, Call, EthCall},
    FromEvmError, RpcConvert, RpcNodeCore,
};
use revm::DatabaseCommit;

impl<N> HlRpcNodeCore for N where N: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}

impl<N, Rpc> EthCall for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
{
@@ -16,7 +25,7 @@ where

impl<N, Rpc> EstimateCall for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
{
@@ -24,7 +33,7 @@ where

impl<N, Rpc> Call for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError, TxEnv = TxEnvFor<N::Evm>>,
{
@@ -37,4 +46,35 @@ where
    fn max_simulate_blocks(&self) -> u64 {
        self.inner.eth_api.max_simulate_blocks()
    }

    fn replay_transactions_until<'a, DB, I>(
        &self,
        db: &mut DB,
        evm_env: EvmEnvFor<Self::Evm>,
        transactions: I,
        target_tx_hash: B256,
    ) -> Result<usize, Self::Error>
    where
        DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
        I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
    {
        let block_number = evm_env.block_env().number;
        let hl_extras = self.get_hl_extras(block_number.try_into().unwrap())?;

        let mut evm = self.evm_config().evm_with_env(db, evm_env);
        apply_precompiles(&mut evm, &hl_extras);

        let mut index = 0;
        for tx in transactions {
            if *tx.tx_hash() == target_tx_hash {
                // reached the target transaction
                break;
            }

            let tx_env = self.evm_config().tx_env(tx);
            evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
            index += 1;
        }
        Ok(index)
    }
}

@@ -36,7 +36,7 @@ where
    }
}

/// Validator for Optimism engine API.
/// Validator for HyperEVM engine API.
#[derive(Debug, Clone)]
pub struct HlPayloadValidator {
    inner: HlExecutionPayloadValidator<HlChainSpec>,
@@ -123,7 +123,7 @@ where
            return Err(PayloadError::BlockHash {
                execution: sealed_block.hash(),
                consensus: expected_hash,
            })?;
            });
        }

        Ok(sealed_block)

@@ -1,3 +1,9 @@
use crate::{
    chainspec::HlChainSpec,
    node::{evm::apply_precompiles, types::HlExtras},
    HlBlock, HlPrimitives,
};
use alloy_evm::Evm;
use alloy_network::Ethereum;
use alloy_primitives::U256;
use reth::{
@@ -18,8 +24,9 @@ use reth::{
        TaskSpawner,
    },
};
use reth_evm::ConfigureEvm;
use reth_provider::{ChainSpecProvider, ProviderHeader, ProviderTx};
use reth_evm::{ConfigureEvm, Database, EvmEnvFor, HaltReasonFor, InspectorFor, TxEnvFor};
use reth_primitives::NodePrimitives;
use reth_provider::{BlockReader, ChainSpecProvider, ProviderError, ProviderHeader, ProviderTx};
use reth_rpc::RpcTypes;
use reth_rpc_eth_api::{
    helpers::{
@@ -29,17 +36,18 @@ use reth_rpc_eth_api::{
    EthApiTypes, FromEvmError, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt,
    SignableTxRequest,
};
use revm::context::result::ResultAndState;
use std::{fmt, marker::PhantomData, sync::Arc};

use crate::chainspec::HlChainSpec;

mod block;
mod call;
pub mod engine_api;
mod transaction;

pub trait HlRpcNodeCore: RpcNodeCore<Primitives: NodePrimitives<Block = HlBlock>> {}

/// Container type `HlEthApi`
pub(crate) struct HlEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
pub(crate) struct HlEthApiInner<N: HlRpcNodeCore, Rpc: RpcConvert> {
    /// Gateway to node's core components.
    pub(crate) eth_api: EthApiInner<N, Rpc>,
}
@@ -48,14 +56,14 @@ type HlRpcConvert<N, NetworkT> =
    RpcConverter<NetworkT, <N as FullNodeComponents>::Evm, EthReceiptConverter<HlChainSpec>>;

#[derive(Clone)]
pub struct HlEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
pub struct HlEthApi<N: HlRpcNodeCore, Rpc: RpcConvert> {
    /// Gateway to node's core components.
    pub(crate) inner: Arc<HlEthApiInner<N, Rpc>>,
}

impl<N, Rpc> fmt::Debug for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -65,7 +73,7 @@ where

impl<N, Rpc> EthApiTypes for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    type Error = EthApiError;
@@ -79,7 +87,7 @@ where

impl<N, Rpc> RpcNodeCore for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives>,
{
    type Primitives = N::Primitives;
@@ -111,7 +119,7 @@ where

impl<N, Rpc> RpcNodeCoreExt for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    #[inline]
@@ -122,7 +130,7 @@ where

impl<N, Rpc> EthApiSpec for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    type Transaction = ProviderTx<Self::Provider>;
@@ -141,7 +149,7 @@ where

impl<N, Rpc> SpawnBlocking for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    #[inline]
@@ -162,7 +170,7 @@ where

impl<N, Rpc> LoadFee for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -179,14 +187,14 @@ where

impl<N, Rpc> LoadState for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}

impl<N, Rpc> EthState for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    #[inline]
@@ -197,7 +205,7 @@ where

impl<N, Rpc> EthFees for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
@@ -205,15 +213,50 @@ where

impl<N, Rpc> Trace for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    fn inspect<DB, I>(
        &self,
        db: DB,
        evm_env: EvmEnvFor<Self::Evm>,
        tx_env: TxEnvFor<Self::Evm>,
        inspector: I,
    ) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
    where
        DB: Database<Error = ProviderError>,
        I: InspectorFor<Self::Evm, DB>,
    {
        let block_number = evm_env.block_env().number;
        let hl_extras = self.get_hl_extras(block_number.try_into().unwrap())?;

        let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
        apply_precompiles(&mut evm, &hl_extras);
        evm.transact(tx_env).map_err(Self::Error::from_evm_err)
    }
}

impl<N, Rpc> HlEthApi<N, Rpc>
where
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    fn get_hl_extras(&self, block_number: u64) -> Result<HlExtras, ProviderError> {
        Ok(self
            .provider()
            .block_by_number(block_number)?
            .map(|block| HlExtras {
                read_precompile_calls: block.body.read_precompile_calls.clone(),
                highest_precompile_address: block.body.highest_precompile_address,
            })
            .unwrap_or_default())
    }
}

impl<N, Rpc> AddDevSigners for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<
        Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
    >,
@@ -239,7 +282,7 @@ impl<NetworkT> Default for HlEthApiBuilder<NetworkT> {

impl<N, NetworkT> EthApiBuilder<N> for HlEthApiBuilder<NetworkT>
where
    N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec>>
    N: FullNodeComponents<Types: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>>
        + RpcNodeCore<
            Primitives = PrimitivesTy<N::Types>,
            Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>>,

@@ -1,21 +1,21 @@
use crate::node::rpc::HlEthApi;
use crate::node::rpc::{HlEthApi, HlRpcNodeCore};
use alloy_primitives::{Bytes, B256};
use reth::rpc::server_types::eth::EthApiError;
use reth_rpc_eth_api::{
    helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
    RpcConvert, RpcNodeCore,
    RpcConvert,
};

impl<N, Rpc> LoadTransaction for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}

impl<N, Rpc> EthTransactions for HlEthApi<N, Rpc>
where
    N: RpcNodeCore,
    N: HlRpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
    fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {

@@ -1,3 +1,7 @@
use std::time::Duration;

use crate::pseudo_peer::HlNodeBlockSourceArgs;

use super::config::BlockSourceConfig;
use clap::{Args, Parser};
use reth_node_core::args::LogArgs;
@@ -13,7 +17,7 @@ pub struct BlockSourceArgs {
    block_source: Option<String>,

    #[arg(long, alias = "local-ingest-dir")]
    block_source_from_node: Option<String>,
    local_ingest_dir: Option<String>,

    /// Shorthand of --block-source=s3://hl-mainnet-evm-blocks
    #[arg(long, default_value_t = false)]
@@ -22,6 +26,19 @@ pub struct BlockSourceArgs {
    /// Shorthand of --block-source-from-node=~/hl/data/evm_blocks_and_receipts
    #[arg(long)]
    local: bool,

    /// Interval for polling new blocks in S3 in milliseconds.
    #[arg(id = "s3.polling-interval", long = "s3.polling-interval", default_value = "25")]
    s3_polling_interval: u64,

    /// Maximum allowed delay for the hl-node block source in milliseconds.
    /// If this threshold is exceeded, the client falls back to other sources.
    #[arg(
        id = "local.fallback-threshold",
        long = "local.fallback-threshold",
        default_value = "5000"
    )]
    local_fallback_threshold: u64,
}

impl BlockSourceArgs {
@@ -33,7 +50,10 @@ impl BlockSourceArgs {

    async fn create_base_config(&self) -> eyre::Result<BlockSourceConfig> {
        if self.s3 {
            return Ok(BlockSourceConfig::s3_default().await);
            return Ok(BlockSourceConfig::s3_default(Duration::from_millis(
                self.s3_polling_interval,
            ))
            .await);
        }

        if self.local {
@@ -47,18 +67,25 @@ impl BlockSourceArgs {
        };

        if let Some(bucket) = value.strip_prefix("s3://") {
            Ok(BlockSourceConfig::s3(bucket.to_string()).await)
            Ok(BlockSourceConfig::s3(
                bucket.to_string(),
                Duration::from_millis(self.s3_polling_interval),
            )
            .await)
        } else {
            Ok(BlockSourceConfig::local(value.into()))
        }
    }

    fn apply_node_source_config(&self, config: BlockSourceConfig) -> BlockSourceConfig {
        let Some(block_source_from_node) = self.block_source_from_node.as_ref() else {
        let Some(local_ingest_dir) = self.local_ingest_dir.as_ref() else {
            return config;
        };

        config.with_block_source_from_node(block_source_from_node.to_string())
        config.with_block_source_from_node(HlNodeBlockSourceArgs {
            root: local_ingest_dir.into(),
            fallback_threshold: Duration::from_millis(self.local_fallback_threshold),
        })
    }
}


@@ -1,31 +1,38 @@
use crate::chainspec::HlChainSpec;

use super::sources::{
    BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, LocalBlockSource, S3BlockSource,
    BlockSourceBoxed, CachedBlockSource, HlNodeBlockSource, HlNodeBlockSourceArgs,
    LocalBlockSource, S3BlockSource,
};
use aws_config::BehaviorVersion;
use std::{env::home_dir, path::PathBuf, sync::Arc};
use std::{env::home_dir, path::PathBuf, sync::Arc, time::Duration};

#[derive(Debug, Clone)]
pub struct BlockSourceConfig {
    pub source_type: BlockSourceType,
    pub block_source_from_node: Option<String>,
    pub block_source_from_node: Option<HlNodeBlockSourceArgs>,
}

#[derive(Debug, Clone)]
pub enum BlockSourceType {
    S3Default,
    S3 { bucket: String },
    S3Default { polling_interval: Duration },
    S3 { bucket: String, polling_interval: Duration },
    Local { path: PathBuf },
}

impl BlockSourceConfig {
    pub async fn s3_default() -> Self {
        Self { source_type: BlockSourceType::S3Default, block_source_from_node: None }
    pub async fn s3_default(polling_interval: Duration) -> Self {
        Self {
            source_type: BlockSourceType::S3Default { polling_interval },
            block_source_from_node: None,
        }
    }

    pub async fn s3(bucket: String) -> Self {
        Self { source_type: BlockSourceType::S3 { bucket }, block_source_from_node: None }
    pub async fn s3(bucket: String, polling_interval: Duration) -> Self {
        Self {
            source_type: BlockSourceType::S3 { bucket, polling_interval },
            block_source_from_node: None,
        }
    }
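With the polling interval now threaded through both constructors, a call site for the S3 source looks roughly like this (a minimal sketch; the bucket name and the 25 ms value are taken from the `--s3` shorthand and the `--s3.polling-interval` default shown above):

    // Hypothetical wiring, assuming the signatures introduced in this diff.
    let config = BlockSourceConfig::s3(
        "hl-mainnet-evm-blocks".to_string(), // official bucket per the --s3 shorthand
        Duration::from_millis(25),           // matches the --s3.polling-interval default
    )
    .await;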

    pub fn local(path: PathBuf) -> Self {
@@ -45,15 +52,22 @@ impl BlockSourceConfig {
        }
    }

    pub fn with_block_source_from_node(mut self, block_source_from_node: String) -> Self {
    pub fn with_block_source_from_node(
        mut self,
        block_source_from_node: HlNodeBlockSourceArgs,
    ) -> Self {
        self.block_source_from_node = Some(block_source_from_node);
        self
    }

    pub async fn create_block_source(&self, chain_spec: HlChainSpec) -> BlockSourceBoxed {
        match &self.source_type {
            BlockSourceType::S3Default => s3_block_source(chain_spec.official_s3_bucket()).await,
            BlockSourceType::S3 { bucket } => s3_block_source(bucket).await,
            BlockSourceType::S3Default { polling_interval } => {
                s3_block_source(chain_spec.official_s3_bucket(), *polling_interval).await
            }
            BlockSourceType::S3 { bucket, polling_interval } => {
                s3_block_source(bucket, *polling_interval).await
            }
            BlockSourceType::Local { path } => {
                Arc::new(Box::new(LocalBlockSource::new(path.clone())))
            }
@@ -72,7 +86,7 @@ impl BlockSourceConfig {
        Arc::new(Box::new(
            HlNodeBlockSource::new(
                fallback_block_source,
                PathBuf::from(block_source_from_node.clone()),
                block_source_from_node.clone(),
                next_block_number,
            )
            .await,
@@ -91,9 +105,9 @@ impl BlockSourceConfig {
    }
}

async fn s3_block_source(bucket: impl AsRef<str>) -> BlockSourceBoxed {
async fn s3_block_source(bucket: impl AsRef<str>, polling_interval: Duration) -> BlockSourceBoxed {
    let client = aws_sdk_s3::Client::new(
        &aws_config::defaults(BehaviorVersion::latest()).region("ap-northeast-1").load().await,
    );
    Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string())))
    Arc::new(Box::new(S3BlockSource::new(client, bucket.as_ref().to_string(), polling_interval)))
}

@@ -1 +0,0 @@
pub const MAX_CONCURRENCY: usize = 100;
@@ -1,36 +0,0 @@
use thiserror::Error;

#[derive(Error, Debug)]
pub enum PseudoPeerError {
    #[error("Block source error: {0}")]
    BlockSource(String),

    #[error("Network error: {0}")]
    Network(#[from] reth_network::error::NetworkError),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("AWS S3 error: {0}")]
    S3(#[from] aws_sdk_s3::Error),

    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Serialization error: {0}")]
    Serialization(#[from] rmp_serde::encode::Error),

    #[error("Deserialization error: {0}")]
    Deserialization(#[from] rmp_serde::decode::Error),

    #[error("Compression error: {0}")]
    Compression(String),
}

impl From<eyre::Error> for PseudoPeerError {
    fn from(err: eyre::Error) -> Self {
        PseudoPeerError::Config(err.to_string())
    }
}

pub type Result<T> = std::result::Result<T, PseudoPeerError>;
@@ -5,33 +5,25 @@

pub mod cli;
pub mod config;
pub mod consts;
pub mod error;
pub mod network;
pub mod service;
pub mod sources;
pub mod utils;

use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info};

pub use cli::*;
pub use config::*;
pub use error::*;
pub use network::*;
pub use service::*;
pub use sources::*;

#[cfg(test)]
mod tests;

use tokio::sync::mpsc;
use tracing::info;

/// Re-export commonly used types
pub mod prelude {
    pub use super::{
        config::BlockSourceConfig,
        error::{PseudoPeerError, Result},
        service::{BlockPoller, PseudoPeer},
        sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
    };
@@ -86,8 +78,11 @@ pub async fn start_pseudo_peer(
            _ = transaction_rx.recv() => {}

            Some(eth_req) = eth_rx.recv() => {
                service.process_eth_request(eth_req).await?;
                info!("Processed eth request");
                if let Err(e) = service.process_eth_request(eth_req).await {
                    error!("Error processing eth request: {e:?}");
                } else {
                    info!("Processed eth request");
                }
            }
        }
    }

@@ -6,7 +6,11 @@ use reth_network::{
};
use reth_network_peers::TrustedPeer;
use reth_provider::test_utils::NoopProvider;
use std::{str::FromStr, sync::Arc};
use std::{
    net::{Ipv4Addr, SocketAddr},
    str::FromStr,
    sync::Arc,
};
use tokio::sync::mpsc;

pub struct NetworkBuilder {
@@ -32,27 +36,11 @@ impl Default for NetworkBuilder {
}

impl NetworkBuilder {
    pub fn with_secret(mut self, secret: SecretKey) -> Self {
        self.secret = secret;
        self
    }

    pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
        self.peer_config = peer_config;
        self
    }

    pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
        self.boot_nodes = boot_nodes;
        self
    }

    pub fn with_ports(mut self, discovery_port: u16, listener_port: u16) -> Self {
        self.discovery_port = discovery_port;
        self.listener_port = listener_port;
        self
    }

    pub fn with_chain_spec(mut self, chain_spec: HlChainSpec) -> Self {
        self.chain_spec = chain_spec;
        self
@@ -66,8 +54,8 @@ impl NetworkBuilder {
        let builder = NetworkConfig::<(), HlNetworkPrimitives>::builder(self.secret)
            .boot_nodes(self.boot_nodes)
            .peer_config(self.peer_config)
            .discovery_port(self.discovery_port)
            .listener_port(self.listener_port);
            .discovery_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.discovery_port))
            .listener_addr(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), self.listener_port));
        let chain_id = self.chain_spec.inner.chain().id();

        let (block_poller, start_tx) =

@@ -26,7 +26,6 @@ use std::{
    pin::Pin,
    sync::{Arc, Mutex},
    task::{Context, Poll},
    time::Duration,
};
use tokio::{sync::mpsc, task::JoinHandle};
use tracing::{debug, info};
@@ -49,8 +48,6 @@ pub struct BlockPoller {
}

impl BlockPoller {
    const POLL_INTERVAL: Duration = Duration::from_millis(25);

    pub fn new_suspended<BS: BlockSource>(
        chain_id: u64,
        block_source: BS,
@@ -77,19 +74,20 @@ impl BlockPoller {
            start_rx.recv().await.ok_or(eyre::eyre!("Failed to receive start signal"))?;
            info!("Starting block poller");

            let latest_block_number = block_source
            let polling_interval = block_source.polling_interval();
            let mut next_block_number = block_source
                .find_latest_block_number()
                .await
                .ok_or(eyre::eyre!("Failed to find latest block number"))?;

            let mut next_block_number = latest_block_number;
            loop {
                let Ok(block) = block_source.collect_block(next_block_number).await else {
                    tokio::time::sleep(Self::POLL_INTERVAL).await;
                    continue;
                };
                block_tx_clone.send((next_block_number, block)).await?;
                next_block_number += 1;
                match block_source.collect_block(next_block_number).await {
                    Ok(block) => {
                        block_tx_clone.send((next_block_number, block)).await?;
                        next_block_number += 1;
                    }
                    Err(_) => tokio::time::sleep(polling_interval).await,
                }
            }
        }
    }
}
@@ -111,8 +109,7 @@ impl BlockImport<HlNewBlock> for BlockPoller {
                },
            }))
        }
        Poll::Ready(None) => Poll::Pending,
        Poll::Pending => Poll::Pending,
        Poll::Ready(None) | Poll::Pending => Poll::Pending,
    }
}

@@ -155,14 +152,14 @@ impl<BS: BlockSource> PseudoPeer<BS> {
    async fn collect_blocks(
        &self,
        block_numbers: impl IntoIterator<Item = u64>,
    ) -> Vec<BlockAndReceipts> {
    ) -> eyre::Result<Vec<BlockAndReceipts>> {
        let block_numbers = block_numbers.into_iter().collect::<Vec<_>>();
        let blocks = futures::stream::iter(block_numbers)
            .map(async |number| self.collect_block(number).await.unwrap())
        let res = futures::stream::iter(block_numbers)
            .map(async |number| self.collect_block(number).await)
            .buffered(self.block_source.recommended_chunk_size() as usize)
            .collect::<Vec<_>>()
            .await;
        blocks
        res.into_iter().collect()
    }

    pub async fn process_eth_request(
@@ -179,7 +176,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                debug!(
                    "GetBlockHeaders request: {start_block:?}, {limit:?}, {skip:?}, {direction:?}"
                );

                let number = match start_block {
                    HashOrNumber::Hash(hash) => self.hash_to_block_number(hash).await,
                    HashOrNumber::Number(number) => number,
@@ -190,7 +186,7 @@ impl<BS: BlockSource> PseudoPeer<BS> {
                    HeadersDirection::Falling => {
                        self.collect_blocks((number + 1 - limit..number + 1).rev()).await
                    }
                }
                }?
                .into_par_iter()
                .map(|block| block.to_reth_block(chain_id).header.clone())
                .collect::<Vec<_>>();
@@ -208,19 +204,15 @@ impl<BS: BlockSource> PseudoPeer<BS> {

                let block_bodies = self
                    .collect_blocks(numbers)
                    .await
                    .await?
                    .into_iter()
                    .map(|block| block.to_reth_block(chain_id).body)
                    .collect::<Vec<_>>();

                let _ = response.send(Ok(BlockBodies(block_bodies)));
            }
            IncomingEthRequest::GetNodeData { .. } => {
                debug!("GetNodeData request: {eth_req:?}");
            }
            eth_req => {
                debug!("New eth protocol request: {eth_req:?}");
            }
            IncomingEthRequest::GetNodeData { .. } => debug!("GetNodeData request: {eth_req:?}"),
            eth_req => debug!("New eth protocol request: {eth_req:?}"),
        }
        Ok(())
    }
@@ -251,7 +243,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        // This is tricky because Raw EVM files (BlockSource) does not have hash to number mapping
        // so we can either enumerate all blocks to get hash to number mapping, or fallback to an
        // official RPC. The latter is much easier but has 300/day rate limit.

        use jsonrpsee::http_client::HttpClientBuilder;
        use jsonrpsee_core::client::ClientT;

@@ -259,7 +250,6 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        let client =
            HttpClientBuilder::default().build(self.chain_spec.official_rpc_url()).unwrap();
        let target_block: Block = client.request("eth_getBlockByHash", (hash, false)).await?;

        debug!("From official RPC: {:?} for {hash:?}", target_block.header.number);
        self.cache_blocks([(hash, target_block.header.number)]);
        Ok(target_block.header.number)
@@ -272,9 +262,10 @@ impl<BS: BlockSource> PseudoPeer<BS> {
            if self.if_hit_then_warm_around.lock().unwrap().contains(&block_number) {
                self.warm_cache_around_blocks(block_number, self.warm_cache_size).await;
            }
            return Some(block_number);
            Some(block_number)
        } else {
            None
        }
        None
    }

    /// Backfill the cache with blocks to find the target hash
@@ -319,10 +310,11 @@ impl<BS: BlockSource> PseudoPeer<BS> {
    async fn warm_cache_around_blocks(&mut self, block_number: u64, chunk_size: u64) {
        let start = std::cmp::max(block_number.saturating_sub(chunk_size), 1);
        let end = std::cmp::min(block_number + chunk_size, self.known_latest_block_number);

        self.if_hit_then_warm_around.lock().unwrap().insert(start);
        self.if_hit_then_warm_around.lock().unwrap().insert(end);

        {
            let mut guard = self.if_hit_then_warm_around.lock().unwrap();
            guard.insert(start);
            guard.insert(end);
        }
        const IMPOSSIBLE_HASH: B256 = B256::ZERO;
        let _ = self.try_block_range_for_hash(start, end, IMPOSSIBLE_HASH).await;
    }
@@ -348,15 +340,12 @@ impl<BS: BlockSource> PseudoPeer<BS> {
        }

        debug!("Backfilling from {start_number} to {end_number}");

        // Collect blocks and cache them
        let blocks = self.collect_blocks(uncached_block_numbers).await;
        let blocks = self.collect_blocks(uncached_block_numbers).await?;
        let block_map: HashMap<B256, u64> =
            blocks.into_iter().map(|block| (block.hash(), block.number())).collect();

        let maybe_block_number = block_map.get(&target_hash).copied();
        self.cache_blocks(block_map);

        Ok(maybe_block_number)
    }


48
src/pseudo_peer/sources/cached.rs
Normal file
@@ -0,0 +1,48 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::sync::{Arc, RwLock};

/// Block source wrapper that caches blocks in memory
#[derive(Debug, Clone)]
pub struct CachedBlockSource {
    block_source: BlockSourceBoxed,
    cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}

impl CachedBlockSource {
    const CACHE_LIMIT: u32 = 100000;

    pub fn new(block_source: BlockSourceBoxed) -> Self {
        Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
    }
}

impl BlockSource for CachedBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let block_source = self.block_source.clone();
        let cache = self.cache.clone();
        async move {
            if let Some(block) = cache.write().unwrap().get(&height) {
                return Ok(block.clone());
            }
            let block = block_source.collect_block(height).await?;
            cache.write().unwrap().insert(height, block.clone());
            Ok(block)
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        self.block_source.find_latest_block_number()
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.block_source.recommended_chunk_size()
    }

    fn polling_interval(&self) -> std::time::Duration {
        self.block_source.polling_interval()
    }
}
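`CachedBlockSource` is a plain decorator: `collect_block` is answered from the LRU when possible and every other trait method delegates to the wrapped source. Wrapping any source is then a one-liner (a sketch, assuming `BlockSourceBoxed` is the `Arc<Box<dyn BlockSource>>`-style alias suggested by the `Arc::new(Box::new(...))` call sites above):

    // Hypothetical wiring: serve repeated requests for the same heights
    // from memory instead of hitting disk or S3 again.
    let inner: BlockSourceBoxed = Arc::new(Box::new(LocalBlockSource::new(path)));
    let cached = CachedBlockSource::new(inner);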

@@ -1,635 +0,0 @@
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::{BlockAndReceipts, EvmBlock};
use futures::future::BoxFuture;
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    io::{BufRead, BufReader, Read, Seek, SeekFrom},
    ops::RangeInclusive,
    path::{Path, PathBuf},
    sync::Arc,
};
use time::{macros::format_description, Date, Duration, OffsetDateTime, Time};
use tokio::sync::Mutex;
use tracing::{info, warn};

const TAIL_INTERVAL: std::time::Duration = std::time::Duration::from_millis(25);
const HOURLY_SUBDIR: &str = "hourly";

#[derive(Debug)]
pub struct LocalBlocksCache {
    cache: LruMap<u64, BlockAndReceipts>,
    // Lightweight range map to track the ranges of blocks in the local ingest directory
    ranges: RangeInclusiveMap<u64, PathBuf>,
}

impl LocalBlocksCache {
    // 3660 blocks per hour
    const CACHE_SIZE: u32 = 8000;

    fn new() -> Self {
        Self { cache: LruMap::new(Self::CACHE_SIZE), ranges: RangeInclusiveMap::new() }
    }

    fn load_scan_result(&mut self, scan_result: ScanResult) {
        for blk in scan_result.new_blocks {
            let EvmBlock::Reth115(b) = &blk.block;
            self.cache.insert(b.header.header.number, blk);
        }
        for range in scan_result.new_block_ranges {
            self.ranges.insert(range, scan_result.path.clone());
        }
    }
}

#[derive(Serialize, Deserialize, Debug, Clone)]
struct LocalBlockAndReceipts(String, BlockAndReceipts);

struct ScanResult {
    path: PathBuf,
    next_expected_height: u64,
    new_blocks: Vec<BlockAndReceipts>,
    new_block_ranges: Vec<RangeInclusive<u64>>,
}

struct ScanOptions {
    start_height: u64,
    only_load_ranges: bool,
}

fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
    let LocalBlockAndReceipts(_block_timestamp, parsed_block): LocalBlockAndReceipts =
        serde_json::from_str(line)?;
    let height = match &parsed_block.block {
        EvmBlock::Reth115(b) => b.header.header.number,
    };
    Ok((parsed_block, height))
}

fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
    let file = File::open(path).expect("Failed to open hour file path");
    let reader = BufReader::new(file);

    let ScanOptions { start_height, only_load_ranges } = options;

    let mut new_blocks = Vec::new();
    let mut last_height = start_height;
    let lines: Vec<String> = reader.lines().collect::<Result<_, _>>().unwrap();
    let skip = if *last_line == 0 { 0 } else { *last_line - 1 };

    let mut block_ranges = Vec::new();
    let mut current_range: Option<(u64, u64)> = None;

    for (line_idx, line) in lines.iter().enumerate().skip(skip) {
        if line_idx < *last_line || line.trim().is_empty() {
            continue;
        }

        match line_to_evm_block(line) {
            Ok((parsed_block, height)) => {
                if height >= start_height {
                    last_height = last_height.max(height);
                    if !only_load_ranges {
                        new_blocks.push(parsed_block);
                    }
                    *last_line = line_idx;
                }

                match current_range {
                    Some((start, end)) if end + 1 == height => {
                        current_range = Some((start, height));
                    }
                    _ => {
                        if let Some((start, end)) = current_range.take() {
                            block_ranges.push(start..=end);
                        }
                        current_range = Some((height, height));
                    }
                }
            }
            Err(_) => {
                warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line));
                continue;
            }
        }
    }

    if let Some((start, end)) = current_range {
        block_ranges.push(start..=end);
    }

    ScanResult {
        path: path.to_path_buf(),
        next_expected_height: last_height + 1,
        new_blocks,
        new_block_ranges: block_ranges,
    }
}

fn date_from_datetime(dt: OffsetDateTime) -> String {
    dt.format(&format_description!("[year][month][day]")).unwrap()
}
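`date_from_datetime` produces the `YYYYMMDD` directory names that make up the `hourly/` tree this module tails; the hour of day becomes the file name inside that directory. A small check of the expected shape (the `datetime!` macro is from the same `time` crate this file already uses):

    use time::macros::datetime;

    let dt = datetime!(2025-07-31 04:00 UTC);
    assert_eq!(date_from_datetime(dt), "20250731");
    // The matching hour file would be <root>/hourly/20250731/4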

/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
    pub fallback: BlockSourceBoxed,
    pub local_ingest_dir: PathBuf,
    pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>, // height → block
    // for rate limiting requests to fallback
    pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
}

impl BlockSource for HlNodeBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        Box::pin(async move {
            let now = OffsetDateTime::now_utc();

            if let Some(block) = self.try_collect_local_block(height).await {
                self.update_last_fetch(height, now).await;
                return Ok(block);
            }

            if let Some((last_height, last_poll_time)) = *self.last_local_fetch.lock().await {
                let more_recent = last_height < height;
                let too_soon = now - last_poll_time < Self::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK;
                if more_recent && too_soon {
                    return Err(eyre::eyre!(
                        "Not found locally; limiting polling rate before fallback so that hl-node has chance to catch up"
                    ));
                }
            }

            let block = self.fallback.collect_block(height).await?;
            self.update_last_fetch(height, now).await;
            Ok(block)
        })
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        Box::pin(async move {
            let Some(dir) = Self::find_latest_hourly_file(&self.local_ingest_dir) else {
                warn!(
                    "No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
                    self.local_ingest_dir
                );
                return self.fallback.find_latest_block_number().await;
            };

            let mut file = File::open(&dir).expect("Failed to open hour file path");
            if let Some((_, height)) = read_last_complete_line(&mut file) {
                info!("Latest block number: {} with path {}", height, dir.display());
                Some(height)
            } else {
                warn!(
                    "Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
                    file
                );
                self.fallback.find_latest_block_number().await
            }
        })
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.fallback.recommended_chunk_size()
    }
}

fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
    const CHUNK_SIZE: u64 = 50000;
    let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
    let mut pos = read.seek(SeekFrom::End(0)).unwrap();
    let mut last_line = Vec::new();

    while pos > 0 {
        let read_size = std::cmp::min(pos, CHUNK_SIZE);
        buf.resize(read_size as usize, 0);

        read.seek(SeekFrom::Start(pos - read_size)).unwrap();
        read.read_exact(&mut buf).unwrap();

        last_line = [buf.clone(), last_line].concat();

        if last_line.ends_with(b"\n") {
            last_line.pop();
        }

        if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
            let candidate = &last_line[idx + 1..];
            if let Ok((evm_block, height)) = line_to_evm_block(str::from_utf8(candidate).unwrap()) {
                return Some((evm_block, height));
            }
            // Incomplete line; truncate and continue
            last_line.truncate(idx);
        }

        if pos < read_size {
            break;
        }
        pos -= read_size;
    }

    line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
}
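`read_last_complete_line` walks backwards from EOF in 50 KB chunks, so finding the newest block stays cheap even when the hourly file is large and still being appended to. Its contract can be exercised with an in-memory reader (a sketch: `line_1` and `line_2` are assumed to hold serialized `LocalBlockAndReceipts` JSON lines, which this snippet does not construct):

    use std::io::Cursor;

    // Two complete, newline-terminated lines; the function should parse
    // the final one and return its block height.
    let data = format!("{line_1}\n{line_2}\n");
    let mut reader = Cursor::new(data.into_bytes());
    let latest = read_last_complete_line(&mut reader).map(|(_, height)| height);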

impl HlNodeBlockSource {
    /// [HlNodeBlockSource] picks the faster one between local ingest directory and s3/ingest-dir.
    /// But if we immediately fallback to s3/ingest-dir, in case of S3, it may cause unnecessary
    /// requests to S3 while it'll return 404.
    ///
    /// To avoid unnecessary fallback, we set a short threshold period.
    /// This threshold is several times longer than the expected block time, reducing redundant
    /// fallback attempts.
    pub(crate) const MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK: Duration = Duration::milliseconds(5000);

    async fn update_last_fetch(&self, height: u64, now: OffsetDateTime) {
        let mut last_fetch = self.last_local_fetch.lock().await;
        if let Some((last_height, _)) = *last_fetch {
            if last_height >= height {
                return;
            }
        }
        *last_fetch = Some((height, now));
    }

    async fn try_collect_local_block(&self, height: u64) -> Option<BlockAndReceipts> {
        let mut u_cache = self.local_blocks_cache.lock().await;
        if let Some(block) = u_cache.cache.remove(&height) {
            return Some(block);
        }

        let path = u_cache.ranges.get(&height).cloned()?;

        info!("Loading block data from {:?}", path);
        u_cache.load_scan_result(scan_hour_file(
            &path,
            &mut 0,
            ScanOptions { start_height: 0, only_load_ranges: false },
        ));
        u_cache.cache.get(&height).cloned()
    }

    fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
        let dt_part = path.parent()?.file_name()?.to_str()?;
        let hour_part = path.file_name()?.to_str()?;

        let hour: u8 = hour_part.parse().ok()?;
        Some(OffsetDateTime::new_utc(
            Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
            Time::from_hms(hour, 0, 0).ok()?,
        ))
    }

    fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
        let dir = root.join(HOURLY_SUBDIR);
        let mut files = Vec::new();

        for entry in std::fs::read_dir(dir).ok()? {
            let file = entry.ok()?.path();
            let subfiles: Vec<_> = std::fs::read_dir(&file)
                .ok()?
                .filter_map(|f| f.ok().map(|f| f.path()))
                .filter(|p| Self::datetime_from_path(p).is_some())
                .collect();
            files.extend(subfiles);
        }

        files.sort();
        Some(files)
    }

    fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
        Self::all_hourly_files(root)?.last().cloned()
    }

    async fn try_backfill_local_blocks(
        root: &Path,
        cache: &Arc<Mutex<LocalBlocksCache>>,
        cutoff_height: u64,
    ) -> eyre::Result<()> {
        let mut u_cache = cache.lock().await;

        for subfile in Self::all_hourly_files(root).unwrap_or_default() {
            let mut file = File::open(&subfile).expect("Failed to open hour file path");

            if let Some((_, height)) = read_last_complete_line(&mut file) {
                if height < cutoff_height {
                    continue;
                }
            } else {
                warn!("Failed to parse last line of file, fallback to slow path: {:?}", subfile);
            }

            let mut scan_result = scan_hour_file(
                &subfile,
                &mut 0,
                ScanOptions { start_height: cutoff_height, only_load_ranges: true },
            );
            // Only store the block ranges for now; actual block data will be loaded lazily later to
            // optimize memory usage
            scan_result.new_blocks.clear();
            u_cache.load_scan_result(scan_result);
        }

        if u_cache.ranges.is_empty() {
            warn!("No ranges found in {:?}", root);
        } else {
            let (min, _) = u_cache.ranges.first_range_value().unwrap();
            let (max, _) = u_cache.ranges.last_range_value().unwrap();
            info!(
                "Populated {} ranges (min: {}, max: {})",
                u_cache.ranges.len(),
                min.start(),
                max.end()
            );
        }

        Ok(())
    }

    async fn start_local_ingest_loop(&self, current_head: u64) {
        let root = self.local_ingest_dir.to_owned();
        let cache = self.local_blocks_cache.clone();

        tokio::spawn(async move {
            let mut next_height = current_head;

            // Wait for the first hourly file to be created
            let mut dt = loop {
                if let Some(latest_file) = Self::find_latest_hourly_file(&root) {
                    break Self::datetime_from_path(&latest_file).unwrap();
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            };

            let mut hour = dt.hour();
            let mut day_str = date_from_datetime(dt);
            let mut last_line = 0;

            info!("Starting local ingest loop from height: {:?}", current_head);

            loop {
                let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));

                if hour_file.exists() {
                    let scan_result = scan_hour_file(
                        &hour_file,
                        &mut last_line,
                        ScanOptions { start_height: next_height, only_load_ranges: false },
                    );
                    next_height = scan_result.next_expected_height;

                    let mut u_cache = cache.lock().await;
                    u_cache.load_scan_result(scan_result);
                }

                let now = OffsetDateTime::now_utc();

                if dt + Duration::HOUR < now {
                    dt += Duration::HOUR;
                    hour = dt.hour();
                    day_str = date_from_datetime(dt);
                    last_line = 0;
                    info!(
                        "Moving to a new file. {:?}",
                        root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
                    );
                    continue;
                }

                tokio::time::sleep(TAIL_INTERVAL).await;
            }
        });
    }

    pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
        let _ = Self::try_backfill_local_blocks(
            &self.local_ingest_dir,
            &self.local_blocks_cache,
            next_block_number,
        )
        .await;

        self.start_local_ingest_loop(next_block_number).await;
        Ok(())
    }

    pub async fn new(
        fallback: BlockSourceBoxed,
        local_ingest_dir: PathBuf,
        next_block_number: u64,
    ) -> Self {
        let block_source = HlNodeBlockSource {
            fallback,
            local_ingest_dir,
            local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new())),
            last_local_fetch: Arc::new(Mutex::new(None)),
        };
        block_source.run(next_block_number).await.unwrap();
        block_source
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        node::types::{reth_compat, ReadPrecompileCalls},
        pseudo_peer::sources::LocalBlockSource,
    };
    use alloy_consensus::{BlockBody, Header};
    use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
    use std::{io::Write, time::Duration};

    #[test]
    fn test_datetime_from_path() {
        let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
        let dt = HlNodeBlockSource::datetime_from_path(path).unwrap();
        println!("{dt:?}");
    }

    #[tokio::test]
    async fn test_backfill() {
        let test_path = Path::new("/root/evm_block_and_receipts");
        if !test_path.exists() {
            return;
        }

        let cache = Arc::new(Mutex::new(LocalBlocksCache::new()));
        HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();

        let u_cache = cache.lock().await;
println!("{:?}", u_cache.ranges);
|
||||
assert_eq!(
|
||||
u_cache.ranges.get(&9735058),
|
||||
Some(&test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
|
||||
);
|
||||
}
|
||||
|
||||
fn scan_result_from_single_block(block: BlockAndReceipts) -> ScanResult {
|
||||
let height = match &block.block {
|
||||
EvmBlock::Reth115(b) => b.header.header.number,
|
||||
};
|
||||
ScanResult {
|
||||
path: PathBuf::from("/nonexistent-block"),
|
||||
next_expected_height: height + 1,
|
||||
new_blocks: vec![block],
|
||||
new_block_ranges: vec![height..=height],
|
||||
}
|
||||
}
|
||||
|
||||
fn empty_block(
|
||||
number: u64,
|
||||
timestamp: u64,
|
||||
extra_data: &'static [u8],
|
||||
) -> LocalBlockAndReceipts {
|
||||
let extra_data = Bytes::from_static(extra_data);
|
||||
let res = BlockAndReceipts {
|
||||
block: EvmBlock::Reth115(reth_compat::SealedBlock {
|
||||
header: reth_compat::SealedHeader {
|
||||
header: Header {
|
||||
parent_hash: B256::ZERO,
|
||||
ommers_hash: B256::ZERO,
|
||||
beneficiary: Address::ZERO,
|
||||
state_root: B256::ZERO,
|
||||
transactions_root: B256::ZERO,
|
||||
receipts_root: B256::ZERO,
|
||||
logs_bloom: Bloom::ZERO,
|
||||
difficulty: U256::ZERO,
|
||||
number,
|
||||
gas_limit: 0,
|
||||
gas_used: 0,
|
||||
timestamp,
|
||||
extra_data,
|
||||
mix_hash: B256::ZERO,
|
||||
nonce: B64::ZERO,
|
||||
base_fee_per_gas: None,
|
||||
withdrawals_root: None,
|
||||
blob_gas_used: None,
|
||||
excess_blob_gas: None,
|
||||
parent_beacon_block_root: None,
|
||||
requests_hash: None,
|
||||
},
|
||||
hash: B256::ZERO,
|
||||
},
|
||||
body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
|
||||
}),
|
||||
receipts: vec![],
|
||||
system_txs: vec![],
|
||||
read_precompile_calls: ReadPrecompileCalls(vec![]),
|
||||
highest_precompile_address: None,
|
||||
};
|
||||
LocalBlockAndReceipts(timestamp.to_string(), res)
|
||||
}
|
||||
|
||||
fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, File)> {
|
||||
let now = OffsetDateTime::now_utc();
|
||||
let day_str = date_from_datetime(now);
|
||||
let hour = now.hour();
|
||||
|
||||
let temp_dir = tempfile::tempdir()?;
|
||||
let path = temp_dir.path().join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
|
||||
std::fs::create_dir_all(path.parent().unwrap())?;
|
||||
|
||||
Ok((temp_dir, File::create(path)?))
|
||||
}
|
||||
|
||||
struct BlockSourceHierarchy {
|
||||
block_source: HlNodeBlockSource,
|
||||
_temp_dir: tempfile::TempDir,
|
||||
file1: File,
|
||||
current_block: LocalBlockAndReceipts,
|
||||
future_block_hl_node: LocalBlockAndReceipts,
|
||||
future_block_fallback: LocalBlockAndReceipts,
|
||||
}
|
||||
|
||||
async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
|
||||
// Setup fallback block source
|
||||
let block_source_fallback = HlNodeBlockSource::new(
|
||||
BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
|
||||
PathBuf::from("/nonexistent"),
|
||||
1000000,
|
||||
)
|
||||
.await;
|
||||
let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
|
||||
let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
|
||||
let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");
|
||||
|
||||
let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
|
||||
writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;
|
||||
|
||||
let block_source = HlNodeBlockSource::new(
|
||||
BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
|
||||
temp_dir1.path().to_path_buf(),
|
||||
1000000,
|
||||
)
|
||||
.await;
|
||||
|
||||
block_source_fallback
|
||||
.local_blocks_cache
|
||||
.lock()
|
||||
.await
|
||||
.load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));
|
||||
|
||||
Ok(BlockSourceHierarchy {
|
||||
block_source,
|
||||
_temp_dir: temp_dir1,
|
||||
file1,
|
||||
current_block: block_hl_node_0,
|
||||
future_block_hl_node: block_hl_node_1,
|
||||
future_block_fallback: block_fallback_1,
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
|
||||
let hierarchy = setup_block_source_hierarchy().await?;
|
||||
let BlockSourceHierarchy {
|
||||
block_source,
|
||||
current_block,
|
||||
future_block_hl_node,
|
||||
mut file1,
|
||||
..
|
||||
} = hierarchy;
|
||||
|
||||
let block = block_source.collect_block(1000000).await.unwrap();
|
||||
assert_eq!(block, current_block.1);
|
||||
|
||||
let block = block_source.collect_block(1000001).await;
|
||||
assert!(block.is_err());
|
||||
|
||||
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let block = block_source.collect_block(1000001).await.unwrap();
|
||||
assert_eq!(block, future_block_hl_node.1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
|
||||
let hierarchy = setup_block_source_hierarchy().await?;
|
||||
let BlockSourceHierarchy {
|
||||
block_source,
|
||||
current_block,
|
||||
future_block_fallback,
|
||||
mut file1,
|
||||
..
|
||||
} = hierarchy;
|
||||
|
||||
let block = block_source.collect_block(1000000).await.unwrap();
|
||||
assert_eq!(block, current_block.1);
|
||||
|
||||
tokio::time::sleep(HlNodeBlockSource::MAX_ALLOWED_THRESHOLD_BEFORE_FALLBACK.unsigned_abs())
|
||||
.await;
|
||||
|
||||
writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
|
||||
let block = block_source.collect_block(1000001).await.unwrap();
|
||||
assert_eq!(block, future_block_fallback.1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
51 src/pseudo_peer/sources/hl_node/cache.rs Normal file
@@ -0,0 +1,51 @@
use super::scan::ScanResult;
use crate::node::types::{BlockAndReceipts, EvmBlock};
use rangemap::RangeInclusiveMap;
use reth_network::cache::LruMap;
use std::path::{Path, PathBuf};
use tracing::{info, warn};

#[derive(Debug)]
pub struct LocalBlocksCache {
    cache: LruMap<u64, BlockAndReceipts>,
    ranges: RangeInclusiveMap<u64, PathBuf>,
}

impl LocalBlocksCache {
    pub fn new(cache_size: u32) -> Self {
        Self { cache: LruMap::new(cache_size), ranges: RangeInclusiveMap::new() }
    }

    pub fn load_scan_result(&mut self, scan_result: ScanResult) {
        for blk in scan_result.new_blocks {
            let EvmBlock::Reth115(b) = &blk.block;
            self.cache.insert(b.header.header.number, blk);
        }
        for range in scan_result.new_block_ranges {
            self.ranges.insert(range, scan_result.path.clone());
        }
    }

    pub fn get_block(&mut self, height: u64) -> Option<BlockAndReceipts> {
        self.cache.remove(&height)
    }

    pub fn get_path_for_height(&self, height: u64) -> Option<PathBuf> {
        self.ranges.get(&height).cloned()
    }

    pub fn log_range_summary(&self, root: &Path) {
        if self.ranges.is_empty() {
            warn!("No ranges found in {:?}", root);
        } else {
            let (min, max) =
                (self.ranges.first_range_value().unwrap(), self.ranges.last_range_value().unwrap());
            info!(
                "Populated {} ranges (min: {}, max: {})",
                self.ranges.len(),
                min.0.start(),
                max.0.end()
            );
        }
    }
}
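A note on the API above: `get_block` pops the entry out of the LRU, so a hit hands ownership of the block to the caller. A minimal sketch of how the two lookup paths combine (it mirrors `HlNodeBlockSource::try_collect_local_block` in mod.rs below; `Scanner` and `ScanOptions` come from scan.rs):

// Sketch only: hit the LRU first; on a miss, use the range map to find the
// hourly file covering `height`, rescan it into the cache, then retry.
async fn lookup(
    cache: &tokio::sync::Mutex<LocalBlocksCache>,
    height: u64,
) -> Option<BlockAndReceipts> {
    let mut guard = cache.lock().await;
    if let Some(block) = guard.get_block(height) {
        return Some(block); // hot path: get_block removes the cached entry
    }
    let path = guard.get_path_for_height(height)?; // cold path: which file has it?
    guard.load_scan_result(Scanner::scan_hour_file(
        &path,
        &mut 0,
        ScanOptions { start_height: 0, only_load_ranges: false },
    ));
    guard.get_block(height)
}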
67 src/pseudo_peer/sources/hl_node/file_ops.rs Normal file
@@ -0,0 +1,67 @@
use super::{scan::Scanner, time_utils::TimeUtils, HOURLY_SUBDIR};
use crate::node::types::BlockAndReceipts;
use std::{
    fs::File,
    io::{Read, Seek, SeekFrom},
    path::{Path, PathBuf},
};

pub struct FileOperations;

impl FileOperations {
    pub fn all_hourly_files(root: &Path) -> Option<Vec<PathBuf>> {
        let mut files = Vec::new();
        for entry in std::fs::read_dir(root.join(HOURLY_SUBDIR)).ok()? {
            let dir = entry.ok()?.path();
            if let Ok(subentries) = std::fs::read_dir(&dir) {
                files.extend(
                    subentries
                        .filter_map(|f| f.ok().map(|f| f.path()))
                        .filter_map(|p| TimeUtils::datetime_from_path(&p).map(|dt| (dt, p))),
                );
            }
        }
        files.sort();
        Some(files.into_iter().map(|(_, p)| p).collect())
    }

    pub fn find_latest_hourly_file(root: &Path) -> Option<PathBuf> {
        Self::all_hourly_files(root)?.into_iter().last()
    }

    pub fn read_last_block_from_file(path: &Path) -> Option<(BlockAndReceipts, u64)> {
        let mut file = File::open(path).ok()?;
        Self::read_last_complete_line(&mut file)
    }

    fn read_last_complete_line<R: Read + Seek>(read: &mut R) -> Option<(BlockAndReceipts, u64)> {
        const CHUNK_SIZE: u64 = 50000;
        let mut buf = Vec::with_capacity(CHUNK_SIZE as usize);
        let mut pos = read.seek(SeekFrom::End(0)).unwrap();
        let mut last_line = Vec::new();

        while pos > 0 {
            let read_size = pos.min(CHUNK_SIZE);
            buf.resize(read_size as usize, 0);
            read.seek(SeekFrom::Start(pos - read_size)).unwrap();
            read.read_exact(&mut buf).unwrap();
            last_line = [buf.clone(), last_line].concat();
            if last_line.ends_with(b"\n") {
                last_line.pop();
            }

            if let Some(idx) = last_line.iter().rposition(|&b| b == b'\n') {
                let candidate = &last_line[idx + 1..];
                if let Ok(result) = Scanner::line_to_evm_block(str::from_utf8(candidate).unwrap()) {
                    return Some(result);
                }
                last_line.truncate(idx);
            }
            if pos < read_size {
                break;
            }
            pos -= read_size;
        }
        Scanner::line_to_evm_block(&String::from_utf8(last_line).unwrap()).ok()
    }
}
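`read_last_complete_line` scans backward from end-of-file in 50 KB chunks and tries the last newline-delimited candidate in each chunk until one parses, so a partially written trailing line (the hl-node writer appends continuously) is skipped rather than treated as corruption. A hedged sketch of the intended use, mirroring `find_latest_block_number` in mod.rs (`tip_height` is a hypothetical helper, not part of this diff):

// Hypothetical helper: resolve the tip height recorded under <root>/hourly/.
fn tip_height(root: &std::path::Path) -> Option<u64> {
    let latest = FileOperations::find_latest_hourly_file(root)?;
    // Walks backward from the end of the file until a complete line parses.
    let (_block, height) = FileOperations::read_last_block_from_file(&latest)?;
    Some(height)
}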
231 src/pseudo_peer/sources/hl_node/mod.rs Normal file
@@ -0,0 +1,231 @@
mod cache;
mod file_ops;
mod scan;
#[cfg(test)]
mod tests;
mod time_utils;

use self::{
    cache::LocalBlocksCache,
    file_ops::FileOperations,
    scan::{ScanOptions, Scanner},
    time_utils::TimeUtils,
};
use super::{BlockSource, BlockSourceBoxed};
use crate::node::types::BlockAndReceipts;
use futures::future::BoxFuture;
use std::{
    path::{Path, PathBuf},
    sync::Arc,
    time::Duration,
};
use time::OffsetDateTime;
use tokio::sync::Mutex;
use tracing::{info, warn};

const HOURLY_SUBDIR: &str = "hourly";
const CACHE_SIZE: u32 = 8000; // 3660 blocks per hour
const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
const TAIL_INTERVAL: Duration = Duration::from_millis(25);

#[derive(Debug, Clone)]
pub struct HlNodeBlockSourceArgs {
    pub root: PathBuf,
    pub fallback_threshold: Duration,
}

/// Block source that monitors the local ingest directory for the HL node.
#[derive(Debug, Clone)]
pub struct HlNodeBlockSource {
    pub fallback: BlockSourceBoxed,
    pub local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
    pub last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
    pub args: HlNodeBlockSourceArgs,
}

impl BlockSource for HlNodeBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let fallback = self.fallback.clone();
        let args = self.args.clone();
        let local_blocks_cache = self.local_blocks_cache.clone();
        let last_local_fetch = self.last_local_fetch.clone();
        Box::pin(async move {
            let now = OffsetDateTime::now_utc();

            if let Some(block) = Self::try_collect_local_block(local_blocks_cache, height).await {
                Self::update_last_fetch(last_local_fetch, height, now).await;
                return Ok(block);
            }

            if let Some((last_height, last_poll_time)) = *last_local_fetch.lock().await {
                let more_recent = last_height < height;
                let too_soon = now - last_poll_time < args.fallback_threshold;
                if more_recent && too_soon {
                    return Err(eyre::eyre!(
                        "Not found locally; limiting polling rate before fallback so that hl-node has a chance to catch up"
                    ));
                }
            }

            let block = fallback.collect_block(height).await?;
            Self::update_last_fetch(last_local_fetch, height, now).await;
            Ok(block)
        })
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let fallback = self.fallback.clone();
        let args = self.args.clone();
        Box::pin(async move {
            let Some(dir) = FileOperations::find_latest_hourly_file(&args.root) else {
                warn!(
                    "No EVM blocks from hl-node found at {:?}; fallback to s3/ingest-dir",
                    args.root
                );
                return fallback.find_latest_block_number().await;
            };

            match FileOperations::read_last_block_from_file(&dir) {
                Some((_, height)) => {
                    info!("Latest block number: {} with path {}", height, dir.display());
                    Some(height)
                }
                None => {
                    warn!(
                        "Failed to parse the hl-node hourly file at {:?}; fallback to s3/ingest-dir",
                        dir
                    );
                    fallback.find_latest_block_number().await
                }
            }
        })
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.fallback.recommended_chunk_size()
    }
}

impl HlNodeBlockSource {
    async fn update_last_fetch(
        last_local_fetch: Arc<Mutex<Option<(u64, OffsetDateTime)>>>,
        height: u64,
        now: OffsetDateTime,
    ) {
        let mut last_fetch = last_local_fetch.lock().await;
        if last_fetch.is_none_or(|(h, _)| h < height) {
            *last_fetch = Some((height, now));
        }
    }

    async fn try_collect_local_block(
        local_blocks_cache: Arc<Mutex<LocalBlocksCache>>,
        height: u64,
    ) -> Option<BlockAndReceipts> {
        let mut u_cache = local_blocks_cache.lock().await;
        if let Some(block) = u_cache.get_block(height) {
            return Some(block);
        }
        let path = u_cache.get_path_for_height(height)?;
        info!("Loading block data from {:?}", path);
        let scan_result = Scanner::scan_hour_file(
            &path,
            &mut 0,
            ScanOptions { start_height: 0, only_load_ranges: false },
        );
        u_cache.load_scan_result(scan_result);
        u_cache.get_block(height)
    }

    async fn try_backfill_local_blocks(
        root: &Path,
        cache: &Arc<Mutex<LocalBlocksCache>>,
        cutoff_height: u64,
    ) -> eyre::Result<()> {
        let mut u_cache = cache.lock().await;
        for subfile in FileOperations::all_hourly_files(root).unwrap_or_default() {
            if let Some((_, height)) = FileOperations::read_last_block_from_file(&subfile) {
                if height < cutoff_height {
                    continue;
                }
            } else {
                warn!("Failed to parse last line of file: {:?}", subfile);
            }
            let mut scan_result = Scanner::scan_hour_file(
                &subfile,
                &mut 0,
                ScanOptions { start_height: cutoff_height, only_load_ranges: true },
            );
            scan_result.new_blocks.clear(); // Only store ranges, load data lazily
            u_cache.load_scan_result(scan_result);
        }
        u_cache.log_range_summary(root);
        Ok(())
    }

    async fn start_local_ingest_loop(&self, current_head: u64) {
        let root = self.args.root.to_owned();
        let cache = self.local_blocks_cache.clone();
        tokio::spawn(async move {
            let mut next_height = current_head;
            let mut dt = loop {
                if let Some(f) = FileOperations::find_latest_hourly_file(&root) {
                    break TimeUtils::datetime_from_path(&f).unwrap();
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            };
            let (mut hour, mut day_str, mut last_line) =
                (dt.hour(), TimeUtils::date_from_datetime(dt), 0);
            info!("Starting local ingest loop from height: {}", current_head);
            loop {
                let hour_file = root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"));
                if hour_file.exists() {
                    let scan_result = Scanner::scan_hour_file(
                        &hour_file,
                        &mut last_line,
                        ScanOptions { start_height: next_height, only_load_ranges: false },
                    );
                    next_height = scan_result.next_expected_height;
                    cache.lock().await.load_scan_result(scan_result);
                }
                let now = OffsetDateTime::now_utc();
                if dt + ONE_HOUR < now {
                    dt += ONE_HOUR;
                    (hour, day_str, last_line) = (dt.hour(), TimeUtils::date_from_datetime(dt), 0);
                    info!(
                        "Moving to new file: {:?}",
                        root.join(HOURLY_SUBDIR).join(&day_str).join(format!("{hour}"))
                    );
                    continue;
                }
                tokio::time::sleep(TAIL_INTERVAL).await;
            }
        });
    }

    pub(crate) async fn run(&self, next_block_number: u64) -> eyre::Result<()> {
        let _ = Self::try_backfill_local_blocks(
            &self.args.root,
            &self.local_blocks_cache,
            next_block_number,
        )
        .await;
        self.start_local_ingest_loop(next_block_number).await;
        Ok(())
    }

    pub async fn new(
        fallback: BlockSourceBoxed,
        args: HlNodeBlockSourceArgs,
        next_block_number: u64,
    ) -> Self {
        let block_source = Self {
            fallback,
            args,
            local_blocks_cache: Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE))),
            last_local_fetch: Arc::new(Mutex::new(None)),
        };
        block_source.run(next_block_number).await.unwrap();
        block_source
    }
}
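`collect_block` above prefers the local hourly files and, for heights newer than the last local fetch, refuses to hit the fallback until `fallback_threshold` has elapsed, giving hl-node a chance to catch up. A sketch of wiring the hierarchy together (the root path and threshold are placeholder values; `S3BlockSource` is defined in s3.rs later in this diff):

// Sketch: hl-node local ingest first, S3 as the fallback.
async fn build_source(s3: S3BlockSource) -> HlNodeBlockSource {
    let fallback: BlockSourceBoxed = Arc::new(Box::new(s3));
    HlNodeBlockSource::new(
        fallback,
        HlNodeBlockSourceArgs {
            root: PathBuf::from("/home/username/hl/data/evm_block_and_receipts"), // placeholder
            fallback_threshold: Duration::from_secs(5),
        },
        1, // next block number to start ingesting from
    )
    .await
}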
91 src/pseudo_peer/sources/hl_node/scan.rs Normal file
@@ -0,0 +1,91 @@
use crate::node::types::{BlockAndReceipts, EvmBlock};
use serde::{Deserialize, Serialize};
use std::{
    fs::File,
    io::{BufRead, BufReader},
    ops::RangeInclusive,
    path::{Path, PathBuf},
};
use tracing::warn;

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LocalBlockAndReceipts(pub String, pub BlockAndReceipts);

pub struct ScanResult {
    pub path: PathBuf,
    pub next_expected_height: u64,
    pub new_blocks: Vec<BlockAndReceipts>,
    pub new_block_ranges: Vec<RangeInclusive<u64>>,
}

pub struct ScanOptions {
    pub start_height: u64,
    pub only_load_ranges: bool,
}

pub struct Scanner;

impl Scanner {
    pub fn line_to_evm_block(line: &str) -> serde_json::Result<(BlockAndReceipts, u64)> {
        let LocalBlockAndReceipts(_, parsed_block): LocalBlockAndReceipts =
            serde_json::from_str(line)?;
        let height = match &parsed_block.block {
            EvmBlock::Reth115(b) => b.header.header.number,
        };
        Ok((parsed_block, height))
    }

    pub fn scan_hour_file(path: &Path, last_line: &mut usize, options: ScanOptions) -> ScanResult {
        let lines: Vec<String> =
            BufReader::new(File::open(path).expect("Failed to open hour file"))
                .lines()
                .collect::<Result<_, _>>()
                .unwrap();
        let skip = if *last_line == 0 { 0 } else { *last_line - 1 };
        let mut new_blocks = Vec::new();
        let mut last_height = options.start_height;
        let mut block_ranges = Vec::new();
        let mut current_range: Option<(u64, u64)> = None;

        for (line_idx, line) in lines.iter().enumerate().skip(skip) {
            if line_idx < *last_line || line.trim().is_empty() {
                continue;
            }

            match Self::line_to_evm_block(line) {
                Ok((parsed_block, height)) => {
                    if height >= options.start_height {
                        last_height = last_height.max(height);
                        if !options.only_load_ranges {
                            new_blocks.push(parsed_block);
                        }
                        *last_line = line_idx;
                    }

                    match current_range {
                        Some((start, end)) if end + 1 == height => {
                            current_range = Some((start, height))
                        }
                        _ => {
                            if let Some((start, end)) = current_range.take() {
                                block_ranges.push(start..=end);
                            }
                            current_range = Some((height, height));
                        }
                    }
                }
                Err(_) => warn!("Failed to parse line: {}...", line.get(0..50).unwrap_or(line)),
            }
        }

        if let Some((start, end)) = current_range {
            block_ranges.push(start..=end);
        }
        ScanResult {
            path: path.to_path_buf(),
            next_expected_height: last_height + 1,
            new_blocks,
            new_block_ranges: block_ranges,
        }
    }
}
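Each line of an hourly file is one JSON-serialized `LocalBlockAndReceipts` tuple (a timestamp string followed by the block payload), and `scan_hour_file` coalesces the heights it sees into inclusive ranges. The range logic in isolation, as a standalone rewrite for illustration (not code from this diff):

// Consecutive heights extend the open range; a gap closes it and starts a new one.
fn coalesce(heights: &[u64]) -> Vec<std::ops::RangeInclusive<u64>> {
    let mut ranges = Vec::new();
    let mut current: Option<(u64, u64)> = None;
    for &h in heights {
        match current {
            Some((start, end)) if end + 1 == h => current = Some((start, h)),
            _ => {
                if let Some((start, end)) = current.take() {
                    ranges.push(start..=end);
                }
                current = Some((h, h));
            }
        }
    }
    if let Some((start, end)) = current {
        ranges.push(start..=end);
    }
    ranges
}

// coalesce(&[5, 6, 7, 9]) == vec![5..=7, 9..=9]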
214 src/pseudo_peer/sources/hl_node/tests.rs Normal file
@@ -0,0 +1,214 @@
use super::*;
use crate::{
    node::types::{reth_compat, ReadPrecompileCalls},
    pseudo_peer::sources::{hl_node::scan::LocalBlockAndReceipts, LocalBlockSource},
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, Bloom, Bytes, B256, B64, U256};
use std::{io::Write, time::Duration};

const DEFAULT_FALLBACK_THRESHOLD_FOR_TEST: Duration = Duration::from_millis(5000);

#[test]
fn test_datetime_from_path() {
    let path = Path::new("/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4");
    let dt = TimeUtils::datetime_from_path(path).unwrap();
    println!("{dt:?}");
}

#[tokio::test]
async fn test_backfill() {
    let test_path = Path::new("/root/evm_block_and_receipts");
    if !test_path.exists() {
        return;
    }

    let cache = Arc::new(Mutex::new(LocalBlocksCache::new(CACHE_SIZE)));
    HlNodeBlockSource::try_backfill_local_blocks(test_path, &cache, 1000000).await.unwrap();

    let u_cache = cache.lock().await;
    assert_eq!(
        u_cache.get_path_for_height(9735058),
        Some(test_path.join(HOURLY_SUBDIR).join("20250729").join("22"))
    );
}

fn scan_result_from_single_block(block: BlockAndReceipts) -> scan::ScanResult {
    use crate::node::types::EvmBlock;
    let height = match &block.block {
        EvmBlock::Reth115(b) => b.header.header.number,
    };
    scan::ScanResult {
        path: PathBuf::from("/nonexistent-block"),
        next_expected_height: height + 1,
        new_blocks: vec![block],
        new_block_ranges: vec![height..=height],
    }
}

fn empty_block(number: u64, timestamp: u64, extra_data: &'static [u8]) -> LocalBlockAndReceipts {
    use crate::node::types::EvmBlock;
    LocalBlockAndReceipts(
        timestamp.to_string(),
        BlockAndReceipts {
            block: EvmBlock::Reth115(reth_compat::SealedBlock {
                header: reth_compat::SealedHeader {
                    header: Header {
                        parent_hash: B256::ZERO,
                        ommers_hash: B256::ZERO,
                        beneficiary: Address::ZERO,
                        state_root: B256::ZERO,
                        transactions_root: B256::ZERO,
                        receipts_root: B256::ZERO,
                        logs_bloom: Bloom::ZERO,
                        difficulty: U256::ZERO,
                        number,
                        gas_limit: 0,
                        gas_used: 0,
                        timestamp,
                        extra_data: Bytes::from_static(extra_data),
                        mix_hash: B256::ZERO,
                        nonce: B64::ZERO,
                        base_fee_per_gas: None,
                        withdrawals_root: None,
                        blob_gas_used: None,
                        excess_blob_gas: None,
                        parent_beacon_block_root: None,
                        requests_hash: None,
                    },
                    hash: B256::ZERO,
                },
                body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None },
            }),
            receipts: vec![],
            system_txs: vec![],
            read_precompile_calls: ReadPrecompileCalls(vec![]),
            highest_precompile_address: None,
        },
    )
}

fn setup_temp_dir_and_file() -> eyre::Result<(tempfile::TempDir, std::fs::File)> {
    let now = OffsetDateTime::now_utc();
    let temp_dir = tempfile::tempdir()?;
    let path = temp_dir
        .path()
        .join(HOURLY_SUBDIR)
        .join(TimeUtils::date_from_datetime(now))
        .join(format!("{}", now.hour()));
    std::fs::create_dir_all(path.parent().unwrap())?;
    Ok((temp_dir, std::fs::File::create(path)?))
}

struct BlockSourceHierarchy {
    block_source: HlNodeBlockSource,
    _temp_dir: tempfile::TempDir,
    file1: std::fs::File,
    current_block: LocalBlockAndReceipts,
    future_block_hl_node: LocalBlockAndReceipts,
    future_block_fallback: LocalBlockAndReceipts,
}

async fn setup_block_source_hierarchy() -> eyre::Result<BlockSourceHierarchy> {
    // Setup fallback block source
    let block_source_fallback = HlNodeBlockSource::new(
        BlockSourceBoxed::new(Box::new(LocalBlockSource::new("/nonexistent"))),
        HlNodeBlockSourceArgs {
            root: PathBuf::from("/nonexistent"),
            fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
        },
        1000000,
    )
    .await;
    let block_hl_node_0 = empty_block(1000000, 1722633600, b"hl-node");
    let block_hl_node_1 = empty_block(1000001, 1722633600, b"hl-node");
    let block_fallback_1 = empty_block(1000001, 1722633600, b"fallback");

    let (temp_dir1, mut file1) = setup_temp_dir_and_file()?;
    writeln!(&mut file1, "{}", serde_json::to_string(&block_hl_node_0)?)?;

    let block_source = HlNodeBlockSource::new(
        BlockSourceBoxed::new(Box::new(block_source_fallback.clone())),
        HlNodeBlockSourceArgs {
            root: temp_dir1.path().to_path_buf(),
            fallback_threshold: DEFAULT_FALLBACK_THRESHOLD_FOR_TEST,
        },
        1000000,
    )
    .await;

    block_source_fallback
        .local_blocks_cache
        .lock()
        .await
        .load_scan_result(scan_result_from_single_block(block_fallback_1.1.clone()));

    Ok(BlockSourceHierarchy {
        block_source,
        _temp_dir: temp_dir1,
        file1,
        current_block: block_hl_node_0,
        future_block_hl_node: block_hl_node_1,
        future_block_fallback: block_fallback_1,
    })
}

#[tokio::test]
async fn test_update_last_fetch_no_fallback() -> eyre::Result<()> {
    let hierarchy = setup_block_source_hierarchy().await?;
    let BlockSourceHierarchy {
        block_source, current_block, future_block_hl_node, mut file1, ..
    } = hierarchy;

    let block = block_source.collect_block(1000000).await.unwrap();
    assert_eq!(block, current_block.1);

    let block = block_source.collect_block(1000001).await;
    assert!(block.is_err());

    writeln!(&mut file1, "{}", serde_json::to_string(&future_block_hl_node)?)?;
    tokio::time::sleep(Duration::from_millis(100)).await;

    let block = block_source.collect_block(1000001).await.unwrap();
    assert_eq!(block, future_block_hl_node.1);

    Ok(())
}

#[tokio::test]
async fn test_update_last_fetch_fallback() -> eyre::Result<()> {
    let hierarchy = setup_block_source_hierarchy().await?;
    let BlockSourceHierarchy {
        block_source, current_block, future_block_fallback, mut file1, ..
    } = hierarchy;

    let block = block_source.collect_block(1000000).await.unwrap();
    assert_eq!(block, current_block.1);

    tokio::time::sleep(DEFAULT_FALLBACK_THRESHOLD_FOR_TEST).await;

    writeln!(&mut file1, "{}", serde_json::to_string(&future_block_fallback)?)?;
    let block = block_source.collect_block(1000001).await.unwrap();
    assert_eq!(block, future_block_fallback.1);

    Ok(())
}

#[test]
fn test_hourly_files_sort() -> eyre::Result<()> {
    let temp_dir = tempfile::tempdir()?;
    // create 20250826/9, 20250826/14
    let targets = [("20250826", "9"), ("20250826", "14")];
    for (date, hour) in targets {
        let hourly_file = temp_dir.path().join(HOURLY_SUBDIR).join(date).join(hour);
        let parent = hourly_file.parent().unwrap();
        std::fs::create_dir_all(parent)?;
        std::fs::File::create(hourly_file)?;
    }
    let files = FileOperations::all_hourly_files(temp_dir.path()).unwrap();
    let file_names: Vec<_> =
        files.into_iter().map(|p| p.file_name().unwrap().to_string_lossy().into_owned()).collect();

    assert_eq!(file_names, ["9", "14"]);
    Ok(())
}
19 src/pseudo_peer/sources/hl_node/time_utils.rs Normal file
@@ -0,0 +1,19 @@
use std::path::Path;
use time::{macros::format_description, Date, OffsetDateTime, Time};

pub struct TimeUtils;

impl TimeUtils {
    pub fn datetime_from_path(path: &Path) -> Option<OffsetDateTime> {
        let (dt_part, hour_part) =
            (path.parent()?.file_name()?.to_str()?, path.file_name()?.to_str()?);
        Some(OffsetDateTime::new_utc(
            Date::parse(dt_part, &format_description!("[year][month][day]")).ok()?,
            Time::from_hms(hour_part.parse().ok()?, 0, 0).ok()?,
        ))
    }

    pub fn date_from_datetime(dt: OffsetDateTime) -> String {
        dt.format(&format_description!("[year][month][day]")).unwrap()
    }
}
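A worked example of the path convention these helpers encode (<root>/hourly/<YYYYMMDD>/<hour>), using the same path as the test above:

fn demo() {
    let dt = TimeUtils::datetime_from_path(Path::new(
        "/home/username/hl/data/evm_block_and_receipts/hourly/20250731/4",
    ))
    .unwrap();
    // 2025-07-31 04:00:00 UTC
    assert_eq!((dt.year(), u8::from(dt.month()), dt.day(), dt.hour()), (2025, 7, 31, 4));
    assert_eq!(TimeUtils::date_from_datetime(dt), "20250731");
}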
64 src/pseudo_peer/sources/local.rs Normal file
@@ -0,0 +1,64 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use std::path::PathBuf;
use tracing::info;

/// Block source that reads blocks from the local filesystem (--ingest-dir)
#[derive(Debug, Clone)]
pub struct LocalBlockSource {
    dir: PathBuf,
}

impl LocalBlockSource {
    pub fn new(dir: impl Into<PathBuf>) -> Self {
        Self { dir: dir.into() }
    }

    async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
        let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
        let files = files
            .into_iter()
            .filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
            .map(|entry| entry.unwrap().path().to_string_lossy().to_string())
            .collect::<Vec<_>>();

        utils::name_with_largest_number(&files, is_dir)
    }
}

impl BlockSource for LocalBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let dir = self.dir.clone();
        async move {
            let path = dir.join(utils::rmp_path(height));
            let file = tokio::fs::read(&path)
                .await
                .wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let dir = self.dir.clone();
        async move {
            let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
            let (_, second_level) =
                Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
            let (block_number, third_level) =
                Self::pick_path_with_highest_number(dir.join(second_level), false).await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}
@@ -1,269 +1,40 @@
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use eyre::Context;
use futures::{future::BoxFuture, FutureExt};
use reth_network::cache::LruMap;
use std::{
    path::PathBuf,
    sync::{Arc, RwLock},
};
use tracing::info;
use auto_impl::auto_impl;
use futures::future::BoxFuture;
use std::{sync::Arc, time::Duration};

// Module declarations
mod cached;
mod hl_node;
pub use hl_node::HlNodeBlockSource;
mod local;
mod s3;
mod utils;

// Public exports
pub use cached::CachedBlockSource;
pub use hl_node::{HlNodeBlockSource, HlNodeBlockSourceArgs};
pub use local::LocalBlockSource;
pub use s3::S3BlockSource;

const DEFAULT_POLLING_INTERVAL: Duration = Duration::from_millis(25);

/// Trait for block sources that can retrieve blocks from various sources
#[auto_impl(&, &mut, Box, Arc)]
pub trait BlockSource: Send + Sync + std::fmt::Debug + Unpin + 'static {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>>;
    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>>;
    /// Retrieves a block at the specified height
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>>;

    /// Finds the latest block number available from this source
    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>>;

    /// Returns the recommended chunk size for batch operations
    fn recommended_chunk_size(&self) -> u64;

    /// Returns the polling interval
    fn polling_interval(&self) -> Duration {
        DEFAULT_POLLING_INTERVAL
    }
}

/// Type alias for a boxed block source
pub type BlockSourceBoxed = Arc<Box<dyn BlockSource>>;

fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
    let mut files = files
        .iter()
        .filter_map(|file_raw| {
            let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
            let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
            stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
        })
        .collect::<Vec<_>>();
    if files.is_empty() {
        return None;
    }
    files.sort_by_key(|(number, _)| *number);
    files.last().cloned()
}

#[derive(Debug, Clone)]
pub struct S3BlockSource {
    client: aws_sdk_s3::Client,
    bucket: String,
}

impl S3BlockSource {
    pub fn new(client: aws_sdk_s3::Client, bucket: String) -> Self {
        Self { client, bucket }
    }

    async fn pick_path_with_highest_number(
        client: aws_sdk_s3::Client,
        bucket: String,
        dir: String,
        is_dir: bool,
    ) -> Option<(u64, String)> {
        let request = client
            .list_objects()
            .bucket(&bucket)
            .prefix(dir)
            .delimiter("/")
            .request_payer(RequestPayer::Requester);
        let response = request.send().await.ok()?;
        let files: Vec<String> = if is_dir {
            response
                .common_prefixes
                .unwrap()
                .iter()
                .map(|object| object.prefix.as_ref().unwrap().to_string())
                .collect()
        } else {
            response
                .contents
                .unwrap()
                .iter()
                .map(|object| object.key.as_ref().unwrap().to_string())
                .collect()
        };
        name_with_largest_number(&files, is_dir)
    }
}

impl BlockSource for S3BlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let path = rmp_path(height);
            let request = client
                .get_object()
                .request_payer(RequestPayer::Requester)
                .bucket(&bucket)
                .key(path);
            let response = request.send().await?;
            let bytes = response.body.collect().await?.into_bytes();
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let (_, first_level) = Self::pick_path_with_highest_number(
                client.clone(),
                bucket.clone(),
                "".to_string(),
                true,
            )
            .await?;
            let (_, second_level) = Self::pick_path_with_highest_number(
                client.clone(),
                bucket.clone(),
                first_level,
                true,
            )
            .await?;
            let (block_number, third_level) = Self::pick_path_with_highest_number(
                client.clone(),
                bucket.clone(),
                second_level,
                false,
            )
            .await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}

impl BlockSource for LocalBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        let dir = self.dir.clone();
        async move {
            let path = dir.join(rmp_path(height));
            let file = tokio::fs::read(&path)
                .await
                .wrap_err_with(|| format!("Failed to read block from {path:?}"))?;
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&file[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        let dir = self.dir.clone();
        async move {
            let (_, first_level) = Self::pick_path_with_highest_number(dir.clone(), true).await?;
            let (_, second_level) =
                Self::pick_path_with_highest_number(dir.join(first_level), true).await?;
            let (block_number, third_level) =
                Self::pick_path_with_highest_number(dir.join(second_level), false).await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}

#[derive(Debug, Clone)]
pub struct LocalBlockSource {
    dir: PathBuf,
}

impl LocalBlockSource {
    pub fn new(dir: impl Into<PathBuf>) -> Self {
        Self { dir: dir.into() }
    }

    fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
        let mut files = files
            .iter()
            .filter_map(|file_raw| {
                let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
                let file = file.split("/").last().unwrap();
                let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
                stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
            })
            .collect::<Vec<_>>();
        if files.is_empty() {
            return None;
        }
        files.sort_by_key(|(number, _)| *number);
        files.last().map(|(number, file)| (*number, file.to_string()))
    }

    async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
        let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
        let files = files
            .into_iter()
            .filter(|path| path.as_ref().unwrap().path().is_dir() == is_dir)
            .map(|entry| entry.unwrap().path().to_string_lossy().to_string())
            .collect::<Vec<_>>();

        Self::name_with_largest_number_static(&files, is_dir)
    }
}

fn rmp_path(height: u64) -> String {
    let f = ((height - 1) / 1_000_000) * 1_000_000;
    let s = ((height - 1) / 1_000) * 1_000;
    let path = format!("{f}/{s}/{height}.rmp.lz4");
    path
}

impl BlockSource for BlockSourceBoxed {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        self.as_ref().collect_block(height)
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        self.as_ref().find_latest_block_number()
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.as_ref().recommended_chunk_size()
    }
}

#[derive(Debug, Clone)]
pub struct CachedBlockSource {
    block_source: BlockSourceBoxed,
    cache: Arc<RwLock<LruMap<u64, BlockAndReceipts>>>,
}

impl CachedBlockSource {
    const CACHE_LIMIT: u32 = 100000;
    pub fn new(block_source: BlockSourceBoxed) -> Self {
        Self { block_source, cache: Arc::new(RwLock::new(LruMap::new(Self::CACHE_LIMIT))) }
    }
}

impl BlockSource for CachedBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<eyre::Result<BlockAndReceipts>> {
        let block_source = self.block_source.clone();
        let cache = self.cache.clone();
        async move {
            if let Some(block) = cache.write().unwrap().get(&height) {
                return Ok(block.clone());
            }
            let block = block_source.collect_block(height).await?;
            cache.write().unwrap().insert(height, block.clone());
            Ok(block)
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<Option<u64>> {
        self.block_source.find_latest_block_number()
    }

    fn recommended_chunk_size(&self) -> u64 {
        self.block_source.recommended_chunk_size()
    }
}
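For reference, the trait is small enough that a test double fits in a few lines. A hypothetical in-memory implementation (not part of this diff) showing why the methods return 'static futures: the source clones its shared state into each future instead of borrowing self.

#[derive(Debug, Clone)]
struct MapBlockSource {
    blocks: Arc<std::collections::BTreeMap<u64, BlockAndReceipts>>,
}

impl BlockSource for MapBlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let blocks = self.blocks.clone(); // clone the Arc so the future owns its data
        Box::pin(async move {
            blocks.get(&height).cloned().ok_or_else(|| eyre::eyre!("no block {height}"))
        })
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let blocks = self.blocks.clone();
        Box::pin(async move { blocks.keys().next_back().copied() })
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }
}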
95 src/pseudo_peer/sources/s3.rs Normal file
@@ -0,0 +1,95 @@
use super::{utils, BlockSource};
use crate::node::types::BlockAndReceipts;
use aws_sdk_s3::types::RequestPayer;
use futures::{future::BoxFuture, FutureExt};
use std::{sync::Arc, time::Duration};
use tracing::info;

/// Block source that reads blocks from S3 (--s3)
#[derive(Debug, Clone)]
pub struct S3BlockSource {
    client: Arc<aws_sdk_s3::Client>,
    bucket: String,
    polling_interval: Duration,
}

impl S3BlockSource {
    pub fn new(client: aws_sdk_s3::Client, bucket: String, polling_interval: Duration) -> Self {
        Self { client: client.into(), bucket, polling_interval }
    }

    async fn pick_path_with_highest_number(
        client: &aws_sdk_s3::Client,
        bucket: &str,
        dir: &str,
        is_dir: bool,
    ) -> Option<(u64, String)> {
        let request = client
            .list_objects()
            .bucket(bucket)
            .prefix(dir)
            .delimiter("/")
            .request_payer(RequestPayer::Requester);
        let response = request.send().await.ok()?;
        let files: Vec<String> = if is_dir {
            response
                .common_prefixes?
                .iter()
                .map(|object| object.prefix.as_ref().unwrap().to_string())
                .collect()
        } else {
            response
                .contents?
                .iter()
                .map(|object| object.key.as_ref().unwrap().to_string())
                .collect()
        };
        utils::name_with_largest_number(&files, is_dir)
    }
}

impl BlockSource for S3BlockSource {
    fn collect_block(&self, height: u64) -> BoxFuture<'static, eyre::Result<BlockAndReceipts>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let path = utils::rmp_path(height);
            let request = client
                .get_object()
                .request_payer(RequestPayer::Requester)
                .bucket(&bucket)
                .key(path);
            let response = request.send().await?;
            let bytes = response.body.collect().await?.into_bytes();
            let mut decoder = lz4_flex::frame::FrameDecoder::new(&bytes[..]);
            let blocks: Vec<BlockAndReceipts> = rmp_serde::from_read(&mut decoder)?;
            Ok(blocks[0].clone())
        }
        .boxed()
    }

    fn find_latest_block_number(&self) -> BoxFuture<'static, Option<u64>> {
        let client = self.client.clone();
        let bucket = self.bucket.clone();
        async move {
            let (_, first_level) =
                Self::pick_path_with_highest_number(&client, &bucket, "", true).await?;
            let (_, second_level) =
                Self::pick_path_with_highest_number(&client, &bucket, &first_level, true).await?;
            let (block_number, third_level) =
                Self::pick_path_with_highest_number(&client, &bucket, &second_level, false).await?;

            info!("Latest block number: {} with path {}", block_number, third_level);
            Some(block_number)
        }
        .boxed()
    }

    fn recommended_chunk_size(&self) -> u64 {
        1000
    }

    fn polling_interval(&self) -> Duration {
        self.polling_interval
    }
}
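Constructing this source requires a ready aws_sdk_s3::Client. A sketch under the assumption that the standard aws-config loader is available (check the constructor names against the crate versions pinned in Cargo.toml; the bucket name and interval below are placeholders):

async fn make_s3_source() -> S3BlockSource {
    // Loads region/credentials from the environment, shared config files, etc.
    let config = aws_config::load_defaults(aws_config::BehaviorVersion::latest()).await;
    let client = aws_sdk_s3::Client::new(&config);
    // The bucket is requester-pays (see RequestPayer::Requester above).
    S3BlockSource::new(client, "example-evm-blocks".to_string(), Duration::from_millis(25))
}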
26 src/pseudo_peer/sources/utils.rs Normal file
@@ -0,0 +1,26 @@
//! Shared utilities for block sources

/// Finds the file/directory with the largest number in its name from a list of files
pub fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, String)> {
    let mut files = files
        .iter()
        .filter_map(|file_raw| {
            let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
            let file = file.split("/").last().unwrap();
            let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
            stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
        })
        .collect::<Vec<_>>();
    if files.is_empty() {
        return None;
    }
    files.sort_by_key(|(number, _)| *number);
    files.last().cloned()
}

/// Generates the RMP file path for a given block height
pub fn rmp_path(height: u64) -> String {
    let f = ((height - 1) / 1_000_000) * 1_000_000;
    let s = ((height - 1) / 1_000) * 1_000;
    format!("{f}/{s}/{height}.rmp.lz4")
}
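A worked example of the bucketing in rmp_path: the height is shifted down by one, then floored to the nearest million and thousand, so the following all hold:

#[test]
fn rmp_path_layout() {
    assert_eq!(rmp_path(1), "0/0/1.rmp.lz4");
    assert_eq!(rmp_path(1_000_000), "0/999000/1000000.rmp.lz4"); // stays in the previous bucket
    assert_eq!(rmp_path(9_735_058), "9000000/9735000/9735058.rmp.lz4");
}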
@@ -1,30 +0,0 @@
use std::path::Path;

use crate::pseudo_peer::{prelude::*, BlockSourceType};

#[tokio::test]
async fn test_block_source_config_s3() {
    let config = BlockSourceConfig::s3("test-bucket".to_string()).await;
    assert!(
        matches!(config.source_type, BlockSourceType::S3 { bucket } if bucket == "test-bucket")
    );
}

#[tokio::test]
async fn test_block_source_config_local() {
    let config = BlockSourceConfig::local("/test/path".into());
    assert!(
        matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
    );
}

#[test]
fn test_error_types() {
    let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
    let benchmark_error: PseudoPeerError = io_error.into();

    match benchmark_error {
        PseudoPeerError::Io(_) => (),
        _ => panic!("Expected Io error"),
    }
}