Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00
remove: Reduce unnecessary LoC
@@ -7,7 +7,6 @@ use std::sync::LazyLock;
 static GENESIS_HASH: B256 =
     b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
 
-/// Dev hardforks
 pub static HL_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
     ChainHardforks::new(vec![
         (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
@@ -1,8 +1,7 @@
-//! Chain specification for HyperEVM.
 pub mod hl;
 pub mod parser;
 
-use crate::hardforks::{hl::HlHardfork, HlHardforks};
+use crate::hardforks::HlHardforks;
 use alloy_consensus::Header;
 use alloy_eips::eip7840::BlobParams;
 use alloy_genesis::Genesis;
@@ -13,15 +12,13 @@ use reth_chainspec::{
 };
 use reth_discv4::NodeRecord;
 use reth_evm::eth::spec::EthExecutorSpec;
-use std::{fmt::Display, sync::Arc};
+use std::fmt::Display;
 
 pub const MAINNET_CHAIN_ID: u64 = 999;
 pub const TESTNET_CHAIN_ID: u64 = 998;
 
-/// Hl chain spec type.
 #[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub struct HlChainSpec {
-    /// [`ChainSpec`].
     pub inner: ChainSpec,
 }
 
@@ -117,11 +114,7 @@ impl EthereumHardforks for HlChainSpec {
     }
 }
 
-impl HlHardforks for HlChainSpec {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.fork(fork)
-    }
-}
+impl HlHardforks for HlChainSpec {}
 
 impl EthExecutorSpec for HlChainSpec {
     fn deposit_contract_address(&self) -> Option<Address> {
@@ -135,12 +128,6 @@ impl From<HlChainSpec> for ChainSpec {
     }
 }
 
-impl HlHardforks for Arc<HlChainSpec> {
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition {
-        self.as_ref().hl_fork_activation(fork)
-    }
-}
-
 impl HlChainSpec {
     pub const MAINNET_RPC_URL: &str = "https://rpc.hyperliquid.xyz/evm";
     pub const TESTNET_RPC_URL: &str = "https://rpc.hyperliquid-testnet.xyz/evm";
@@ -1 +0,0 @@
-
@@ -1,4 +1,3 @@
 pub mod api;
-mod handler;
 pub mod spec;
 pub mod transaction;
@@ -13,88 +13,5 @@ hardfork!(
     HlHardfork {
         /// Initial version
        V1,
-        /// block.number bugfix
-        V2,
-        /// gas mismatch bugfix
-        V3,
     }
 );
-
-impl HlHardfork {
-    /// Retrieves the activation block for the specified hardfork on the given chain.
-    pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        if chain == Chain::from_named(NamedChain::Hyperliquid) {
-            return Self::hl_mainnet_activation_block(fork);
-        }
-
-        None
-    }
-
-    /// Retrieves the activation timestamp for the specified hardfork on the given chain.
-    pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
-        None
-    }
-
-    /// Retrieves the activation block for the specified hardfork on the HyperLiquid mainnet.
-    pub fn hl_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
-        match_hardfork(
-            fork,
-            |fork| match fork {
-                EthereumHardfork::Frontier |
-                EthereumHardfork::Homestead |
-                EthereumHardfork::Tangerine |
-                EthereumHardfork::SpuriousDragon |
-                EthereumHardfork::Byzantium |
-                EthereumHardfork::Constantinople |
-                EthereumHardfork::Petersburg |
-                EthereumHardfork::Istanbul |
-                EthereumHardfork::MuirGlacier |
-                EthereumHardfork::Berlin |
-                EthereumHardfork::London |
-                EthereumHardfork::Shanghai |
-                EthereumHardfork::Cancun => Some(0),
-                _ => None,
-            },
-            |fork| match fork {
-                Self::V1 | Self::V2 | Self::V3 => Some(0),
-                _ => None,
-            },
-        )
-    }
-
-    /// Hl mainnet list of hardforks.
-    pub fn hl_mainnet() -> ChainHardforks {
-        ChainHardforks::new(vec![
-            (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(0)),
-            (EthereumHardfork::Cancun.boxed(), ForkCondition::Block(0)),
-            (Self::V1.boxed(), ForkCondition::Block(0)),
-            (Self::V2.boxed(), ForkCondition::Block(0)),
-            (Self::V3.boxed(), ForkCondition::Block(0)),
-        ])
-    }
-}
-
-/// Match helper method since it's not possible to match on `dyn Hardfork`
-fn match_hardfork<H, HF, HHF>(fork: H, hardfork_fn: HF, hl_hardfork_fn: HHF) -> Option<u64>
-where
-    H: Hardfork,
-    HF: Fn(&EthereumHardfork) -> Option<u64>,
-    HHF: Fn(&HlHardfork) -> Option<u64>,
-{
-    let fork: &dyn Any = &fork;
-    if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
-        return hardfork_fn(fork);
-    }
-    fork.downcast_ref::<HlHardfork>().and_then(hl_hardfork_fn)
-}
@@ -1,13 +1,14 @@
-//! Hard forks of hl protocol.
+//! Hard forks of HyperEVM.
 #![allow(unused)]
-use hl::HlHardfork;
-use reth_chainspec::{EthereumHardforks, ForkCondition};
 
 pub mod hl;
 
+use hl::HlHardfork;
+use reth_chainspec::{EthereumHardforks, ForkCondition};
+use std::sync::Arc;
+
 /// Extends [`EthereumHardforks`] with hl helper methods.
-pub trait HlHardforks: EthereumHardforks {
-    /// Retrieves [`ForkCondition`] by an [`HlHardfork`]. If `fork` is not present, returns
-    /// [`ForkCondition::Never`].
-    fn hl_fork_activation(&self, fork: HlHardfork) -> ForkCondition;
-}
+///
+/// Currently a placeholder for future use.
+pub trait HlHardforks: EthereumHardforks {}
+impl<T: HlHardforks> HlHardforks for Arc<T> {}
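With the fork-activation helper gone, `HlHardforks` is now only a marker trait over `EthereumHardforks`, and the blanket `impl<T: HlHardforks> HlHardforks for Arc<T>` replaces the hand-written `Arc<HlChainSpec>` impl removed from the chainspec. A rough sketch of why that is enough (illustrative only; the function name is made up and it assumes reth's `EthereumHardforks` helper `is_cancun_active_at_timestamp`):

    // Illustrative only: the supertrait bound keeps every EthereumHardforks query usable.
    fn cancun_active<Spec: HlHardforks>(spec: &Spec, timestamp: u64) -> bool {
        spec.is_cancun_active_at_timestamp(timestamp)
    }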
@@ -1,6 +1,6 @@
 //! Overrides for RPC methods to post-filter system transactions and logs.
 //!
 //! System transactions are always at the beginning of the block,
 //! so we can use the transaction index to determine if the log is from a system transaction,
 //! and if it is, we can exclude it.
 //!
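The module comment above spells out the post-filtering approach: system transactions sit at the head of the block, so a log can be attributed to one purely by its transaction index. A rough, hedged sketch of that shape (hypothetical `RpcLog` type and a re-based index; not the crate's actual `adjust_log`/`system_tx_count_for_block` code):

    // Hedged illustration of index-based post-filtering, not the real implementation.
    #[derive(Debug, Clone)]
    struct RpcLog {
        transaction_index: u64,
    }

    fn drop_system_tx_logs(logs: Vec<RpcLog>, system_tx_count: u64) -> Vec<RpcLog> {
        logs.into_iter()
            // System transactions occupy indices 0..system_tx_count, so their logs are dropped.
            .filter(|log| log.transaction_index >= system_tx_count)
            .map(|mut log| {
                // Assumed detail: re-base the index so user transactions start at 0.
                log.transaction_index -= system_tx_count;
                log
            })
            .collect()
    }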
@@ -18,13 +18,10 @@ use alloy_rpc_types::{
 use jsonrpsee::{proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
 use jsonrpsee_core::{async_trait, RpcResult};
 use jsonrpsee_types::ErrorObject;
-use reth::{
-    api::FullNodeComponents, builder::rpc::RpcContext, rpc::result::internal_rpc_err,
-    tasks::TaskSpawner,
-};
+use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
 use reth_primitives_traits::{BlockBody as _, SignedTransaction};
 use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
-use reth_rpc::{EthFilter, EthPubSub};
+use reth_rpc::{eth::pubsub::SubscriptionSerializeError, EthFilter, EthPubSub};
 use reth_rpc_eth_api::{
     helpers::{EthBlocks, EthTransactions, LoadReceipt},
     transaction::ConvertReceiptInput,
@@ -34,12 +31,9 @@ use reth_rpc_eth_api::{
 use serde::Serialize;
 use std::{borrow::Cow, marker::PhantomData, sync::Arc};
 use tokio_stream::{Stream, StreamExt};
-use tracing::{info, trace, Instrument};
+use tracing::{trace, Instrument};
 
-use crate::{
-    node::primitives::{HlPrimitives, TransactionSigned},
-    HlBlock,
-};
+use crate::{node::primitives::HlPrimitives, HlBlock};
 
 pub trait EthWrapper:
     EthApiServer<
@@ -197,22 +191,6 @@ fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option
     Some(log)
 }
 
-#[derive(Debug, thiserror::Error)]
-#[error("Failed to serialize subscription item: {0}")]
-pub struct SubscriptionSerializeError(#[from] serde_json::Error);
-
-impl SubscriptionSerializeError {
-    const fn new(err: serde_json::Error) -> Self {
-        Self(err)
-    }
-}
-
-impl From<SubscriptionSerializeError> for ErrorObject<'static> {
-    fn from(value: SubscriptionSerializeError) -> Self {
-        internal_rpc_err(value.to_string())
-    }
-}
-
 async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
     sink: SubscriptionSink,
     mut stream: St,
@@ -223,7 +201,7 @@ async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
             maybe_item = stream.next() => {
                 let Some(item) = maybe_item else { break Ok(()) };
                 let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
-                    .map_err(SubscriptionSerializeError::new)?;
+                    .map_err(SubscriptionSerializeError::from)?;
                 if sink.send(msg).await.is_err() { break Ok(()); }
             }
         }
@@ -274,10 +252,6 @@ macro_rules! engine_span {
     };
 }
 
-fn is_system_tx(tx: &TransactionSigned) -> bool {
-    tx.is_system_transaction()
-}
-
 fn adjust_block<Eth: EthWrapper>(
     recovered_block: &RpcBlock<Eth::NetworkTypes>,
     eth_api: &Eth,
@@ -365,7 +339,6 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
         Some((_, meta, _)) => {
            // LoadReceipt::block_transaction_receipt loads the block again, so loading blocks again
            // doesn't hurt performance much
-            info!("block hash: {:?}", meta.block_hash);
             let Some((system_tx_count, block_receipts)) =
                 adjust_block_receipts(meta.block_hash.into(), eth_api).await?
             else {
@@ -377,10 +350,12 @@ async fn adjust_transaction_receipt<Eth: EthWrapper>(
     }
 }
 
+// This function assumes that `block_id` is already validated by the caller.
 fn system_tx_count_for_block<Eth: EthWrapper>(eth_api: &Eth, block_id: BlockId) -> usize {
     let provider = eth_api.provider();
     let block = provider.block_by_id(block_id).unwrap().unwrap();
-    let system_tx_count = block.body.transactions().iter().filter(|tx| is_system_tx(tx)).count();
+    let system_tx_count =
+        block.body.transactions().iter().filter(|tx| tx.is_system_transaction()).count();
     system_tx_count
 }
 
@@ -1,22 +1,9 @@
-use std::sync::Arc;
-
-use crate::{
-    node::{rpc::engine_api::payload::HlPayloadTypes, HlNode},
-    HlBlock, HlPrimitives,
-};
+use crate::{HlBlock, HlPrimitives};
 use alloy_eips::eip7685::Requests;
 use alloy_primitives::U256;
-use reth::{
-    api::FullNodeTypes,
-    builder::{components::PayloadServiceBuilder, BuilderContext},
-    payload::{PayloadBuilderHandle, PayloadServiceCommand},
-    transaction_pool::TransactionPool,
-};
-use reth_evm::ConfigureEvm;
 use reth_payload_primitives::BuiltPayload;
 use reth_primitives::SealedBlock;
-use tokio::sync::{broadcast, mpsc};
-use tracing::warn;
+use std::sync::Arc;
 
 /// Built payload for Hl. This is similar to [`EthBuiltPayload`] but without sidecars as those
 /// included into [`HlBlock`].
@@ -45,73 +32,3 @@ impl BuiltPayload for HlBuiltPayload {
         self.requests.clone()
     }
 }
-
-#[derive(Debug, Clone, Copy, Default)]
-#[non_exhaustive]
-pub struct HlPayloadServiceBuilder;
-
-impl<Node, Pool, Evm> PayloadServiceBuilder<Node, Pool, Evm> for HlPayloadServiceBuilder
-where
-    Node: FullNodeTypes<Types = HlNode>,
-    Pool: TransactionPool,
-    Evm: ConfigureEvm,
-{
-    async fn spawn_payload_builder_service(
-        self,
-        ctx: &BuilderContext<Node>,
-        _pool: Pool,
-        _evm_config: Evm,
-    ) -> eyre::Result<PayloadBuilderHandle<HlPayloadTypes>> {
-        let (tx, mut rx) = mpsc::unbounded_channel();
-
-        ctx.task_executor().spawn_critical("payload builder", async move {
-            let mut subscriptions = Vec::new();
-
-            while let Some(message) = rx.recv().await {
-                match message {
-                    PayloadServiceCommand::Subscribe(tx) => {
-                        let (events_tx, events_rx) = broadcast::channel(100);
-                        // Retain senders to make sure that channels are not getting closed
-                        subscriptions.push(events_tx);
-                        let _ = tx.send(events_rx);
-                    }
-                    message => warn!(?message, "Noop payload service received a message"),
-                }
-            }
-        });
-
-        Ok(PayloadBuilderHandle::new(tx))
-    }
-}
-
-// impl From<EthBuiltPayload> for HlBuiltPayload {
-//     fn from(value: EthBuiltPayload) -> Self {
-//         let EthBuiltPayload { id, block, fees, sidecars, requests } = value;
-//         HlBuiltPayload {
-//             id,
-//             block: block.into(),
-//             fees,
-//             requests,
-//         }
-//     }
-// }
-
-// pub struct HlPayloadBuilder<Inner> {
-//     inner: Inner,
-// }
-
-// impl<Inner> PayloadBuilder for HlPayloadBuilder<Inner>
-// where
-//     Inner: PayloadBuilder<BuiltPayload = EthBuiltPayload>,
-// {
-//     type Attributes = Inner::Attributes;
-//     type BuiltPayload = HlBuiltPayload;
-//     type Error = Inner::Error;
-
-//     fn try_build(
-//         &self,
-//         args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
-//     ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
-//         let outcome = self.inner.try_build(args)?;
-//     }
-// }
@@ -165,7 +165,6 @@ where
     type EVM = HlEvmConfig;
 
     async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
-        let evm_config = HlEvmConfig::hl(ctx.chain_spec());
-        Ok(evm_config)
+        Ok(HlEvmConfig::hl(ctx.chain_spec()))
     }
 }
@@ -15,12 +15,15 @@ use crate::{
     pseudo_peer::BlockSourceConfig,
 };
 use consensus::HlConsensusBuilder;
-use engine::HlPayloadServiceBuilder;
 use evm::HlExecutorBuilder;
 use network::HlNetworkBuilder;
 use reth::{
     api::{FullNodeTypes, NodeTypes},
-    builder::{components::ComponentsBuilder, rpc::RpcAddOns, Node, NodeAdapter},
+    builder::{
+        components::{ComponentsBuilder, NoopPayloadServiceBuilder},
+        rpc::RpcAddOns,
+        Node, NodeAdapter,
+    },
 };
 use reth_engine_primitives::ConsensusEngineHandle;
 use std::{marker::PhantomData, sync::Arc};
@@ -65,7 +68,7 @@ impl HlNode {
     ) -> ComponentsBuilder<
         Node,
         HlPoolBuilder,
-        HlPayloadServiceBuilder,
+        NoopPayloadServiceBuilder,
         HlNetworkBuilder,
         HlExecutorBuilder,
         HlConsensusBuilder,
@@ -77,7 +80,7 @@ impl HlNode {
             .node_types::<Node>()
             .pool(HlPoolBuilder)
             .executor(HlExecutorBuilder::default())
-            .payload(HlPayloadServiceBuilder::default())
+            .payload(NoopPayloadServiceBuilder::default())
             .network(HlNetworkBuilder {
                 engine_handle_rx: self.engine_handle_rx.clone(),
                 block_source_config: self.block_source_config.clone(),
@@ -100,7 +103,7 @@ where
     type ComponentsBuilder = ComponentsBuilder<
         N,
         HlPoolBuilder,
-        HlPayloadServiceBuilder,
+        NoopPayloadServiceBuilder,
         HlNetworkBuilder,
         HlExecutorBuilder,
         HlConsensusBuilder,
@@ -12,7 +12,6 @@ use crate::{
     HlBlock,
 };
 use alloy_rlp::{Decodable, Encodable};
-// use handshake::HlHandshake;
 use reth::{
     api::{FullNodeTypes, TxTy},
     builder::{components::NetworkBuilder, BuilderContext},
@@ -114,11 +114,6 @@ impl reth_codecs::Compact for TransactionSigned {
     }
 }
 
-pub fn convert_recovered(value: Recovered<TransactionSigned>) -> Recovered<InnerType> {
-    let (tx, signer) = value.into_parts();
-    Recovered::new_unchecked(tx.into_inner(), signer)
-}
-
 impl FromRecoveredTx<TransactionSigned> for TxEnv {
     fn from_recovered_tx(tx: &TransactionSigned, sender: Address) -> Self {
         TxEnv::from_recovered_tx(&tx.inner(), sender)
@@ -192,20 +187,6 @@ impl SerdeBincodeCompat for TransactionSigned {
 
 pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
 
-impl From<TransactionSigned> for EthereumTxEnvelope<TxEip4844> {
-    fn from(value: TransactionSigned) -> Self {
-        value.into_inner()
-    }
-}
-
-impl TryFrom<TransactionSigned> for EthereumTxEnvelope<TxEip4844WithSidecar> {
-    type Error = <InnerType as TryInto<EthereumTxEnvelope<TxEip4844WithSidecar>>>::Error;
-
-    fn try_from(value: TransactionSigned) -> Result<Self, Self::Error> {
-        value.into_inner().try_into()
-    }
-}
-
 impl TryFrom<TransactionSigned>
     for EthereumTxEnvelope<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>
 {
@@ -36,7 +36,7 @@ where
     }
 }
 
-/// Validator for Optimism engine API.
+/// Validator for HyperEVM engine API.
 #[derive(Debug, Clone)]
 pub struct HlPayloadValidator {
     inner: HlExecutionPayloadValidator<HlChainSpec>,
@@ -123,7 +123,7 @@ where
             return Err(PayloadError::BlockHash {
                 execution: sealed_block.hash(),
                 consensus: expected_hash,
-            })?;
+            });
         }
 
         Ok(sealed_block)
@@ -1 +0,0 @@
-pub const MAX_CONCURRENCY: usize = 100;
@@ -1,36 +0,0 @@
-use thiserror::Error;
-
-#[derive(Error, Debug)]
-pub enum PseudoPeerError {
-    #[error("Block source error: {0}")]
-    BlockSource(String),
-
-    #[error("Network error: {0}")]
-    Network(#[from] reth_network::error::NetworkError),
-
-    #[error("Configuration error: {0}")]
-    Config(String),
-
-    #[error("AWS S3 error: {0}")]
-    S3(#[from] aws_sdk_s3::Error),
-
-    #[error("IO error: {0}")]
-    Io(#[from] std::io::Error),
-
-    #[error("Serialization error: {0}")]
-    Serialization(#[from] rmp_serde::encode::Error),
-
-    #[error("Deserialization error: {0}")]
-    Deserialization(#[from] rmp_serde::decode::Error),
-
-    #[error("Compression error: {0}")]
-    Compression(String),
-}
-
-impl From<eyre::Error> for PseudoPeerError {
-    fn from(err: eyre::Error) -> Self {
-        PseudoPeerError::Config(err.to_string())
-    }
-}
-
-pub type Result<T> = std::result::Result<T, PseudoPeerError>;
@@ -5,33 +5,27 @@
 
 pub mod cli;
 pub mod config;
-pub mod consts;
-pub mod error;
 pub mod network;
 pub mod service;
 pub mod sources;
+#[cfg(test)]
+mod tests;
 pub mod utils;
 
 use std::sync::Arc;
+use tokio::sync::mpsc;
+use tracing::info;
 
 pub use cli::*;
 pub use config::*;
-pub use error::*;
 pub use network::*;
 pub use service::*;
 pub use sources::*;
 
-#[cfg(test)]
-mod tests;
-
-use tokio::sync::mpsc;
-use tracing::info;
-
 /// Re-export commonly used types
 pub mod prelude {
     pub use super::{
         config::BlockSourceConfig,
-        error::{PseudoPeerError, Result},
         service::{BlockPoller, PseudoPeer},
         sources::{BlockSource, CachedBlockSource, LocalBlockSource, S3BlockSource},
     };
@@ -32,16 +32,6 @@ impl Default for NetworkBuilder {
 }
 
 impl NetworkBuilder {
-    pub fn with_secret(mut self, secret: SecretKey) -> Self {
-        self.secret = secret;
-        self
-    }
-
-    pub fn with_peer_config(mut self, peer_config: PeersConfig) -> Self {
-        self.peer_config = peer_config;
-        self
-    }
-
     pub fn with_boot_nodes(mut self, boot_nodes: Vec<TrustedPeer>) -> Self {
         self.boot_nodes = boot_nodes;
         self
@@ -24,7 +24,8 @@ fn name_with_largest_number(files: &[String], is_dir: bool) -> Option<(u64, Stri
     let mut files = files
         .iter()
         .filter_map(|file_raw| {
-            let file = file_raw.strip_suffix("/").unwrap_or(file_raw).split("/").last().unwrap();
+            let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
+            let file = file.split("/").last().unwrap();
             let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
             stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
         })
@@ -181,23 +182,6 @@ impl LocalBlockSource {
         Self { dir: dir.into() }
     }
 
-    fn name_with_largest_number_static(files: &[String], is_dir: bool) -> Option<(u64, String)> {
-        let mut files = files
-            .iter()
-            .filter_map(|file_raw| {
-                let file = file_raw.strip_suffix("/").unwrap_or(file_raw);
-                let file = file.split("/").last().unwrap();
-                let stem = if is_dir { file } else { file.strip_suffix(".rmp.lz4")? };
-                stem.parse::<u64>().ok().map(|number| (number, file_raw.to_string()))
-            })
-            .collect::<Vec<_>>();
-        if files.is_empty() {
-            return None;
-        }
-        files.sort_by_key(|(number, _)| *number);
-        files.last().map(|(number, file)| (*number, file.to_string()))
-    }
-
     async fn pick_path_with_highest_number(dir: PathBuf, is_dir: bool) -> Option<(u64, String)> {
         let files = std::fs::read_dir(&dir).unwrap().collect::<Vec<_>>();
         let files = files
@@ -206,7 +190,7 @@ impl LocalBlockSource {
             .map(|entry| entry.unwrap().path().to_string_lossy().to_string())
             .collect::<Vec<_>>();
 
-        Self::name_with_largest_number_static(&files, is_dir)
+        name_with_largest_number(&files, is_dir)
     }
 }
 
@@ -17,14 +17,3 @@ async fn test_block_source_config_local() {
         matches!(config.source_type, BlockSourceType::Local { path } if path == Path::new("/test/path"))
     );
 }
-
-#[test]
-fn test_error_types() {
-    let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
-    let benchmark_error: PseudoPeerError = io_error.into();
-
-    match benchmark_error {
-        PseudoPeerError::Io(_) => (),
-        _ => panic!("Expected Io error"),
-    }
-}