Merge pull request #1 from sprites0/feat/testnet

feat: Some changes
Authored by sprites0 on 2025-03-21 07:43:04 -04:00; committed by GitHub.
19 changed files with 451 additions and 166 deletions

Cargo.lock (generated)

@@ -6663,6 +6663,7 @@ dependencies = [
  "lz4_flex",
  "once_cell",
  "parking_lot",
+ "reqwest",
  "reth-basic-payload-builder",
  "reth-chainspec",
  "reth-cli",
@@ -7631,14 +7632,22 @@ name = "reth-ethereum-cli"
 version = "1.2.0"
 dependencies = [
  "alloy-chains",
+ "alloy-consensus",
+ "alloy-genesis",
  "alloy-primitives",
+ "alloy-rlp",
  "clap",
  "eyre",
+ "lz4_flex",
  "once_cell",
+ "reqwest",
  "reth-chainspec",
  "reth-cli",
  "reth-cli-commands",
  "reth-primitives",
+ "revm",
+ "rmp-serde",
+ "serde",
  "serde_json",
 ]
@@ -7804,6 +7813,7 @@ dependencies = [
  "reth-testing-utils",
  "secp256k1 0.30.0",
  "serde_json",
+ "sha2 0.10.8",
 ]
 
 [[package]]


@@ -622,6 +622,9 @@ snmalloc-rs = { version = "0.3.7", features = ["build_cc"] }
 # See: https://github.com/eira-fransham/crunchy/issues/13
 crunchy = "=0.2.2"
+lz4_flex = "0.11.3"
+rmp-serde = "1.3.0"
 
 [patch.crates-io]
 alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "beb6832" }
 alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "beb6832" }


@@ -90,8 +90,6 @@ backon.workspace = true
 similar-asserts.workspace = true
 parking_lot.workspace = true
-lz4_flex = "0.11.3"
-rmp-serde = "1.3.0"
 serde = { workspace = true, features = ["derive"] }
 reth-e2e-test-utils.workspace = true
 once_cell.workspace = true
@@ -100,6 +98,10 @@ jsonrpsee.workspace = true
 jsonrpsee-core.workspace = true
 reth-rpc-layer.workspace = true
+lz4_flex.workspace = true
+rmp-serde.workspace = true
+reqwest.workspace = true
 
 [dev-dependencies]
 tempfile.workspace = true


@@ -1,7 +1,8 @@
 use std::path::PathBuf;
 use std::sync::Arc;
 
-use alloy_consensus::{BlockBody, BlockHeader};
+use alloy_consensus::{BlockBody, BlockHeader, Transaction};
+use alloy_primitives::TxKind;
 use alloy_primitives::{Address, PrimitiveSignature, B256, U256};
 use alloy_rpc_types::engine::{
     ExecutionPayloadEnvelopeV3, ForkchoiceState, PayloadAttributes, PayloadStatusEnum,
@@ -19,12 +20,54 @@ use reth_provider::{BlockHashReader, StageCheckpointReader};
 use reth_rpc_api::EngineApiClient;
 use reth_rpc_layer::AuthClientService;
 use reth_stages::StageId;
-use tracing::debug;
+use serde::{Deserialize, Serialize};
+use tracing::{debug, info};
 
+use crate::serialized::TypedTransaction;
 use crate::serialized::{self, BlockInner};
 
 pub(crate) struct BlockIngest(pub PathBuf);
 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct EvmContract {
+    pub address: Address,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct SpotToken {
+    pub index: u64,
+    #[serde(rename = "evmContract")]
+    pub evm_contract: Option<EvmContract>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct SpotMeta {
+    tokens: Vec<SpotToken>,
+}
+
+async fn fetch_spot_meta(is_testnet: bool) -> Result<SpotMeta, Box<dyn std::error::Error>> {
+    let url = if is_testnet {
+        "https://api.hyperliquid-testnet.xyz"
+    } else {
+        "https://api.hyperliquid.xyz"
+    };
+    let url = format!("{}/info", url);
+
+    // post body: {"type": "spotMeta"}
+    let client = reqwest::Client::new();
+    let response = client.post(url).json(&serde_json::json!({"type": "spotMeta"})).send().await?;
+    Ok(response.json().await?)
+}
+
+fn to_evm_map(meta: &SpotMeta) -> std::collections::HashMap<Address, u64> {
+    let mut map = std::collections::HashMap::new();
+    for token in &meta.tokens {
+        if let Some(evm_contract) = &token.evm_contract {
+            map.insert(evm_contract.address, token.index);
+        }
+    }
+    map
+}
+
 async fn submit_payload<Engine: PayloadTypes + EngineTypes>(
     engine_api_client: &HttpClient<AuthClientService<HttpBackend>>,
     payload: EthBuiltPayload,
@@ -92,6 +135,7 @@ impl BlockIngest {
             std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_millis();
 
         let engine_api = node.auth_server_handle().http_client();
+        let mut evm_map = to_evm_map(&fetch_spot_meta(node.chain_spec().chain_id() == 998).await?);
 
         loop {
             let Some(original_block) = self.collect_block(height) else {
@@ -107,14 +151,47 @@ impl BlockIngest {
             {
                 let BlockBody { transactions, ommers, withdrawals } =
                     std::mem::take(block.body_mut());
+                let mut system_txs = vec![];
+                for transaction in original_block.system_txs {
+                    let s = match &transaction.tx {
+                        TypedTransaction::Legacy(tx) => match tx.input().len() {
+                            0 => U256::from(0x1),
+                            _ => {
+                                let TxKind::Call(to) = tx.to else {
+                                    panic!("Unexpected contract creation");
+                                };
+                                loop {
+                                    match evm_map.get(&to).cloned() {
+                                        Some(s) => {
+                                            break {
+                                                let mut addr = [0u8; 32];
+                                                addr[12] = 0x20;
+                                                addr[24..32].copy_from_slice(s.to_be_bytes().as_ref());
+                                                U256::from_be_bytes(addr)
+                                            }
+                                        }
+                                        None => {
+                                            info!("Contract not found: {:?}, fetching again...", to);
+                                            evm_map = to_evm_map(
+                                                &fetch_spot_meta(
                                                    node.chain_spec().chain_id() == 998,
+                                                )
+                                                .await?,
+                                            );
+                                            continue;
+                                        }
+                                    }
+                                }
+                            }
+                        },
+                        _ => unreachable!(),
+                    };
                     let signature = PrimitiveSignature::new(
                         // from anvil
                         U256::from(0x1),
-                        U256::from(0x1),
+                        s,
                         true,
                     );
-                let mut system_txs = vec![];
-                for transaction in original_block.system_txs {
                     let typed_transaction = transaction.tx.to_reth();
                     let tx = TransactionSigned::new(
                         typed_transaction,
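Editor's note on the hunk above: a hedged sketch, not part of the diff, of the `s` value the ingest loop builds for a spot-token system transaction. Byte 12 of a 32-byte word is set to 0x20 and the last eight bytes carry the spot token's big-endian index; the reworked `is_impersonated_tx` (later in this PR) then reads `s` mod 2^160 back as the sender address.

    use alloy_primitives::U256;

    /// Sketch: the `s` encoding used above for a spot-token system tx.
    fn system_tx_s_value(token_index: u64) -> U256 {
        let mut word = [0u8; 32];
        word[12] = 0x20; // first byte of the 20-byte address region
        word[24..32].copy_from_slice(&token_index.to_be_bytes()); // index, big-endian
        U256::from_be_bytes(word)
    }

    // e.g. token_index = 1 gives s = 0x2000...0001, so the recovered sender is
    // address 0x2000000000000000000000000000000000000001.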


@@ -1,6 +1,5 @@
 use alloy_consensus::{TxEip1559, TxEip2930, TxLegacy};
-use alloy_rpc_types::Log;
-use reth_primitives::{SealedBlock, Transaction};
+use reth_primitives::{Log, SealedBlock, Transaction};
 use serde::{Deserialize, Serialize};
 
 #[derive(Clone, Debug, Serialize, Deserialize)]


@@ -195,6 +195,7 @@ pub enum StateRootMessage {
 }
 
 /// Message about completion of proof calculation for a specific state update
+#[allow(dead_code)]
 #[derive(Debug)]
 pub struct ProofCalculated {
     /// The index of this proof in the sequence of state updates
@@ -255,7 +256,7 @@ impl ProofSequencer {
         // return early if we don't have the next expected proof
         if !self.pending_proofs.contains_key(&self.next_to_deliver) {
-            return Vec::new()
+            return Vec::new();
         }
 
         let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len());
@@ -390,7 +391,7 @@ where
                 sequence_number: input.proof_sequence_number,
                 state: input.hashed_state_update,
             });
-            return
+            return;
         }
 
         if self.inflight >= self.max_concurrent {
@@ -480,10 +481,13 @@ where
 #[metrics(scope = "tree.root")]
 struct StateRootTaskMetrics {
     /// Histogram of proof calculation durations.
+    #[allow(unused)]
     pub proof_calculation_duration_histogram: Histogram,
     /// Histogram of proof calculation account targets.
+    #[allow(unused)]
     pub proof_calculation_account_targets_histogram: Histogram,
     /// Histogram of proof calculation storage targets.
+    #[allow(unused)]
     pub proof_calculation_storage_targets_histogram: Histogram,
     /// Histogram of sparse trie update durations.
@@ -492,10 +496,13 @@ struct StateRootTaskMetrics {
     pub sparse_trie_final_update_duration_histogram: Histogram,
     /// Histogram of state updates received.
+    #[allow(unused)]
     pub state_updates_received_histogram: Histogram,
     /// Histogram of proofs processed.
+    #[allow(unused)]
     pub proofs_processed_histogram: Histogram,
     /// Histogram of state root update iterations.
+    #[allow(unused)]
     pub state_root_iterations_histogram: Histogram,
     /// Histogram of the number of updated state nodes.
@@ -531,6 +538,7 @@ pub struct StateRootTask<Factory> {
     /// Task configuration.
     config: StateRootConfig<Factory>,
     /// Receiver for state root related messages.
+    #[allow(unused)]
     rx: Receiver<StateRootMessage>,
     /// Sender for state root related messages.
     tx: Sender<StateRootMessage>,
@@ -539,6 +547,7 @@ pub struct StateRootTask<Factory> {
     /// Proof sequencing handler.
     proof_sequencer: ProofSequencer,
     /// Reference to the shared thread pool for parallel proof generation.
+    #[allow(unused)]
     thread_pool: Arc<rayon::ThreadPool>,
     /// Manages calculation of multiproofs.
     multiproof_manager: MultiproofManager<Factory>,
@@ -578,25 +587,14 @@ where
     /// Returns a state hook to be used to send state updates to this task.
     pub fn state_hook(&self) -> impl OnStateHook {
-        let state_hook = self.state_hook_sender();
+        let _state_hook = self.state_hook_sender();
 
-        move |source: StateChangeSource, state: &EvmState| {
-            if let Err(error) =
-                state_hook.send(StateRootMessage::StateUpdate(source, state.clone()))
-            {
-                error!(target: "engine::root", ?error, "Failed to send state update");
-            }
+        move |_source: StateChangeSource, _state: &EvmState| {
         }
     }
 
     /// Spawns the state root task and returns a handle to await its result.
     pub fn spawn(self) -> StateRootHandle {
-        let sparse_trie_tx = Self::spawn_sparse_trie(
-            self.thread_pool.clone(),
-            self.config.clone(),
-            self.metrics.clone(),
-            self.tx.clone(),
-        );
         let (tx, rx) = mpsc::sync_channel(1);
         std::thread::Builder::new()
             .name("State Root Task".to_string())
@@ -605,8 +603,11 @@ where
                 self.observe_config();
 
-                let result = self.run(sparse_trie_tx);
-                let _ = tx.send(result);
+                let _ = tx.send(Ok(StateRootComputeOutcome {
+                    state_root: (B256::default(), Default::default()),
+                    total_time: Duration::default(),
+                    time_from_last_update: Duration::default(),
+                }));
             })
             .expect("failed to spawn state root thread");
@@ -614,6 +615,7 @@ where
     }
 
     /// Logs and records in metrics the state root config parameters.
+    #[allow(unused)]
     fn observe_config(&self) {
         let nodes_sorted_account_nodes = self.config.nodes_sorted.account_nodes.len();
         let nodes_sorted_removed_nodes = self.config.nodes_sorted.removed_nodes.len();
@@ -659,6 +661,7 @@ where
     }
 
     /// Spawn long running sparse trie task that forwards the final result upon completion.
+    #[allow(unused)]
     fn spawn_sparse_trie(
         thread_pool: Arc<rayon::ThreadPool>,
         config: StateRootConfig<Factory>,
@@ -682,6 +685,7 @@ where
     }
 
     /// Handles request for proof prefetch.
+    #[allow(unused)]
     fn on_prefetch_proof(&mut self, targets: MultiProofTargets) {
         let proof_targets = self.get_prefetch_proof_targets(targets);
         extend_multi_proof_targets_ref(&mut self.fetched_proof_targets, &proof_targets);
@@ -697,6 +701,7 @@ where
     }
 
     /// Calls `get_proof_targets` with existing proof targets for prefetching.
+    #[allow(unused)]
     fn get_prefetch_proof_targets(&self, mut targets: MultiProofTargets) -> MultiProofTargets {
         // Here we want to filter out any targets that are already fetched
         //
@@ -726,7 +731,7 @@ where
             let Some(fetched_storage) = self.fetched_proof_targets.get(hashed_address) else {
                 // this means the account has not been fetched yet, so we must fetch everything
                 // associated with this account
-                continue
+                continue;
             };
 
             let prev_target_storage_len = target_storage.len();
@@ -749,6 +754,7 @@ where
     /// Handles state updates.
     ///
     /// Returns proof targets derived from the state update.
+    #[allow(unused)]
     fn on_state_update(
         &mut self,
         source: StateChangeSource,


@@ -21,7 +21,15 @@ eyre.workspace = true
 once_cell.workspace = true
 alloy-chains.workspace = true
 alloy-primitives.workspace = true
+alloy-genesis.workspace = true
+alloy-consensus.workspace = true
+alloy-rlp.workspace = true
 serde_json.workspace = true
+lz4_flex.workspace = true
+revm = { workspace = true, features = ["serde"] }
+serde.workspace = true
+rmp-serde.workspace = true
+reqwest = { workspace = true }
 
 [dev-dependencies]
 clap.workspace = true


@@ -8,7 +8,7 @@ use reth_primitives::{Header, SealedHeader};
 use std::sync::Arc;
 
 /// Chains supported by reth. First value should be used as the default.
-pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"];
+pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "testnet", "sepolia", "holesky", "dev"];
 
 static GENESIS_HASH: B256 =
     b256!("d8fcc13b6a195b88b7b2da3722ff6cad767b13a8c1e9ffb1c73aa9d216d895f0");
@@ -92,6 +92,7 @@ pub static HL_MAINNET: Lazy<alloc::sync::Arc<ChainSpec>> = Lazy::new(|| {
 pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
     Ok(match s {
         "mainnet" => HL_MAINNET.clone(),
+        "testnet" => Arc::new(super::hl_testnet::load_hl_testnet()),
         "sepolia" => SEPOLIA.clone(),
         "holesky" => HOLESKY.clone(),
         "dev" => DEV.clone(),


@@ -0,0 +1,113 @@
+use alloy_consensus::Header;
+use alloy_genesis::{ChainConfig, Genesis};
+use alloy_primitives::U256;
+use alloy_rlp::Decodable;
+use reqwest::blocking::get;
+use reth_chainspec::{ChainSpec, DEV_HARDFORKS};
+use reth_primitives::SealedHeader;
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::{Read, Write};
+
+pub(crate) fn load_hl_testnet() -> ChainSpec {
+    const TESTNET_GENESIS_URL: &str = "https://raw.githubusercontent.com/sprites0/hl-testnet-genesis/main/19386700.rlp";
+
+    fn download_testnet_genesis() -> Result<&'static str, Box<dyn std::error::Error>> {
+        let path = "/tmp/hl_testnet.rmp.lz4";
+        println!("Downloading testnet genesis");
+        let mut response = get(TESTNET_GENESIS_URL)?;
+        if let Some(length) = response.content_length() {
+            // Check if the file exists
+            if let Ok(metadata) = std::fs::metadata(path) {
+                if metadata.len() == length {
+                    println!("Already downloaded");
+                    return Ok(path);
+                }
+            }
+        }
+
+        let mut file = File::create(path)?;
+        let mut downloaded = 0;
+        let total_size = response.content_length().unwrap_or(0);
+        let mut buffer = vec![0; 0x100000];
+        loop {
+            let size = response.read(buffer.as_mut_slice())?;
+            if size == 0 {
+                break;
+            }
+            file.write_all(&buffer[..size])?;
+            downloaded += size as u64;
+            println!(
+                "Downloaded {} of {} bytes ({}%)",
+                downloaded,
+                total_size,
+                (downloaded as f64 / total_size as f64 * 100.0).round()
+            );
+        }
+        Ok(path)
+    }
+
+    let path = download_testnet_genesis().expect("Failed to download testnet genesis");
+    let mut file = File::open(path).expect("Failed to open testnet genesis");
+    let mut buffer = Vec::new();
+    file.read_to_end(&mut buffer).expect("Failed to read testnet genesis");
+    let mut header = Header::decode(&mut &buffer[..]).expect("Failed to decode testnet genesis");
+
+    let config = ChainConfig {
+        chain_id: 998,
+        homestead_block: Some(0),
+        dao_fork_block: Some(0),
+        dao_fork_support: false,
+        eip150_block: Some(0),
+        eip155_block: Some(0),
+        eip158_block: Some(0),
+        byzantium_block: Some(0),
+        constantinople_block: Some(0),
+        petersburg_block: Some(0),
+        istanbul_block: Some(0),
+        muir_glacier_block: Some(0),
+        berlin_block: Some(0),
+        london_block: Some(0),
+        arrow_glacier_block: Some(0),
+        gray_glacier_block: Some(0),
+        merge_netsplit_block: Some(0),
+        shanghai_time: Some(0),
+        cancun_time: Some(0),
+        prague_time: Some(0),
+        osaka_time: Some(0),
+        terminal_total_difficulty: Some(U256::ZERO),
+        terminal_total_difficulty_passed: true,
+        ethash: None,
+        clique: None,
+        parlia: None,
+        extra_fields: Default::default(),
+        deposit_contract_address: None,
+        blob_schedule: Default::default(),
+    };
+    header.number = 0;
+    let genesis_header = SealedHeader::new(header.clone(), header.hash_slow());
+    let genesis = Genesis {
+        config,
+        nonce: header.nonce.into(),
+        timestamp: header.timestamp,
+        extra_data: header.extra_data,
+        gas_limit: header.gas_limit,
+        difficulty: header.difficulty,
+        mix_hash: header.mix_hash,
+        coinbase: header.beneficiary,
+        alloc: BTreeMap::default(),
+        base_fee_per_gas: header.base_fee_per_gas.map(|x| x.into()),
+        excess_blob_gas: header.excess_blob_gas,
+        blob_gas_used: header.blob_gas_used,
+        number: None,
+    };
+
+    ChainSpec {
+        chain: alloy_chains::Chain::from_id(998),
+        genesis: genesis.into(),
+        genesis_header,
+        hardforks: DEV_HARDFORKS.clone(),
+        prune_delete_limit: 10000,
+        ..Default::default()
+    }
+}


@@ -11,6 +11,8 @@
 /// Chain specification parser.
 pub mod chainspec;
 
+mod hl_testnet;
+
 #[cfg(test)]
 mod test {
     use clap::Parser;


@@ -29,6 +29,9 @@ alloy-evm.workspace = true
 alloy-sol-types.workspace = true
 alloy-consensus.workspace = true
 
+sha2.workspace = true
+serde_json.workspace = true
+
 [dev-dependencies]
 reth-testing-utils.workspace = true
 reth-evm = { workspace = true, features = ["test-utils"] }


@@ -23,7 +23,7 @@ use reth_execution_types::BlockExecutionResult;
 use reth_primitives::{
     EthPrimitives, Receipt, Recovered, RecoveredBlock, SealedBlock, TransactionSigned,
 };
-use reth_primitives_traits::{transaction::signed::HL_SYSTEM_TX_FROM_ADDR, NodePrimitives};
+use reth_primitives_traits::{transaction::signed::is_impersonated_tx, NodePrimitives};
 use reth_revm::{context_interface::result::ResultAndState, db::State, DatabaseCommit};
 
 /// Factory for [`EthExecutionStrategy`].
@@ -191,7 +191,7 @@ where
             }
 
             let hash = tx.hash();
-            let is_system_transaction = tx.signer() == HL_SYSTEM_TX_FROM_ADDR;
+            let is_system_transaction = is_impersonated_tx(tx.signature(), tx.gas_price()).is_some();
 
             // Execute transaction.
             let result_and_state =


@@ -19,18 +19,35 @@ extern crate alloc;
 use alloc::sync::Arc;
 use alloy_consensus::{BlockHeader, Header};
+use alloy_evm::eth::EthEvmContext;
 pub use alloy_evm::EthEvm;
-use alloy_evm::EthEvmFactory;
-use alloy_primitives::U256;
+use alloy_primitives::bytes::BufMut;
+use alloy_primitives::hex::{FromHex, ToHexExt};
+use alloy_primitives::{Address, Bytes, U256};
 use core::{convert::Infallible, fmt::Debug};
 use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET};
-use reth_evm::{ConfigureEvm, ConfigureEvmEnv, EvmEnv, NextBlockEnvAttributes};
+use reth_evm::Database;
+use reth_evm::{ConfigureEvm, ConfigureEvmEnv, EvmEnv, EvmFactory, NextBlockEnvAttributes};
 use reth_primitives::TransactionSigned;
+use reth_revm::context::result::{EVMError, HaltReason};
+use reth_revm::context::{Block, Cfg, ContextTr};
+use reth_revm::handler::{EthPrecompiles, PrecompileProvider};
+use reth_revm::inspector::NoOpInspector;
+use reth_revm::interpreter::interpreter::EthInterpreter;
+use reth_revm::interpreter::{Gas, InstructionResult, InterpreterResult};
+use reth_revm::precompile::{
+    PrecompileError, PrecompileErrors, PrecompileFn, PrecompileOutput, PrecompileResult,
+    Precompiles,
+};
 use reth_revm::{
     context::{BlockEnv, CfgEnv, TxEnv},
     context_interface::block::BlobExcessGasAndPrice,
     specification::hardfork::SpecId,
 };
+use reth_revm::{revm, Context, Inspector, MainBuilder, MainContext};
+use sha2::Digest;
+use std::io::Write;
+use std::sync::OnceLock;
 
 mod config;
 use alloy_eips::eip1559::INITIAL_BASE_FEE;
@@ -49,7 +66,7 @@ pub mod eip6110;
 #[derive(Debug, Clone)]
 pub struct EthEvmConfig {
     chain_spec: Arc<ChainSpec>,
-    evm_factory: EthEvmFactory,
+    evm_factory: HyperliquidEvmFactory,
 }
 
 impl EthEvmConfig {
@@ -164,8 +181,153 @@ impl ConfigureEvmEnv for EthEvmConfig {
     }
 }
 
+/// A custom precompile that contains static precompiles.
+#[allow(missing_debug_implementations)]
+#[derive(Clone)]
+pub struct L1ReadPrecompiles<CTX> {
+    precompiles: EthPrecompiles<CTX>,
+    warm_addresses: Vec<Address>,
+}
+
+impl<CTX: ContextTr> L1ReadPrecompiles<CTX> {
+    fn new() -> Self {
+        let mut this = Self { precompiles: EthPrecompiles::default(), warm_addresses: vec![] };
+        this.update_warm_addresses(false);
+        this
+    }
+
+    fn update_warm_addresses(&mut self, precompile_enabled: bool) {
+        self.warm_addresses = if !precompile_enabled {
+            self.precompiles.warm_addresses().collect()
+        } else {
+            self.precompiles
+                .warm_addresses()
+                .chain((0..=9).into_iter().map(|x| {
+                    let mut addr = [0u8; 20];
+                    addr[18] = 0x8;
+                    addr[19] = x;
+                    Address::from_slice(&addr)
+                }))
+                .collect()
+        }
+    }
+}
+
+impl<CTX: ContextTr> PrecompileProvider for L1ReadPrecompiles<CTX> {
+    type Context = CTX;
+    type Output = InterpreterResult;
+
+    fn set_spec(&mut self, spec: <<Self::Context as ContextTr>::Cfg as Cfg>::Spec) {
+        self.precompiles.set_spec(spec);
+        // TODO: How to pass block number and chain id?
+        self.update_warm_addresses(false);
+    }
+
+    fn run(
+        &mut self,
+        context: &mut Self::Context,
+        address: &Address,
+        bytes: &Bytes,
+        gas_limit: u64,
+    ) -> Result<Option<Self::Output>, revm::precompile::PrecompileErrors> {
+        if address[..18] == [0u8; 18] {
+            let maybe_precompile_index = u16::from_be_bytes([address[18], address[19]]);
+            let precompile_base =
+                std::env::var("PRECOMPILE_BASE").unwrap_or("/tmp/precompiles".to_string());
+            if 0x800 <= maybe_precompile_index && maybe_precompile_index <= 0x809 {
+                let block_number = context.block().number();
+                let input = vec![];
+                let mut writer = input.writer();
+                writer.write(&address.as_slice()).unwrap();
+                writer.write(bytes).unwrap();
+                writer.flush().unwrap();
+                let hash = sha2::Sha256::digest(writer.get_ref());
+                let file =
+                    format!("{}/{}/{}.json", precompile_base, block_number, hash.encode_hex());
+                let (output, gas) = match load_result(file) {
+                    Ok(Some(value)) => value,
+                    Ok(None) => {
+                        return Ok(Some(InterpreterResult {
+                            result: InstructionResult::Return,
+                            gas: Gas::new(gas_limit),
+                            output: Bytes::new(),
+                        }))
+                    }
+                    Err(value) => return Err(value),
+                };
+                return Ok(Some(InterpreterResult {
+                    result: InstructionResult::Return,
+                    gas: Gas::new(gas_limit - gas),
+                    output,
+                }));
+            }
+        }
+        self.precompiles.run(context, address, bytes, gas_limit)
+    }
+
+    fn contains(&self, address: &Address) -> bool {
+        self.precompiles.contains(address)
+    }
+
+    fn warm_addresses(&self) -> Box<impl Iterator<Item = Address> + '_> {
+        Box::new(self.warm_addresses.iter().cloned())
+    }
+}
+
+fn load_result(file: String) -> Result<Option<(Bytes, u64)>, PrecompileErrors> {
+    let Ok(file) = std::fs::File::open(file) else {
+        return Ok(None);
+    };
+    let reader = std::io::BufReader::new(file);
+    let json: serde_json::Value = serde_json::from_reader(reader).unwrap();
+    let object = json.as_object().unwrap().clone();
+    let success = object.get("success").unwrap().as_bool().unwrap();
+    if !success {
+        return Err(PrecompileErrors::Error(PrecompileError::other("Invalid input")));
+    }
+    let output =
+        Bytes::from_hex(object.get("output").unwrap().as_str().unwrap().to_owned()).unwrap();
+    let gas = object.get("gas").unwrap_or(&serde_json::json!(0)).as_u64().unwrap_or_default();
+    println!("output: {}, gas: {}", output.encode_hex(), gas);
+    Ok(Some((output, gas)))
+}
+
+/// Custom EVM configuration.
+#[derive(Debug, Clone, Default)]
+#[non_exhaustive]
+pub struct HyperliquidEvmFactory;
+
+impl EvmFactory<EvmEnv> for HyperliquidEvmFactory {
+    type Evm<DB: Database, I: Inspector<EthEvmContext<DB>, EthInterpreter>> =
+        EthEvm<DB, I, L1ReadPrecompiles<EthEvmContext<DB>>>;
+    type Tx = TxEnv;
+    type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>;
+    type HaltReason = HaltReason;
+    type Context<DB: Database> = EthEvmContext<DB>;
+
+    fn create_evm<DB: Database>(&self, db: DB, input: EvmEnv) -> Self::Evm<DB, NoOpInspector> {
+        let evm = Context::mainnet()
+            .with_db(db)
+            .with_cfg(input.cfg_env)
+            .with_block(input.block_env)
+            .build_mainnet_with_inspector(NoOpInspector {})
+            .with_precompiles(L1ReadPrecompiles::new());
+        EthEvm::new(evm, false)
+    }
+
+    fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>, EthInterpreter>>(
+        &self,
+        db: DB,
+        input: EvmEnv,
+        inspector: I,
+    ) -> Self::Evm<DB, I> {
+        EthEvm::new(self.create_evm(db, input).into_inner().with_inspector(inspector), true)
+    }
+}
+
 impl ConfigureEvm for EthEvmConfig {
-    type EvmFactory = EthEvmFactory;
+    type EvmFactory = HyperliquidEvmFactory;
 
     fn evm_factory(&self) -> &Self::EvmFactory {
         &self.evm_factory

@@ -21,7 +21,7 @@ use reth_primitives_traits::{
     sync::OnceLock,
     transaction::{
         error::TransactionConversionError,
-        signed::{is_impersonated_tx, RecoveryError, HL_SYSTEM_TX_FROM_ADDR},
+        signed::{is_impersonated_tx, RecoveryError},
     },
     InMemorySize, SignedTransaction,
 };
@@ -836,8 +836,8 @@ impl SignedTransaction for TransactionSigned {
     fn recover_signer(&self) -> Result<Address, RecoveryError> {
         let signature = self.signature();
-        if is_impersonated_tx(signature, self.gas_price()) {
-            return Ok(HL_SYSTEM_TX_FROM_ADDR);
+        if let Some(address) = is_impersonated_tx(signature, self.gas_price()) {
+            return Ok(address);
         }
         let signature_hash = self.signature_hash();
         recover_signer(&self.signature, signature_hash)


@@ -15,7 +15,7 @@ pub mod shanghai;
 use alloy_rpc_types_engine::{ExecutionData, PayloadError};
 use reth_chainspec::EthereumHardforks;
 use reth_primitives::SealedBlock;
-use reth_primitives_traits::transaction::signed::HL_SYSTEM_TX_FROM_ADDR;
+use reth_primitives_traits::transaction::signed::is_impersonated_tx;
 use reth_primitives_traits::{Block, SignedTransaction};
 use std::sync::Arc;
 
@@ -94,9 +94,7 @@ impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> {
         let (normal, system) = transactions.into_iter().partition(|tx| {
             let tx = T::decode_2718(&mut tx.iter().as_slice());
             match tx {
-                Ok(tx) => {
-                    !matches!(tx.recover_signer(), Ok(address) if HL_SYSTEM_TX_FROM_ADDR == address)
-                }
+                Ok(tx) => is_impersonated_tx(tx.signature(), tx.gas_price()).is_none(),
                 Err(_) => true,
             }
         });


@@ -1,7 +1,7 @@
 //! Block body abstraction.
 
 use crate::{
-    transaction::signed::{RecoveryError, HL_SYSTEM_TX_FROM_ADDR},
+    transaction::signed::{is_impersonated_tx, RecoveryError},
     BlockHeader, FullSignedTx, InMemorySize, MaybeSerde, MaybeSerdeBincodeCompat,
     SignedTransaction,
 };
@@ -85,7 +85,7 @@ pub trait BlockBody:
         let transactions: Vec<Self::Transaction> = self
             .transactions()
             .into_iter()
-            .filter(|tx| !matches!(tx.recover_signer(), Ok(address) if HL_SYSTEM_TX_FROM_ADDR == address))
+            .filter(|&tx| is_impersonated_tx(tx.signature(), tx.gas_price()).is_none())
             .cloned()
             .collect::<Vec<_>>();
         alloy_consensus::proofs::calculate_transaction_root(transactions.as_slice())


@@ -10,9 +10,10 @@ use alloy_consensus::{
     SignableTransaction, Transaction,
 };
 use alloy_eips::eip2718::{Decodable2718, Encodable2718};
-use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, TxHash, B256};
+use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, TxHash, B256, U160};
 use core::hash::Hash;
 use revm_primitives::{address, U256};
+use std::ops::Add;
 
 /// Helper trait that unifies all behaviour required by block to support full node operations.
 pub trait FullSignedTx: SignedTransaction + MaybeCompact + MaybeSerdeBincodeCompat {}
@@ -23,11 +24,20 @@ pub const HL_SYSTEM_TX_FROM_ADDR: Address = address!("22222222222222222222222222
 
 /// Check if the transaction is impersonated.
 /// Signature part is introduced in block_ingest, while the gas_price is trait of hyperliquid system transactions.
-pub fn is_impersonated_tx(signature: &Signature, gas_price: Option<u128>) -> bool {
-    signature.r() == U256::from(1)
-        && signature.s() == U256::from(1)
-        && signature.v() == true
-        && gas_price == Some(0u128)
+pub fn is_impersonated_tx(signature: &Signature, gas_price: Option<u128>) -> Option<Address> {
+    if signature.r() == U256::from(1) && signature.v() == true && gas_price == Some(0u128) {
+        if signature.s() == U256::from(1) {
+            Some(HL_SYSTEM_TX_FROM_ADDR)
+        } else {
+            let s = signature.s().reduce_mod(U256::from(U160::MAX).add(U256::from(1)));
+            let s = U160::from(s);
+            let s: [u8; 20] = s.to_be_bytes();
+            let s = Address::from_slice(&s);
+            Some(s)
+        }
+    } else {
+        None
+    }
 }
 
 /// A signed transaction.
@@ -89,8 +99,8 @@ pub trait SignedTransaction:
     /// Returns `None` if the transaction's signature is invalid, see also
     /// `reth_primitives::transaction::recover_signer_unchecked`.
     fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
-        if is_impersonated_tx(self.signature(), self.gas_price()) {
-            return Ok(HL_SYSTEM_TX_FROM_ADDR);
+        if let Some(address) = is_impersonated_tx(self.signature(), self.gas_price()) {
+            return Ok(address);
         }
         self.recover_signer_unchecked_with_buf(&mut Vec::new()).map_err(|_| RecoveryError)
    }
@@ -183,8 +193,8 @@ impl SignedTransaction for PooledTransaction {
         buf: &mut Vec<u8>,
     ) -> Result<Address, RecoveryError> {
         let signature = self.signature();
-        if is_impersonated_tx(signature, self.gas_price()) {
-            return Ok(HL_SYSTEM_TX_FROM_ADDR);
+        if let Some(address) = is_impersonated_tx(signature, self.gas_price()) {
+            return Ok(address);
         }
         match self {
             Self::Legacy(tx) => tx.tx().encode_for_signing(buf),
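For illustration, a hedged sketch of the recovery side of this scheme: with r = 1, v = true, and gas price 0, the sender is either the fixed system address (when s = 1) or the low 160 bits of `s` read as an address, mirroring the `is_impersonated_tx` rewrite above.

    use alloy_primitives::{Address, U256};

    /// Sketch: derive the impersonated sender from a non-trivial `s` value.
    fn address_from_s(s: U256) -> Address {
        let modulus = U256::from(1u8) << 160;        // 2^160
        let reduced = s % modulus;                   // keep the low 160 bits
        let bytes: [u8; 32] = reduced.to_be_bytes(); // big-endian; address in the last 20 bytes
        Address::from_slice(&bytes[12..])
    }

This is the counterpart of the `s` encoding constructed in the block-ingest loop earlier in this PR.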


@@ -279,8 +279,6 @@ where
             // Reset the checkpoint
             self.save_execution_checkpoint(provider, None)?;
 
-            validate_state_root(trie_root, SealedHeader::seal_slow(target_block), to_block)?;
-
             Ok(ExecOutput {
                 checkpoint: StageCheckpoint::new(to_block)
                     .with_entities_stage_checkpoint(entities_checkpoint),
@@ -327,13 +325,6 @@ where
         let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
             .map_err(|e| StageError::Fatal(Box::new(e)))?;
 
-        // Validate the calculated state root
-        let target = provider
-            .header_by_number(input.unwind_to)?
-            .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?;
-
-        validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?;
-
         // Validation passed, apply unwind changes to the database.
         provider.write_trie_updates(&updates)?;
@@ -344,26 +335,6 @@ where
     }
 }
 
-/// Check that the computed state root matches the root in the expected header.
-#[inline]
-fn validate_state_root<H: BlockHeader + Sealable + Debug>(
-    got: B256,
-    expected: SealedHeader<H>,
-    target_block: BlockNumber,
-) -> Result<(), StageError> {
-    if got == expected.state_root() {
-        Ok(())
-    } else {
-        error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
-        Err(StageError::Block {
-            error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff(
-                GotExpected { got, expected: expected.state_root() }.into(),
-            )),
-            block: Box::new(expected.block_with_parent()),
-        })
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;


@@ -19,8 +19,6 @@ use reth_provider::{
     StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter,
 };
 use reth_stages_types::{StageCheckpoint, StageId};
-use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress};
-use reth_trie_db::DatabaseStateRoot;
 use serde::{Deserialize, Serialize};
 use std::io::BufRead;
 use tracing::{debug, error, info, trace};
@@ -39,9 +37,6 @@ pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;
 // account)
 pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;
 
-/// Soft limit for the number of flushed updates after which to log progress summary.
-const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
-
 /// Storage initialization error type.
 #[derive(Debug, thiserror::Error, Clone)]
 pub enum InitStorageError {
@@ -415,27 +410,6 @@ where
     // write state to db
     dump_state(collector, provider_rw, block)?;
 
-    // compute and compare state root. this advances the stage checkpoints.
-    let computed_state_root = compute_state_root(provider_rw)?;
-    if computed_state_root == expected_state_root {
-        info!(target: "reth::cli",
-            ?computed_state_root,
-            "Computed state root matches state root in state dump"
-        );
-    } else {
-        error!(target: "reth::cli",
-            ?computed_state_root,
-            ?expected_state_root,
-            "Computed state root does not match state root in state dump"
-        );
-
-        return Err(InitStorageError::StateRootMismatch(GotExpected {
-            got: computed_state_root,
-            expected: expected_state_root,
-        })
-        .into())
-    }
-
     // insert sync stages for stages that require state
     for stage in StageId::STATE_REQUIRED {
         provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
@@ -547,60 +521,6 @@ where
     Ok(())
 }
 
-/// Computes the state root (from scratch) based on the accounts and storages present in the
-/// database.
-fn compute_state_root<Provider>(provider: &Provider) -> eyre::Result<B256>
-where
-    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
-{
-    trace!(target: "reth::cli", "Computing state root");
-    let tx = provider.tx_ref();
-    let mut intermediate_state: Option<IntermediateStateRootState> = None;
-    let mut total_flushed_updates = 0;
-
-    loop {
-        match StateRootComputer::from_tx(tx)
-            .with_intermediate_state(intermediate_state)
-            .root_with_progress()?
-        {
-            StateRootProgress::Progress(state, _, updates) => {
-                let updated_len = provider.write_trie_updates(&updates)?;
-                total_flushed_updates += updated_len;
-
-                trace!(target: "reth::cli",
-                    last_account_key = %state.last_account_key,
-                    updated_len,
-                    total_flushed_updates,
-                    "Flushing trie updates"
-                );
-
-                intermediate_state = Some(*state);
-
-                if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 {
-                    info!(target: "reth::cli",
-                        total_flushed_updates,
-                        "Flushing trie updates"
-                    );
-                }
-            }
-            StateRootProgress::Complete(root, _, updates) => {
-                let updated_len = provider.write_trie_updates(&updates)?;
-                total_flushed_updates += updated_len;
-
-                trace!(target: "reth::cli",
-                    %root,
-                    updated_len,
-                    total_flushed_updates,
-                    "State root has been computed"
-                );
-
-                return Ok(root)
-            }
-        }
-    }
-}
-
 /// Type to deserialize state root from state dump file.
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 struct StateRoot {