Merge pull request #105 from hl-archive-node/feat/cache-spot-meta

feat: cache spot metadata in database to reduce API calls
sprites0
2025-11-05 02:56:06 -05:00
committed by GitHub
18 changed files with 255 additions and 50 deletions
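The change in a nutshell: the ERC-20-address-to-spot-index mapping, which previously lived only in memory and was refetched from the API after every restart or cache miss, is now persisted in a new chain-specific SpotMetadata table, loaded into the in-memory cache at startup, and written back whenever an on-demand fetch refreshes it. Below is a rough, std-only sketch of that flow; Db and fetch_from_api are hypothetical stand-ins for the MDBX SpotMetadata table and erc20_contract_to_spot_token, and the real code rmp-serializes the map into a single Bytes value under SPOT_METADATA_KEY.

// Minimal sketch of the load-or-fetch-and-persist scheme this PR introduces.
use std::collections::BTreeMap;
use std::sync::{Mutex, OnceLock};

type Address = [u8; 20];
type SpotIndex = u64;

// Hypothetical stand-in for the chain-specific database table.
#[derive(Default)]
struct Db {
    spot_metadata: Mutex<Option<BTreeMap<Address, SpotIndex>>>,
}

static CACHE: OnceLock<Mutex<BTreeMap<Address, SpotIndex>>> = OnceLock::new();

fn cache() -> &'static Mutex<BTreeMap<Address, SpotIndex>> {
    CACHE.get_or_init(|| Mutex::new(BTreeMap::new()))
}

// Startup path (load_spot_metadata_cache): seed the in-memory cache from
// whatever was persisted, so a warm start needs no API call.
fn load_cache(db: &Db) {
    if let Some(map) = db.spot_metadata.lock().unwrap().clone() {
        *cache().lock().unwrap() = map;
    }
}

// Hypothetical stand-in for the API call that rebuilds the full mapping.
fn fetch_from_api() -> BTreeMap<Address, SpotIndex> {
    BTreeMap::from([([0u8; 20], 1)])
}

// Lookup path (system_tx_to_reth_transaction): on a miss, refetch the whole
// mapping, persist it to the database, update the cache, then retry.
fn spot_index(db: &Db, addr: Address) -> Option<SpotIndex> {
    if let Some(idx) = cache().lock().unwrap().get(&addr) {
        return Some(*idx);
    }
    let fresh = fetch_from_api();
    *db.spot_metadata.lock().unwrap() = Some(fresh.clone());
    *cache().lock().unwrap() = fresh;
    cache().lock().unwrap().get(&addr).copied()
}

fn main() {
    let db = Db::default();
    load_cache(&db); // nothing persisted yet
    assert_eq!(spot_index(&db, [0u8; 20]), Some(1)); // miss -> fetch -> persist
    load_cache(&db); // after a restart this now hits the table, not the API
}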

View File

@@ -1,5 +1,5 @@
pub mod call_forwarder;
pub mod hl_node_compliance;
pub mod tx_forwarder;
pub mod subscribe_fixup;
pub mod tx_forwarder;
mod utils;

View File

@@ -1,7 +1,10 @@
pub mod hl;
pub mod parser;
use crate::{hardforks::HlHardforks, node::primitives::{header::HlHeaderExtras, HlHeader}};
use crate::{
hardforks::HlHardforks,
node::primitives::{HlHeader, header::HlHeaderExtras},
};
use alloy_eips::eip7840::BlobParams;
use alloy_genesis::Genesis;
use alloy_primitives::{Address, B256, U256};

View File

@@ -18,7 +18,9 @@ use reth_hl::{
HlNode,
cli::{Cli, HlNodeArgs},
rpc::precompile::{HlBlockPrecompileApiServer, HlBlockPrecompileExt},
spot_meta::init as spot_meta_init,
storage::tables::Tables,
types::set_spot_metadata_db,
},
};
use tracing::info;
@@ -95,6 +97,16 @@ fn main() -> eyre::Result<()> {
})
.apply(|mut builder| {
builder.db_mut().create_tables_for::<Tables>().expect("create tables");
let chain_id = builder.config().chain.inner.chain().id();
let db = builder.db_mut().clone();
// Set database handle for on-demand persistence
set_spot_metadata_db(db.clone());
// Load spot metadata from database and initialize cache
spot_meta_init::load_spot_metadata_cache(&db, chain_id);
builder
})
.launch()

View File

@@ -2,7 +2,7 @@ use crate::{
chainspec::{HlChainSpec, parser::HlChainSpecParser},
node::{
HlNode, consensus::HlConsensus, evm::config::HlEvmConfig, migrate::Migrator,
storage::tables::Tables,
spot_meta::init as spot_meta_init, storage::tables::Tables,
},
pseudo_peer::BlockSourceArgs,
};
@@ -201,7 +201,12 @@ where
let data_dir = env.datadir.clone().resolve_datadir(env.chain.chain());
let db_path = data_dir.db();
init_db(db_path.clone(), env.db.database_args())?;
init_db_for::<_, Tables>(db_path, env.db.database_args())?;
init_db_for::<_, Tables>(db_path.clone(), env.db.database_args())?;
// Initialize spot metadata in database
let chain_id = env.chain.chain().id();
spot_meta_init::init_spot_metadata(db_path, env.db.database_args(), chain_id)?;
Ok(())
}

View File

@@ -1,4 +1,8 @@
use crate::{hardforks::HlHardforks, node::{primitives::HlHeader, HlNode}, HlBlock, HlBlockBody, HlPrimitives};
use crate::{
HlBlock, HlBlockBody, HlPrimitives,
hardforks::HlHardforks,
node::{HlNode, primitives::HlHeader},
};
use reth::{
api::{FullNodeTypes, NodeTypes},
beacon_consensus::EthBeaconConsensus,

View File

@@ -1,5 +1,6 @@
use crate::{
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig}, HlBlock, HlHeader
HlBlock, HlHeader,
node::evm::config::{HlBlockExecutorFactory, HlEvmConfig},
};
use reth_evm::{
block::BlockExecutionError,

View File

@@ -270,8 +270,8 @@ impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
let mut first = true;
for (block_range, _tx_ranges) in all_static_files {
let migration_needed = self.using_old_header(block_range.start())?
|| self.using_old_header(block_range.end())?;
let migration_needed = self.using_old_header(block_range.start())? ||
self.using_old_header(block_range.end())?;
if !migration_needed {
// Create a placeholder symlink
self.create_placeholder(block_range)?;

View File

@@ -179,7 +179,7 @@ where
#[cfg(test)]
mod tests {
use crate::{chainspec::hl::hl_mainnet, HlHeader};
use crate::{HlHeader, chainspec::hl::hl_mainnet};
use super::*;
use alloy_primitives::{B256, U128};

View File

@@ -3,8 +3,13 @@ use alloy_primitives::Address;
use reth_primitives_traits::{BlockBody as BlockBodyTrait, InMemorySize};
use serde::{Deserialize, Serialize};
use crate::node::types::{ReadPrecompileCall, ReadPrecompileCalls};
use crate::{HlHeader, node::primitives::TransactionSigned};
use crate::{
HlHeader,
node::{
primitives::TransactionSigned,
types::{ReadPrecompileCall, ReadPrecompileCalls},
},
};
/// Block body for HL. It is equivalent to Ethereum [`BlockBody`] but additionally stores sidecars
/// for blob transactions.
@@ -33,13 +38,11 @@ pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned, HlHeader>;
impl InMemorySize for HlBlockBody {
fn size(&self) -> usize {
self.inner.size()
+ self
.sidecars
self.inner.size() +
self.sidecars
.as_ref()
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>())
+ self
.read_precompile_calls
.map_or(0, |s| s.capacity() * core::mem::size_of::<BlobTransactionSidecar>()) +
self.read_precompile_calls
.as_ref()
.map_or(0, |s| s.0.capacity() * core::mem::size_of::<ReadPrecompileCall>())
}

View File

@@ -45,7 +45,11 @@ pub struct HlHeaderExtras {
}
impl HlHeader {
pub(crate) fn from_ethereum_header(header: Header, receipts: &[EthereumReceipt], system_tx_count: u64) -> HlHeader {
pub(crate) fn from_ethereum_header(
header: Header,
receipts: &[EthereumReceipt],
system_tx_count: u64,
) -> HlHeader {
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs));
HlHeader {
inner: header,
@@ -183,8 +187,9 @@ impl reth_codecs::Compact for HlHeader {
// because Compact trait requires the Bytes field to be placed at the end of the struct.
// Bytes::from_compact just reads all trailing data as the Bytes field.
//
// Hence we need to use other form of serialization, since extra headers are not Compact-compatible.
// We just treat all header fields as rmp-serialized one `Bytes` field.
// Hence we need to use other form of serialization, since extra headers are not
// Compact-compatible. We just treat all header fields as rmp-serialized one `Bytes`
// field.
let result: Bytes = rmp_serde::to_vec(&self).unwrap().into();
result.to_compact(buf)
}

View File

@@ -1,6 +1,6 @@
#![allow(clippy::owned_cow)]
use super::{HlBlock, HlBlockBody, TransactionSigned};
use crate::{node::types::ReadPrecompileCalls, HlHeader};
use crate::{HlHeader, node::types::ReadPrecompileCalls};
use alloy_consensus::{BlobTransactionSidecar, BlockBody};
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::Address;

View File

@@ -6,7 +6,10 @@ use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use super::{HlBlock, HlBlockBody};
use crate::{node::{primitives::BlockBody, types::ReadPrecompileCalls}, HlHeader};
use crate::{
HlHeader,
node::{primitives::BlockBody, types::ReadPrecompileCalls},
};
#[derive(Debug, Serialize, Deserialize)]
pub struct HlBlockBodyBincode<'a> {

src/node/spot_meta/init.rs (new file, +103)
View File

@@ -0,0 +1,103 @@
use crate::node::{
spot_meta::{SpotId, erc20_contract_to_spot_token},
storage::tables::{self, SPOT_METADATA_KEY},
types::reth_compat,
};
use alloy_primitives::Address;
use reth_db::{
DatabaseEnv,
cursor::DbCursorRO,
};
use reth_db_api::{
Database,
transaction::DbTx,
};
use std::{collections::BTreeMap, sync::Arc};
use tracing::info;
/// Load spot metadata from database and initialize cache
pub fn load_spot_metadata_cache(db: &Arc<DatabaseEnv>, chain_id: u64) {
// Try to read from database
let data = match db.view(|tx| -> Result<Option<Vec<u8>>, reth_db::DatabaseError> {
let mut cursor = tx.cursor_read::<tables::SpotMetadata>()?;
Ok(cursor.seek_exact(SPOT_METADATA_KEY)?.map(|(_, data)| data.to_vec()))
}) {
Ok(Ok(data)) => data,
Ok(Err(e)) => {
info!(
"Failed to read spot metadata from database: {}. Will fetch on-demand from API.",
e
);
return;
}
Err(e) => {
info!(
"Database view error while loading spot metadata: {}. Will fetch on-demand from API.",
e
);
return;
}
};
// Check if data exists
let Some(data) = data else {
info!(
"No spot metadata found in database for chain {}. Run 'init-state' to populate, or it will be fetched on-demand from API.",
chain_id
);
return;
};
// Deserialize metadata
let serializable_map = match rmp_serde::from_slice::<BTreeMap<Address, u64>>(&data) {
Ok(map) => map,
Err(e) => {
info!("Failed to deserialize spot metadata: {}. Will fetch on-demand from API.", e);
return;
}
};
// Convert and initialize cache
let metadata: BTreeMap<Address, SpotId> =
serializable_map.into_iter().map(|(addr, index)| (addr, SpotId { index })).collect();
info!("Loaded spot metadata from database ({} entries)", metadata.len());
reth_compat::initialize_spot_metadata_cache(metadata);
}
/// Initialize spot metadata in database from API
pub fn init_spot_metadata(
db_path: impl AsRef<std::path::Path>,
db_args: reth_db::mdbx::DatabaseArguments,
chain_id: u64,
) -> eyre::Result<()> {
info!("Initializing spot metadata for chain {}", chain_id);
let db = Arc::new(reth_db::open_db(db_path.as_ref(), db_args)?);
// Check if spot metadata already exists
let exists = db.view(|tx| -> Result<bool, reth_db::DatabaseError> {
let mut cursor = tx.cursor_read::<tables::SpotMetadata>()?;
Ok(cursor.seek_exact(SPOT_METADATA_KEY)?.is_some())
})??;
if exists {
info!("Spot metadata already exists in database");
return Ok(());
}
// Fetch from API
let metadata = match erc20_contract_to_spot_token(chain_id) {
Ok(m) => m,
Err(e) => {
info!("Failed to fetch spot metadata from API: {}. Will be fetched on-demand.", e);
return Ok(());
}
};
// Store to database
reth_compat::store_spot_metadata(&db, &metadata)?;
info!("Successfully fetched and stored spot metadata for chain {}", chain_id);
Ok(())
}
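
For reference, the two entry points above are wired in during startup elsewhere in this commit (see the main.rs and cli.rs hunks earlier). Roughly, assuming an open DatabaseEnv handle plus db_path, db_args, and chain_id in scope:

// Rough wiring sketch, mirroring the main.rs / cli.rs hunks above.
// 1. DB init path: fetch the mapping from the API once and store it.
spot_meta_init::init_spot_metadata(db_path, db_args, chain_id)?;
// 2. Node launch path: register the handle used to persist on-demand fetches,
//    then warm the in-memory cache from whatever is already in the table.
set_spot_metadata_db(db.clone());
spot_meta_init::load_spot_metadata_cache(&db, chain_id);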

View File

@@ -5,6 +5,7 @@ use std::collections::BTreeMap;
use crate::chainspec::{MAINNET_CHAIN_ID, TESTNET_CHAIN_ID};
pub mod init;
mod patch;
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -25,7 +26,7 @@ pub struct SpotMeta {
}
#[derive(Debug, Clone)]
pub(crate) struct SpotId {
pub struct SpotId {
pub index: u64,
}

View File

@@ -2,10 +2,21 @@ use alloy_primitives::{BlockNumber, Bytes};
use reth_db::{TableSet, TableType, TableViewer, table::TableInfo, tables};
use std::fmt;
/// Static key used for spot metadata, as the database is unique to each chain.
/// This may later serve as a versioning key to assist with future database migrations.
pub const SPOT_METADATA_KEY: u64 = 0;
tables! {
/// Read precompile calls for each block.
table BlockReadPrecompileCalls {
type Key = BlockNumber;
type Value = Bytes;
}
/// Spot metadata mapping (EVM address to spot token index).
/// Uses a constant key since the database is chain-specific.
table SpotMetadata {
type Key = u64;
type Value = Bytes;
}
}

View File

@@ -19,6 +19,9 @@ pub struct ReadPrecompileCalls(pub Vec<ReadPrecompileCall>);
pub(crate) mod reth_compat;
// Re-export spot metadata functions
pub use reth_compat::{initialize_spot_metadata_cache, set_spot_metadata_db};
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HlExtras {
pub read_precompile_calls: Option<ReadPrecompileCalls>,

View File

@@ -1,11 +1,14 @@
//! Copy of reth codebase to preserve serialization compatibility
use crate::node::storage::tables::{SPOT_METADATA_KEY, SpotMetadata};
use alloy_consensus::{Header, Signed, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy};
use alloy_primitives::{Address, BlockHash, Signature, TxKind, U256};
use alloy_primitives::{Address, BlockHash, Bytes, Signature, TxKind, U256};
use reth_db::{DatabaseEnv, DatabaseError, cursor::DbCursorRW};
use reth_db_api::{Database, transaction::DbTxMut};
use reth_primitives::TransactionSigned as RethTxSigned;
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
sync::{Arc, LazyLock, RwLock},
sync::{Arc, LazyLock, Mutex, RwLock},
};
use tracing::info;
@@ -81,33 +84,81 @@ pub struct SealedBlock {
pub body: BlockBody,
}
fn system_tx_to_reth_transaction(transaction: &SystemTx, chain_id: u64) -> TxSigned {
static EVM_MAP: LazyLock<Arc<RwLock<BTreeMap<Address, SpotId>>>> =
LazyLock::new(|| Arc::new(RwLock::new(BTreeMap::new())));
{
let Transaction::Legacy(tx) = &transaction.tx else {
panic!("Unexpected transaction type");
};
let TxKind::Call(to) = tx.to else {
panic!("Unexpected contract creation");
};
let s = if tx.input.is_empty() {
U256::from(0x1)
} else {
loop {
if let Some(spot) = EVM_MAP.read().unwrap().get(&to) {
break spot.to_s();
}
static SPOT_EVM_MAP: LazyLock<Arc<RwLock<BTreeMap<Address, SpotId>>>> =
LazyLock::new(|| Arc::new(RwLock::new(BTreeMap::new())));
info!("Contract not found: {to:?} from spot mapping, fetching again...");
*EVM_MAP.write().unwrap() = erc20_contract_to_spot_token(chain_id).unwrap();
}
};
let signature = Signature::new(U256::from(0x1), s, true);
TxSigned::Default(RethTxSigned::Legacy(Signed::new_unhashed(tx.clone(), signature)))
// Optional database handle for persisting on-demand fetches
static DB_HANDLE: LazyLock<Mutex<Option<Arc<DatabaseEnv>>>> = LazyLock::new(|| Mutex::new(None));
/// Set the database handle for persisting spot metadata
pub fn set_spot_metadata_db(db: Arc<DatabaseEnv>) {
*DB_HANDLE.lock().unwrap() = Some(db);
}
/// Initialize the spot metadata cache with data loaded from database.
/// This should be called during node initialization.
pub fn initialize_spot_metadata_cache(metadata: BTreeMap<Address, SpotId>) {
*SPOT_EVM_MAP.write().unwrap() = metadata;
}
/// Helper function to serialize and store spot metadata to database
pub fn store_spot_metadata(
db: &Arc<DatabaseEnv>,
metadata: &BTreeMap<Address, SpotId>,
) -> Result<(), DatabaseError> {
db.update(|tx| {
let mut cursor = tx.cursor_write::<SpotMetadata>()?;
// Serialize to BTreeMap<Address, u64>
let serializable_map: BTreeMap<Address, u64> =
metadata.iter().map(|(addr, spot)| (*addr, spot.index)).collect();
cursor.upsert(
SPOT_METADATA_KEY,
&Bytes::from(
rmp_serde::to_vec(&serializable_map).expect("Failed to serialize spot metadata"),
),
)?;
Ok(())
})?
}
/// Persist spot metadata to database if handle is available
fn persist_spot_metadata_to_db(metadata: &BTreeMap<Address, SpotId>) {
if let Some(db) = DB_HANDLE.lock().unwrap().as_ref() {
match store_spot_metadata(db, metadata) {
Ok(_) => info!("Persisted spot metadata to database"),
Err(e) => info!("Failed to persist spot metadata to database: {}", e),
}
}
}
fn system_tx_to_reth_transaction(transaction: &SystemTx, chain_id: u64) -> TxSigned {
let Transaction::Legacy(tx) = &transaction.tx else {
panic!("Unexpected transaction type");
};
let TxKind::Call(to) = tx.to else {
panic!("Unexpected contract creation");
};
let s = if tx.input.is_empty() {
U256::from(0x1)
} else {
loop {
if let Some(spot) = SPOT_EVM_MAP.read().unwrap().get(&to) {
break spot.to_s();
}
// Cache miss - fetch from API, update cache, and persist to database
info!("Contract not found: {to:?} from spot mapping, fetching from API...");
let metadata = erc20_contract_to_spot_token(chain_id).unwrap();
*SPOT_EVM_MAP.write().unwrap() = metadata.clone();
persist_spot_metadata_to_db(&metadata);
}
};
let signature = Signature::new(U256::from(0x1), s, true);
TxSigned::Default(RethTxSigned::Legacy(Signed::new_unhashed(tx.clone(), signature)))
}
impl SealedBlock {
pub fn to_reth_block(
&self,

View File

@@ -82,8 +82,8 @@ impl BlockPoller {
.ok_or(eyre::eyre!("Failed to find latest block number"))?;
loop {
if let Some(debug_cutoff_height) = debug_cutoff_height
&& next_block_number > debug_cutoff_height
if let Some(debug_cutoff_height) = debug_cutoff_height &&
next_block_number > debug_cutoff_height
{
next_block_number = debug_cutoff_height;
}