feat(cli): debug replay engine (#5972)
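In short: running a node with the new --debug.engine-api-store <PATH> flag intercepts every Engine API message headed for the consensus engine (forkchoice updates and new payloads) and writes each one to <PATH> as a timestamped JSON file. The new reth debug replay-engine subcommand reads those files back via --engine-api-store <PATH> and forwards them to a freshly built consensus engine in timestamp order, waiting --interval milliseconds (1000 by default) between messages.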
@@ -2,6 +2,7 @@
 
 use clap::Args;
 use reth_primitives::{TxHash, B256};
+use std::path::PathBuf;
 
 /// Parameters for debugging purposes
 #[derive(Debug, Args, PartialEq, Default)]

@@ -57,6 +58,12 @@ pub struct DebugArgs {
         conflicts_with = "hook_transaction"
     )]
     pub hook_all: bool,
+
+    /// The path to store engine API messages at.
+    /// If specified, all of the intercepted engine API messages
+    /// will be written to the specified location.
+    #[arg(long = "debug.engine-api-store", help_heading = "Debug", value_name = "PATH")]
+    pub engine_api_store: Option<PathBuf>,
 }
 
 #[cfg(test)]

@@ -11,7 +11,10 @@ use crate::{
         db_type::{DatabaseBuilder, DatabaseInstance},
         ext::{RethCliExt, RethNodeCommandConfig},
     },
-    commands::node::{cl_events::ConsensusLayerHealthEvents, events},
+    commands::{
+        debug_cmd::EngineApiStore,
+        node::{cl_events::ConsensusLayerHealthEvents, events},
+    },
     dirs::{ChainPath, DataDirPath, MaybePlatformPath},
     init::init_genesis,
     prometheus_exporter,

@@ -1109,7 +1112,16 @@ impl<DB: Database + DatabaseMetrics + DatabaseMetadata + 'static> NodeBuilderWit
         let payload_builder =
             ext.spawn_payload_builder_service(&self.config.builder, &components)?;
 
-        let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel();
+        let (consensus_engine_tx, mut consensus_engine_rx) = unbounded_channel();
+        if let Some(store_path) = self.config.debug.engine_api_store.clone() {
+            let (engine_intercept_tx, engine_intercept_rx) = unbounded_channel();
+            let engine_api_store = EngineApiStore::new(store_path);
+            executor.spawn_critical(
+                "engine api interceptor",
+                engine_api_store.intercept(consensus_engine_rx, engine_intercept_tx),
+            );
+            consensus_engine_rx = engine_intercept_rx;
+        };
         let max_block = self.config.max_block(&network_client, provider_factory.clone()).await?;
 
         // Configure the pipeline

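The wiring above is a plain channel tee: when --debug.engine-api-store is set, the receiver that would otherwise feed the consensus engine directly is handed to EngineApiStore::intercept, which records each message and forwards it on a replacement channel the engine consumes instead; when the flag is unset, nothing changes. A minimal, self-contained sketch of the same pattern, with a hypothetical Msg type standing in for BeaconEngineMessage and persistence reduced to a print:

use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

#[derive(Debug)]
struct Msg(u64); // stand-in for BeaconEngineMessage

// Record every message, then pass it along unchanged (EngineApiStore::on_message
// does the recording in the real code; here it is just a println).
async fn intercept(mut rx: UnboundedReceiver<Msg>, to_engine: UnboundedSender<Msg>) {
    while let Some(msg) = rx.recv().await {
        println!("recorded {msg:?}");
        let _ = to_engine.send(msg);
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = unbounded_channel();

    // Only when recording is enabled: splice the interceptor in front of the engine.
    let (intercept_tx, intercept_rx) = unbounded_channel();
    tokio::spawn(intercept(rx, intercept_tx));
    rx = intercept_rx;

    tx.send(Msg(1)).unwrap();
    drop(tx);
    assert!(rx.recv().await.is_some()); // the engine still sees every message
}

Because the replacement happens before the engine is constructed, the engine keeps reading from consensus_engine_rx as usual and is unaware that its input is being recorded.
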
@@ -9,6 +9,9 @@ mod execution;
 mod in_memory_merkle;
 mod merkle;
 
+mod replay_engine;
+pub(crate) use replay_engine::EngineApiStore;
+
 /// `reth debug` command
 #[derive(Debug, Parser)]
 pub struct Command {

@@ -27,6 +30,8 @@ pub enum Subcommands {
     InMemoryMerkle(in_memory_merkle::Command),
     /// Debug block building.
     BuildBlock(build_block::Command),
+    /// Debug engine API by replaying stored messages.
+    ReplayEngine(replay_engine::Command),
 }
 
 impl Command {

@@ -37,6 +42,7 @@ impl Command {
             Subcommands::Merkle(command) => command.execute(ctx).await,
             Subcommands::InMemoryMerkle(command) => command.execute(ctx).await,
             Subcommands::BuildBlock(command) => command.execute(ctx).await,
+            Subcommands::ReplayEngine(command) => command.execute(ctx).await,
         }
     }
 }

bin/reth/src/commands/debug_cmd/replay_engine.rs (new file, 325 lines):
use crate::{
    args::{
        get_secret_key,
        utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS},
        DatabaseArgs, NetworkArgs,
    },
    dirs::{DataDirPath, MaybePlatformPath},
    runner::CliContext,
};
use clap::Parser;
use eyre::Context;
use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig};
use reth_beacon_consensus::{
    hooks::EngineHooks, BeaconConsensus, BeaconConsensusEngine, BeaconEngineMessage,
};
use reth_blockchain_tree::{
    BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals,
};
use reth_config::Config;
use reth_db::{init_db, DatabaseEnv};
use reth_interfaces::consensus::Consensus;
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_payload_builder::PayloadBuilderService;
use reth_primitives::{
    fs::{self},
    ChainSpec,
};
use reth_provider::{providers::BlockchainProvider, ProviderFactory};
use reth_revm::EvmProcessorFactory;
use reth_rpc_types::{
    engine::{CancunPayloadFields, ForkchoiceState, PayloadAttributes},
    ExecutionPayload,
};
use reth_stages::Pipeline;
use reth_tasks::TaskExecutor;
use reth_transaction_pool::noop::NoopTransactionPool;
use serde::{Deserialize, Serialize};
use std::{
    collections::BTreeMap,
    net::{SocketAddr, SocketAddrV4},
    path::PathBuf,
    sync::Arc,
    time::{Duration, SystemTime},
};
use tokio::sync::{
    mpsc::{self, UnboundedReceiver, UnboundedSender},
    oneshot,
};
use tracing::*;

/// `reth debug replay-engine` command
/// This script will read stored engine API messages and replay them by the timestamp.
/// It does not require
#[derive(Debug, Parser)]
pub struct Command {
    /// The path to the data dir for all reth files and subdirectories.
    ///
    /// Defaults to the OS-specific data directory:
    ///
    /// - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
    /// - Windows: `{FOLDERID_RoamingAppData}/reth/`
    /// - macOS: `$HOME/Library/Application Support/reth/`
    #[arg(long, value_name = "DATA_DIR", verbatim_doc_comment, default_value_t)]
    datadir: MaybePlatformPath<DataDirPath>,

    /// The chain this node is running.
    ///
    /// Possible values are either a built-in chain or the path to a chain specification file.
    #[arg(
        long,
        value_name = "CHAIN_OR_PATH",
        long_help = chain_help(),
        default_value = SUPPORTED_CHAINS[0],
        value_parser = genesis_value_parser
    )]
    chain: Arc<ChainSpec>,

    #[clap(flatten)]
    db: DatabaseArgs,

    #[clap(flatten)]
    network: NetworkArgs,

    /// The path to read engine API messages from.
    #[arg(long = "engine-api-store", value_name = "PATH")]
    engine_api_store: PathBuf,

    /// The number of milliseconds between Engine API messages.
    #[arg(long = "interval", default_value_t = 1_000)]
    interval: u64,
}

impl Command {
    async fn build_network(
        &self,
        config: &Config,
        task_executor: TaskExecutor,
        db: Arc<DatabaseEnv>,
        network_secret_path: PathBuf,
        default_peers_path: PathBuf,
    ) -> eyre::Result<NetworkHandle> {
        let secret_key = get_secret_key(&network_secret_path)?;
        let network = self
            .network
            .network_config(config, self.chain.clone(), secret_key, default_peers_path)
            .with_task_executor(Box::new(task_executor))
            .listener_addr(SocketAddr::V4(SocketAddrV4::new(self.network.addr, self.network.port)))
            .discovery_addr(SocketAddr::V4(SocketAddrV4::new(
                self.network.discovery.addr,
                self.network.discovery.port,
            )))
            .build(ProviderFactory::new(db, self.chain.clone()))
            .start_network()
            .await?;
        info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network");
        debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID");
        Ok(network)
    }

    /// Execute `debug replay-engine` command
    pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> {
        let config = Config::default();

        // Add network name to data dir
        let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
        let db_path = data_dir.db_path();
        fs::create_dir_all(&db_path)?;

        // Initialize the database
        let db = Arc::new(init_db(db_path, self.db.log_level)?);
        let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone());

        let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

        // Configure blockchain tree
        let tree_externals = TreeExternals::new(
            provider_factory.clone(),
            Arc::clone(&consensus),
            EvmProcessorFactory::new(self.chain.clone()),
        );
        let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?;
        let blockchain_tree = ShareableBlockchainTree::new(tree);

        // Set up the blockchain provider
        let blockchain_db =
            BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?;

        // Set up network
        let network_secret_path =
            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
        let network = self
            .build_network(
                &config,
                ctx.task_executor.clone(),
                db.clone(),
                network_secret_path,
                data_dir.known_peers_path(),
            )
            .await?;

        // Set up payload builder
        #[cfg(not(feature = "optimism"))]
        let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::default();

        // Optimism's payload builder is implemented on the OptimismPayloadBuilder type.
        #[cfg(feature = "optimism")]
        let payload_builder = reth_optimism_payload_builder::OptimismPayloadBuilder::default();

        let payload_generator = BasicPayloadJobGenerator::with_builder(
            blockchain_db.clone(),
            NoopTransactionPool::default(),
            ctx.task_executor.clone(),
            BasicPayloadJobGeneratorConfig::default(),
            self.chain.clone(),
            payload_builder,
        );
        let (payload_service, payload_builder) = PayloadBuilderService::new(payload_generator);
        ctx.task_executor.spawn_critical("payload builder service", Box::pin(payload_service));

        // Configure the consensus engine
        let network_client = network.fetch_client().await?;
        let (consensus_engine_tx, consensus_engine_rx) = mpsc::unbounded_channel();
        let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel(
            network_client,
            Pipeline::builder().build(provider_factory),
            blockchain_db.clone(),
            Box::new(ctx.task_executor.clone()),
            Box::new(network),
            None,
            false,
            payload_builder,
            None,
            u64::MAX,
            consensus_engine_tx,
            consensus_engine_rx,
            EngineHooks::new(),
        )?;
        info!(target: "reth::cli", "Consensus engine initialized");

        // Run consensus engine to completion
        let (tx, rx) = oneshot::channel();
        info!(target: "reth::cli", "Starting consensus engine");
        ctx.task_executor.spawn_critical_blocking("consensus engine", async move {
            let res = beacon_consensus_engine.await;
            let _ = tx.send(res);
        });

        let engine_api_store = EngineApiStore::new(self.engine_api_store.clone());
        for filepath in engine_api_store.engine_messages_iter()? {
            let contents =
                fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?;
            let message = serde_json::from_slice(&contents)
                .wrap_err(format!("failed to parse: {}", filepath.display()))?;
            debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message");
            match message {
                StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => {
                    let response =
                        beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?;
                    debug!(target: "reth::cli", ?response, "Received for forkchoice updated");
                }
                StoredEngineApiMessage::NewPayload { payload, cancun_fields } => {
                    let response = beacon_engine_handle.new_payload(payload, cancun_fields).await?;
                    debug!(target: "reth::cli", ?response, "Received for new payload");
                }
            };

            // Pause before next message
            tokio::time::sleep(Duration::from_millis(self.interval)).await;
        }

        info!(target: "reth::cli", "Finished replaying engine API messages");

        match rx.await? {
            Ok(()) => info!("Beacon consensus engine exited successfully"),
            Err(error) => {
                error!(target: "reth::cli", %error, "Beacon consensus engine exited with an error")
            }
        };

        Ok(())
    }
}

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum StoredEngineApiMessage {
    ForkchoiceUpdated { state: ForkchoiceState, payload_attrs: Option<PayloadAttributes> },
    NewPayload { payload: ExecutionPayload, cancun_fields: Option<CancunPayloadFields> },
}

#[derive(Debug)]
pub(crate) struct EngineApiStore {
    path: PathBuf,
}

impl EngineApiStore {
    pub(crate) fn new(path: PathBuf) -> Self {
        Self { path }
    }

    fn on_message(&self, msg: &BeaconEngineMessage, received_at: SystemTime) -> eyre::Result<()> {
        fs::create_dir_all(&self.path)?; // ensure that the store path has been created
        let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis();
        match msg {
            BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx: _tx } => {
                let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash);
                fs::write(
                    self.path.join(filename),
                    serde_json::to_vec(&StoredEngineApiMessage::ForkchoiceUpdated {
                        state: *state,
                        payload_attrs: payload_attrs.clone(),
                    })?,
                )?;
            }
            BeaconEngineMessage::NewPayload { payload, cancun_fields, tx: _tx } => {
                let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash());
                fs::write(
                    self.path.join(filename),
                    serde_json::to_vec(&StoredEngineApiMessage::NewPayload {
                        payload: payload.clone(),
                        cancun_fields: cancun_fields.clone(),
                    })?,
                )?;
            }
            // noop
            BeaconEngineMessage::TransitionConfigurationExchanged |
            BeaconEngineMessage::EventListener(_) => (),
        };
        Ok(())
    }

    pub(crate) fn engine_messages_iter(&self) -> eyre::Result<impl Iterator<Item = PathBuf>> {
        let mut filenames_by_ts = BTreeMap::<u64, Vec<PathBuf>>::default();
        for entry in fs::read_dir(&self.path)? {
            let entry = entry?;
            let filename = entry.file_name();
            if let Some(filename) = filename.to_str().filter(|n| n.ends_with(".json")) {
                if let Some(Ok(timestamp)) = filename.split('-').next().map(|n| n.parse::<u64>()) {
                    filenames_by_ts.entry(timestamp).or_default().push(entry.path());
                    tracing::debug!(target: "engine::store", timestamp, filename, "Queued engine API message");
                } else {
                    tracing::warn!(target: "engine::store", %filename, "Could not parse timestamp from filename")
                }
            } else {
                tracing::warn!(target: "engine::store", ?filename, "Skipping non json file");
            }
        }
        Ok(filenames_by_ts.into_iter().flat_map(|(_, paths)| paths))
    }

    pub(crate) async fn intercept(
        self,
        mut rx: UnboundedReceiver<BeaconEngineMessage>,
        to_engine: UnboundedSender<BeaconEngineMessage>,
    ) {
        loop {
            let Some(msg) = rx.recv().await else { break };
            if let Err(error) = self.on_message(&msg, SystemTime::now()) {
                error!(target: "engine::intercept", ?msg, %error, "Error handling Engine API message");
            }
            let _ = to_engine.send(msg);
        }
    }
}

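A note on the on-disk format defined above: every recorded message becomes one JSON file named <unix-millis>-fcu-<head_block_hash>.json or <unix-millis>-new_payload-<block_hash>.json and contains a serde_json-encoded StoredEngineApiMessage; TransitionConfigurationExchanged and EventListener messages are not persisted. On replay, engine_messages_iter groups the files by their millisecond prefix in a BTreeMap, so messages come back in recording order regardless of directory iteration order. A tiny standalone sketch of that ordering rule, using made-up filenames:

use std::collections::BTreeMap;

fn main() {
    // Hypothetical filenames following the <timestamp>-<kind>-<hash>.json scheme above.
    let files = ["1700000000250-new_payload-0xbeef.json", "1700000000000-fcu-0xdead.json"];
    let mut by_ts = BTreeMap::<u64, Vec<&str>>::new();
    for name in files {
        // The numeric prefix before the first '-' is the unix-millisecond timestamp.
        if let Some(Ok(ts)) = name.split('-').next().map(|n| n.parse::<u64>()) {
            by_ts.entry(ts).or_default().push(name);
        }
    }
    let ordered: Vec<_> = by_ts.into_values().flatten().collect();
    assert_eq!(ordered[0], "1700000000000-fcu-0xdead.json"); // oldest message replays first
}
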
@@ -8,7 +8,7 @@ use alloy_primitives::B256;
 ///
 /// See also:
 /// <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#request>
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, serde::Serialize, serde::Deserialize)]
 pub struct CancunPayloadFields {
     /// The parent beacon block root.
     pub parent_beacon_block_root: B256,
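This last hunk exists to support the store: StoredEngineApiMessage::NewPayload carries an Option<CancunPayloadFields>, and deriving serde::Serialize and serde::Deserialize on that enum requires CancunPayloadFields to implement both traits, hence the added derives.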