Mirror of https://github.com/hl-archive-node/nanoreth.git, synced 2025-12-06 10:59:55 +00:00

Compare commits: nb-2025101...860fa666e3 (1 commit)
@@ -19,23 +19,62 @@ use alloy_rpc_types::{
     TransactionInfo,
     pubsub::{Params, SubscriptionKind},
 };
-use jsonrpsee::{PendingSubscriptionSink, proc_macros::rpc};
+use jsonrpsee::{PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink, proc_macros::rpc};
 use jsonrpsee_core::{RpcResult, async_trait};
 use jsonrpsee_types::{ErrorObject, error::INTERNAL_ERROR_CODE};
 use reth::{api::FullNodeComponents, builder::rpc::RpcContext, tasks::TaskSpawner};
 use reth_primitives_traits::SignedTransaction;
 use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, ReceiptProvider};
-use reth_rpc::{EthFilter, EthPubSub};
+use reth_rpc::{EthFilter, EthPubSub, RpcTypes, eth::pubsub::SubscriptionSerializeError};
 use reth_rpc_eth_api::{
-    EthApiTypes, EthFilterApiServer, EthPubSubApiServer, RpcBlock, RpcConvert, RpcReceipt,
-    RpcTransaction, helpers::EthBlocks, transaction::ConvertReceiptInput,
+    EthApiServer, EthApiTypes, EthFilterApiServer, EthPubSubApiServer, FullEthApiTypes, RpcBlock,
+    RpcConvert, RpcHeader, RpcNodeCoreExt, RpcReceipt, RpcTransaction, RpcTxReq,
+    helpers::{EthBlocks, EthTransactions, LoadReceipt},
+    transaction::ConvertReceiptInput,
 };
 use reth_rpc_eth_types::EthApiError;
+use serde::Serialize;
 use std::{marker::PhantomData, sync::Arc};
-use tokio_stream::StreamExt;
+use tokio_stream::{Stream, StreamExt};
 use tracing::{Instrument, trace};
 
-use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};
+use crate::{HlBlock, node::primitives::HlPrimitives};
+
+pub trait EthWrapper:
+    EthApiServer<
+        RpcTxReq<Self::NetworkTypes>,
+        RpcTransaction<Self::NetworkTypes>,
+        RpcBlock<Self::NetworkTypes>,
+        RpcReceipt<Self::NetworkTypes>,
+        RpcHeader<Self::NetworkTypes>,
+    > + FullEthApiTypes<
+        Primitives = HlPrimitives,
+        NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
+    > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+    + EthBlocks
+    + EthTransactions
+    + LoadReceipt
+    + 'static
+{
+}
+
+impl<T> EthWrapper for T where
+    T: EthApiServer<
+            RpcTxReq<Self::NetworkTypes>,
+            RpcTransaction<Self::NetworkTypes>,
+            RpcBlock<Self::NetworkTypes>,
+            RpcReceipt<Self::NetworkTypes>,
+            RpcHeader<Self::NetworkTypes>,
+        > + FullEthApiTypes<
+            Primitives = HlPrimitives,
+            NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
+        > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
+        + EthBlocks
+        + EthTransactions
+        + LoadReceipt
+        + 'static
+{
+}
+
 #[rpc(server, namespace = "eth")]
 #[async_trait]
@@ -347,8 +386,6 @@ where
                 pubsub.log_stream(filter).filter_map(|log| adjust_log::<Eth>(log, &provider)),
             )
             .await;
-        } else if kind == SubscriptionKind::NewHeads {
-            let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
         } else {
             let _ = pubsub.handle_accepted(sink, kind, params).await;
         }
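Note: the `EthWrapper` bound moved into this file above is the usual trait-alias idiom — an otherwise empty trait plus a blanket impl, so any type meeting the bound set gets the alias automatically. A minimal, self-contained sketch of the idiom with toy bounds (not the real ones from the diff):

```rust
use std::fmt::Debug;

// Empty "alias" trait bundling a set of bounds.
trait NodeLike: Debug + Clone + Send + Sync + 'static {}

// Blanket impl: every type meeting the bounds is automatically NodeLike.
impl<T> NodeLike for T where T: Debug + Clone + Send + Sync + 'static {}

// Callers now spell one bound instead of repeating the whole set.
fn describe<N: NodeLike>(node: N) {
    println!("{node:?}");
}

fn main() {
    describe("any Debug + Clone + Send + Sync + 'static type works");
}
```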
@@ -375,6 +412,23 @@ fn adjust_log<Eth: EthWrapper>(mut log: Log, provider: &Eth::Provider) -> Option
     Some(log)
 }
+
+async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
+    sink: SubscriptionSink,
+    mut stream: St,
+) -> Result<(), ErrorObject<'static>> {
+    loop {
+        tokio::select! {
+            _ = sink.closed() => break Ok(()),
+            maybe_item = stream.next() => {
+                let Some(item) = maybe_item else { break Ok(()) };
+                let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
+                    .map_err(SubscriptionSerializeError::from)?;
+                if sink.send(msg).await.is_err() { break Ok(()); }
+            }
+        }
+    }
+}
 
 pub struct HlNodeBlockFilterHttp<Eth: EthWrapper> {
     eth_api: Arc<Eth>,
     _marker: PhantomData<Eth>,
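Note: `pipe_from_stream` races `sink.closed()` against `stream.next()`, so the forwarding task exits as soon as either the subscriber disconnects or the stream ends. A runnable sketch of the same select-loop, assuming `tokio` (full features) and `tokio-stream` as dependencies, with an mpsc channel standing in for the jsonrpsee `SubscriptionSink`:

```rust
use tokio_stream::{StreamExt, wrappers::ReceiverStream};

#[tokio::main]
async fn main() {
    let (tx, rx) = tokio::sync::mpsc::channel::<u32>(8);
    let (out_tx, mut out_rx) = tokio::sync::mpsc::channel::<u32>(8);

    // Producer: stands in for the log / canonical-state stream.
    tokio::spawn(async move {
        for i in 0..3 {
            let _ = tx.send(i).await;
        }
        // Dropping `tx` ends the stream, which ends the pipe loop below.
    });

    let mut stream = ReceiverStream::new(rx);
    loop {
        tokio::select! {
            // In the real helper this arm is `sink.closed()`, firing when
            // the RPC subscriber goes away.
            _ = out_tx.closed() => break,
            maybe_item = stream.next() => {
                let Some(item) = maybe_item else { break };
                // The real helper serializes the item into a
                // SubscriptionMessage before sending.
                if out_tx.send(item).await.is_err() { break; }
            }
        }
    }
    drop(out_tx); // close the consumer side so the drain below terminates

    while let Some(v) = out_rx.recv().await {
        println!("forwarded {v}");
    }
}
```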
@@ -1,5 +1,3 @@
 pub mod call_forwarder;
 pub mod hl_node_compliance;
 pub mod tx_forwarder;
-pub mod subscribe_fixup;
-mod utils;
@@ -1,54 +0,0 @@
-use crate::addons::utils::{EthWrapper, new_headers_stream, pipe_from_stream};
-use alloy_rpc_types::pubsub::{Params, SubscriptionKind};
-use async_trait::async_trait;
-use jsonrpsee::PendingSubscriptionSink;
-use jsonrpsee_types::ErrorObject;
-use reth::tasks::TaskSpawner;
-use reth_rpc::EthPubSub;
-use reth_rpc_convert::RpcTransaction;
-use reth_rpc_eth_api::{EthApiTypes, EthPubSubApiServer};
-use std::sync::Arc;
-
-pub struct SubscribeFixup<Eth: EthWrapper> {
-    pubsub: Arc<EthPubSub<Eth>>,
-    provider: Arc<Eth::Provider>,
-    subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
-}
-
-#[async_trait]
-impl<Eth: EthWrapper> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for SubscribeFixup<Eth>
-where
-    ErrorObject<'static>: From<<Eth as EthApiTypes>::Error>,
-{
-    async fn subscribe(
-        &self,
-        pending: PendingSubscriptionSink,
-        kind: SubscriptionKind,
-        params: Option<Params>,
-    ) -> jsonrpsee::core::SubscriptionResult {
-        let sink = pending.accept().await?;
-        let (pubsub, provider) = (self.pubsub.clone(), self.provider.clone());
-        self.subscription_task_spawner.spawn(Box::pin(async move {
-            if kind == SubscriptionKind::NewHeads {
-                let _ = pipe_from_stream(sink, new_headers_stream::<Eth>(&provider)).await;
-            } else {
-                let _ = pubsub.handle_accepted(sink, kind, params).await;
-            }
-        }));
-        Ok(())
-    }
-}
-
-impl<Eth: EthWrapper> SubscribeFixup<Eth> {
-    pub fn new(
-        pubsub: Arc<EthPubSub<Eth>>,
-        provider: Arc<Eth::Provider>,
-        subscription_task_spawner: Box<dyn TaskSpawner + 'static>,
-    ) -> Self
-    where
-        Eth: EthWrapper,
-        ErrorObject<'static>: From<Eth::Error>,
-    {
-        Self { pubsub, provider, subscription_task_spawner }
-    }
-}
@@ -1,90 +0,0 @@
-use std::sync::Arc;
-
-use crate::{HlBlock, HlPrimitives};
-use alloy_primitives::U256;
-use alloy_rpc_types::Header;
-use futures::StreamExt;
-use jsonrpsee::{SubscriptionMessage, SubscriptionSink};
-use jsonrpsee_types::ErrorObject;
-use reth_primitives::SealedHeader;
-use reth_provider::{BlockReader, CanonStateSubscriptions};
-use reth_rpc::{RpcTypes, eth::pubsub::SubscriptionSerializeError};
-use reth_rpc_convert::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq};
-use reth_rpc_eth_api::{
-    EthApiServer, FullEthApiTypes, RpcNodeCoreExt,
-    helpers::{EthBlocks, EthTransactions, LoadReceipt},
-};
-use serde::Serialize;
-use tokio_stream::Stream;
-
-pub trait EthWrapper:
-    EthApiServer<
-        RpcTxReq<Self::NetworkTypes>,
-        RpcTransaction<Self::NetworkTypes>,
-        RpcBlock<Self::NetworkTypes>,
-        RpcReceipt<Self::NetworkTypes>,
-        RpcHeader<Self::NetworkTypes>,
-    > + FullEthApiTypes<
-        Primitives = HlPrimitives,
-        NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
-    > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
-    + EthBlocks
-    + EthTransactions
-    + LoadReceipt
-    + 'static
-{
-}
-
-impl<T> EthWrapper for T where
-    T: EthApiServer<
-            RpcTxReq<Self::NetworkTypes>,
-            RpcTransaction<Self::NetworkTypes>,
-            RpcBlock<Self::NetworkTypes>,
-            RpcReceipt<Self::NetworkTypes>,
-            RpcHeader<Self::NetworkTypes>,
-        > + FullEthApiTypes<
-            Primitives = HlPrimitives,
-            NetworkTypes: RpcTypes<TransactionResponse = alloy_rpc_types_eth::Transaction>,
-        > + RpcNodeCoreExt<Provider: BlockReader<Block = HlBlock>>
-        + EthBlocks
-        + EthTransactions
-        + LoadReceipt
-        + 'static
-{
-}
-
-pub(super) async fn pipe_from_stream<T: Serialize, St: Stream<Item = T> + Unpin>(
-    sink: SubscriptionSink,
-    mut stream: St,
-) -> Result<(), ErrorObject<'static>> {
-    loop {
-        tokio::select! {
-            _ = sink.closed() => break Ok(()),
-            maybe_item = stream.next() => {
-                let Some(item) = maybe_item else { break Ok(()) };
-                let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &item)
-                    .map_err(SubscriptionSerializeError::from)?;
-                if sink.send(msg).await.is_err() { break Ok(()); }
-            }
-        }
-    }
-}
-
-pub(super) fn new_headers_stream<Eth: EthWrapper>(
-    provider: &Arc<Eth::Provider>,
-) -> impl Stream<Item = Header<alloy_consensus::Header>> {
-    provider.canonical_state_stream().flat_map(|new_chain| {
-        let headers = new_chain
-            .committed()
-            .blocks_iter()
-            .map(|block| {
-                Header::from_consensus(
-                    SealedHeader::new(block.header().inner.clone(), block.hash()).into(),
-                    None,
-                    Some(U256::from(block.rlp_length())),
-                )
-            })
-            .collect::<Vec<_>>();
-        futures::stream::iter(headers)
-    })
-}
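Note: the deleted `new_headers_stream` turns each canonical-state notification (a batch of committed blocks) into a stream of individual RPC headers via `flat_map`. A minimal sketch of that batch-flattening pattern, assuming `futures` and `tokio` (full features) as dependencies and plain block numbers standing in for the converted headers:

```rust
use futures::StreamExt;

#[tokio::main]
async fn main() {
    // Each notification carries the batch of blocks committed together.
    let notifications = futures::stream::iter(vec![vec![1u64, 2], vec![3]]);

    // flat_map: stream of batches -> stream of individual items, the same
    // shape as canonical_state_stream().flat_map(...) in the deleted code.
    let mut heads = notifications.flat_map(|batch| futures::stream::iter(batch));

    while let Some(n) = heads.next().await {
        println!("new head {n}");
    }
}
```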
src/main.rs (17 lines changed)

@@ -1,16 +1,12 @@
 use std::sync::Arc;
 
 use clap::Parser;
-use reth::{
-    builder::{NodeBuilder, NodeHandle, WithLaunchContext},
-    rpc::{api::EthPubSubApiServer, eth::RpcNodeCore},
-};
+use reth::builder::{NodeBuilder, NodeHandle, WithLaunchContext};
 use reth_db::DatabaseEnv;
 use reth_hl::{
     addons::{
         call_forwarder::{self, CallForwarderApiServer},
         hl_node_compliance::install_hl_node_compliance,
-        subscribe_fixup::SubscribeFixup,
         tx_forwarder::{self, EthForwarderApiServer},
     },
     chainspec::{HlChainSpec, parser::HlChainSpecParser},
@@ -63,17 +59,6 @@ fn main() -> eyre::Result<()> {
         info!("Call/gas estimation will be forwarded to {}", upstream_rpc_url);
     }
-
-    // This is a temporary workaround to fix the issue with custom headers
-    // affects `eth_subscribe[type=newHeads]`
-    ctx.modules.replace_configured(
-        SubscribeFixup::new(
-            Arc::new(ctx.registry.eth_handlers().pubsub.clone()),
-            Arc::new(ctx.registry.eth_api().provider().clone()),
-            Box::new(ctx.node().task_executor.clone()),
-        )
-        .into_rpc(),
-    )?;
-
     if ext.hl_node_compliant {
         install_hl_node_compliance(&mut ctx)?;
         info!("hl-node compliant mode enabled");

@@ -1,48 +1,40 @@
 use alloy_consensus::Header;
-use alloy_primitives::{B256, BlockHash, Bytes, U256, b256, hex::ToHexExt};
+use alloy_primitives::{b256, hex::ToHexExt, BlockHash, B256, U256};
 use reth::{
-    api::NodeTypesWithDBAdapter,
+    api::{NodeTypes, NodeTypesWithDBAdapter},
     args::{DatabaseArgs, DatadirArgs},
     dirs::{ChainPath, DataDirPath},
 };
 use reth_chainspec::EthChainSpec;
 use reth_db::{
-    DatabaseEnv,
-    mdbx::{RO, tx::Tx},
+    mdbx::{tx::Tx, RO},
     models::CompactU256,
     static_file::iter_static_files,
     table::Decompress,
-    tables,
-};
-use reth_db_api::{
-    cursor::{DbCursorRO, DbCursorRW},
-    transaction::{DbTx, DbTxMut},
+    DatabaseEnv,
 };
 use reth_errors::ProviderResult;
-use reth_ethereum_primitives::EthereumReceipt;
 use reth_provider::{
-    DatabaseProvider, ProviderFactory, ReceiptProvider, StaticFileProviderFactory,
-    StaticFileSegment, StaticFileWriter,
     providers::{NodeTypesForProvider, StaticFileProvider},
     static_file::SegmentRangeInclusive,
+    DatabaseProvider, ProviderFactory, ReceiptProvider, StaticFileProviderFactory,
+    StaticFileSegment, StaticFileWriter,
 };
-use std::{fs::File, io::Write, path::PathBuf, sync::Arc};
+use std::{marker::PhantomData, path::PathBuf, sync::Arc};
 use tracing::{info, warn};
 
-use crate::{HlHeader, HlPrimitives, chainspec::HlChainSpec};
+use crate::{chainspec::HlChainSpec, HlHeader, HlPrimitives};
 
-pub(crate) trait HlNodeType:
-    NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>
-{
-}
-impl<N: NodeTypesForProvider<ChainSpec = HlChainSpec, Primitives = HlPrimitives>> HlNodeType for N {}
-
-pub(super) struct Migrator<N: HlNodeType> {
+pub(super) struct Migrator<N: NodeTypesForProvider> {
     data_dir: ChainPath<DataDirPath>,
     provider_factory: ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
+    _nt: PhantomData<N>,
 }
 
-impl<N: HlNodeType> Migrator<N> {
+impl<N: NodeTypesForProvider> Migrator<N>
+where
+    N: NodeTypes<ChainSpec = HlChainSpec, Primitives = HlPrimitives>,
+{
     const MIGRATION_PATH_SUFFIX: &'static str = "migration-tmp";
 
     pub fn new(
@@ -52,7 +44,7 @@ impl<N: HlNodeType> Migrator<N> {
     ) -> eyre::Result<Self> {
         let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
         let provider_factory = Self::provider_factory(chain_spec, datadir, database_args)?;
-        Ok(Self { data_dir, provider_factory })
+        Ok(Self { data_dir, provider_factory, _nt: PhantomData })
     }
 
     pub fn sf_provider(&self) -> StaticFileProvider<HlPrimitives> {
@@ -74,12 +66,9 @@ impl<N: HlNodeType> Migrator<N> {
     }
 
     fn migrate_db_inner(&self) -> eyre::Result<()> {
-        let migrated_mdbx = MigratorMdbx::<N>(self).migrate_mdbx()?;
-        let migrated_static_files = MigrateStaticFiles::<N>(self).migrate_static_files()?;
-        if migrated_mdbx || migrated_static_files {
-            info!("Database migrated successfully");
-        }
+        self.migrate_static_files()?;
+        self.migrate_mdbx()?;
+        info!("Database migrated successfully");
         Ok(())
     }
 
@@ -87,116 +76,6 @@ impl<N: HlNodeType> Migrator<N> {
         self.data_dir.data_dir().join(Self::MIGRATION_PATH_SUFFIX)
     }
-
-    fn provider_factory(
-        chain_spec: HlChainSpec,
-        datadir: DatadirArgs,
-        database_args: DatabaseArgs,
-    ) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
-        let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
-        let db_env = reth_db::init_db(data_dir.db(), database_args.database_args())?;
-        let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?;
-        let db = Arc::new(db_env);
-        Ok(ProviderFactory::new(db, Arc::new(chain_spec), static_file_provider))
-    }
-}
-
-struct MigratorMdbx<'a, N: HlNodeType>(&'a Migrator<N>);
-
-impl<'a, N: HlNodeType> MigratorMdbx<'a, N> {
-    fn migrate_mdbx(&self) -> eyre::Result<bool> {
-        // if any header is in old format, we need to migrate it, so we pick the first and last one
-        let db_env = self.0.provider_factory.provider()?;
-        let mut cursor = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
-
-        let migration_needed = {
-            let first_is_old = match cursor.first()? {
-                Some((number, header)) => using_old_header(number, &header),
-                None => false,
-            };
-            let last_is_old = match cursor.last()? {
-                Some((number, header)) => using_old_header(number, &header),
-                None => false,
-            };
-            first_is_old || last_is_old
-        };
-
-        if !migration_needed {
-            return Ok(false);
-        }
-
-        check_if_migration_enabled()?;
-
-        self.migrate_mdbx_inner()?;
-        Ok(true)
-    }
-
-    fn migrate_mdbx_inner(&self) -> eyre::Result<()> {
-        // There shouldn't be many headers in mdbx, but using file for safety
-        info!("Old database detected, migrating mdbx...");
-        let conversion_tmp = self.0.conversion_tmp_dir();
-        let tmp_path = conversion_tmp.join("headers.rmp");
-
-        if conversion_tmp.exists() {
-            std::fs::remove_dir_all(&conversion_tmp)?;
-        }
-        std::fs::create_dir_all(&conversion_tmp)?;
-
-        let count = self.export_old_headers(&tmp_path)?;
-        self.import_new_headers(tmp_path, count)?;
-        Ok(())
-    }
-
-    fn export_old_headers(&self, tmp_path: &PathBuf) -> Result<i32, eyre::Error> {
-        let db_env = self.0.provider_factory.provider()?;
-        let mut cursor_read = db_env.tx_ref().cursor_read::<tables::Headers<Bytes>>()?;
-        let mut tmp_writer = File::create(tmp_path)?;
-        let mut count = 0;
-        let old_headers = cursor_read.walk(None)?.filter_map(|row| {
-            let (block_number, header) = row.ok()?;
-            if !using_old_header(block_number, &header) {
-                None
-            } else {
-                Some((block_number, Header::decompress(&header).ok()?))
-            }
-        });
-        for (block_number, header) in old_headers {
-            let receipt =
-                db_env.receipts_by_block(block_number.into())?.expect("Receipt not found");
-            let new_header = to_hl_header(receipt, header);
-            tmp_writer.write_all(&rmp_serde::to_vec(&(block_number, new_header))?)?;
-            count += 1;
-        }
-        Ok(count)
-    }
-
-    fn import_new_headers(&self, tmp_path: PathBuf, count: i32) -> Result<(), eyre::Error> {
-        let mut tmp_reader = File::open(tmp_path)?;
-        let db_env = self.0.provider_factory.provider_rw()?;
-        let mut cursor_write = db_env.tx_ref().cursor_write::<tables::Headers<Bytes>>()?;
-        for _ in 0..count {
-            let (number, header) = rmp_serde::from_read::<_, (u64, HlHeader)>(&mut tmp_reader)?;
-            cursor_write.upsert(number, &rmp_serde::to_vec(&header)?.into())?;
-        }
-        db_env.commit()?;
-        Ok(())
-    }
-}
-
-fn check_if_migration_enabled() -> Result<(), eyre::Error> {
-    if std::env::var("EXPERIMENTAL_MIGRATE_DB").is_err() {
-        let err_msg = concat!(
-            "Detected an old database format but experimental database migration is currently disabled. ",
-            "To enable migration, set EXPERIMENTAL_MIGRATE_DB=1, or alternatively, resync your node (safest option)."
-        );
-        warn!("{}", err_msg);
-        return Err(eyre::eyre!("{}", err_msg));
-    }
-    Ok(())
-}
-
-struct MigrateStaticFiles<'a, N: HlNodeType>(&'a Migrator<N>);
-
-impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
     fn iterate_files_for_segment(
         &self,
         block_range: SegmentRangeInclusive,
@@ -223,8 +102,8 @@ impl<'a, N: HlNodeType> MigrateStaticFiles<'a, N> {
 
     fn create_placeholder(&self, block_range: SegmentRangeInclusive) -> eyre::Result<()> {
         // The direction is opposite here
-        let src = self.0.data_dir.static_files();
-        let dst = self.0.conversion_tmp_dir();
+        let src = self.data_dir.static_files();
+        let dst = self.conversion_tmp_dir();
 
         for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
             let dst_path = dst.join(file_name);
@@ -241,8 +120,8 @@
         &self,
         block_range: SegmentRangeInclusive,
     ) -> eyre::Result<()> {
-        let src = self.0.conversion_tmp_dir();
-        let dst = self.0.data_dir.static_files();
+        let src = self.conversion_tmp_dir();
+        let dst = self.data_dir.static_files();
 
         for (src_path, file_name) in self.iterate_files_for_segment(block_range, &src)? {
             let dst_path = dst.join(file_name);
@@ -254,9 +133,9 @@
         self.create_placeholder(block_range)
     }
 
-    fn migrate_static_files(&self) -> eyre::Result<bool> {
-        let conversion_tmp = self.0.conversion_tmp_dir();
-        let old_path = self.0.data_dir.static_files();
+    fn migrate_static_files(&self) -> eyre::Result<()> {
+        let conversion_tmp = self.conversion_tmp_dir();
+        let old_path = self.data_dir.static_files();
 
         if conversion_tmp.exists() {
             std::fs::remove_dir_all(&conversion_tmp)?;
@@ -266,12 +145,13 @@
         let mut all_static_files = iter_static_files(&old_path)?;
         let all_static_files =
             all_static_files.remove(&StaticFileSegment::Headers).unwrap_or_default();
+        let provider = self.provider_factory.provider()?;
 
         let mut first = true;
 
         for (block_range, _tx_ranges) in all_static_files {
-            let migration_needed = self.using_old_header(block_range.start())?
-                || self.using_old_header(block_range.end())?;
+            let migration_needed = self.using_old_header(block_range.start())? ||
+                self.using_old_header(block_range.end())?;
             if !migration_needed {
                 // Create a placeholder symlink
                 self.create_placeholder(block_range)?;
@@ -279,34 +159,60 @@
             }
 
             if first {
-                check_if_migration_enabled()?;
-
-                info!("Old database detected, migrating static files...");
+                info!("Old database detected, migrating database...");
                 first = false;
             }
 
-            let sf_provider = self.0.sf_provider();
+            let sf_provider = self.sf_provider();
             let sf_tmp_provider = StaticFileProvider::<HlPrimitives>::read_write(&conversion_tmp)?;
-            let provider = self.0.provider_factory.provider()?;
             let block_range_for_filename = sf_provider.find_fixed_range(block_range.start());
             migrate_single_static_file(&sf_tmp_provider, &sf_provider, &provider, block_range)?;
 
             self.move_static_files_for_segment(block_range_for_filename)?;
         }
 
-        Ok(!first)
+        Ok(())
+    }
+
+    fn provider_factory(
+        chain_spec: HlChainSpec,
+        datadir: DatadirArgs,
+        database_args: DatabaseArgs,
+    ) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
+        let data_dir = datadir.clone().resolve_datadir(chain_spec.chain());
+        let db_env = reth_db::init_db(data_dir.db(), database_args.database_args())?;
+        let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?;
+        let db = Arc::new(db_env);
+        Ok(ProviderFactory::new(db, Arc::new(chain_spec), static_file_provider))
+    }
+
+    fn migrate_mdbx(&self) -> eyre::Result<()> {
+        // Actually not much here, all of blocks should be in the static files
+        Ok(())
     }
 
     fn using_old_header(&self, number: u64) -> eyre::Result<bool> {
-        let sf_provider = self.0.sf_provider();
+        let sf_provider = self.sf_provider();
         let content = old_headers_range(&sf_provider, number..=number)?;
 
         let &[row] = &content.as_slice() else {
             warn!("No header found for block {}", number);
             return Ok(false);
         };
+        let header = &row[0];
 
-        Ok(using_old_header(number, &row[0]))
+        let deserialized_old = is_old_header(header);
+        let deserialized_new = is_new_header(header);
+
+        assert!(
+            deserialized_old ^ deserialized_new,
+            "Header is not valid: {} {}\ndeserialized_old: {}\ndeserialized_new: {}",
+            number,
+            header.encode_hex(),
+            deserialized_old,
+            deserialized_new
+        );
+        Ok(deserialized_old && !deserialized_new)
     }
 }
@@ -336,7 +242,7 @@ fn is_new_header(header: &[u8]) -> bool {
     rmp_serde::from_slice::<HlHeader>(header).is_ok()
 }
 
-fn migrate_single_static_file<N: HlNodeType>(
+fn migrate_single_static_file<N: NodeTypesForProvider<Primitives = HlPrimitives>>(
     sf_out: &StaticFileProvider<HlPrimitives>,
     sf_in: &StaticFileProvider<HlPrimitives>,
     provider: &DatabaseProvider<Tx<RO>, NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>,
@@ -344,8 +250,8 @@ fn migrate_single_static_file<N: HlNodeType>(
 ) -> Result<(), eyre::Error> {
     info!("Migrating block range {}...", block_range);
 
-    // block_ranges into chunks of 50000 blocks
-    const CHUNK_SIZE: u64 = 50000;
+    // block_ranges into chunks of 100000 blocks
+    const CHUNK_SIZE: u64 = 100000;
     for chunk in (0..=block_range.end()).step_by(CHUNK_SIZE as usize) {
         let end = std::cmp::min(chunk + CHUNK_SIZE - 1, block_range.end());
         let block_range = chunk..=end;
@@ -355,8 +261,11 @@
         let mut writer = sf_out.get_writer(*block_range.start(), StaticFileSegment::Headers)?;
         let new_headers = std::iter::zip(headers, receipts)
             .map(|(header, receipts)| {
+                let system_tx_count =
+                    receipts.iter().filter(|r| r.cumulative_gas_used == 0).count();
                 let eth_header = Header::decompress(&header[0]).unwrap();
-                let hl_header = to_hl_header(receipts, eth_header);
+                let hl_header =
+                    HlHeader::from_ethereum_header(eth_header, &receipts, system_tx_count as u64);
+
                 let difficulty: U256 = CompactU256::decompress(&header[1]).unwrap().into();
                 let hash = BlockHash::decompress(&header[2]).unwrap();
@@ -372,11 +281,6 @@
     Ok(())
 }
-
-fn to_hl_header(receipts: Vec<EthereumReceipt>, eth_header: Header) -> HlHeader {
-    let system_tx_count = receipts.iter().filter(|r| r.cumulative_gas_used == 0).count();
-    HlHeader::from_ethereum_header(eth_header, &receipts, system_tx_count as u64)
-}
 
 fn old_headers_range(
     provider: &StaticFileProvider<HlPrimitives>,
     block_range: impl std::ops::RangeBounds<u64>,
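Note: the chunking in `migrate_single_static_file` walks an inclusive block range in `CHUNK_SIZE` steps and clamps the final chunk at the range end. The arithmetic in isolation (end height here is illustrative, not from the diff):

```rust
fn main() {
    const CHUNK_SIZE: u64 = 100_000;
    let range_end: u64 = 250_123; // illustrative end height

    for chunk in (0..=range_end).step_by(CHUNK_SIZE as usize) {
        // Clamp the final chunk so it never runs past the range end.
        let end = std::cmp::min(chunk + CHUNK_SIZE - 1, range_end);
        println!("migrating blocks {chunk}..={end}");
    }
}
```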
@@ -412,18 +316,3 @@ fn to_range<R: std::ops::RangeBounds<u64>>(bounds: R) -> std::ops::Range<u64> {
 
     start..end
 }
-
-fn using_old_header(number: u64, header: &[u8]) -> bool {
-    let deserialized_old = is_old_header(header);
-    let deserialized_new = is_new_header(header);
-
-    assert!(
-        deserialized_old ^ deserialized_new,
-        "Header is not valid: {} {}\ndeserialized_old: {}\ndeserialized_new: {}",
-        number,
-        header.encode_hex(),
-        deserialized_old,
-        deserialized_new
-    );
-    deserialized_old && !deserialized_new
-}
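Note: both the `using_old_header` method and the free-function variant removed above detect the on-disk header format by attempting both deserializations and asserting that exactly one succeeds. A self-contained sketch of that probe, assuming `rmp-serde` and `serde` (with derive) as dependencies and toy stand-ins for the legacy/new header types:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct OldHeader(u64); // toy stand-in for the legacy header

#[derive(Serialize, Deserialize)]
struct NewHeader(u64, String); // toy stand-in for HlHeader

fn is_old(bytes: &[u8]) -> bool {
    rmp_serde::from_slice::<OldHeader>(bytes).is_ok()
}

fn is_new(bytes: &[u8]) -> bool {
    rmp_serde::from_slice::<NewHeader>(bytes).is_ok()
}

fn main() {
    let bytes = rmp_serde::to_vec(&OldHeader(42)).unwrap();
    let (old, new) = (is_old(&bytes), is_new(&bytes));
    // Exactly one decoding must succeed, otherwise the data is ambiguous.
    assert!(old ^ new, "header bytes decode as neither or both formats");
    println!("old format: {old}");
}
```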

@@ -81,13 +81,13 @@ impl BlockPoller {
             .await
             .ok_or(eyre::eyre!("Failed to find latest block number"))?;
 
-        loop {
-            if let Some(debug_cutoff_height) = debug_cutoff_height
-                && next_block_number > debug_cutoff_height
-            {
-                next_block_number = debug_cutoff_height;
-            }
+        if let Some(debug_cutoff_height) = debug_cutoff_height &&
+            next_block_number > debug_cutoff_height
+        {
+            next_block_number = debug_cutoff_height;
+        }
 
+        loop {
             match block_source.collect_block(next_block_number).await {
                 Ok(block) => {
                     block_tx.send((next_block_number, block)).await?;
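Note: both sides of this hunk use a let-chain (`if let ... && ...`); the substantive change is hoisting the cutoff clamp out of the polling loop so it runs once at startup instead of on every iteration. The clamp in isolation, with illustrative values, assuming a toolchain with let-chains stabilized (edition 2024):

```rust
fn main() {
    let debug_cutoff_height: Option<u64> = Some(100); // illustrative
    let mut next_block_number: u64 = 250; // illustrative

    // Runs once, before the polling loop begins.
    if let Some(cutoff) = debug_cutoff_height
        && next_block_number > cutoff
    {
        next_block_number = cutoff;
    }

    println!("starting poll at block {next_block_number}");
}
```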

@@ -1,49 +0,0 @@
-#!/bin/bash
-
-set -e
-
-export ETH_RPC_URL="${ETH_RPC_URL:-wss://hl-archive-node.xyz}"
-
-success() {
-    echo "Success: $1"
-}
-
-fail() {
-    echo "Failed: $1"
-    exit 1
-}
-
-ensure_cmd() {
-    command -v "$1" > /dev/null 2>&1 || fail "$1 is required"
-}
-
-ensure_cmd jq
-ensure_cmd cast
-ensure_cmd wscat
-
-if [[ ! "$ETH_RPC_URL" =~ ^wss?:// ]]; then
-    fail "ETH_RPC_URL must be a websocket url"
-fi
-
-TITLE="Issue #78 - eth_getLogs should return system transactions"
-cast logs \
-    --rpc-url "$ETH_RPC_URL" \
-    --from-block 15312567 \
-    --to-block 15312570 \
-    --address 0x9fdbda0a5e284c32744d2f17ee5c74b284993463 \
-    0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef \
-    | grep -q "0x00000000000000000000000020000000000000000000000000000000000000c5" \
-    && success "$TITLE" || fail "$TITLE"
-
-TITLE="Issue #78 - eth_getBlockByNumber should return the same logsBloom as official RPC"
-OFFICIAL_RPC="https://rpc.hyperliquid.xyz/evm"
-A=$(cast block 1394092 --rpc-url "$ETH_RPC_URL" -f logsBloom | md5sum)
-B=$(cast block 1394092 --rpc-url "$OFFICIAL_RPC" -f logsBloom | md5sum)
-echo node "$A"
-echo rpc\ "$B"
-[[ "$A" == "$B" ]] && success "$TITLE" || fail "$TITLE"
-
-TITLE="eth_subscribe newHeads via wscat"
-CMD='{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}'
-wscat -w 2 -c "$ETH_RPC_URL" -x "$CMD" | tail -1 | jq -r .params.result.nonce | grep 0x \
-    && success "$TITLE" || fail "$TITLE"