clean up database file after testing (#5087)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>
Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com>
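The change in this commit is mechanical but the idea is simple: instead of handing tests a bare database handle whose on-disk directory outlives the test, the test helpers now return a TempDatabase guard that deletes its directory when dropped. A minimal sketch of that drop-guard pattern, written against std only (the DirGuard name and the demo path below are illustrative, not part of this diff):

```rust
use std::{fs, io, path::PathBuf};

/// Illustrative drop guard: owns a directory and removes it when dropped,
/// mirroring the cleanup that `TempDatabase` adds for test databases below.
struct DirGuard {
    path: Option<PathBuf>,
}

impl DirGuard {
    fn create(path: PathBuf) -> io::Result<Self> {
        fs::create_dir_all(&path)?;
        Ok(Self { path: Some(path) })
    }

    /// Hand the directory back to the caller and skip cleanup,
    /// analogous to `TempDatabase::into_inner_db` in this diff.
    #[allow(dead_code)]
    fn into_path(mut self) -> PathBuf {
        self.path.take().expect("guard already consumed")
    }
}

impl Drop for DirGuard {
    fn drop(&mut self) {
        if let Some(path) = self.path.take() {
            // Best effort: ignore errors so cleanup never panics in a test.
            let _ = fs::remove_dir_all(&path);
        }
    }
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir().join("dirguard-demo");
    {
        let _guard = DirGuard::create(dir.clone())?;
        fs::write(dir.join("data.mdbx"), b"test")?;
    } // guard dropped here; the directory and its contents are removed
    assert!(!dir.exists());
    Ok(())
}
```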
@@ -1157,7 +1157,12 @@ mod tests {
use crate::block_buffer::BufferedBlocks;
use assert_matches::assert_matches;
use linked_hash_set::LinkedHashSet;
- use reth_db::{tables, test_utils::create_test_rw_db, transaction::DbTxMut, DatabaseEnv};
+ use reth_db::{
+ tables,
+ test_utils::{create_test_rw_db, TempDatabase},
+ transaction::DbTxMut,
+ DatabaseEnv,
+ };
use reth_interfaces::test_utils::TestConsensus;
use reth_primitives::{
constants::EMPTY_ROOT_HASH, stage::StageCheckpoint, ChainSpecBuilder, B256, MAINNET,

@@ -1170,7 +1175,7 @@ mod tests {

fn setup_externals(
exec_res: Vec<BundleStateWithReceipts>,
- ) -> TreeExternals<Arc<DatabaseEnv>, TestExecutorFactory> {
+ ) -> TreeExternals<Arc<TempDatabase<DatabaseEnv>>, TestExecutorFactory> {
let db = create_test_rw_db();
let consensus = Arc::new(TestConsensus::default());
let chain_spec = Arc::new(
@@ -396,7 +396,7 @@ mod tests {
use futures::poll;
use reth_db::{
mdbx::{Env, WriteMap},
- test_utils::create_test_rw_db,
+ test_utils::{create_test_rw_db, TempDatabase},
};
use reth_interfaces::{p2p::either::EitherDownloader, test_utils::TestFullBlockClient};
use reth_primitives::{

@@ -449,7 +449,7 @@ mod tests {
}

/// Builds the pipeline.
- fn build(self, chain_spec: Arc<ChainSpec>) -> Pipeline<Arc<Env<WriteMap>>> {
+ fn build(self, chain_spec: Arc<ChainSpec>) -> Pipeline<Arc<TempDatabase<Env<WriteMap>>>> {
reth_tracing::init_test_tracing();
let db = create_test_rw_db();
@@ -6,7 +6,11 @@ use crate::{
use reth_blockchain_tree::{
config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree,
};
- use reth_db::{test_utils::create_test_rw_db, DatabaseEnv};
+ use reth_db::{
+ test_utils::{create_test_rw_db, TempDatabase},
+ DatabaseEnv as DE,
+ };
+ type DatabaseEnv = TempDatabase<DE>;
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@@ -616,7 +616,7 @@ mod tests {
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=19);

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let client = Arc::new(
TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),

@@ -655,7 +655,7 @@ mod tests {
})
.collect::<HashMap<_, _>>();

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let request_limit = 10;
let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));

@@ -676,7 +676,7 @@ mod tests {
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=99);

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let stream_batch_size = 20;
let request_limit = 10;

@@ -709,7 +709,7 @@ mod tests {
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=199);

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
let mut downloader = BodiesDownloaderBuilder::default().with_stream_batch_size(100).build(
@@ -181,7 +181,7 @@ mod tests {
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=19);

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let client = Arc::new(
TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),

@@ -281,7 +281,7 @@ mod tests {
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=19);

- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

// create an empty file
let file = tempfile::tempfile().unwrap();

@@ -368,7 +368,7 @@ mod tests {
let client = Arc::new(FileClient::from_file(file).await.unwrap());

// insert headers in db for the bodies downloader
- insert_headers(&db, &headers);
+ insert_headers(db.db(), &headers);

let mut downloader = BodiesDownloaderBuilder::default().build(
client.clone(),
@@ -1444,7 +1444,7 @@ Post-merge hard forks (timestamp based):
// technically unecessary - but we include it here for thoroughness
let fork_cond_block_only_case = ChainSpec::builder()
.chain(Chain::mainnet())
- .genesis(empty_genesis.clone())
+ .genesis(empty_genesis)
.with_fork(Hardfork::Frontier, ForkCondition::Block(0))
.with_fork(Hardfork::Homestead, ForkCondition::Block(73))
.build();

@@ -1959,7 +1959,7 @@ Post-merge hard forks (timestamp based):
timestamp + 1,
),
(
- construct_chainspec(default_spec_builder.clone(), timestamp + 1, timestamp + 2),
+ construct_chainspec(default_spec_builder, timestamp + 1, timestamp + 2),
timestamp + 1,
),
];
@@ -53,7 +53,7 @@ num-traits = "0.2.15"

[dev-dependencies]
# reth
- reth-primitives = { workspace = true, features = ["arbitrary"] }
+ reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] }
reth-db = { workspace = true, features = ["test-utils", "mdbx"] }
reth-interfaces = { workspace = true, features = ["test-utils"] }
reth-downloaders = { path = "../net/downloaders" }
@@ -136,7 +136,7 @@ fn measure_stage_with_path<F, S>(
},
|_| async {
let mut stage = stage.clone();
- let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();
stage.execute(&provider, input).await.unwrap();
provider.commit().unwrap();

@@ -41,7 +41,7 @@ pub(crate) fn stage_unwind<S: Clone + Stage<DatabaseEnv>>(

tokio::runtime::Runtime::new().unwrap().block_on(async {
let mut stage = stage.clone();
- let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

// Clear previous run

@@ -69,7 +69,7 @@ pub(crate) fn unwind_hashes<S: Clone + Stage<DatabaseEnv>>(

tokio::runtime::Runtime::new().unwrap().block_on(async {
let mut stage = stage.clone();
- let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

StorageHashingStage::default().unwind(&provider, unwind).await.unwrap();
@@ -458,6 +458,7 @@ mod tests {
database::Database,
models::{StoredBlockBodyIndices, StoredBlockOmmers},
tables,
+ test_utils::TempDatabase,
transaction::{DbTx, DbTxMut},
DatabaseEnv,
};

@@ -740,7 +741,7 @@ mod tests {
/// A [BodyDownloader] that is backed by an internal [HashMap] for testing.
#[derive(Debug)]
pub(crate) struct TestBodyDownloader {
- db: Arc<DatabaseEnv>,
+ db: Arc<TempDatabase<DatabaseEnv>>,
responses: HashMap<B256, BlockBody>,
headers: VecDeque<SealedHeader>,
batch_size: u64,

@@ -748,7 +749,7 @@ mod tests {

impl TestBodyDownloader {
pub(crate) fn new(
- db: Arc<DatabaseEnv>,
+ db: Arc<TempDatabase<DatabaseEnv>>,
responses: HashMap<B256, BlockBody>,
batch_size: u64,
) -> Self {
@@ -69,7 +69,7 @@ mod tests {
#[ignore]
async fn test_prune() {
let test_tx = TestTransaction::default();
- let factory = Arc::new(ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone()));
+ let factory = Arc::new(ProviderFactory::new(test_tx.tx.db(), MAINNET.clone()));

let provider = factory.provider_rw().unwrap();
let tip = 66;
@@ -48,7 +48,7 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.tx().inner_raw(), self.stage());
tokio::spawn(async move {
- let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(db.db(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

let result = stage.execute(&provider, input).await;

@@ -74,7 +74,7 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner {
let (tx, rx) = oneshot::channel();
let (db, mut stage) = (self.tx().inner_raw(), self.stage());
tokio::spawn(async move {
- let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(db.db(), MAINNET.clone());
let provider = factory.provider_rw().unwrap();

let result = stage.unwind(&provider, input).await;
@@ -5,7 +5,7 @@ use reth_db::{
models::{AccountBeforeTx, StoredBlockBodyIndices},
table::{Table, TableRow},
tables,
- test_utils::{create_test_rw_db, create_test_rw_db_with_path},
+ test_utils::{create_test_rw_db, create_test_rw_db_with_path, TempDatabase},
transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT},
DatabaseEnv, DatabaseError as DbError,
};

@@ -33,9 +33,9 @@ use std::{
#[derive(Debug)]
pub struct TestTransaction {
/// DB
- pub tx: Arc<DatabaseEnv>,
+ pub tx: Arc<TempDatabase<DatabaseEnv>>,
pub path: Option<PathBuf>,
- pub factory: ProviderFactory<Arc<DatabaseEnv>>,
+ pub factory: ProviderFactory<Arc<TempDatabase<DatabaseEnv>>>,
}

impl Default for TestTransaction {

@@ -57,17 +57,17 @@ impl TestTransaction {
}

/// Return a database wrapped in [DatabaseProviderRW].
- pub fn inner_rw(&self) -> DatabaseProviderRW<'_, Arc<DatabaseEnv>> {
+ pub fn inner_rw(&self) -> DatabaseProviderRW<'_, Arc<TempDatabase<DatabaseEnv>>> {
self.factory.provider_rw().expect("failed to create db container")
}

/// Return a database wrapped in [DatabaseProviderRO].
- pub fn inner(&self) -> DatabaseProviderRO<'_, Arc<DatabaseEnv>> {
+ pub fn inner(&self) -> DatabaseProviderRO<'_, Arc<TempDatabase<DatabaseEnv>>> {
self.factory.provider().expect("failed to create db container")
}

/// Get a pointer to an internal database.
- pub fn inner_raw(&self) -> Arc<DatabaseEnv> {
+ pub fn inner_raw(&self) -> Arc<TempDatabase<DatabaseEnv>> {
self.tx.clone()
}
@@ -86,6 +86,7 @@ where
// Reset DB
let _ = fs::remove_dir_all(bench_db_path);
let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap();
+ let db = db.into_inner_db();

let mut unsorted_input = unsorted_input.clone();
if scenario_str == "append_all" {

@@ -72,5 +72,5 @@ where
tx.inner.commit().unwrap();
}

- db
+ db.into_inner_db()
}
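The benchmark hunks above show the escape hatch: when a caller needs the raw database at a stable path and does not want the drop-time cleanup, it unwraps the Arc and calls into_inner_db(), which takes the database out of the guard so Drop becomes a no-op. A hedged sketch of that call pattern, assuming only the helpers introduced in the test_utils hunk later in this diff; the open_bench_db name and bench_db_path parameter are illustrative:

```rust
use std::{path::Path, sync::Arc};

use reth_db::test_utils::create_test_rw_db_with_path;

/// Open a benchmark database at a fixed path and opt out of the drop-time
/// cleanup, mirroring the updated bench setup above.
fn open_bench_db(bench_db_path: &Path) -> reth_db::DatabaseEnv {
    // `create_test_rw_db_with_path` now returns Arc<TempDatabase<DatabaseEnv>>.
    let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path))
        .expect("no other Arc clones exist yet");
    // Taking the inner env disarms the guard, so the directory survives for
    // later benchmark scenarios that reuse the same path.
    db.into_inner_db()
}
```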
@@ -160,7 +160,8 @@ pub fn open_db(path: &Path, log_level: Option<LogLevel>) -> eyre::Result<Databas
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils {
use super::*;
- use std::sync::Arc;
+ use crate::database::{Database, DatabaseGAT};
+ use std::{path::PathBuf, sync::Arc};

/// Error during database open
pub const ERROR_DB_OPEN: &str = "Not able to open the database file.";

@@ -171,26 +172,74 @@ pub mod test_utils {
/// Error during tempdir creation
pub const ERROR_TEMPDIR: &str = "Not able to create a temporary directory.";

- /// Create read/write database for testing
- pub fn create_test_rw_db() -> Arc<DatabaseEnv> {
- Arc::new(
- init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), None)
- .expect(ERROR_DB_CREATION),
- )
+ /// A database will delete the db dir when dropped.
+ #[derive(Debug)]
+ pub struct TempDatabase<DB> {
+ db: Option<DB>,
+ path: PathBuf,
}

+ impl<DB> Drop for TempDatabase<DB> {
+ fn drop(&mut self) {
+ if let Some(db) = self.db.take() {
+ drop(db);
+ let _ = std::fs::remove_dir_all(&self.path);
+ }
+ }
+ }
+
+ impl<DB> TempDatabase<DB> {
+ /// returns the ref of inner db
+ pub fn db(&self) -> &DB {
+ self.db.as_ref().unwrap()
+ }
+
+ /// returns the inner db
+ pub fn into_inner_db(mut self) -> DB {
+ self.db.take().unwrap() // take out db to avoid clean path in drop fn
+ }
+ }
+
+ impl<'a, DB: Database> DatabaseGAT<'a> for TempDatabase<DB> {
+ type TX = <DB as DatabaseGAT<'a>>::TX;
+ type TXMut = <DB as DatabaseGAT<'a>>::TXMut;
+ }
+
+ impl<DB: Database> Database for TempDatabase<DB> {
+ fn tx(&self) -> Result<<Self as DatabaseGAT<'_>>::TX, DatabaseError> {
+ self.db().tx()
+ }
+
+ fn tx_mut(&self) -> Result<<Self as DatabaseGAT<'_>>::TXMut, DatabaseError> {
+ self.db().tx_mut()
+ }
+ }
+
/// Create read/write database for testing
- pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<DatabaseEnv> {
- Arc::new(init_db(path.as_ref(), None).expect(ERROR_DB_CREATION))
+ pub fn create_test_rw_db() -> Arc<TempDatabase<DatabaseEnv>> {
+ let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path();
+ let emsg = format!("{}: {:?}", ERROR_DB_CREATION, path);
+
+ let db = init_db(&path, None).expect(&emsg);
+
+ Arc::new(TempDatabase { db: Some(db), path })
}

+ /// Create read/write database for testing
+ pub fn create_test_rw_db_with_path<P: AsRef<Path>>(path: P) -> Arc<TempDatabase<DatabaseEnv>> {
+ let path = path.as_ref().to_path_buf();
+ let db = init_db(path.as_path(), None).expect(ERROR_DB_CREATION);
+ Arc::new(TempDatabase { db: Some(db), path })
+ }
+
/// Create read only database for testing
- pub fn create_test_ro_db() -> Arc<DatabaseEnvRO> {
+ pub fn create_test_ro_db() -> Arc<TempDatabase<DatabaseEnvRO>> {
let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path();
{
init_db(path.as_path(), None).expect(ERROR_DB_CREATION);
}
- Arc::new(open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN))
+ let db = open_db_read_only(path.as_path(), None).expect(ERROR_DB_OPEN);
+ Arc::new(TempDatabase { db: Some(db), path })
}
}
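For callers, the practical effect of the hunk above is that create_test_rw_db() now returns Arc<TempDatabase<DatabaseEnv>> and cleanup happens automatically when the last Arc is dropped. A hedged usage sketch, assuming only the items introduced in this diff (create_test_rw_db, TempDatabase::db, and the forwarding Database impl); the test name and the elided body are illustrative:

```rust
use reth_db::test_utils::create_test_rw_db;

#[test]
fn temp_db_is_cleaned_up_after_test() {
    // Returns Arc<TempDatabase<DatabaseEnv>> backed by a fresh temp directory.
    let db = create_test_rw_db();

    // Where an API still wants the concrete env, borrow it through the wrapper,
    // as the updated call sites above do with `db.db()` / `tx.tx.db()`.
    let _env = db.db();

    // ... open transactions through the forwarded `tx()` / `tx_mut()` and run
    // the actual test logic here ...

    // When the last Arc is dropped at the end of the test, TempDatabase::drop
    // closes the inner database and removes the temporary directory, so no
    // database files are left behind by the test run.
}
```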
@@ -851,7 +851,7 @@ mod tests {
.with_cuckoo_filter(col1.len())
.with_fmph();

- nippy.prepare_compression(data.clone()).unwrap();
+ nippy.prepare_compression(data).unwrap();
nippy.prepare_index(clone_with_result(&col1), col1.len()).unwrap();
nippy
.freeze(vec![clone_with_result(&col1), clone_with_result(&col2)], num_rows)
@@ -380,7 +380,6 @@ mod tests {
tables,
test_utils::create_test_rw_db,
transaction::DbTx,
- DatabaseEnv,
};
use reth_primitives::{
revm::compat::into_reth_acc, Address, Receipt, Receipts, StorageEntry, B256, MAINNET, U256,

@@ -399,11 +398,10 @@ mod tests {
},
CacheState, DatabaseCommit, State,
};
- use std::sync::Arc;

#[test]
fn write_to_db_account_info() {
- let db: Arc<DatabaseEnv> = create_test_rw_db();
+ let db = create_test_rw_db();
let factory = ProviderFactory::new(db, MAINNET.clone());
let provider = factory.provider_rw().unwrap();
@@ -546,7 +544,7 @@ mod tests {

#[test]
fn write_to_db_storage() {
- let db: Arc<DatabaseEnv> = create_test_rw_db();
+ let db = create_test_rw_db();
let factory = ProviderFactory::new(db, MAINNET.clone());
let provider = factory.provider_rw().unwrap();

@@ -739,7 +737,7 @@ mod tests {

#[test]
fn write_to_db_multiple_selfdestructs() {
- let db: Arc<DatabaseEnv> = create_test_rw_db();
+ let db = create_test_rw_db();
let factory = ProviderFactory::new(db, MAINNET.clone());
let provider = factory.provider_rw().unwrap();

@@ -1052,7 +1050,7 @@ mod tests {

#[test]
fn storage_change_after_selfdestruct_within_block() {
- let db: Arc<DatabaseEnv> = create_test_rw_db();
+ let db = create_test_rw_db();
let factory = ProviderFactory::new(db, MAINNET.clone());
let provider = factory.provider_rw().unwrap();
@@ -1138,7 +1138,7 @@ mod tests {
#[test]
fn account_trie_around_extension_node() {
let db = create_test_rw_db();
- let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(db.db(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();

let expected = extension_node_trie(&tx);

@@ -1164,7 +1164,7 @@ mod tests {

fn account_trie_around_extension_node_with_dbtrie() {
let db = create_test_rw_db();
- let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(db.db(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();

let expected = extension_node_trie(&tx);

@@ -1227,7 +1227,7 @@ mod tests {
#[test]
fn storage_trie_around_extension_node() {
let db = create_test_rw_db();
- let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone());
+ let factory = ProviderFactory::new(db.db(), MAINNET.clone());
let tx = factory.provider_rw().unwrap();

let hashed_address = B256::random();