Mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 10:59:55 +00:00)
chore: remove redundant suffix in ChainPath methods (#8025)
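The diff below is a mechanical rename: every `ChainPath` accessor drops its redundant `_path` suffix (`db_path()` becomes `db()`, `static_files_path()` becomes `static_files()`, `p2p_secret_path()` becomes `p2p_secret()`, and so on), and `data_dir_path()` becomes `data_dir()`, now returning a borrowed `&Path` instead of an owned `PathBuf`. A minimal std-only sketch of the renamed surface, using a hypothetical `ChainDir` stand-in rather than reth's real `ChainPath<D>` (the actual methods are in the `impl<D> ChainPath<D>` hunk further down):

use std::path::{Path, PathBuf};

// Hypothetical stand-in for reth's `ChainPath<D>`, for illustration only.
struct ChainDir(PathBuf);

impl ChainDir {
    // was `data_dir_path(&self) -> PathBuf`; now borrows instead of allocating
    fn data_dir(&self) -> &Path {
        self.0.as_path()
    }
    // was `db_path()`
    fn db(&self) -> PathBuf {
        self.data_dir().join("db")
    }
    // was `static_files_path()`
    fn static_files(&self) -> PathBuf {
        self.data_dir().join("static_files")
    }
}

fn main() {
    let dir = ChainDir(PathBuf::from("/var/lib/reth/mainnet"));
    assert!(dir.db().ends_with("mainnet/db"));
    assert!(dir.static_files().ends_with("mainnet/static_files"));
}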
@@ -108,9 +108,9 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         let db_args = self.db.database_args();
-        let static_files_path = data_dir.static_files_path();
+        let static_files_path = data_dir.static_files();

         match self.command {
             // TODO: We'll need to add this on the DB trait.
@@ -96,11 +96,10 @@ impl Command {
         });

         let db = open_db_read_only(
-            data_dir.db_path().as_path(),
+            data_dir.db().as_path(),
             db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
         )?;
-        let provider_factory =
-            Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?);
+        let provider_factory = Arc::new(ProviderFactory::new(db, chain, data_dir.static_files())?);

         {
             if !self.only_bench {
@@ -174,8 +174,8 @@ impl Command {
             ]);
         }

-        let static_files = iter_static_files(data_dir.static_files_path())?;
-        let static_file_provider = StaticFileProvider::new(data_dir.static_files_path())?;
+        let static_files = iter_static_files(data_dir.static_files())?;
+        let static_file_provider = StaticFileProvider::new(data_dir.static_files())?;

         let mut total_data_size = 0;
         let mut total_index_size = 0;
@@ -114,7 +114,7 @@ impl Command {
         let factory = ProviderFactory::new(
             db,
             self.chain.clone(),
-            self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+            self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
         )?;
         let provider = factory.provider()?;

@@ -148,7 +148,7 @@ impl Command {
     pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;

         // initialize the database
@@ -156,7 +156,7 @@ impl Command {
         let provider_factory = ProviderFactory::new(
             Arc::clone(&db),
             Arc::clone(&self.chain),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?;

         let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));
@@ -173,7 +173,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -206,17 +206,17 @@ impl Command {
         let mut config = Config::default();

         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();

         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }

         fs::create_dir_all(&db_path)?;
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;

         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
         init_genesis(provider_factory.clone())?;
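Note that the `EtlConfig::from_datadir` call in the hunk above also loses its leading `&`: `from_datadir(&data_dir.data_dir_path())` becomes `from_datadir(data_dir.data_dir())`, because `data_dir()` now hands out `&Path` directly instead of an owned `PathBuf`. A std-only sketch of that call shape, with a hypothetical `etl_dir_under` helper standing in for reth's actual `EtlConfig::from_datadir`:

use std::path::{Path, PathBuf};

// Hypothetical helper with the same shape as `EtlConfig::from_datadir`:
// it only reads the path, so borrowing `&Path` is enough.
fn etl_dir_under(datadir: &Path) -> PathBuf {
    // "etl-tmp" is an illustrative subdirectory name, not necessarily reth's
    datadir.join("etl-tmp")
}

fn main() {
    let chain_dir = PathBuf::from("/var/lib/reth/mainnet");
    // before: the accessor returned an owned PathBuf, so call sites borrowed it with `&`
    // after: the accessor returns `&Path`, so the extra borrow disappears
    let etl = etl_dir_under(chain_dir.as_path());
    assert!(etl.ends_with("mainnet/etl-tmp"));
}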
@@ -225,14 +225,14 @@ impl Command {

         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -94,7 +94,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -109,12 +109,12 @@ impl Command {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;

         // initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;
         let provider = factory.provider()?;

         // Look up merkle checkpoint
@@ -126,14 +126,14 @@ impl Command {

         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -104,7 +104,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -119,24 +119,24 @@ impl Command {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;

         // initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;
         let provider_rw = factory.provider_rw()?;

         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -101,7 +101,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -116,13 +116,13 @@ impl Command {

         // Add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;

         // Initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;

         let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

@@ -146,14 +146,14 @@ impl Command {

         // Set up network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -118,23 +118,23 @@ impl ImportCommand {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());

         let mut config: Config = load_config(config_path.clone())?;
         info!(target: "reth::cli", path = ?config_path, "Configuration loaded");

         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }

-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();

         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;

         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

@@ -85,23 +85,23 @@ impl ImportOpCommand {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());

         let mut config: Config = load_config(config_path.clone())?;
         info!(target: "reth::cli", path = ?config_path, "Configuration loaded");

         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }

-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();

         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;

         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

@@ -51,12 +51,12 @@ impl InitCommand {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(&db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");

-        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
+        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?;

         info!(target: "reth::cli", "Writing genesis block");

@@ -72,12 +72,12 @@ impl InitStateCommand {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(&db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");

-        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
+        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?;

         info!(target: "reth::cli", "Writing genesis block");

@@ -180,7 +180,7 @@ impl<Ext: clap::Args + fmt::Debug> NodeCommand<Ext> {
         let _ = node_config.install_prometheus_recorder()?;

         let data_dir = datadir.unwrap_or_chain_default(node_config.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();

         tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
         let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics());
@@ -280,14 +280,14 @@ mod tests {
             NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap();
         // always store reth.toml in the data dir, not the chain specific data dir
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let config_path = cmd.config.unwrap_or_else(|| data_dir.config_path());
+        let config_path = cmd.config.unwrap_or_else(|| data_dir.config());
         assert_eq!(config_path, Path::new("my/path/to/reth.toml"));

         let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();

         // always store reth.toml in the data dir, not the chain specific data dir
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config());
         let end = format!("reth/{}/reth.toml", SUPPORTED_CHAINS[0]);
         assert!(config_path.ends_with(end), "{:?}", cmd.config);
     }
@@ -296,14 +296,14 @@ mod tests {
     fn parse_db_path() {
         let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]);
         assert!(db_path.ends_with(end), "{:?}", cmd.config);

         let cmd =
             NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap();
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         assert_eq!(db_path, Path::new("my/custom/path/db"));
     }

@@ -105,7 +105,7 @@ impl Command {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());

         let mut config: Config = confy::load_path(&config_path).unwrap_or_default();

@@ -119,7 +119,7 @@ impl Command {

         config.peers.trusted_nodes_only = self.trusted_only;

-        let default_secret_key_path = data_dir.p2p_secret_path();
+        let default_secret_key_path = data_dir.p2p_secret();
         let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path);
         let p2p_secret_key = get_secret_key(&secret_key_path)?;

@@ -133,7 +133,7 @@ impl Command {
         let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new(
             noop_db,
             self.chain.clone(),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?));

         if self.discovery.enable_discv5_discovery {
@@ -50,11 +50,11 @@ impl Command {
     /// Execute `storage-tries` recovery command
     pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> {
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         let db = Arc::new(init_db(db_path, self.db.database_args())?);

-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;

         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
         init_genesis(factory.clone())?;
@@ -54,12 +54,12 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;

         let db = open_db(db_path.as_ref(), self.db.database_args())?;
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;
         let static_file_provider = provider_factory.static_file_provider();

         let tool = DbTool::new(provider_factory, self.chain.clone())?;
@@ -20,7 +20,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

     import_tables_with_range(&output_db, db_tool, from, to)?;

@@ -28,11 +28,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(

     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )
@@ -15,7 +15,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

     // Import relevant AccountChangeSets
     output_db.update(|tx| {
@@ -30,11 +30,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(

     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )
@@ -15,17 +15,13 @@ pub(crate) async fn dump_hashing_storage_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

     unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;

     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )
@@ -24,7 +24,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;

     output_db.update(|tx| {
         tx.import_table_with_range::<tables::Headers, _>(
@@ -46,11 +46,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(

     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )
@@ -102,11 +102,11 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;

         info!(target: "reth::cli", "Database opened");

@@ -130,23 +130,20 @@ impl Command {

         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());

         let config: Config = confy::load_path(config_path).unwrap_or_default();
         info!(target: "reth::cli", "reth {} starting stage {:?}", SHORT_VERSION, self.stage);

         // use the overridden db path if specified
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();

         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");

-        let factory = ProviderFactory::new(
-            Arc::clone(&db),
-            self.chain.clone(),
-            data_dir.static_files_path(),
-        )?;
+        let factory =
+            ProviderFactory::new(Arc::clone(&db), self.chain.clone(), data_dir.static_files())?;
         let mut provider_rw = factory.provider_rw()?;

         if let Some(listen_addr) = self.metrics {
@@ -165,9 +162,7 @@ impl Command {
         let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1);

         let etl_config = EtlConfig::new(
-            Some(
-                self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(&data_dir.data_dir_path())),
-            ),
+            Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))),
             self.etl_file_size.unwrap_or(EtlConfig::default_file_size()),
         );

@@ -188,15 +183,15 @@ impl Command {
             .network
             .p2p_secret_key
             .clone()
-            .unwrap_or_else(|| data_dir.p2p_secret_path());
+            .unwrap_or_else(|| data_dir.p2p_secret());
         let p2p_secret_key = get_secret_key(&network_secret_path)?;

-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();

         let provider_factory = Arc::new(ProviderFactory::new(
             db.clone(),
             self.chain.clone(),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?);

         let network = self
@@ -83,16 +83,16 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         if !db_path.exists() {
             eyre::bail!("Database {db_path:?} does not exist.")
         }
-        let config_path = data_dir.config_path();
+        let config_path = data_dir.config();
         let config: Config = confy::load_path(config_path).unwrap_or_default();

         let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;

         let range = self.command.unwind_range(provider_factory.clone())?;
         if *range.start() == 0 {
@@ -148,9 +148,9 @@ impl Command {
         // Even though we are not planning to download anything, we need to initialize Body and
         // Header stage with a network client
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let p2p_secret_key = get_secret_key(&network_secret_path)?;
-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();
         let network = self
             .network
             .network_config(
@@ -271,63 +271,65 @@ impl<D> ChainPath<D> {
     /// Returns the path to the reth data directory for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>`
-    pub fn data_dir_path(&self) -> PathBuf {
-        self.0.as_ref().into()
+    pub fn data_dir(&self) -> &Path {
+        self.0.as_ref()
     }

     /// Returns the path to the db directory for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/db`
-    pub fn db_path(&self) -> PathBuf {
-        self.0.join("db").into()
+    pub fn db(&self) -> PathBuf {
+        self.data_dir().join("db")
     }

     /// Returns the path to the static_files directory for this chain.
-    pub fn static_files_path(&self) -> PathBuf {
-        self.0.join("static_files").into()
+    ///
+    /// `<DIR>/<CHAIN_ID>/static_files`
+    pub fn static_files(&self) -> PathBuf {
+        self.data_dir().join("static_files")
     }

     /// Returns the path to the reth p2p secret key for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/discovery-secret`
-    pub fn p2p_secret_path(&self) -> PathBuf {
-        self.0.join("discovery-secret").into()
+    pub fn p2p_secret(&self) -> PathBuf {
+        self.data_dir().join("discovery-secret")
     }

     /// Returns the path to the known peers file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/known-peers.json`
-    pub fn known_peers_path(&self) -> PathBuf {
-        self.0.join("known-peers.json").into()
+    pub fn known_peers(&self) -> PathBuf {
+        self.data_dir().join("known-peers.json")
     }

     /// Returns the path to the blobstore directory for this chain where blobs of unfinalized
     /// transactions are stored.
     ///
     /// `<DIR>/<CHAIN_ID>/blobstore`
-    pub fn blobstore_path(&self) -> PathBuf {
-        self.0.join("blobstore").into()
+    pub fn blobstore(&self) -> PathBuf {
+        self.data_dir().join("blobstore")
     }

     /// Returns the path to the local transactions backup file
     ///
     /// `<DIR>/<CHAIN_ID>/txpool-transactions-backup.rlp`
-    pub fn txpool_transactions_path(&self) -> PathBuf {
-        self.0.join("txpool-transactions-backup.rlp").into()
+    pub fn txpool_transactions(&self) -> PathBuf {
+        self.data_dir().join("txpool-transactions-backup.rlp")
     }

     /// Returns the path to the config file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/reth.toml`
-    pub fn config_path(&self) -> PathBuf {
-        self.0.join("reth.toml").into()
+    pub fn config(&self) -> PathBuf {
+        self.data_dir().join("reth.toml")
     }

     /// Returns the path to the jwtsecret file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/jwt.hex`
-    pub fn jwt_path(&self) -> PathBuf {
-        self.0.join("jwt.hex").into()
+    pub fn jwt(&self) -> PathBuf {
+        self.data_dir().join("jwt.hex")
     }
 }

@@ -359,7 +361,7 @@ mod tests {
         let path = path.unwrap_or_chain_default(Chain::mainnet());
         assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}");

-        let db_path = path.db_path();
+        let db_path = path.db();
         assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}");

         let path = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir").unwrap();
@@ -234,7 +234,7 @@ impl NodeConfig {
     /// Get the network secret from the given data dir
     pub fn network_secret(&self, data_dir: &ChainPath<DataDirPath>) -> eyre::Result<SecretKey> {
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         debug!(target: "reth::cli", ?network_secret_path, "Loading p2p key file");
         let secret_key = get_secret_key(&network_secret_path)?;
         Ok(secret_key)
@@ -299,7 +299,7 @@ impl NodeConfig {
     ) -> eyre::Result<NetworkConfig<C>> {
         info!(target: "reth::cli", "Connecting to P2P network");
         let secret_key = self.network_secret(data_dir)?;
-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();
         Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path))
     }

@@ -100,7 +100,7 @@ where

     async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
         let data_dir = ctx.data_dir();
-        let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
+        let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?;
         let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
             .with_head_timestamp(ctx.head().timestamp)
             .kzg_settings(ctx.kzg_settings()?)
@@ -114,7 +114,7 @@ where
         let transaction_pool =
             reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config());
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();

         // spawn txpool maintenance task
         {
@@ -533,7 +533,7 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
         self.executor.spawn_critical("p2p txpool", txpool);
         self.executor.spawn_critical("p2p eth request handler", eth);

-        let default_peers_path = self.data_dir().known_peers_path();
+        let default_peers_path = self.data_dir().known_peers();
         let known_peers_file = self.config.network.persistent_peers_file(default_peers_path);
         self.executor.spawn_critical_with_graceful_shutdown_signal(
             "p2p network task",
@@ -61,7 +61,7 @@ impl LaunchContext {
     /// Loads the reth config with the configured `data_dir` and overrides settings according to the
     /// `config`.
     pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
-        let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path());
+        let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config());

         let mut toml_config = confy::load_path::<reth_config::Config>(&config_path)
             .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?;
@@ -192,7 +192,7 @@ impl<R> LaunchContextWith<Attached<WithConfigs, R>> {
     pub fn ensure_etl_datadir(mut self) -> Self {
         if self.toml_config_mut().stages.etl.dir.is_none() {
             self.toml_config_mut().stages.etl.dir =
-                Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path()))
+                Some(EtlConfig::from_datadir(self.data_dir().data_dir()))
         }

         self
@@ -273,7 +273,7 @@ impl<R> LaunchContextWith<Attached<WithConfigs, R>> {

     /// Loads the JWT secret for the engine API
     pub fn auth_jwt_secret(&self) -> eyre::Result<JwtSecret> {
-        let default_jwt_path = self.data_dir().jwt_path();
+        let default_jwt_path = self.data_dir().jwt();
         let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?;
         Ok(secret)
     }
@@ -299,7 +299,7 @@ where
         let factory = ProviderFactory::new(
             self.right().clone(),
             self.chain_spec(),
-            self.data_dir().static_files_path(),
+            self.data_dir().static_files(),
         )?
         .with_static_files_metrics();

@@ -119,7 +119,7 @@ where

     async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
         let data_dir = ctx.data_dir();
-        let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
+        let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?;

         let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
             .with_head_timestamp(ctx.head().timestamp)
@@ -139,7 +139,7 @@ where
             ctx.pool_config(),
         );
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();

         // spawn txpool maintenance task
         {
@@ -64,7 +64,7 @@ where
         let transaction_pool =
             reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config);
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();

         // spawn txpool maintenance task
         {