chore: remove redundant suffix in ChainPath methods (#8025)

Author: DaniPopes
Date: 2024-05-01 16:59:42 +02:00
Committed by: GitHub
Commit: f157ec83b6 (parent c1f5b45bbd)
30 changed files with 110 additions and 130 deletions
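
The change drops the redundant `_path` suffix from the `ChainPath<D>` accessors (`db_path()` becomes `db()`, `static_files_path()` becomes `static_files()`, and so on), and `data_dir_path()` becomes `data_dir()`, now returning `&Path` instead of an owned `PathBuf`. A minimal sketch of the new call-site shape; the import locations are assumptions for illustration, while the accessor names, `unwrap_or_chain_default`, and `Chain::mainnet()` are the ones that appear in this diff:

use std::str::FromStr;

// Assumed import locations; in the reth workspace these types live in the
// node-core `dirs` module and the primitives crate.
use reth_node_core::dirs::{DataDirPath, MaybePlatformPath};
use reth_primitives::Chain;

fn main() {
    // Resolve the chain data directory the same way the CLI commands below do.
    let data_dir = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir")
        .unwrap()
        .unwrap_or_chain_default(Chain::mainnet());

    // Renamed accessors: the `_path` suffix is gone.
    let db_path = data_dir.db();                // `<datadir>/db`
    let static_files = data_dir.static_files(); // `<datadir>/static_files`
    let config_path = data_dir.config();        // `<datadir>/reth.toml`

    // `data_dir()` now hands out `&Path` instead of `PathBuf`, so call sites such as
    // `EtlConfig::from_datadir(&data_dir.data_dir_path())` become
    // `EtlConfig::from_datadir(data_dir.data_dir())`.
    println!("{db_path:?} {static_files:?} {config_path:?}");
}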

@@ -108,9 +108,9 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         let db_args = self.db.database_args();
-        let static_files_path = data_dir.static_files_path();
+        let static_files_path = data_dir.static_files();
         match self.command {
             // TODO: We'll need to add this on the DB trait.

@@ -96,11 +96,10 @@ impl Command {
         });
         let db = open_db_read_only(
-            data_dir.db_path().as_path(),
+            data_dir.db().as_path(),
             db_args.with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)),
         )?;
-        let provider_factory =
-            Arc::new(ProviderFactory::new(db, chain, data_dir.static_files_path())?);
+        let provider_factory = Arc::new(ProviderFactory::new(db, chain, data_dir.static_files())?);
         {
             if !self.only_bench {

@@ -174,8 +174,8 @@ impl Command {
             ]);
         }
-        let static_files = iter_static_files(data_dir.static_files_path())?;
-        let static_file_provider = StaticFileProvider::new(data_dir.static_files_path())?;
+        let static_files = iter_static_files(data_dir.static_files())?;
+        let static_file_provider = StaticFileProvider::new(data_dir.static_files())?;
         let mut total_data_size = 0;
         let mut total_index_size = 0;

@@ -114,7 +114,7 @@ impl Command {
         let factory = ProviderFactory::new(
             db,
             self.chain.clone(),
-            self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+            self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
         )?;
         let provider = factory.provider()?;
@@ -148,7 +148,7 @@ impl Command {
     pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         // initialize the database
@@ -156,7 +156,7 @@ impl Command {
         let provider_factory = ProviderFactory::new(
             Arc::clone(&db),
             Arc::clone(&self.chain),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?;
         let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

@@ -173,7 +173,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -206,17 +206,17 @@ impl Command {
         let mut config = Config::default();
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }
         fs::create_dir_all(&db_path)?;
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;
         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
         init_genesis(provider_factory.clone())?;
@@ -225,14 +225,14 @@ impl Command {
         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -94,7 +94,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -109,12 +109,12 @@ impl Command {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         // initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;
         let provider = factory.provider()?;
         // Look up merkle checkpoint
@@ -126,14 +126,14 @@ impl Command {
         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -104,7 +104,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -119,24 +119,24 @@ impl Command {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         // initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;
         let provider_rw = factory.provider_rw()?;
         // Configure and build network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -101,7 +101,7 @@ impl Command {
             .build(ProviderFactory::new(
                 db,
                 self.chain.clone(),
-                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files_path(),
+                self.datadir.unwrap_or_chain_default(self.chain.chain).static_files(),
             )?)
             .start_network()
             .await?;
@@ -116,13 +116,13 @@ impl Command {
         // Add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         // Initialize the database
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;
         let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));
@@ -146,14 +146,14 @@ impl Command {
         // Set up network
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let network = self
             .build_network(
                 &config,
                 ctx.task_executor.clone(),
                 db.clone(),
                 network_secret_path,
-                data_dir.known_peers_path(),
+                data_dir.known_peers(),
             )
             .await?;

@@ -118,23 +118,23 @@ impl ImportCommand {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
         let mut config: Config = load_config(config_path.clone())?;
         info!(target: "reth::cli", path = ?config_path, "Configuration loaded");
         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;
         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

@@ -85,23 +85,23 @@ impl ImportOpCommand {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
         let mut config: Config = load_config(config_path.clone())?;
         info!(target: "reth::cli", path = ?config_path, "Configuration loaded");
         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
-            config.stages.etl.dir = Some(EtlConfig::from_datadir(&data_dir.data_dir_path()));
+            config.stages.etl.dir = Some(EtlConfig::from_datadir(data_dir.data_dir()));
         }
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
         let provider_factory =
-            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db.clone(), self.chain.clone(), data_dir.static_files())?;
         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");

@@ -51,12 +51,12 @@ impl InitCommand {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(&db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
-        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
+        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?;
         info!(target: "reth::cli", "Writing genesis block");

@@ -72,12 +72,12 @@ impl InitStateCommand {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(&db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
-        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files_path())?;
+        let provider_factory = ProviderFactory::new(db, self.chain, data_dir.static_files())?;
         info!(target: "reth::cli", "Writing genesis block");

@@ -180,7 +180,7 @@ impl<Ext: clap::Args + fmt::Debug> NodeCommand<Ext> {
         let _ = node_config.install_prometheus_recorder()?;
         let data_dir = datadir.unwrap_or_chain_default(node_config.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         tracing::info!(target: "reth::cli", path = ?db_path, "Opening database");
         let database = Arc::new(init_db(db_path.clone(), self.db.database_args())?.with_metrics());
@@ -280,14 +280,14 @@ mod tests {
             NodeCommand::try_parse_args_from(["reth", "--config", "my/path/to/reth.toml"]).unwrap();
         // always store reth.toml in the data dir, not the chain specific data dir
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let config_path = cmd.config.unwrap_or_else(|| data_dir.config_path());
+        let config_path = cmd.config.unwrap_or_else(|| data_dir.config());
         assert_eq!(config_path, Path::new("my/path/to/reth.toml"));
         let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();
         // always store reth.toml in the data dir, not the chain specific data dir
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = cmd.config.clone().unwrap_or_else(|| data_dir.config());
         let end = format!("reth/{}/reth.toml", SUPPORTED_CHAINS[0]);
         assert!(config_path.ends_with(end), "{:?}", cmd.config);
     }
@@ -296,14 +296,14 @@ mod tests {
     fn parse_db_path() {
         let cmd = NodeCommand::try_parse_args_from(["reth"]).unwrap();
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         let end = format!("reth/{}/db", SUPPORTED_CHAINS[0]);
         assert!(db_path.ends_with(end), "{:?}", cmd.config);
         let cmd =
             NodeCommand::try_parse_args_from(["reth", "--datadir", "my/custom/path"]).unwrap();
         let data_dir = cmd.datadir.unwrap_or_chain_default(cmd.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         assert_eq!(db_path, Path::new("my/custom/path/db"));
     }

@@ -105,7 +105,7 @@ impl Command {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
         let mut config: Config = confy::load_path(&config_path).unwrap_or_default();
@@ -119,7 +119,7 @@ impl Command {
         config.peers.trusted_nodes_only = self.trusted_only;
-        let default_secret_key_path = data_dir.p2p_secret_path();
+        let default_secret_key_path = data_dir.p2p_secret();
         let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path);
         let p2p_secret_key = get_secret_key(&secret_key_path)?;
@@ -133,7 +133,7 @@ impl Command {
         let mut network_config = network_config_builder.build(Arc::new(ProviderFactory::new(
             noop_db,
             self.chain.clone(),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?));
         if self.discovery.enable_discv5_discovery {

@@ -50,11 +50,11 @@ impl Command {
     /// Execute `storage-tries` recovery command
     pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> {
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
-        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files_path())?;
+        let factory = ProviderFactory::new(&db, self.chain.clone(), data_dir.static_files())?;
         debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis");
         init_genesis(factory.clone())?;

@@ -54,12 +54,12 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         fs::create_dir_all(&db_path)?;
         let db = open_db(db_path.as_ref(), self.db.database_args())?;
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;
         let static_file_provider = provider_factory.static_file_provider();
         let tool = DbTool::new(provider_factory, self.chain.clone())?;

@@ -20,7 +20,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
     import_tables_with_range(&output_db, db_tool, from, to)?;
@@ -28,11 +28,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(
     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )

@@ -15,7 +15,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
     // Import relevant AccountChangeSets
     output_db.update(|tx| {
@@ -30,11 +30,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(
     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )

@@ -15,17 +15,13 @@ pub(crate) async fn dump_hashing_storage_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
     unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;
     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )

@@ -24,7 +24,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
     output_datadir: ChainPath<DataDirPath>,
     should_run: bool,
 ) -> Result<()> {
-    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db_path(), db_tool)?;
+    let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?;
     output_db.update(|tx| {
         tx.import_table_with_range::<tables::Headers, _>(
@@ -46,11 +46,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
     if should_run {
         dry_run(
-            ProviderFactory::new(
-                output_db,
-                db_tool.chain.clone(),
-                output_datadir.static_files_path(),
-            )?,
+            ProviderFactory::new(output_db, db_tool.chain.clone(), output_datadir.static_files())?,
             to,
             from,
         )

@@ -102,11 +102,11 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;
         info!(target: "reth::cli", "Database opened");

@@ -130,23 +130,20 @@ impl Command {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config_path());
+        let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
         let config: Config = confy::load_path(config_path).unwrap_or_default();
         info!(target: "reth::cli", "reth {} starting stage {:?}", SHORT_VERSION, self.stage);
         // use the overridden db path if specified
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         info!(target: "reth::cli", path = ?db_path, "Opening database");
         let db = Arc::new(init_db(db_path, self.db.database_args())?);
         info!(target: "reth::cli", "Database opened");
-        let factory = ProviderFactory::new(
-            Arc::clone(&db),
-            self.chain.clone(),
-            data_dir.static_files_path(),
-        )?;
+        let factory =
+            ProviderFactory::new(Arc::clone(&db), self.chain.clone(), data_dir.static_files())?;
         let mut provider_rw = factory.provider_rw()?;
         if let Some(listen_addr) = self.metrics {
@@ -165,9 +162,7 @@ impl Command {
         let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1);
         let etl_config = EtlConfig::new(
-            Some(
-                self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(&data_dir.data_dir_path())),
-            ),
+            Some(self.etl_dir.unwrap_or_else(|| EtlConfig::from_datadir(data_dir.data_dir()))),
            self.etl_file_size.unwrap_or(EtlConfig::default_file_size()),
         );
@@ -188,15 +183,15 @@ impl Command {
             .network
             .p2p_secret_key
             .clone()
-            .unwrap_or_else(|| data_dir.p2p_secret_path());
+            .unwrap_or_else(|| data_dir.p2p_secret());
         let p2p_secret_key = get_secret_key(&network_secret_path)?;
-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();
         let provider_factory = Arc::new(ProviderFactory::new(
             db.clone(),
             self.chain.clone(),
-            data_dir.static_files_path(),
+            data_dir.static_files(),
         )?);
         let network = self

@@ -83,16 +83,16 @@ impl Command {
     pub async fn execute(self) -> eyre::Result<()> {
         // add network name to data dir
         let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain);
-        let db_path = data_dir.db_path();
+        let db_path = data_dir.db();
         if !db_path.exists() {
             eyre::bail!("Database {db_path:?} does not exist.")
         }
-        let config_path = data_dir.config_path();
+        let config_path = data_dir.config();
         let config: Config = confy::load_path(config_path).unwrap_or_default();
         let db = Arc::new(open_db(db_path.as_ref(), self.db.database_args())?);
         let provider_factory =
-            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files_path())?;
+            ProviderFactory::new(db, self.chain.clone(), data_dir.static_files())?;
         let range = self.command.unwind_range(provider_factory.clone())?;
         if *range.start() == 0 {
@@ -148,9 +148,9 @@ impl Command {
         // Even though we are not planning to download anything, we need to initialize Body and
         // Header stage with a network client
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         let p2p_secret_key = get_secret_key(&network_secret_path)?;
-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();
         let network = self
             .network
             .network_config(

@@ -271,63 +271,65 @@ impl<D> ChainPath<D> {
     /// Returns the path to the reth data directory for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>`
-    pub fn data_dir_path(&self) -> PathBuf {
-        self.0.as_ref().into()
+    pub fn data_dir(&self) -> &Path {
+        self.0.as_ref()
     }
     /// Returns the path to the db directory for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/db`
-    pub fn db_path(&self) -> PathBuf {
-        self.0.join("db").into()
+    pub fn db(&self) -> PathBuf {
+        self.data_dir().join("db")
     }
     /// Returns the path to the static_files directory for this chain.
-    pub fn static_files_path(&self) -> PathBuf {
-        self.0.join("static_files").into()
+    ///
+    /// `<DIR>/<CHAIN_ID>/static_files`
+    pub fn static_files(&self) -> PathBuf {
+        self.data_dir().join("static_files")
     }
     /// Returns the path to the reth p2p secret key for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/discovery-secret`
-    pub fn p2p_secret_path(&self) -> PathBuf {
-        self.0.join("discovery-secret").into()
+    pub fn p2p_secret(&self) -> PathBuf {
+        self.data_dir().join("discovery-secret")
     }
     /// Returns the path to the known peers file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/known-peers.json`
-    pub fn known_peers_path(&self) -> PathBuf {
-        self.0.join("known-peers.json").into()
+    pub fn known_peers(&self) -> PathBuf {
+        self.data_dir().join("known-peers.json")
    }
     /// Returns the path to the blobstore directory for this chain where blobs of unfinalized
     /// transactions are stored.
     ///
     /// `<DIR>/<CHAIN_ID>/blobstore`
-    pub fn blobstore_path(&self) -> PathBuf {
-        self.0.join("blobstore").into()
+    pub fn blobstore(&self) -> PathBuf {
+        self.data_dir().join("blobstore")
     }
     /// Returns the path to the local transactions backup file
     ///
     /// `<DIR>/<CHAIN_ID>/txpool-transactions-backup.rlp`
-    pub fn txpool_transactions_path(&self) -> PathBuf {
-        self.0.join("txpool-transactions-backup.rlp").into()
+    pub fn txpool_transactions(&self) -> PathBuf {
+        self.data_dir().join("txpool-transactions-backup.rlp")
     }
     /// Returns the path to the config file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/reth.toml`
-    pub fn config_path(&self) -> PathBuf {
-        self.0.join("reth.toml").into()
+    pub fn config(&self) -> PathBuf {
+        self.data_dir().join("reth.toml")
     }
     /// Returns the path to the jwtsecret file for this chain.
     ///
     /// `<DIR>/<CHAIN_ID>/jwt.hex`
-    pub fn jwt_path(&self) -> PathBuf {
-        self.0.join("jwt.hex").into()
+    pub fn jwt(&self) -> PathBuf {
+        self.data_dir().join("jwt.hex")
     }
 }
@@ -359,7 +361,7 @@ mod tests {
         let path = path.unwrap_or_chain_default(Chain::mainnet());
         assert!(path.as_ref().ends_with("reth/mainnet"), "{path:?}");
-        let db_path = path.db_path();
+        let db_path = path.db();
         assert!(db_path.ends_with("reth/mainnet/db"), "{db_path:?}");
         let path = MaybePlatformPath::<DataDirPath>::from_str("my/path/to/datadir").unwrap();
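
The refactor anchors every accessor on `data_dir()`, which now returns a borrowed `&Path`, and builds the concrete paths from it with `join`. A stripped-down sketch of that pattern, using an illustrative stand-in type rather than the real `ChainPath`:

use std::path::{Path, PathBuf};

// Illustrative stand-in for a chain-scoped data directory such as `ChainPath`.
struct DataDir(PathBuf);

impl DataDir {
    /// `<DIR>/<CHAIN_ID>`: borrow the root rather than cloning it.
    fn data_dir(&self) -> &Path {
        &self.0
    }

    /// `<DIR>/<CHAIN_ID>/db`
    fn db(&self) -> PathBuf {
        self.data_dir().join("db")
    }

    /// `<DIR>/<CHAIN_ID>/reth.toml`
    fn config(&self) -> PathBuf {
        self.data_dir().join("reth.toml")
    }
}

fn main() {
    let dir = DataDir(PathBuf::from("/data/reth/mainnet"));
    assert_eq!(dir.db(), Path::new("/data/reth/mainnet/db"));
    assert_eq!(dir.config(), Path::new("/data/reth/mainnet/reth.toml"));
}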

@@ -234,7 +234,7 @@ impl NodeConfig {
     /// Get the network secret from the given data dir
     pub fn network_secret(&self, data_dir: &ChainPath<DataDirPath>) -> eyre::Result<SecretKey> {
         let network_secret_path =
-            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret_path());
+            self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret());
         debug!(target: "reth::cli", ?network_secret_path, "Loading p2p key file");
         let secret_key = get_secret_key(&network_secret_path)?;
         Ok(secret_key)
@@ -299,7 +299,7 @@ impl NodeConfig {
     ) -> eyre::Result<NetworkConfig<C>> {
         info!(target: "reth::cli", "Connecting to P2P network");
         let secret_key = self.network_secret(data_dir)?;
-        let default_peers_path = data_dir.known_peers_path();
+        let default_peers_path = data_dir.known_peers();
         Ok(self.load_network_config(config, client, executor, head, secret_key, default_peers_path))
     }

@@ -100,7 +100,7 @@ where
     async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
         let data_dir = ctx.data_dir();
-        let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
+        let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?;
         let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
             .with_head_timestamp(ctx.head().timestamp)
             .kzg_settings(ctx.kzg_settings()?)
@@ -114,7 +114,7 @@ where
         let transaction_pool =
             reth_transaction_pool::Pool::eth_pool(validator, blob_store, ctx.pool_config());
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();
         // spawn txpool maintenance task
         {

@@ -533,7 +533,7 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
         self.executor.spawn_critical("p2p txpool", txpool);
         self.executor.spawn_critical("p2p eth request handler", eth);
-        let default_peers_path = self.data_dir().known_peers_path();
+        let default_peers_path = self.data_dir().known_peers();
         let known_peers_file = self.config.network.persistent_peers_file(default_peers_path);
         self.executor.spawn_critical_with_graceful_shutdown_signal(
             "p2p network task",

@@ -61,7 +61,7 @@ impl LaunchContext {
     /// Loads the reth config with the configured `data_dir` and overrides settings according to the
     /// `config`.
     pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
-        let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config_path());
+        let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config());
         let mut toml_config = confy::load_path::<reth_config::Config>(&config_path)
             .wrap_err_with(|| format!("Could not load config file {config_path:?}"))?;
@@ -192,7 +192,7 @@ impl<R> LaunchContextWith<Attached<WithConfigs, R>> {
     pub fn ensure_etl_datadir(mut self) -> Self {
         if self.toml_config_mut().stages.etl.dir.is_none() {
             self.toml_config_mut().stages.etl.dir =
-                Some(EtlConfig::from_datadir(&self.data_dir().data_dir_path()))
+                Some(EtlConfig::from_datadir(self.data_dir().data_dir()))
         }
         self
@@ -273,7 +273,7 @@ impl<R> LaunchContextWith<Attached<WithConfigs, R>> {
     /// Loads the JWT secret for the engine API
     pub fn auth_jwt_secret(&self) -> eyre::Result<JwtSecret> {
-        let default_jwt_path = self.data_dir().jwt_path();
+        let default_jwt_path = self.data_dir().jwt();
         let secret = self.node_config().rpc.auth_jwt_secret(default_jwt_path)?;
         Ok(secret)
     }
@@ -299,7 +299,7 @@ where
         let factory = ProviderFactory::new(
             self.right().clone(),
             self.chain_spec(),
-            self.data_dir().static_files_path(),
+            self.data_dir().static_files(),
         )?
         .with_static_files_metrics();

@@ -119,7 +119,7 @@ where
     async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
         let data_dir = ctx.data_dir();
-        let blob_store = DiskFileBlobStore::open(data_dir.blobstore_path(), Default::default())?;
+        let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?;
         let validator = TransactionValidationTaskExecutor::eth_builder(ctx.chain_spec())
             .with_head_timestamp(ctx.head().timestamp)
@@ -139,7 +139,7 @@ where
             ctx.pool_config(),
         );
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();
         // spawn txpool maintenance task
         {

@@ -64,7 +64,7 @@ where
         let transaction_pool =
             reth_transaction_pool::Pool::eth_pool(validator, blob_store, self.pool_config);
         info!(target: "reth::cli", "Transaction pool initialized");
-        let transactions_path = data_dir.txpool_transactions_path();
+        let transactions_path = data_dir.txpool_transactions();
         // spawn txpool maintenance task
         {