chore: remove unused async (#9299)

joshieDo, 2024-07-04 13:10:27 +02:00, committed by GitHub
parent afe86895ff
commit af280b98f7
14 changed files with 45 additions and 53 deletions
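
Every hunk below follows the same shape: an `async fn` whose body never reaches an `.await` point becomes a plain `fn`, and each call site drops its `.await`. A minimal sketch of that before/after, with illustrative names that are not from this commit and tokio assumed only for the runtime in `main`:

use std::num::ParseIntError;

// Before: declared `async` although the body never awaits, so every caller
// builds and polls a future for nothing.
async fn parse_port(raw: &str) -> Result<u16, ParseIntError> {
    raw.trim().parse()
}

// After: the same body as a plain fn; `?` applies directly to the Result.
fn parse_port_sync(raw: &str) -> Result<u16, ParseIntError> {
    raw.trim().parse()
}

#[tokio::main]
async fn main() -> Result<(), ParseIntError> {
    let old = parse_port("8545").await?; // old call site
    let new = parse_port_sync("8545")?;  // new call site: no `.await`
    assert_eq!(old, new);
    Ok(())
}

Return types are untouched, which is why the diff below is almost entirely signature and call-site edits.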

View File

@@ -39,7 +39,7 @@ impl InnerTransport {
         jwt: JwtSecret,
     ) -> Result<(Self, Claims), AuthenticatedTransportError> {
         match url.scheme() {
-            "http" | "https" => Self::connect_http(url, jwt).await,
+            "http" | "https" => Self::connect_http(url, jwt),
             "ws" | "wss" => Self::connect_ws(url, jwt).await,
             "file" => Ok((Self::connect_ipc(url).await?, Claims::default())),
             _ => Err(AuthenticatedTransportError::BadScheme(url.scheme().to_string())),
@@ -48,7 +48,7 @@ impl InnerTransport {
     /// Connects to an HTTP [`alloy_transport_http::Http`] transport. Returns an [`InnerTransport`]
     /// and the [Claims] generated from the jwt.
-    async fn connect_http(
+    fn connect_http(
         url: Url,
         jwt: JwtSecret,
     ) -> Result<(Self, Claims), AuthenticatedTransportError> {

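Only the HTTP arm changes in the hunk above: building the HTTP transport needs no I/O up front, while the ws and ipc arms still await a connection. A hedged sketch of that shape, sync and async arms coexisting inside one `async fn` (types simplified, not the real transport API):

#[derive(Debug, PartialEq)]
enum Transport {
    Http,
    Ws,
}

// No await point: only constructs a client value.
fn connect_http() -> Result<Transport, String> {
    Ok(Transport::Http)
}

// Stays async: a real implementation would await the handshake here.
async fn connect_ws() -> Result<Transport, String> {
    Ok(Transport::Ws)
}

async fn connect(scheme: &str) -> Result<Transport, String> {
    match scheme {
        "http" | "https" => connect_http(), // sync arm, no `.await`
        "ws" | "wss" => connect_ws().await, // async arm, still awaited
        other => Err(format!("bad scheme: {other}")),
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(connect("https").await, Ok(Transport::Http));
    assert_eq!(connect("ws").await, Ok(Transport::Ws));
}
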
View File

@@ -96,8 +96,7 @@ impl ImportCommand {
             Arc::new(file_client),
             StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
             self.no_state,
-        )
-        .await?;
+        )?;
         // override the tip
         pipeline.set_tip(tip);
@@ -153,7 +152,7 @@ impl ImportCommand {
 ///
 /// If configured to execute, all stages will run. Otherwise, only stages that don't require state
 /// will run.
-pub async fn build_import_pipeline<DB, C>(
+pub fn build_import_pipeline<DB, C>(
     config: &Config,
     provider_factory: ProviderFactory<DB>,
     consensus: &Arc<C>,

View File

@@ -21,7 +21,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(
     import_tables_with_range(&output_db, db_tool, from, to)?;
-    unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;
     if should_run {
         dry_run(
@@ -32,8 +32,7 @@ pub(crate) async fn dump_execution_stage<DB: Database>(
             ),
             to,
             from,
-        )
-        .await?;
+        )?;
     }
     Ok(())
@@ -120,7 +119,7 @@ fn import_tables_with_range<DB: Database>(
 /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and
 /// `PlainAccountState` safely. There might be some state dependency from an address
 /// which hasn't been changed in the given range.
-async fn unwind_and_copy<DB: Database>(
+fn unwind_and_copy<DB: Database>(
     db_tool: &DbTool<DB>,
     from: u64,
     tip_block_number: u64,
@@ -151,7 +150,7 @@ async fn unwind_and_copy<DB: Database>(
 }
 /// Try to re-execute the stage without committing
-async fn dry_run<DB: Database>(
+fn dry_run<DB: Database>(
     output_provider_factory: ProviderFactory<DB>,
     to: u64,
     from: u64,

View File

@@ -38,8 +38,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(
             ),
             to,
             from,
-        )
-        .await?;
+        )?;
     }
     Ok(())
@@ -71,7 +70,7 @@ fn unwind_and_copy<DB: Database>(
 }
 /// Try to re-execute the stage straight away
-async fn dry_run<DB: Database>(
+fn dry_run<DB: Database>(
     output_provider_factory: ProviderFactory<DB>,
     to: u64,
     from: u64,

View File

@@ -28,8 +28,7 @@ pub(crate) async fn dump_hashing_storage_stage<DB: Database>(
             ),
             to,
             from,
-        )
-        .await?;
+        )?;
     }
     Ok(())
@@ -66,7 +65,7 @@ fn unwind_and_copy<DB: Database>(
 }
 /// Try to re-execute the stage straight away
-async fn dry_run<DB: Database>(
+fn dry_run<DB: Database>(
     output_provider_factory: ProviderFactory<DB>,
     to: u64,
     from: u64,

View File

@@ -44,7 +44,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
         )
     })??;
-    unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db).await?;
+    unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db)?;
     if should_run {
         dry_run(
@@ -55,15 +55,14 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
             ),
             to,
             from,
-        )
-        .await?;
+        )?;
     }
     Ok(())
 }
 /// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
-async fn unwind_and_copy<DB: Database>(
+fn unwind_and_copy<DB: Database>(
     db_tool: &DbTool<DB>,
     range: (u64, u64),
     tip_block_number: u64,
@@ -143,7 +142,7 @@ async fn unwind_and_copy<DB: Database>(
 }
 /// Try to re-execute the stage straight away
-async fn dry_run<DB: Database>(
+fn dry_run<DB: Database>(
     output_provider_factory: ProviderFactory<DB>,
     to: u64,
     from: u64,

View File

@@ -78,7 +78,7 @@ impl Command {
         }
         // This will build an offline-only pipeline if the `offline` flag is enabled
-        let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?;
+        let mut pipeline = self.build_pipeline(config, provider_factory)?;
         // Move all applicable data from database to static files.
         pipeline.move_to_static_files()?;
@@ -108,7 +108,7 @@ impl Command {
         Ok(())
     }
-    async fn build_pipeline<DB: Database + 'static>(
+    fn build_pipeline<DB: Database + 'static>(
         self,
         config: Config,
         provider_factory: ProviderFactory<Arc<DB>>,

View File

@@ -87,7 +87,7 @@ where
     /// Sends a transition configuration exchange message to the beacon consensus engine.
     ///
     /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/paris.md#engine_exchangetransitionconfigurationv1>
-    pub async fn transition_configuration_exchanged(&self) {
+    pub fn transition_configuration_exchanged(&self) {
         let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged);
     }

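The method above became sync for a concrete reason: the handle only pushes a message onto the engine's channel, and sending on an unbounded tokio channel never blocks or awaits. A small sketch under that assumption; the real message and channel types live in reth's beacon consensus crates:

use tokio::sync::mpsc;

enum EngineMessage {
    TransitionConfigurationExchanged,
}

struct EngineHandle {
    to_engine: mpsc::UnboundedSender<EngineMessage>,
}

impl EngineHandle {
    // `UnboundedSender::send` is synchronous and only fails if the receiver
    // is gone, so the result can be ignored just as in the diff above.
    fn transition_configuration_exchanged(&self) {
        let _ = self.to_engine.send(EngineMessage::TransitionConfigurationExchanged);
    }
}

fn main() {
    // Creating and using an unbounded channel requires no async runtime.
    let (tx, mut rx) = mpsc::unbounded_channel();
    let handle = EngineHandle { to_engine: tx };
    handle.transition_configuration_exchanged();
    assert!(rx.try_recv().is_ok());
}
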
View File

@@ -71,11 +71,11 @@ impl LaunchContext {
     /// `config`.
     ///
     /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context.
-    pub async fn with_loaded_toml_config(
+    pub fn with_loaded_toml_config(
         self,
         config: NodeConfig,
     ) -> eyre::Result<LaunchContextWith<WithConfigs>> {
-        let toml_config = self.load_toml_config(&config).await?;
+        let toml_config = self.load_toml_config(&config)?;
         Ok(self.with(WithConfigs { config, toml_config }))
     }
@@ -83,7 +83,7 @@ impl LaunchContext {
     /// `config`.
     ///
     /// This is async because the trusted peers may have to be resolved.
-    pub async fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
+    pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
         let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config());
         let mut toml_config = confy::load_path::<reth_config::Config>(&config_path)
@@ -518,7 +518,7 @@ where
     }
     /// Creates a `BlockchainProvider` and attaches it to the launch context.
-    pub async fn with_blockchain_db<T>(
+    pub fn with_blockchain_db<T>(
         self,
     ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs, WithMeteredProviders<DB, T>>>>
     where

View File

@@ -104,7 +104,7 @@ where
         let ctx = ctx
             .with_configured_globals()
             // load the toml config
-            .with_loaded_toml_config(config).await?
+            .with_loaded_toml_config(config)?
             // add resolved peers
             .with_resolved_peers().await?
             // attach the database
@@ -127,7 +127,7 @@ where
             .with_metrics()
             // passing FullNodeTypes as type parameter here so that we can build
             // later the components.
-            .with_blockchain_db::<T>().await?
+            .with_blockchain_db::<T>()?
             .with_components(components_builder, on_component_initialized).await?;
         // spawn exexs
@@ -201,8 +201,7 @@ where
                 static_file_producer,
                 ctx.components().block_executor().clone(),
                 pipeline_exex_handle,
-            )
-            .await?;
+            )?;
             let pipeline_events = pipeline.events();
             task.set_pipeline_events(pipeline_events);
@@ -223,8 +222,7 @@ where
                 static_file_producer,
                 ctx.components().block_executor().clone(),
                 pipeline_exex_handle,
-            )
-            .await?;
+            )?;
             (pipeline, Either::Right(network_client.clone()))
         };

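In the launcher the builder chain keeps its shape; steps that became sync simply lose their `.await`, while genuinely async steps such as peer resolution keep it. A simplified sketch of a chain mixing both kinds of step (the builder type here is hypothetical, not the real `LaunchContext`):

struct Launch {
    steps: Vec<&'static str>,
}

impl Launch {
    // Parsing a local config file has no await point.
    fn with_loaded_toml_config(mut self) -> Result<Self, String> {
        self.steps.push("toml");
        Ok(self)
    }

    // DNS resolution is real I/O, so this step stays async.
    async fn with_resolved_peers(mut self) -> Result<Self, String> {
        self.steps.push("peers");
        Ok(self)
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let launch = Launch { steps: Vec::new() }
        .with_loaded_toml_config()? // sync step: just `?`
        .with_resolved_peers()
        .await?; // async step: `.await?`
    assert_eq!(launch.steps, ["toml", "peers"]);
    Ok(())
}
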
View File

@@ -24,7 +24,7 @@ use tokio::sync::watch;
 /// Constructs a [Pipeline] that's wired to the network
 #[allow(clippy::too_many_arguments)]
-pub async fn build_networked_pipeline<DB, Client, Executor>(
+pub fn build_networked_pipeline<DB, Client, Executor>(
     config: &StageConfig,
     client: Client,
     consensus: Arc<dyn Consensus>,
@@ -63,15 +63,14 @@ where
         static_file_producer,
         executor,
         exex_manager_handle,
-    )
-    .await?;
+    )?;
     Ok(pipeline)
 }
 /// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders.
 #[allow(clippy::too_many_arguments)]
-pub async fn build_pipeline<DB, H, B, Executor>(
+pub fn build_pipeline<DB, H, B, Executor>(
     provider_factory: ProviderFactory<DB>,
     stage_config: &StageConfig,
     header_downloader: H,

View File

@@ -1699,7 +1699,7 @@ enum WsHttpServers {
 impl WsHttpServers {
     /// Starts the servers and returns the handles (http, ws)
-    async fn start(
+    fn start(
         self,
         http_module: Option<RpcModule<()>>,
         ws_module: Option<RpcModule<()>>,
@@ -1796,7 +1796,7 @@ impl RpcServer {
             jwt_secret: None,
         };
-        let (http, ws) = ws_http.server.start(http, ws, &config).await?;
+        let (http, ws) = ws_http.server.start(http, ws, &config)?;
         handle.http = http;
         handle.ws = ws;

View File

@@ -83,7 +83,7 @@ where
     }
     /// Fetches the client version.
-    async fn get_client_version_v1(
+    fn get_client_version_v1(
         &self,
         _client: ClientVersionV1,
     ) -> EngineApiResult<Vec<ClientVersionV1>> {
@@ -444,7 +444,7 @@ where
     /// Called to verify network configuration parameters and ensure that Consensus and Execution
     /// layers are using the latest configuration.
-    pub async fn exchange_transition_configuration(
+    pub fn exchange_transition_configuration(
         &self,
         config: TransitionConfiguration,
     ) -> EngineApiResult<TransitionConfiguration> {
@@ -469,7 +469,7 @@ where
             })
         }
-        self.inner.beacon_consensus.transition_configuration_exchanged().await;
+        self.inner.beacon_consensus.transition_configuration_exchanged();
         // Short circuit if communicated block hash is zero
         if terminal_block_hash.is_zero() {
@@ -801,7 +801,7 @@ where
     ) -> RpcResult<TransitionConfiguration> {
         trace!(target: "rpc::engine", "Serving engine_exchangeTransitionConfigurationV1");
         let start = Instant::now();
-        let res = Self::exchange_transition_configuration(self, config).await;
+        let res = Self::exchange_transition_configuration(self, config);
         self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed());
         Ok(res?)
     }
@@ -814,7 +814,7 @@ where
         client: ClientVersionV1,
     ) -> RpcResult<Vec<ClientVersionV1>> {
         trace!(target: "rpc::engine", "Serving engine_getClientVersionV1");
-        let res = Self::get_client_version_v1(self, client).await;
+        let res = Self::get_client_version_v1(self, client);
         Ok(res?)
     }
@@ -889,7 +889,7 @@ mod tests {
             commit: "defa64b2".to_string(),
         };
         let (_, api) = setup_engine_api();
-        let res = api.get_client_version_v1(client.clone()).await;
+        let res = api.get_client_version_v1(client.clone());
         assert_eq!(res.unwrap(), vec![client]);
     }
@@ -1045,7 +1045,7 @@ mod tests {
             ..Default::default()
         };
-        let res = api.exchange_transition_configuration(transition_config).await;
+        let res = api.exchange_transition_configuration(transition_config);
         assert_matches!(
             res,
@@ -1077,7 +1077,7 @@ mod tests {
         };
         // Unknown block number
-        let res = api.exchange_transition_configuration(transition_config).await;
+        let res = api.exchange_transition_configuration(transition_config);
         assert_matches!(
             res,
@@ -1091,7 +1091,7 @@ mod tests {
             execution_terminal_block.clone().unseal(),
         );
-        let res = api.exchange_transition_configuration(transition_config).await;
+        let res = api.exchange_transition_configuration(transition_config);
         assert_matches!(
             res,
@@ -1120,7 +1120,7 @@ mod tests {
         handle.provider.add_block(terminal_block.hash(), terminal_block.unseal());
-        let config = api.exchange_transition_configuration(transition_config).await.unwrap();
+        let config = api.exchange_transition_configuration(transition_config).unwrap();
         assert_eq!(config, transition_config);
     }
 }

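The engine RPC handlers above stay `async`, presumably because the server trait they implement is async; only the inner call loses its `.await`, and the `Instant`-based latency measurement around it is unchanged. A compact sketch of that shape, with an illustrative handler and a `println!` standing in for the metrics sink:

use std::time::Instant;

// Now a plain function; before this commit it was `async` without awaiting.
fn exchange_transition_configuration(config: u64) -> Result<u64, String> {
    Ok(config)
}

// The outer handler keeps its `async` signature for the RPC framework.
async fn serve_exchange_transition_configuration(config: u64) -> Result<u64, String> {
    let start = Instant::now();
    let res = exchange_transition_configuration(config); // no `.await` anymore
    println!("engine_exchangeTransitionConfigurationV1 took {:?}", start.elapsed());
    res
}

#[tokio::main]
async fn main() {
    assert_eq!(serve_exchange_transition_configuration(7).await, Ok(7));
}
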
View File

@@ -162,7 +162,7 @@ where
             BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state());
         // get current sync status
         let mut initial_sync_status = pubsub.network.is_syncing();
-        let current_sub_res = pubsub.sync_status(initial_sync_status).await;
+        let current_sub_res = pubsub.sync_status(initial_sync_status);
         // send the current status immediately
         let msg = SubscriptionMessage::from_json(&current_sub_res)
@@ -179,7 +179,7 @@ where
                     initial_sync_status = current_syncing;
                     // send a new message now that the status changed
-                    let sync_status = pubsub.sync_status(current_syncing).await;
+                    let sync_status = pubsub.sync_status(current_syncing);
                     let msg = SubscriptionMessage::from_json(&sync_status)
                         .map_err(SubscriptionSerializeError::new)?;
                     if accepted_sink.send(msg).await.is_err() {
@@ -270,7 +270,7 @@ where
     Provider: BlockReader + 'static,
 {
     /// Returns the current sync status for the `syncing` subscription
-    async fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult {
+    fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult {
         if is_syncing {
             let current_block =
                 self.provider.chain_info().map(|info| info.best_number).unwrap_or_default();