mirror of https://github.com/hl-archive-node/nanoreth.git (synced 2025-12-06 10:59:55 +00:00)
feat(bin): separate journald and file log filters, log debug to file by default (#5197)
@@ -79,7 +79,8 @@ impl<Ext: RethCliExt> Cli<Ext> {
     /// Execute the configured cli command.
     pub fn run(mut self) -> eyre::Result<()> {
         // add network name to logs dir
-        self.logs.log_directory = self.logs.log_directory.join(self.chain.chain.to_string());
+        self.logs.log_file_directory =
+            self.logs.log_file_directory.join(self.chain.chain.to_string());
 
         let _guard = self.init_tracing()?;
 
@@ -105,13 +106,12 @@ impl<Ext: RethCliExt> Cli<Ext> {
     pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
         let mut layers =
             vec![reth_tracing::stdout(self.verbosity.directive(), &self.logs.color.to_string())];
-        let guard = self.logs.layer()?.map(|(layer, guard)| {
-            layers.push(layer);
-            guard
-        });
+
+        let (additional_layers, guard) = self.logs.layers()?;
+        layers.extend(additional_layers);
 
         reth_tracing::init(layers);
-        Ok(guard.flatten())
+        Ok(guard)
     }
 
     /// Configures the given node extension.
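For readability, here is the new `init_tracing` assembled from the added lines above (indentation reconstructed; the comments are editorial, not part of the change):

    pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
        // The stdout layer is always installed.
        let mut layers =
            vec![reth_tracing::stdout(self.verbosity.directive(), &self.logs.color.to_string())];

        // `Logs::layers` now returns every optional layer (journald and/or file)
        // together with the file worker guard, if file logging is enabled.
        let (additional_layers, guard) = self.logs.layers()?;
        layers.extend(additional_layers);

        reth_tracing::init(layers);
        Ok(guard)
    }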
@@ -181,31 +181,34 @@ impl<Ext: RethCliExt> Commands<Ext> {
 #[command(next_help_heading = "Logging")]
 pub struct Logs {
     /// The path to put log files in.
-    #[arg(
-        long = "log.directory",
-        value_name = "PATH",
-        global = true,
-        default_value_t,
-        conflicts_with = "journald"
-    )]
-    log_directory: PlatformPath<LogsDir>,
+    #[arg(long = "log.file.directory", value_name = "PATH", global = true, default_value_t)]
+    log_file_directory: PlatformPath<LogsDir>,
 
-    /// The maximum size (in MB) of log files.
-    #[arg(long = "log.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
-    log_max_size: u64,
+    /// The maximum size (in MB) of one log file.
+    #[arg(long = "log.file.max-size", value_name = "SIZE", global = true, default_value_t = 200)]
+    log_file_max_size: u64,
 
     /// The maximum amount of log files that will be stored. If set to 0, background file logging
     /// is disabled.
-    #[arg(long = "log.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
-    log_max_files: usize,
+    #[arg(long = "log.file.max-files", value_name = "COUNT", global = true, default_value_t = 5)]
+    log_file_max_files: usize,
 
-    /// Log events to journald.
-    #[arg(long = "log.journald", global = true, conflicts_with = "log_directory")]
-    journald: bool,
-
     /// The filter to use for logs written to the log file.
-    #[arg(long = "log.filter", value_name = "FILTER", global = true, default_value = "error")]
-    filter: String,
+    #[arg(long = "log.file.filter", value_name = "FILTER", global = true, default_value = "debug")]
+    log_file_filter: String,
+
+    /// Write logs to journald.
+    #[arg(long = "log.journald", global = true)]
+    journald: bool,
+
+    /// The filter to use for logs written to journald.
+    #[arg(
+        long = "log.journald.filter",
+        value_name = "FILTER",
+        global = true,
+        default_value = "error"
+    )]
+    journald_filter: String,
 
     /// Sets whether or not the formatter emits ANSI terminal escape codes for colors and other
     /// text formatting.
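Both new filter flags are parsed with tracing-subscriber's `EnvFilter` (as the next hunk shows), so they accept full filter directives rather than just a bare level. A minimal sketch of what the flag values accept; the `discv4=trace` target directive is illustrative, not part of this change, and this assumes the `env-filter` feature of `tracing-subscriber`:

    use tracing_subscriber::EnvFilter;

    fn main() {
        // Bare levels, matching the new defaults: "debug" for the file, "error" for journald.
        let _file_filter = EnvFilter::builder().parse("debug").expect("valid directive");
        // Per-target directives also parse, e.g. everything at error except one noisy target.
        let _journald_filter =
            EnvFilter::builder().parse("error,discv4=trace").expect("valid directive");
    }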
@@ -222,28 +225,36 @@ pub struct Logs {
 const MB_TO_BYTES: u64 = 1024 * 1024;
 
 impl Logs {
-    /// Builds a tracing layer from the current log options.
-    pub fn layer<S>(&self) -> eyre::Result<Option<(BoxedLayer<S>, Option<FileWorkerGuard>)>>
+    /// Builds tracing layers from the current log options.
+    pub fn layers<S>(&self) -> eyre::Result<(Vec<BoxedLayer<S>>, Option<FileWorkerGuard>)>
     where
         S: Subscriber,
         for<'a> S: LookupSpan<'a>,
     {
-        let filter = EnvFilter::builder().parse(&self.filter)?;
+        let mut layers = Vec::new();
 
         if self.journald {
-            Ok(Some((reth_tracing::journald(filter).expect("Could not connect to journald"), None)))
-        } else if self.log_max_files > 0 {
-            let (layer, guard) = reth_tracing::file(
-                filter,
-                &self.log_directory,
-                "reth.log",
-                self.log_max_size * MB_TO_BYTES,
-                self.log_max_files,
+            layers.push(
+                reth_tracing::journald(EnvFilter::builder().parse(&self.journald_filter)?)
+                    .expect("Could not connect to journald"),
             );
-            Ok(Some((layer, Some(guard))))
-        } else {
-            Ok(None)
         }
+
+        let file_guard = if self.log_file_max_files > 0 {
+            let (layer, guard) = reth_tracing::file(
+                EnvFilter::builder().parse(&self.log_file_filter)?,
+                &self.log_file_directory,
+                "reth.log",
+                self.log_file_max_size * MB_TO_BYTES,
+                self.log_file_max_files,
+            );
+            layers.push(layer);
+            Some(guard)
+        } else {
+            None
+        };
+
+        Ok((layers, file_guard))
     }
 }
 
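Since the hunk above interleaves the old and new bodies heavily, here is the new `layers` method assembled in one piece (a readability aid; the comments are editorial):

    pub fn layers<S>(&self) -> eyre::Result<(Vec<BoxedLayer<S>>, Option<FileWorkerGuard>)>
    where
        S: Subscriber,
        for<'a> S: LookupSpan<'a>,
    {
        let mut layers = Vec::new();

        // The journald layer gets its own independently parsed filter.
        if self.journald {
            layers.push(
                reth_tracing::journald(EnvFilter::builder().parse(&self.journald_filter)?)
                    .expect("Could not connect to journald"),
            );
        }

        // File logging is no longer mutually exclusive with journald; it is
        // enabled whenever the retention count is non-zero.
        let file_guard = if self.log_file_max_files > 0 {
            let (layer, guard) = reth_tracing::file(
                EnvFilter::builder().parse(&self.log_file_filter)?,
                &self.log_file_directory,
                "reth.log",
                self.log_file_max_size * MB_TO_BYTES,
                self.log_file_max_files,
            );
            layers.push(layer);
            Some(guard)
        } else {
            None
        };

        Ok((layers, file_guard))
    }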
@@ -342,13 +353,15 @@ mod tests {
     #[test]
     fn parse_logs_path() {
         let mut reth = Cli::<()>::try_parse_from(["reth", "node"]).unwrap();
-        reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
-        let log_dir = reth.logs.log_directory;
+        reth.logs.log_file_directory =
+            reth.logs.log_file_directory.join(reth.chain.chain.to_string());
+        let log_dir = reth.logs.log_file_directory;
         assert!(log_dir.as_ref().ends_with("reth/logs/mainnet"), "{:?}", log_dir);
 
         let mut reth = Cli::<()>::try_parse_from(["reth", "node", "--chain", "sepolia"]).unwrap();
-        reth.logs.log_directory = reth.logs.log_directory.join(reth.chain.chain.to_string());
-        let log_dir = reth.logs.log_directory;
+        reth.logs.log_file_directory =
+            reth.logs.log_file_directory.join(reth.chain.chain.to_string());
+        let log_dir = reth.logs.log_file_directory;
         assert!(log_dir.as_ref().ends_with("reth/logs/sepolia"), "{:?}", log_dir);
     }
 
@@ -990,7 +990,7 @@ async fn run_network_until_shutdown<C>(
     if let Some(file_path) = persistent_peers_file {
         let known_peers = network.all_peers().collect::<Vec<_>>();
         if let Ok(known_peers) = serde_json::to_string_pretty(&known_peers) {
-            trace!(target : "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers");
+            trace!(target: "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers");
             let parent_dir = file_path.parent().map(std::fs::create_dir_all).transpose();
             match parent_dir.and_then(|_| std::fs::write(&file_path, known_peers)) {
                 Ok(_) => {
@@ -74,27 +74,32 @@ Options:
           Print version
 
 Logging:
-      --log.directory <PATH>
+      --log.file.directory <PATH>
           The path to put log files in
 
           [default: /reth/logs]
 
-      --log.max-size <SIZE>
-          The maximum size (in MB) of log files
+      --log.file.max-size <SIZE>
+          The maximum size (in MB) of one log file
 
           [default: 200]
 
-      --log.max-files <COUNT>
+      --log.file.max-files <COUNT>
           The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
 
           [default: 5]
 
-      --log.journald
-          Log events to journald
-
-      --log.filter <FILTER>
+      --log.file.filter <FILTER>
           The filter to use for logs written to the log file
 
+          [default: debug]
+
+      --log.journald
+          Write logs to journald
+
+      --log.journald.filter <FILTER>
+          The filter to use for logs written to journald
+
           [default: error]
 
       --color <COLOR>
The identical Logging-section change is repeated verbatim in the recorded help output of the other commands; only the hunk locations differ, so just those are kept here:

@@ -42,27 +42,32 @@ Options:
@@ -72,27 +72,32 @@ Database:
@@ -53,27 +53,32 @@ Options:
@@ -70,27 +70,32 @@ Database:
@@ -61,27 +61,32 @@ Database:
@@ -389,27 +389,32 @@ Pruning:
@@ -100,27 +100,32 @@ Database:
@@ -40,27 +40,32 @@ Options:
@@ -43,27 +43,32 @@ Options:
@@ -40,27 +40,32 @@ Options:
@@ -236,7 +236,7 @@ impl Discv4 {
         let socket = UdpSocket::bind(local_address).await?;
         let local_addr = socket.local_addr()?;
         local_node_record.udp_port = local_addr.port();
-        trace!( target : "discv4", ?local_addr,"opened UDP socket");
+        trace!(target: "discv4", ?local_addr,"opened UDP socket");
 
         let service = Discv4Service::new(socket, local_addr, local_node_record, secret_key, config);
         let discv4 = service.handle();
@@ -376,7 +376,7 @@ impl Discv4 {
     fn send_to_service(&self, cmd: Discv4Command) {
         let _ = self.to_service.send(cmd).map_err(|err| {
             debug!(
-                target : "discv4",
+                target: "discv4",
                 %err,
                 "channel capacity reached, dropping command",
             )
@@ -592,12 +592,12 @@ impl Discv4Service {
     /// discovery
     pub fn set_external_ip_addr(&mut self, external_ip: IpAddr) {
         if self.local_node_record.address != external_ip {
-            debug!(target : "discv4", ?external_ip, "Updating external ip");
+            debug!(target: "discv4", ?external_ip, "Updating external ip");
             self.local_node_record.address = external_ip;
             let _ = self.local_eip_868_enr.set_ip(external_ip, &self.secret_key);
             let mut lock = self.shared_node_record.lock();
             *lock = self.local_node_record;
-            debug!(target : "discv4", enr=?self.local_eip_868_enr, "Updated local ENR");
+            debug!(target: "discv4", enr=?self.local_eip_868_enr, "Updated local ENR");
         }
     }
 
@@ -646,7 +646,7 @@ impl Discv4Service {
     /// **Note:** This is a noop if there are no bootnodes.
     pub fn bootstrap(&mut self) {
         for record in self.config.bootstrap_nodes.clone() {
-            debug!(target : "discv4", ?record, "pinging boot node");
+            debug!(target: "discv4", ?record, "pinging boot node");
             let key = kad_key(record.id);
             let entry = NodeEntry::new(record);
 
@@ -675,9 +675,9 @@ impl Discv4Service {
             self.bootstrap();
 
             while let Some(event) = self.next().await {
-                trace!(target : "discv4", ?event, "processed");
+                trace!(target: "discv4", ?event, "processed");
             }
-            trace!(target : "discv4", "service terminated");
+            trace!(target: "discv4", "service terminated");
         })
     }
 
@@ -715,7 +715,7 @@ impl Discv4Service {
    /// This takes an optional Sender through which all successfully discovered nodes are sent once
    /// the request has finished.
    fn lookup_with(&mut self, target: PeerId, tx: Option<NodeRecordSender>) {
-        trace!(target : "discv4", ?target, "Starting lookup");
+        trace!(target: "discv4", ?target, "Starting lookup");
        let target_key = kad_key(target);
 
        // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes
@@ -744,7 +744,7 @@ impl Discv4Service {
            return
        }
 
-        trace!(target : "discv4", ?target, num = closest.len(), "Start lookup closest nodes");
+        trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes");
 
        for node in closest {
            self.find_node(&node, ctx.clone());
@@ -755,7 +755,7 @@ impl Discv4Service {
     ///
     /// CAUTION: This expects there's a valid Endpoint proof to the given `node`.
     fn find_node(&mut self, node: &NodeRecord, ctx: LookupContext) {
-        trace!(target : "discv4", ?node, lookup=?ctx.target(), "Sending FindNode");
+        trace!(target: "discv4", ?node, lookup=?ctx.target(), "Sending FindNode");
         ctx.mark_queried(node.id);
         let id = ctx.target();
         let msg = Message::FindNode(FindNode { id, expire: self.find_node_expiration() });
@@ -886,7 +886,7 @@ impl Discv4Service {
 
                 if !old_status.is_connected() {
                     let _ = entry.update(ConnectionState::Connected, Some(old_status.direction));
-                    debug!(target : "discv4", ?record, "added after successful endpoint proof");
+                    debug!(target: "discv4", ?record, "added after successful endpoint proof");
                     self.notify(DiscoveryUpdate::Added(record));
 
                     if has_enr_seq {
@@ -903,7 +903,7 @@ impl Discv4Service {
                 if !status.is_connected() {
                     status.state = ConnectionState::Connected;
                     let _ = entry.update(status);
-                    debug!(target : "discv4", ?record, "added after successful endpoint proof");
+                    debug!(target: "discv4", ?record, "added after successful endpoint proof");
                     self.notify(DiscoveryUpdate::Added(record));
 
                     if has_enr_seq {
@@ -943,7 +943,7 @@ impl Discv4Service {
                 },
             ) {
                 BucketInsertResult::Inserted | BucketInsertResult::Pending { .. } => {
-                    debug!(target : "discv4", ?record, "inserted new record");
+                    debug!(target: "discv4", ?record, "inserted new record");
                 }
                 _ => return false,
             }
@@ -957,10 +957,10 @@ impl Discv4Service {
     /// Encodes the packet, sends it and returns the hash.
     pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 {
         let (payload, hash) = msg.encode(&self.secret_key);
-        trace!(target : "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet");
+        trace!(target: "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet");
         let _ = self.egress.try_send((payload, to)).map_err(|err| {
             debug!(
-                target : "discv4",
+                target: "discv4",
                 %err,
                 "dropped outgoing packet",
             );
@@ -1025,7 +1025,7 @@ impl Discv4Service {
                     // we received a ping but the corresponding bucket for the peer is already
                     // full, we can't add any additional peers to that bucket, but we still want
                     // to emit an event that we discovered the node
-                    debug!(target : "discv4", ?record, "discovered new record but bucket is full");
+                    trace!(target: "discv4", ?record, "discovered new record but bucket is full");
                     self.notify(DiscoveryUpdate::DiscoveredAtCapacity(record));
                     needs_bond = true;
                 }
@@ -1122,7 +1122,7 @@ impl Discv4Service {
             expire: self.ping_expiration(),
             enr_sq: self.enr_seq(),
         };
-        trace!(target : "discv4", ?ping, "sending ping");
+        trace!(target: "discv4", ?ping, "sending ping");
         let echo_hash = self.send_packet(Message::Ping(ping), remote_addr);
 
         self.pending_pings
@@ -1140,7 +1140,7 @@ impl Discv4Service {
         let remote_addr = node.udp_addr();
         let enr_request = EnrRequest { expire: self.enr_request_expiration() };
 
-        trace!(target : "discv4", ?enr_request, "sending enr request");
+        trace!(target: "discv4", ?enr_request, "sending enr request");
         let echo_hash = self.send_packet(Message::EnrRequest(enr_request), remote_addr);
 
         self.pending_enr_requests
@@ -1158,7 +1158,7 @@ impl Discv4Service {
         {
             let request = entry.get();
             if request.echo_hash != pong.echo {
-                debug!( target : "discv4", from=?remote_addr, expected=?request.echo_hash, echo_hash=?pong.echo,"Got unexpected Pong");
+                trace!(target: "discv4", from=?remote_addr, expected=?request.echo_hash, echo_hash=?pong.echo,"Got unexpected Pong");
                 return
             }
         }
@@ -1209,7 +1209,7 @@ impl Discv4Service {
 
     /// Handler for incoming `EnrResponse` message
     fn on_enr_response(&mut self, msg: EnrResponse, remote_addr: SocketAddr, id: PeerId) {
-        trace!(target : "discv4", ?remote_addr, ?msg, "received ENR response");
+        trace!(target: "discv4", ?remote_addr, ?msg, "received ENR response");
         if let Some(resp) = self.pending_enr_requests.remove(&id) {
             if resp.echo_hash == msg.request_hash {
                 let key = kad_key(id);
@@ -1281,7 +1281,7 @@ impl Discv4Service {
                 if total <= MAX_NODES_PER_BUCKET {
                     request.response_count = total;
                 } else {
-                    debug!(target : "discv4", total, from=?remote_addr, "Received neighbors packet entries exceeds max nodes per bucket");
+                    trace!(target: "discv4", total, from=?remote_addr, "Received neighbors packet entries exceeds max nodes per bucket");
                     return
                 }
             };
@@ -1297,7 +1297,7 @@ impl Discv4Service {
             }
             Entry::Vacant(_) => {
                 // received neighbours response without requesting it
-                debug!( target : "discv4", from=?remote_addr, "Received unsolicited Neighbours");
+                trace!(target: "discv4", from=?remote_addr, "Received unsolicited Neighbours");
                 return
             }
         };
@@ -1363,7 +1363,7 @@ impl Discv4Service {
 
         for nodes in all_nodes.chunks(SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS) {
             let nodes = nodes.iter().map(|node| node.value.record).collect::<Vec<NodeRecord>>();
-            trace!( target : "discv4", len = nodes.len(), to=?to,"Sent neighbours packet");
+            trace!(target: "discv4", len = nodes.len(), to=?to,"Sent neighbours packet");
             let msg = Message::Neighbours(Neighbours { nodes, expire });
             self.send_packet(msg, to);
         }
@@ -1614,10 +1614,10 @@ impl Discv4Service {
                 match event {
                     IngressEvent::RecvError(_) => {}
                     IngressEvent::BadPacket(from, err, data) => {
-                        debug!(target : "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
+                        debug!(target: "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
                     }
                     IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => {
-                        trace!( target : "discv4", r#type=?msg.msg_type(), from=?remote_addr,"received packet");
+                        trace!(target: "discv4", r#type=?msg.msg_type(), from=?remote_addr,"received packet");
                         let event = match msg {
                             Message::Ping(ping) => {
                                 self.on_ping(ping, remote_addr, node_id, hash);
@@ -1712,10 +1712,10 @@ pub(crate) async fn send_loop(udp: Arc<UdpSocket>, rx: EgressReceiver) {
     while let Some((payload, to)) = stream.next().await {
         match udp.send_to(&payload, to).await {
             Ok(size) => {
-                trace!( target : "discv4", ?to, ?size,"sent payload");
+                trace!(target: "discv4", ?to, ?size,"sent payload");
             }
             Err(err) => {
-                debug!( target : "discv4", ?to, ?err,"Failed to send datagram.");
+                debug!(target: "discv4", ?to, ?err,"Failed to send datagram.");
             }
         }
     }
@@ -1726,7 +1726,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
     let send = |event: IngressEvent| async {
         let _ = tx.send(event).await.map_err(|err| {
             debug!(
-                target : "discv4",
+                target: "discv4",
                 %err,
                 "failed send incoming packet",
             )
@@ -1738,7 +1738,7 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
         let res = udp.recv_from(&mut buf).await;
         match res {
             Err(err) => {
-                debug!(target : "discv4", ?err, "Failed to read datagram.");
+                debug!(target: "discv4", ?err, "Failed to read datagram.");
                 send(IngressEvent::RecvError(err)).await;
             }
             Ok((read, remote_addr)) => {
@@ -1747,13 +1747,13 @@ pub(crate) async fn receive_loop(udp: Arc<UdpSocket>, tx: IngressSender, local_i
                     Ok(packet) => {
                         if packet.node_id == local_id {
                             // received our own message
-                            debug!(target : "discv4", ?remote_addr, "Received own packet.");
+                            debug!(target: "discv4", ?remote_addr, "Received own packet.");
                             continue
                         }
                         send(IngressEvent::Packet(remote_addr, packet)).await;
                     }
                     Err(err) => {
-                        debug!( target : "discv4", ?err,"Failed to decode packet");
+                        debug!(target: "discv4", ?err,"Failed to decode packet");
                         send(IngressEvent::BadPacket(remote_addr, err, packet.to_vec())).await
                     }
                 }
@@ -146,7 +146,7 @@ impl Stream for MockDiscovery {
             match event {
                 IngressEvent::RecvError(_) => {}
                 IngressEvent::BadPacket(from, err, data) => {
-                    debug!( target : "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
+                    debug!(target: "discv4", ?from, ?err, packet=?hex::encode(&data), "bad packet");
                 }
                 IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => match msg {
                     Message::Ping(ping) => {
@@ -156,7 +156,7 @@ impl<R: Resolver> DnsDiscoveryService<R> {
             self.bootstrap();
 
             while let Some(event) = self.next().await {
-                trace!(target : "disc::dns", ?event, "processed");
+                trace!(target: "disc::dns", ?event, "processed");
             }
         })
     }
@@ -168,7 +168,7 @@ impl HeadersClient for FileClient {
     ) -> Self::Output {
         // this just searches the buffer, and fails if it can't find the header
         let mut headers = Vec::new();
-        trace!(target : "downloaders::file", request=?request, "Getting headers");
+        trace!(target: "downloaders::file", request=?request, "Getting headers");
 
         let start_num = match request.start {
             BlockHashOrNumber::Hash(hash) => match self.hash_to_number.get(&hash) {
@@ -192,7 +192,7 @@ impl HeadersClient for FileClient {
             }
         };
 
-        trace!(target : "downloaders::file", range=?range, "Getting headers with range");
+        trace!(target: "downloaders::file", range=?range, "Getting headers with range");
 
         for block_number in range {
             match self.headers.get(&block_number).cloned() {
@@ -18,7 +18,7 @@ use std::{
 use tokio::io::{AsyncRead, AsyncWrite};
 use tokio_stream::{Stream, StreamExt};
 use tokio_util::codec::{Decoder, Framed};
-use tracing::{debug, instrument, trace};
+use tracing::{instrument, trace};
 
 /// `ECIES` stream over TCP exchanging raw bytes
 #[derive(Debug)]
@@ -74,11 +74,11 @@ where
     pub async fn incoming(transport: Io, secret_key: SecretKey) -> Result<Self, ECIESError> {
         let ecies = ECIESCodec::new_server(secret_key)?;
 
-        debug!("incoming ecies stream ...");
+        trace!("incoming ecies stream");
         let mut transport = ecies.framed(transport);
         let msg = transport.try_next().await?;
 
-        debug!("receiving ecies auth");
+        trace!("receiving ecies auth");
         let remote_id = match &msg {
             Some(IngressECIESValue::AuthReceive(remote_id)) => *remote_id,
             _ => {
@@ -90,7 +90,7 @@ where
             }
         };
 
-        debug!("sending ecies ack ...");
+        trace!("sending ecies ack");
         transport.send(EgressECIESValue::Ack).await?;
 
         Ok(Self { stream: transport, remote_id })
@@ -26,6 +26,7 @@ use tokio_stream::Stream;
 
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
+use tracing::{debug, trace};
 
 /// [`MAX_PAYLOAD_SIZE`] is the maximum size of an uncompressed message payload.
 /// This is defined in [EIP-706](https://eips.ethereum.org/EIPS/eip-706).
@@ -93,7 +94,7 @@ where
         mut self,
         hello: HelloMessage,
     ) -> Result<(P2PStream<S>, HelloMessage), P2PStreamError> {
-        tracing::trace!(?hello, "sending p2p hello to peer");
+        trace!(?hello, "sending p2p hello to peer");
 
         // send our hello message with the Sink
         let mut raw_hello_bytes = BytesMut::new();
@@ -123,21 +124,26 @@ where
         let their_hello = match P2PMessage::decode(&mut &first_message_bytes[..]) {
             Ok(P2PMessage::Hello(hello)) => Ok(hello),
             Ok(P2PMessage::Disconnect(reason)) => {
-                tracing::debug!("Disconnected by peer during handshake: {}", reason);
+                if matches!(reason, DisconnectReason::TooManyPeers) {
+                    // Too many peers is a very common disconnect reason that spams the DEBUG logs
+                    trace!(%reason, "Disconnected by peer during handshake");
+                } else {
+                    debug!(%reason, "Disconnected by peer during handshake");
+                };
                 counter!("p2pstream.disconnected_errors", 1);
                 Err(P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(reason)))
             }
             Err(err) => {
-                tracing::debug!(?err, msg=%hex::encode(&first_message_bytes), "Failed to decode first message from peer");
+                debug!(?err, msg=%hex::encode(&first_message_bytes), "Failed to decode first message from peer");
                 Err(P2PStreamError::HandshakeError(err.into()))
             }
             Ok(msg) => {
-                tracing::debug!("expected hello message but received: {:?}", msg);
+                debug!(?msg, "expected hello message but received another message");
                 Err(P2PStreamError::HandshakeError(P2PHandshakeError::NonHelloMessageInHandshake))
             }
         }?;
 
-        tracing::trace!(
+        trace!(
             hello=?their_hello,
             "validating incoming p2p hello from peer"
         );
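One behavioral note falls out of the added lines above: a `TooManyPeers` disconnect is now logged at TRACE while every other handshake disconnect stays at DEBUG, so the common case no longer floods the debug-level file log this commit enables by default. The new match arm in isolation (assembled from the added lines):

    Ok(P2PMessage::Disconnect(reason)) => {
        if matches!(reason, DisconnectReason::TooManyPeers) {
            // Too many peers is a very common disconnect reason that spams the DEBUG logs
            trace!(%reason, "Disconnected by peer during handshake");
        } else {
            debug!(%reason, "Disconnected by peer during handshake");
        };
        counter!("p2pstream.disconnected_errors", 1);
        Err(P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(reason)))
    }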
@@ -181,7 +187,7 @@ where
     ) -> Result<(), P2PStreamError> {
         let mut buf = BytesMut::new();
         P2PMessage::Disconnect(reason).encode(&mut buf);
-        tracing::trace!(
+        trace!(
             %reason,
             "Sending disconnect message during the handshake",
         );
@@ -311,7 +317,7 @@ impl<S> P2PStream<S> {
         let mut compressed = BytesMut::zeroed(1 + snap::raw::max_compress_len(buf.len() - 1));
         let compressed_size =
             self.encoder.compress(&buf[1..], &mut compressed[1..]).map_err(|err| {
-                tracing::debug!(
+                debug!(
                     ?err,
                     msg=%hex::encode(&buf[1..]),
                     "error compressing disconnect"
@@ -389,7 +395,7 @@ where
                 // each message following a successful handshake is compressed with snappy, so we need
                 // to decompress the message before we can decode it.
                 this.decoder.decompress(&bytes[1..], &mut decompress_buf[1..]).map_err(|err| {
-                    tracing::debug!(
+                    debug!(
                         ?err,
                         msg=%hex::encode(&bytes[1..]),
                         "error decompressing p2p message"
@@ -400,7 +406,7 @@ where
                 let id = *bytes.first().ok_or(P2PStreamError::EmptyProtocolMessage)?;
                 match id {
                     _ if id == P2PMessageID::Ping as u8 => {
-                        tracing::trace!("Received Ping, Sending Pong");
+                        trace!("Received Ping, Sending Pong");
                         this.send_pong();
                         // This is required because the `Sink` may not be polled externally, and if
                         // that happens, the pong will never be sent.
@@ -408,7 +414,7 @@ where
                     }
                     _ if id == P2PMessageID::Disconnect as u8 => {
                         let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).map_err(|err| {
-                            tracing::debug!(
+                            debug!(
                                 ?err, msg=%hex::encode(&decompress_buf[1..]), "Failed to decode disconnect message from peer"
                             );
                             err
@@ -519,7 +525,7 @@ where
         let mut compressed = BytesMut::zeroed(1 + snap::raw::max_compress_len(item.len() - 1));
         let compressed_size =
             this.encoder.compress(&item[1..], &mut compressed[1..]).map_err(|err| {
-                tracing::debug!(
+                debug!(
                     ?err,
                     msg=%hex::encode(&item[1..]),
                     "error compressing p2p message"
@@ -633,7 +639,7 @@ pub fn set_capability_offsets(
         match shared_capability {
             SharedCapability::UnknownCapability { .. } => {
                 // Capabilities which are not shared are ignored
-                tracing::debug!("unknown capability: name={:?}, version={}", name, version,);
+                debug!("unknown capability: name={:?}, version={}", name, version,);
             }
             SharedCapability::Eth { .. } => {
                 // increment the offset if the capability is known
@@ -43,7 +43,7 @@ impl ConnectionListener {
         match ready!(this.incoming.poll_next(cx)) {
             Some(Ok((stream, remote_addr))) => {
                 if let Err(err) = stream.set_nodelay(true) {
-                    tracing::warn!(target : "net", "set nodelay failed: {:?}", err);
+                    tracing::warn!(target: "net", "set nodelay failed: {:?}", err);
                 }
                 Poll::Ready(ListenerEvent::Incoming { stream, remote_addr })
             }
@@ -360,7 +360,7 @@ where
         _capabilities: Arc<Capabilities>,
         _message: CapabilityMessage,
     ) {
-        trace!(target : "net", ?peer_id, "received unexpected message");
+        trace!(target: "net", ?peer_id, "received unexpected message");
         self.swarm
             .state_mut()
             .peers_mut()
@@ -506,7 +506,7 @@ where
                 unreachable!("Not emitted by session")
             }
             PeerMessage::Other(other) => {
-                debug!(target : "net", message_id=%other.id, "Ignoring unsupported message");
+                debug!(target: "net", message_id=%other.id, "Ignoring unsupported message");
             }
         }
     }
@@ -646,20 +646,20 @@ where
                     this.metrics.invalid_messages_received.increment(1);
                 }
                 SwarmEvent::TcpListenerClosed { remote_addr } => {
-                    trace!(target : "net", ?remote_addr, "TCP listener closed.");
+                    trace!(target: "net", ?remote_addr, "TCP listener closed.");
                 }
                 SwarmEvent::TcpListenerError(err) => {
-                    trace!(target : "net", ?err, "TCP connection error.");
+                    trace!(target: "net", ?err, "TCP connection error.");
                 }
                 SwarmEvent::IncomingTcpConnection { remote_addr, session_id } => {
-                    trace!(target : "net", ?session_id, ?remote_addr, "Incoming connection");
+                    trace!(target: "net", ?session_id, ?remote_addr, "Incoming connection");
                     this.metrics.total_incoming_connections.increment(1);
                     this.metrics
                         .incoming_connections
                         .set(this.swarm.state().peers().num_inbound_connections() as f64);
                 }
                 SwarmEvent::OutgoingTcpConnection { remote_addr, peer_id } => {
-                    trace!(target : "net", ?remote_addr, ?peer_id, "Starting outbound connection.");
+                    trace!(target: "net", ?remote_addr, ?peer_id, "Starting outbound connection.");
                     this.metrics.total_outgoing_connections.increment(1);
                     this.metrics
                         .outgoing_connections
@ -724,7 +724,7 @@ where
|
|||||||
this.num_active_peers.fetch_sub(1, Ordering::Relaxed) - 1;
|
this.num_active_peers.fetch_sub(1, Ordering::Relaxed) - 1;
|
||||||
this.metrics.connected_peers.set(total_active as f64);
|
this.metrics.connected_peers.set(total_active as f64);
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?peer_id,
|
?peer_id,
|
||||||
?total_active,
|
?total_active,
|
||||||
@ -769,7 +769,7 @@ where
|
|||||||
}
|
}
|
||||||
SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => {
|
SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?error,
|
?error,
|
||||||
"Incoming pending session failed"
|
"Incoming pending session failed"
|
||||||
@ -805,7 +805,7 @@ where
|
|||||||
error,
|
error,
|
||||||
} => {
|
} => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?peer_id,
|
?peer_id,
|
||||||
?error,
|
?error,
|
||||||
@ -839,7 +839,7 @@ where
|
|||||||
}
|
}
|
||||||
SwarmEvent::OutgoingConnectionError { remote_addr, peer_id, error } => {
|
SwarmEvent::OutgoingConnectionError { remote_addr, peer_id, error } => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?peer_id,
|
?peer_id,
|
||||||
?error,
|
?error,
|
||||||
|
|||||||
@ -540,7 +540,7 @@ impl PeersManager {
|
|||||||
/// protocol
|
/// protocol
|
||||||
pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) {
|
pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) {
|
||||||
if let Some(peer) = self.peers.get_mut(&peer_id) {
|
if let Some(peer) = self.peers.get_mut(&peer_id) {
|
||||||
trace!(target : "net::peers", ?peer_id, ?fork_id, "set discovered fork id");
|
trace!(target: "net::peers", ?peer_id, ?fork_id, "set discovered fork id");
|
||||||
peer.fork_id = Some(fork_id);
|
peer.fork_id = Some(fork_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -589,7 +589,7 @@ impl PeersManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Entry::Vacant(entry) => {
|
Entry::Vacant(entry) => {
|
||||||
trace!(target : "net::peers", ?peer_id, ?addr, "discovered new node");
|
trace!(target: "net::peers", ?peer_id, ?addr, "discovered new node");
|
||||||
let mut peer = Peer::with_kind(addr, kind);
|
let mut peer = Peer::with_kind(addr, kind);
|
||||||
peer.fork_id = fork_id;
|
peer.fork_id = fork_id;
|
||||||
entry.insert(peer);
|
entry.insert(peer);
|
||||||
@ -606,11 +606,11 @@ impl PeersManager {
|
|||||||
}
|
}
|
||||||
let mut peer = entry.remove();
|
let mut peer = entry.remove();
|
||||||
|
|
||||||
trace!(target : "net::peers", ?peer_id, "remove discovered node");
|
trace!(target: "net::peers", ?peer_id, "remove discovered node");
|
||||||
self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
|
self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
|
||||||
|
|
||||||
if peer.state.is_connected() {
|
if peer.state.is_connected() {
|
||||||
trace!(target : "net::peers", ?peer_id, "disconnecting on remove from discovery");
|
trace!(target: "net::peers", ?peer_id, "disconnecting on remove from discovery");
|
||||||
// we terminate the active session here, but only remove the peer after the session
|
// we terminate the active session here, but only remove the peer after the session
|
||||||
// was disconnected, this prevents the case where the session is scheduled for
|
// was disconnected, this prevents the case where the session is scheduled for
|
||||||
// disconnect but the node is immediately rediscovered, See also
|
// disconnect but the node is immediately rediscovered, See also
|
||||||
@ -697,7 +697,7 @@ impl PeersManager {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
trace!(target : "net::peers", ?peer_id, addr=?peer.addr, "schedule outbound connection");
|
trace!(target: "net::peers", ?peer_id, addr=?peer.addr, "schedule outbound connection");
|
||||||
|
|
||||||
peer.state = PeerConnectionState::Out;
|
peer.state = PeerConnectionState::Out;
|
||||||
PeerAction::Connect { peer_id, remote_addr: peer.addr }
|
PeerAction::Connect { peer_id, remote_addr: peer.addr }
|
||||||
|
|||||||
@ -274,7 +274,7 @@ impl ActiveSession {
|
|||||||
unreachable!("Not emitted by network")
|
unreachable!("Not emitted by network")
|
||||||
}
|
}
|
||||||
PeerMessage::Other(other) => {
|
PeerMessage::Other(other) => {
|
||||||
debug!(target : "net::session", message_id=%other.id, "Ignoring unsupported message");
|
debug!(target: "net::session", message_id=%other.id, "Ignoring unsupported message");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -294,7 +294,7 @@ impl ActiveSession {
|
|||||||
self.queued_outgoing.push_back(msg.into());
|
self.queued_outgoing.push_back(msg.into());
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
debug!(target : "net", ?err, "Failed to respond to received request");
|
debug!(target: "net", ?err, "Failed to respond to received request");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -312,7 +312,7 @@ impl ActiveSession {
|
|||||||
Ok(_) => Ok(()),
|
Ok(_) => Ok(()),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
%err,
|
%err,
|
||||||
"no capacity for incoming broadcast",
|
"no capacity for incoming broadcast",
|
||||||
);
|
);
|
||||||
@ -338,7 +338,7 @@ impl ActiveSession {
|
|||||||
Ok(_) => Ok(()),
|
Ok(_) => Ok(()),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net",
|
target: "net",
|
||||||
%err,
|
%err,
|
||||||
"no capacity for incoming request",
|
"no capacity for incoming request",
|
||||||
);
|
);
|
||||||
|
|||||||
@ -202,7 +202,7 @@ impl SessionManager {
|
|||||||
let session_id = self.next_id();
|
let session_id = self.next_id();
|
||||||
|
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?session_id,
|
?session_id,
|
||||||
"new pending incoming session"
|
"new pending incoming session"
|
||||||
@ -347,7 +347,7 @@ impl SessionManager {
|
|||||||
return match event {
|
return match event {
|
||||||
ActiveSessionMessage::Disconnected { peer_id, remote_addr } => {
|
ActiveSessionMessage::Disconnected { peer_id, remote_addr } => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?peer_id,
|
?peer_id,
|
||||||
"gracefully disconnected active session."
|
"gracefully disconnected active session."
|
||||||
);
|
);
|
||||||
@ -359,7 +359,7 @@ impl SessionManager {
|
|||||||
remote_addr,
|
remote_addr,
|
||||||
error,
|
error,
|
||||||
} => {
|
} => {
|
||||||
trace!(target : "net::session", ?peer_id, ?error,"closed session.");
|
trace!(target: "net::session", ?peer_id, ?error,"closed session.");
|
||||||
self.remove_active_session(&peer_id);
|
self.remove_active_session(&peer_id);
|
||||||
Poll::Ready(SessionEvent::SessionClosedOnConnectionError {
|
Poll::Ready(SessionEvent::SessionClosedOnConnectionError {
|
||||||
remote_addr,
|
remote_addr,
|
||||||
@ -407,7 +407,7 @@ impl SessionManager {
|
|||||||
// If there's already a session to the peer then we disconnect right away
|
// If there's already a session to the peer then we disconnect right away
|
||||||
if self.active_sessions.contains_key(&peer_id) {
|
if self.active_sessions.contains_key(&peer_id) {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?session_id,
|
?session_id,
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?peer_id,
|
?peer_id,
|
||||||
@ -501,7 +501,7 @@ impl SessionManager {
|
|||||||
}
|
}
|
||||||
PendingSessionEvent::Disconnected { remote_addr, session_id, direction, error } => {
|
PendingSessionEvent::Disconnected { remote_addr, session_id, direction, error } => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?session_id,
|
?session_id,
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
?error,
|
?error,
|
||||||
@ -531,7 +531,7 @@ impl SessionManager {
|
|||||||
error,
|
error,
|
||||||
} => {
|
} => {
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?error,
|
?error,
|
||||||
?session_id,
|
?session_id,
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
@ -544,7 +544,7 @@ impl SessionManager {
|
|||||||
PendingSessionEvent::EciesAuthError { remote_addr, session_id, error, direction } => {
|
PendingSessionEvent::EciesAuthError { remote_addr, session_id, error, direction } => {
|
||||||
self.remove_pending_session(&session_id);
|
self.remove_pending_session(&session_id);
|
||||||
trace!(
|
trace!(
|
||||||
target : "net::session",
|
target: "net::session",
|
||||||
?error,
|
?error,
|
||||||
?session_id,
|
?session_id,
|
||||||
?remote_addr,
|
?remote_addr,
|
||||||
@ -761,7 +761,7 @@ async fn start_pending_outbound_session(
|
|||||||
let stream = match TcpStream::connect(remote_addr).await {
|
let stream = match TcpStream::connect(remote_addr).await {
|
||||||
Ok(stream) => {
|
Ok(stream) => {
|
||||||
if let Err(err) = stream.set_nodelay(true) {
|
if let Err(err) = stream.set_nodelay(true) {
|
||||||
tracing::warn!(target : "net::session", "set nodelay failed: {:?}", err);
|
tracing::warn!(target: "net::session", "set nodelay failed: {:?}", err);
|
||||||
}
|
}
|
||||||
MeteredStream::new_with_meter(stream, bandwidth_meter)
|
MeteredStream::new_with_meter(stream, bandwidth_meter)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -420,7 +420,7 @@ where
|
|||||||
// check if the error is due to a closed channel to the session
|
// check if the error is due to a closed channel to the session
|
||||||
if res.err().map(|err| err.is_channel_closed()).unwrap_or_default() {
|
if res.err().map(|err| err.is_channel_closed()).unwrap_or_default() {
|
||||||
debug!(
|
debug!(
|
||||||
target : "net",
|
target: "net",
|
||||||
?id,
|
?id,
|
||||||
"Request canceled, response channel from session closed."
|
"Request canceled, response channel from session closed."
|
||||||
);
|
);
|
||||||
|
|||||||
@ -143,7 +143,7 @@ impl<'a, TX: DbTx> StateRoot<'a, TX, &'a TX> {
|
|||||||
tx: &'a TX,
|
tx: &'a TX,
|
||||||
range: RangeInclusive<BlockNumber>,
|
range: RangeInclusive<BlockNumber>,
|
||||||
) -> Result<B256, StateRootError> {
|
) -> Result<B256, StateRootError> {
|
||||||
tracing::debug!(target: "loader", "incremental state root");
|
tracing::debug!(target: "trie::loader", "incremental state root");
|
||||||
Self::incremental_root_calculator(tx, range)?.root()
|
Self::incremental_root_calculator(tx, range)?.root()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -159,7 +159,7 @@ impl<'a, TX: DbTx> StateRoot<'a, TX, &'a TX> {
|
|||||||
tx: &'a TX,
|
tx: &'a TX,
|
||||||
range: RangeInclusive<BlockNumber>,
|
range: RangeInclusive<BlockNumber>,
|
||||||
) -> Result<(B256, TrieUpdates), StateRootError> {
|
) -> Result<(B256, TrieUpdates), StateRootError> {
|
||||||
tracing::debug!(target: "loader", "incremental state root");
|
tracing::debug!(target: "trie::loader", "incremental state root");
|
||||||
Self::incremental_root_calculator(tx, range)?.root_with_updates()
|
Self::incremental_root_calculator(tx, range)?.root_with_updates()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -173,7 +173,7 @@ impl<'a, TX: DbTx> StateRoot<'a, TX, &'a TX> {
|
|||||||
tx: &'a TX,
|
tx: &'a TX,
|
||||||
range: RangeInclusive<BlockNumber>,
|
range: RangeInclusive<BlockNumber>,
|
||||||
) -> Result<StateRootProgress, StateRootError> {
|
) -> Result<StateRootProgress, StateRootError> {
|
||||||
tracing::debug!(target: "loader", "incremental state root with progress");
|
tracing::debug!(target: "trie::loader", "incremental state root with progress");
|
||||||
Self::incremental_root_calculator(tx, range)?.root_with_progress()
|
Self::incremental_root_calculator(tx, range)?.root_with_progress()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -222,7 +222,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn calculate(self, retain_updates: bool) -> Result<StateRootProgress, StateRootError> {
|
fn calculate(self, retain_updates: bool) -> Result<StateRootProgress, StateRootError> {
|
||||||
tracing::debug!(target: "loader", "calculating state root");
|
tracing::debug!(target: "trie::loader", "calculating state root");
|
||||||
let mut trie_updates = TrieUpdates::default();
|
let mut trie_updates = TrieUpdates::default();
|
||||||
|
|
||||||
let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
|
let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
|
||||||
|
|||||||
@ -139,7 +139,7 @@ impl Discv4 {
|
|||||||
let socket = UdpSocket::bind(local_address).await?;
|
let socket = UdpSocket::bind(local_address).await?;
|
||||||
let local_addr = socket.local_addr()?;
|
let local_addr = socket.local_addr()?;
|
||||||
local_node_record.udp_port = local_addr.port();
|
local_node_record.udp_port = local_addr.port();
|
||||||
trace!( target : "discv4", ?local_addr,"opened UDP socket");
|
trace!(target: "discv4", ?local_addr,"opened UDP socket");
|
||||||
|
|
||||||
let (to_service, rx) = mpsc::channel(100);
|
let (to_service, rx) = mpsc::channel(100);
|
||||||
|
|
||||||
|
|||||||
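Aside from the `loader` -> `trie::loader` target rename, the hunks above are a mechanical cleanup of tracing call sites: importing the log macros instead of fully qualifying them, and normalizing `target : "..."` to `target: "..."`. A minimal sketch of the two styles, for illustration only — `on_message` and `peer_id` are hypothetical, assuming the `tracing` 0.1 crate:

use tracing::{debug, trace};

// Hypothetical call site sketching the conventions this commit converges on.
fn on_message(peer_id: u64) {
    // Old style: fully qualified macro path and a space before the colon.
    // `target : "net"` parses the same as `target: "net"`; whitespace between
    // macro tokens is insignificant in Rust, so the change is purely stylistic.
    tracing::debug!(target : "net", ?peer_id, "Ignoring unsupported message");

    // New style: the macro is imported and `target:` has no extra space.
    // `?peer_id` records the field using its Debug representation.
    debug!(target: "net", ?peer_id, "Ignoring unsupported message");

    // Namespaced targets ("net::peers", "net::session", "trie::loader") let
    // env-filter directives such as `net::peers=trace` select one subsystem.
    trace!(target: "net::peers", ?peer_id, "set discovered fork id");
}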