fix(cli): remove usage of StageDB on DbTool (#448)

* use view and update instead of StageDB

* change DbTool docs

* clippy
Authored by joshieDo on 2022-12-15 15:33:49 +08:00 · committed by GitHub
parent 43f6bb9127 · commit 7b6bf0820e
6 changed files with 72 additions and 62 deletions
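The core change is that DbTool no longer holds a StageDB (which dereferenced to a transaction); it holds a plain &DB and goes through the closure-based view/update helpers, so each query runs inside its own transaction. A minimal sketch of that pattern, using a hypothetical KvStore type rather than the real reth_db traits:

use std::collections::BTreeMap;
use std::sync::RwLock;

// Hypothetical stand-in for the database handle; not the reth_db API.
struct KvStore {
    inner: RwLock<BTreeMap<String, String>>,
}

impl KvStore {
    fn new() -> Self {
        Self { inner: RwLock::new(BTreeMap::new()) }
    }

    // Run a closure against a read-only view of the data and return its result.
    fn view<T>(&self, f: impl FnOnce(&BTreeMap<String, String>) -> T) -> T {
        f(&self.inner.read().unwrap())
    }

    // Run a closure against a writable view; changes become visible on return.
    fn update<T>(&self, f: impl FnOnce(&mut BTreeMap<String, String>) -> T) -> T {
        f(&mut self.inner.write().unwrap())
    }
}

fn main() {
    let db = KvStore::new();
    db.update(|tx| tx.insert("block1".into(), "0xabc".into()));
    let entries = db.view(|tx| tx.len());
    println!("{entries} entries");
}

In the diff below the closures themselves return a Result, which is why the call sites end in })??; — one ? for the transaction-level error and one for the closure's own error.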


@@ -4,7 +4,7 @@
 //! Database debugging tool
 use clap::{Parser, Subcommand};
-use eyre::Result;
+use eyre::{Result, WrapErr};
 use reth_db::{
     cursor::{DbCursorRO, Walker},
     database::Database,
@@ -14,7 +14,6 @@ use reth_db::{
 };
 use reth_interfaces::test_utils::generators::random_block_range;
 use reth_provider::insert_canonical_block;
-use reth_stages::StageDB;
 use std::path::Path;
 use tracing::info;
@@ -71,35 +70,40 @@ impl Command {
             expanded_db_path,
             reth_db::mdbx::EnvKind::RW,
         )?;
+        db.create_tables()?;
         let mut tool = DbTool::new(&db)?;

         match &self.command {
             // TODO: We'll need to add this on the DB trait.
             Subcommands::Stats { .. } => {
-                // Get the env from MDBX
-                let env = &tool.db.inner().inner;
-                let tx = env.begin_ro_txn()?;
-                for table in tables::TABLES.iter().map(|(_, name)| name) {
-                    let table_db = tx.open_db(Some(table))?;
-                    let stats = tx.db_stat(&table_db)?;
-
-                    // Defaults to 16KB right now but we should
-                    // re-evaluate depending on the DB we end up using
-                    // (e.g. REDB does not have these options as configurable intentionally)
-                    let page_size = stats.page_size() as usize;
-                    let leaf_pages = stats.leaf_pages();
-                    let branch_pages = stats.branch_pages();
-                    let overflow_pages = stats.overflow_pages();
-                    let num_pages = leaf_pages + branch_pages + overflow_pages;
-                    let table_size = page_size * num_pages;
-                    tracing::info!(
-                        "Table {} has {} entries (total size: {} KB)",
-                        table,
-                        stats.entries(),
-                        table_size / 1024
-                    );
-                }
+                tool.db.view(|tx| {
+                    for table in tables::TABLES.iter().map(|(_, name)| name) {
+                        let table_db =
+                            tx.inner.open_db(Some(table)).wrap_err("Could not open db.")?;
+
+                        let stats = tx
+                            .inner
+                            .db_stat(&table_db)
+                            .wrap_err(format!("Could not find table: {table}"))?;
+
+                        // Defaults to 16KB right now but we should
+                        // re-evaluate depending on the DB we end up using
+                        // (e.g. REDB does not have these options as configurable intentionally)
+                        let page_size = stats.page_size() as usize;
+                        let leaf_pages = stats.leaf_pages();
+                        let branch_pages = stats.branch_pages();
+                        let overflow_pages = stats.overflow_pages();
+                        let num_pages = leaf_pages + branch_pages + overflow_pages;
+                        let table_size = page_size * num_pages;
+                        tracing::info!(
+                            "Table {} has {} entries (total size: {} KB)",
+                            table,
+                            stats.entries(),
+                            table_size / 1024
+                        );
+                    }
+                    Ok::<(), eyre::Report>(())
+                })??;
             }
             Subcommands::Seed { len } => {
                 tool.seed(*len)?;
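As a quick illustration of the size computation in the Stats arm above (the numbers are made up, not taken from a real table):

fn main() {
    // Illustrative values only; in the tool they come from tx.inner.db_stat(..).
    let page_size: usize = 16 * 1024; // bytes per page
    let leaf_pages = 120;
    let branch_pages = 4;
    let overflow_pages = 2;

    let num_pages = leaf_pages + branch_pages + overflow_pages;
    let table_size = page_size * num_pages;

    // 16384 * 126 = 2_064_384 bytes, i.e. 2016 KB
    println!("total size: {} KB", table_size / 1024);
}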
@@ -113,28 +117,29 @@ impl Command {
     }
 }

-/// Abstraction over StageDB for writing/reading from/to the DB
-/// Wraps over the StageDB and derefs to a transaction.
+/// Wrapper over DB that implements many useful DB queries.
 struct DbTool<'a, DB: Database> {
-    // TODO: StageDB derefs to Tx, is this weird or not?
-    pub(crate) db: StageDB<'a, DB>,
+    pub(crate) db: &'a DB,
 }

 impl<'a, DB: Database> DbTool<'a, DB> {
-    /// Takes a DB where the tables have already been created
+    /// Takes a DB where the tables have already been created.
     fn new(db: &'a DB) -> eyre::Result<Self> {
-        Ok(Self { db: StageDB::new(db)? })
+        Ok(Self { db })
     }

     /// Seeds the database with some random data, only used for testing
     fn seed(&mut self, len: u64) -> Result<()> {
         tracing::info!("generating random block range from 0 to {len}");
         let chain = random_block_range(0..len, Default::default());
-        chain.iter().try_for_each(|block| {
-            insert_canonical_block(&*self.db, block, true)?;
-            Ok::<_, eyre::Error>(())
-        })?;
-        self.db.commit()?;
+
+        self.db.update(|tx| {
+            chain.iter().try_for_each(|block| {
+                insert_canonical_block(tx, block, true)?;
+                Ok::<_, eyre::Error>(())
+            })
+        })??;

         info!("Database committed with {len} blocks");
         Ok(())
@@ -143,6 +148,9 @@ impl<'a, DB: Database> DbTool<'a, DB> {
     /// Lists the given table data
     fn list(&mut self, args: &ListArgs) -> Result<()> {
         match args.table.as_str() {
+            "canonical_headers" => {
+                self.list_table::<tables::CanonicalHeaders>(args.start, args.len)?
+            }
             "headers" => self.list_table::<tables::Headers>(args.start, args.len)?,
             "txs" => self.list_table::<tables::Transactions>(args.start, args.len)?,
             _ => panic!(),
@@ -151,18 +159,21 @@ impl<'a, DB: Database> DbTool<'a, DB> {
     }

     fn list_table<T: Table>(&mut self, start: usize, len: usize) -> Result<()> {
-        let mut cursor = self.db.cursor::<T>()?;
+        let data = self.db.view(|tx| {
+            let mut cursor = tx.cursor::<T>().expect("Was not able to obtain a cursor.");

-        // TODO: Upstream this in the DB trait.
-        let start_walker = cursor.current().transpose();
-        let walker = Walker {
-            cursor: &mut cursor,
-            start: start_walker,
-            _tx_phantom: std::marker::PhantomData,
-        };
+            // TODO: Upstream this in the DB trait.
+            let start_walker = cursor.current().transpose();
+            let walker = Walker {
+                cursor: &mut cursor,
+                start: start_walker,
+                _tx_phantom: std::marker::PhantomData,
+            };

-        let data = walker.skip(start).take(len).collect::<Vec<_>>();
-        dbg!(&data);
+            walker.skip(start).take(len).collect::<Vec<_>>()
+        })?;

+        println!("{data:?}");
         Ok(())
     }
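The remaining changed files are mechanical cleanups, likely prompted by clippy's uninlined_format_args lint: positional format! arguments are replaced with identifiers captured directly in the format string (stabilized in Rust 1.58). A small example of the two equivalent styles:

fn main() {
    let chain = "mainnet";
    // Old style: positional argument.
    println!("{}", chain);
    // New style: the identifier is captured inside the format string.
    println!("{chain}");
}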


@@ -175,13 +175,13 @@ mod tests {
     #[test]
     fn test_display_named_chain() {
         let chain = Chain::Named(ethers_core::types::Chain::Mainnet);
-        assert_eq!(format!("{}", chain), "mainnet");
+        assert_eq!(format!("{chain}"), "mainnet");
     }

     #[test]
     fn test_display_id_chain() {
         let chain = Chain::Id(1234);
-        assert_eq!(format!("{}", chain), "1234");
+        assert_eq!(format!("{chain}"), "1234");
     }

     #[test]


@@ -203,7 +203,7 @@ mod tests {
     fn test_from_bytes() {
         let b = bytes::Bytes::from("0123456789abcdef");
         let wrapped_b = Bytes::from(b.clone());
-        let expected = Bytes { 0: b };
+        let expected = Bytes(b);
         assert_eq!(wrapped_b, expected);
     }

@@ -212,7 +212,7 @@ mod tests {
     fn test_from_slice() {
         let arr = [1, 35, 69, 103, 137, 171, 205, 239];
         let b = Bytes::from(&arr);
-        let expected = Bytes { 0: bytes::Bytes::from(arr.to_vec()) };
+        let expected = Bytes(bytes::Bytes::from(arr.to_vec()));
         assert_eq!(b, expected);
     }

@@ -221,8 +221,8 @@ mod tests {
     fn hex_formatting() {
         let b = Bytes::from(vec![1, 35, 69, 103, 137, 171, 205, 239]);
         let expected = String::from("0x0123456789abcdef");
-        assert_eq!(format!("{:x}", b), expected);
-        assert_eq!(format!("{}", b), expected);
+        assert_eq!(format!("{b:x}"), expected);
+        assert_eq!(format!("{b}"), expected);
     }

     #[test]
@@ -240,8 +240,8 @@ mod tests {
     #[test]
     fn test_debug_formatting() {
         let b = Bytes::from(vec![1, 35, 69, 103, 137, 171, 205, 239]);
-        assert_eq!(format!("{:?}", b), "Bytes(0x0123456789abcdef)");
-        assert_eq!(format!("{:#?}", b), "Bytes(0x0123456789abcdef)");
+        assert_eq!(format!("{b:?}"), "Bytes(0x0123456789abcdef)");
+        assert_eq!(format!("{b:#?}"), "Bytes(0x0123456789abcdef)");
     }

     #[test]


@@ -682,10 +682,10 @@ mod tests {
             Some((key, _)) => key,
             None => return Ok(()),
         };
-        let mut walker = tx_count_cursor.walk(first_tx_count_key)?.peekable();
+        let walker = tx_count_cursor.walk(first_tx_count_key)?.peekable();

         let mut prev_entry: Option<(BlockNumHash, NumTransactions)> = None;
-        while let Some(entry) = walker.next() {
+        for entry in walker {
             let (key, count) = entry?;

             // Validate sequentiality only after prev progress,


@@ -2,11 +2,11 @@ use reth_libmdbx::{Environment, NoWriteMap, WriteFlags};
 use tempfile::{tempdir, TempDir};

 pub fn get_key(n: u32) -> String {
-    format!("key{}", n)
+    format!("key{n}")
 }

 pub fn get_data(n: u32) -> String {
-    format!("data{}", n)
+    format!("data{n}")
 }

 pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment<NoWriteMap>) {


@@ -261,8 +261,7 @@ fn test_concurrent_writers() {
         threads.push(thread::spawn(move || {
             let txn = writer_env.begin_rw_txn().unwrap();
             let db = txn.open_db(None).unwrap();
-            txn.put(&db, format!("{}{}", key, i), format!("{}{}", val, i), WriteFlags::empty())
-                .unwrap();
+            txn.put(&db, format!("{key}{i}"), format!("{val}{i}"), WriteFlags::empty()).unwrap();
             txn.commit().is_ok()
         }));
     }
@@ -273,8 +272,8 @@ fn test_concurrent_writers() {
     for i in 0..n {
         assert_eq!(
-            Cow::<Vec<u8>>::Owned(format!("{}{}", val, i).into_bytes()),
-            txn.get(&db, format!("{}{}", key, i).as_bytes()).unwrap().unwrap()
+            Cow::<Vec<u8>>::Owned(format!("{val}{i}").into_bytes()),
+            txn.get(&db, format!("{key}{i}").as_bytes()).unwrap().unwrap()
         );
     }
 }