feat(execution): Account NotExisting and block reward changesets (#349)

This commit is contained in:
rakita
2022-12-08 06:10:06 +01:00
committed by GitHub
parent 6081cdfd2b
commit d66138e143
11 changed files with 322 additions and 131 deletions

View File

@ -15,7 +15,9 @@ concurrency:
name: ci
jobs:
test:
runs-on: ubuntu-latest
# Pin to `20.04` instead of `ubuntu-latest`, until ubuntu-latest migration is complete
# See also <https://github.com/foundry-rs/foundry/issues/3827>
runs-on: ubuntu-20.04
steps:
- name: Checkout sources
uses: actions/checkout@v3
@ -38,7 +40,9 @@ jobs:
run: cargo install cargo-test-fuzz afl
fuzz:
runs-on: ubuntu-latest
# Pin to `20.04` instead of `ubuntu-latest`, until ubuntu-latest migration is complete
# See also <https://github.com/foundry-rs/foundry/issues/3827>
runs-on: ubuntu-20.04
steps:
- name: Checkout sources
uses: actions/checkout@v3
@ -62,7 +66,9 @@ jobs:
./.github/scripts/fuzz.sh reth-codecs
lint:
runs-on: ubuntu-latest
# Pin to `20.04` instead of `ubuntu-latest`, until ubuntu-latest migration is complete
# See also <https://github.com/foundry-rs/foundry/issues/3827>
runs-on: ubuntu-20.04
steps:
- name: Checkout sources
uses: actions/checkout@v3
@ -87,8 +93,9 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
coverage:
runs-on: ubuntu-latest
# nightly rust might break from time to time
# Pin to `20.04` instead of `ubuntu-latest`, until ubuntu-latest migration is complete
# See also <https://github.com/foundry-rs/foundry/issues/3827>
runs-on: ubuntu-20.04
continue-on-error: true
steps:
- uses: actions/checkout@v3

View File

@ -2,7 +2,7 @@
use crate::{verification, Config};
use reth_interfaces::consensus::{Consensus, Error, ForkchoiceState};
use reth_primitives::{BlockLocked, SealedHeader, H256};
use reth_primitives::{BlockLocked, BlockNumber, SealedHeader, H256};
use tokio::sync::watch;
/// Ethereum consensus
@ -47,4 +47,8 @@ impl Consensus for EthConsensus {
fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error> {
verification::validate_block_standalone(block)
}
fn has_block_reward(&self, block_num: BlockNumber) -> bool {
block_num <= self.config.paris_hard_fork_block
}
}

View File

@ -42,6 +42,11 @@ pub struct SpecUpgrades {
}
impl SpecUpgrades {
/// After the Merge (Paris), the block reward was removed from the execution layer.
pub fn has_block_reward(&self, block_num: BlockNumber) -> bool {
block_num <= self.paris
}
/// Ethereum mainnet spec
pub fn new_ethereum() -> Self {
Self {
@ -65,6 +70,51 @@ impl SpecUpgrades {
}
}
/// New homestead enabled spec
pub fn new_test_homestead() -> Self {
Self { homestead: 0, ..Self::new_ethereum() }
}
/// New tangerine enabled spec
pub fn new_test_tangerine_whistle() -> Self {
Self { tangerine_whistle: 0, ..Self::new_test_homestead() }
}
/// New spurious_dragon enabled spec
pub fn new_test_spurious_dragon() -> Self {
Self { spurious_dragon: 0, ..Self::new_test_tangerine_whistle() }
}
/// New byzantium enabled spec
pub fn new_test_byzantium() -> Self {
Self { byzantium: 0, ..Self::new_test_spurious_dragon() }
}
/// New petersburg enabled spec
pub fn new_test_petersburg() -> Self {
Self { petersburg: 0, ..Self::new_test_byzantium() }
}
/// New istanbul enabled spec
pub fn new_test_istanbul() -> Self {
Self { istanbul: 0, ..Self::new_test_petersburg() }
}
/// New berlin enabled spec
pub fn new_test_berlin() -> Self {
Self { berlin: 0, ..Self::new_test_istanbul() }
}
/// New london enabled spec
pub fn new_test_london() -> Self {
Self { london: 0, ..Self::new_test_berlin() }
}
/// New paris enabled spec
pub fn new_test_paris() -> Self {
Self { paris: 0, ..Self::new_test_london() }
}
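A quick illustration (not part of this diff) of how `has_block_reward` interacts with the test specs above, assuming the `block_num <= paris` comparison shown earlier:
fn example_reward_flag() {
    // With `new_test_paris()` the Paris activation block is 0.
    let spec = SpecUpgrades::new_test_paris();
    assert!(spec.has_block_reward(0)); // block_num <= paris: the transition block is still flagged
    assert!(!spec.has_block_reward(1)); // later blocks carry no block reward
}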
/// return revm_spec from spec configuration.
pub fn revm_spec(&self, for_block: BlockNumber) -> revm::SpecId {
match for_block {

View File

@ -3,14 +3,17 @@ use crate::{
Config,
};
use hashbrown::hash_map::Entry;
use reth_interfaces::{executor::Error, provider::StateProvider};
use reth_interfaces::{
db::{models::AccountBeforeTx, tables, DbTxMut, Error as DbError},
executor::Error,
provider::StateProvider,
};
use reth_primitives::{
bloom::logs_bloom, Account, Address, Bloom, Header, Log, Receipt, TransactionSignedEcRecovered,
H256, U256,
};
use revm::{
db::AccountState, Account as RevmAccount, AccountInfo, AnalysisKind, Bytecode, ExecutionResult,
EVM,
db::AccountState, Account as RevmAccount, AccountInfo, AnalysisKind, Bytecode, Database, EVM,
};
use std::collections::BTreeMap;
@ -46,6 +49,46 @@ pub enum AccountInfoChangeSet {
NoChange,
}
impl AccountInfoChangeSet {
/// Apply the account changeset to the db transaction.
pub fn apply_to_db<'a, TX: DbTxMut<'a>>(
self,
address: Address,
tx_index: u64,
tx: &TX,
) -> Result<(), DbError> {
match self {
AccountInfoChangeSet::Changed { old, new } => {
// insert the old account into AccountChangeSet
// (the old != new check was already done)
tx.put::<tables::AccountChangeSet>(
tx_index,
AccountBeforeTx { address, info: Some(old) },
)?;
tx.put::<tables::PlainAccountState>(address, new)?;
}
AccountInfoChangeSet::Created { new } => {
tx.put::<tables::AccountChangeSet>(
tx_index,
AccountBeforeTx { address, info: None },
)?;
tx.put::<tables::PlainAccountState>(address, new)?;
}
AccountInfoChangeSet::Destroyed { old } => {
tx.delete::<tables::PlainAccountState>(address, None)?;
tx.put::<tables::AccountChangeSet>(
tx_index,
AccountBeforeTx { address, info: Some(old) },
)?;
}
AccountInfoChangeSet::NoChange => {
// do nothing, the account did not change
}
}
Ok(())
}
}
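For context, a hedged sketch of how a caller is expected to apply a whole per-transaction diff with `apply_to_db`; `apply_state_diff` is an assumed helper name, and the real call sites are in the execution stage further down in this commit:
fn apply_state_diff<'a, TX: DbTxMut<'a>>(
    tx_index: u64,
    state_diff: BTreeMap<Address, AccountInfoChangeSet>,
    db_tx: &TX,
) -> Result<(), DbError> {
    // Each account-level changeset knows how to write itself into the
    // AccountChangeSet and PlainAccountState tables for the given tx index.
    for (address, changeset) in state_diff {
        changeset.apply_to_db(address, tx_index, db_tx)?;
    }
    Ok(())
}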
/// Diff changeset that is needed for creating the history index and updating the current world state.
#[derive(Debug, Clone)]
pub struct AccountChangeSet {
@ -60,6 +103,15 @@ pub struct AccountChangeSet {
pub wipe_storage: bool,
}
/// Execution result containing a vector of transaction changesets
/// and the block reward changeset, if present.
pub struct ExecutionResult {
/// Transaction changesets containing the [Receipt], changed [Accounts][Account] and storages.
pub changeset: Vec<TransactionChangeSet>,
/// Block reward changeset for the beneficiary, present only while the block reward is still active (pre-Paris).
pub block_reward: Option<BTreeMap<Address, AccountInfoChangeSet>>,
}
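A minimal, illustrative consumer of the new struct (assumed helper, not from this commit), showing that the per-transaction changesets and the optional block reward map are handled separately:
fn summarize(result: &ExecutionResult) {
    println!("{} transaction changesets", result.changeset.len());
    if let Some(reward) = &result.block_reward {
        println!("block reward changeset touches {} account(s)", reward.len());
    }
}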
/// Commit changes to the database and return the change diff that is used to update state and create
/// history index
///
@ -81,7 +133,7 @@ pub fn commit_changes<DB: StateProvider>(
let db_account = match db.accounts.entry(address) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(_entry) => {
panic!("Left panic to critically jumpout if happens as this should not hapen.");
panic!("Left panic to critically jumpout if happens, as every account shound be hot loaded.");
}
};
// Insert into `change` a old account and None for new account
@ -178,11 +230,11 @@ pub fn commit_changes<DB: StateProvider>(
}
/// After a transaction is executed this structure contains
/// every change to state that this transaction made and its old values
/// so that history account table can be updated. Receipts and new bytecodes
/// from created bytecodes.
/// the transaction [Receipt], every change to state ([Account], Storage, [Bytecode])
/// that this transaction made, and the old values,
/// so that the account history table can be updated.
#[derive(Debug, Clone)]
pub struct TransactionStatePatch {
pub struct TransactionChangeSet {
/// Transaction receipt
pub receipt: Receipt,
/// State change that this transaction made on state.
@ -197,13 +249,13 @@ pub fn execute_and_verify_receipt<DB: StateProvider>(
transactions: &[TransactionSignedEcRecovered],
config: &Config,
db: &mut SubState<DB>,
) -> Result<Vec<TransactionStatePatch>, Error> {
let transaction_patches = execute(header, transactions, config, db)?;
) -> Result<ExecutionResult, Error> {
let transaction_change_set = execute(header, transactions, config, db)?;
let receipts_iter = transaction_patches.iter().map(|patch| &patch.receipt);
let receipts_iter = transaction_change_set.changeset.iter().map(|changeset| &changeset.receipt);
verify_receipt(header.receipts_root, header.logs_bloom, receipts_iter)?;
Ok(transaction_patches)
Ok(transaction_change_set)
}
/// Verify receipts
@ -230,13 +282,15 @@ pub fn verify_receipt<'a>(
}
/// Verify block. Execute all transaction and compare results.
/// Return diff is on transaction granularity. We are returning vector of
/// Returns changesets on transaction granularity.
/// NOTE: If the block reward is still active (before Paris/the Merge) we also return an
/// additional changeset for the account that receives the reward.
pub fn execute<DB: StateProvider>(
header: &Header,
transactions: &[TransactionSignedEcRecovered],
config: &Config,
db: &mut SubState<DB>,
) -> Result<Vec<TransactionStatePatch>, Error> {
) -> Result<ExecutionResult, Error> {
let mut evm = EVM::new();
evm.database(db);
@ -248,7 +302,7 @@ pub fn execute<DB: StateProvider>(
revm_wrap::fill_block_env(&mut evm.env.block, header);
let mut cumulative_gas_used = 0;
// output of verification
let mut transaction_patch = Vec::with_capacity(transactions.len());
let mut changeset = Vec::with_capacity(transactions.len());
for transaction in transactions.iter() {
// The sum of the transactions gas limit, Tg, and the gas utilised in this block prior,
@ -265,7 +319,7 @@ pub fn execute<DB: StateProvider>(
revm_wrap::fill_tx_env(&mut evm.env.tx, transaction);
// Execute transaction.
let (ExecutionResult { exit_reason, gas_used, logs, .. }, state) = evm.transact();
let (revm::ExecutionResult { exit_reason, gas_used, logs, .. }, state) = evm.transact();
// Fatal internal error.
if exit_reason == revm::Return::FatalExternalError {
@ -295,8 +349,8 @@ pub fn execute<DB: StateProvider>(
// commit state
let (state_diff, new_bytecodes) = commit_changes(evm.db().unwrap(), state);
// Push transaction patch and calculte header bloom filter for receipt.
transaction_patch.push(TransactionStatePatch {
// Push the transaction changeset and calculate the header bloom filter for the receipt.
changeset.push(TransactionChangeSet {
receipt: Receipt {
tx_type: transaction.tx_type(),
success: is_success,
@ -314,10 +368,38 @@ pub fn execute<DB: StateProvider>(
return Err(Error::BlockGasUsed { got: cumulative_gas_used, expected: header.gas_used })
}
// TODO add validator block reward. Currently not added.
// https://github.com/paradigmxyz/reth/issues/237
// it is okay to unwrap the db. It is set at the start of the function.
let beneficiary =
evm.db.unwrap().basic(header.beneficiary).map_err(|_| Error::ProviderError)?;
Ok(transaction_patch)
// NOTE: This reward schedule is specific to Ethereum; for other networks it is probably going
// to be moved to the config.
let block_reward = match header.number {
n if n >= config.spec_upgrades.paris => None,
n if n >= config.spec_upgrades.petersburg => Some(0x1bc16d674ec80000u128),
n if n >= config.spec_upgrades.byzantium => Some(0x29a2241af62c0000u128),
_ => Some(0x4563918244f40000u128),
}
.map(|reward| {
// add block reward to beneficiary/miner
if let Some(beneficiary) = beneficiary {
// if the account is present, append a `Changed` changeset for the block reward
let old = to_reth_acc(&beneficiary);
let mut new = old;
new.balance += U256::from(reward);
BTreeMap::from([(header.beneficiary, AccountInfoChangeSet::Changed { new, old })])
} else {
// if the account is not present, append a `Created` changeset
BTreeMap::from([(
header.beneficiary,
AccountInfoChangeSet::Created {
new: Account { nonce: 0, balance: reward.into(), bytecode_hash: None },
},
)])
}
});
Ok(ExecutionResult { changeset, block_reward })
}
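As a sanity check on the hard-coded schedule above (illustrative only; these are the well-known pre-merge rewards expressed in wei), something like this could live in the test module below:
#[test]
fn block_reward_constants_in_wei() {
    assert_eq!(0x4563918244f40000u128, 5_000_000_000_000_000_000); // Frontier: 5 ETH
    assert_eq!(0x29a2241af62c0000u128, 3_000_000_000_000_000_000); // Byzantium: 3 ETH
    assert_eq!(0x1bc16d674ec80000u128, 2_000_000_000_000_000_000); // Constantinople/Petersburg: 2 ETH
}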
#[cfg(test)]
@ -325,7 +407,7 @@ mod tests {
use std::collections::HashMap;
use crate::revm_wrap::State;
use crate::{config::SpecUpgrades, revm_wrap::State};
use reth_interfaces::provider::{AccountProvider, StateProvider};
use reth_primitives::{
hex_literal::hex, keccak256, Account, Address, BlockLocked, Bytes, StorageKey, H160, H256,
@ -417,7 +499,7 @@ mod tests {
let mut config = Config::new_ethereum();
// make it berlin fork
config.spec_upgrades.berlin = 0;
config.spec_upgrades = SpecUpgrades::new_test_berlin();
let mut db = SubState::new(State::new(db));
let transactions: Vec<TransactionSignedEcRecovered> =
@ -427,17 +509,17 @@ mod tests {
let out =
execute_and_verify_receipt(&block.header, &transactions, &config, &mut db).unwrap();
assert_eq!(out.len(), 1, "Should executed one transaction");
assert_eq!(out.changeset.len(), 1, "Should have executed one transaction");
let patch = out[0].clone();
assert_eq!(patch.new_bytecodes.len(), 0, "Should have zero new bytecodes");
let changeset = out.changeset[0].clone();
assert_eq!(changeset.new_bytecodes.len(), 0, "Should have zero new bytecodes");
let account1 = H160(hex!("1000000000000000000000000000000000000000"));
let _account1_info = Account { balance: 0x00.into(), nonce: 0x00, bytecode_hash: None };
let account2 = H160(hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"));
let account2_info = Account {
// TODO remove 2eth block reward
balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(),
balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(), /* decrease for
* block reward */
nonce: 0x00,
bytecode_hash: None,
};
@ -446,25 +528,37 @@ mod tests {
Account { balance: 0x3635c9adc5de996b46u128.into(), nonce: 0x01, bytecode_hash: None };
assert_eq!(
patch.state_diff.get(&account1).unwrap().account,
changeset.state_diff.get(&account1).unwrap().account,
AccountInfoChangeSet::NoChange,
"No change to account"
);
assert_eq!(
patch.state_diff.get(&account2).unwrap().account,
changeset.state_diff.get(&account2).unwrap().account,
AccountInfoChangeSet::Created { new: account2_info },
"New acccount"
);
assert_eq!(
patch.state_diff.get(&account3).unwrap().account,
changeset.state_diff.get(&account3).unwrap().account,
AccountInfoChangeSet::Changed { old: account3_old_info, new: account3_info },
"Change to account state"
);
assert_eq!(patch.new_bytecodes.len(), 0, "No new bytecodes");
// check the block reward changeset
let mut block_rewarded_acc_info = account2_info;
// add the block's 2 ETH reward
block_rewarded_acc_info.balance += 0x1bc16d674ec80000u128.into();
assert_eq!(
out.block_reward,
Some(BTreeMap::from([(
account2,
AccountInfoChangeSet::Changed { new: block_rewarded_acc_info, old: account2_info }
)]))
);
assert_eq!(changeset.new_bytecodes.len(), 0, "No new bytecodes");
// check storage
let storage = &patch.state_diff.get(&account1).unwrap().storage;
let storage = &changeset.state_diff.get(&account1).unwrap().storage;
assert_eq!(storage.len(), 1, "Only one storage change");
assert_eq!(
storage.get(&1.into()),

View File

@ -25,6 +25,13 @@ pub trait Consensus: Send + Sync {
///
/// **This should not be called for the genesis block**.
fn pre_validate_block(&self, block: &BlockLocked) -> Result<(), Error>;
/// After the Merge (aka Paris) block rewards became obsolete.
/// This flag is needed because reth changesets are indexed at transaction granularity
/// (one changeset per transaction), so we introduce one additional index for the block
/// reward. In essence this introduces gaps in the [Transaction] table.
/// More on it [here](https://github.com/foundry-rs/reth/issues/237)
fn has_block_reward(&self, block_num: BlockNumber) -> bool;
}
/// Consensus Errors

View File

@ -21,7 +21,7 @@ pub struct AccountBeforeTx {
/// Address for the account. Acts as `DupSort::SubKey`.
pub address: Address,
/// Account state before the transaction.
pub info: Account,
pub info: Option<Account>,
}
/// [`TxNumber`] concatenated with [`Address`]. Used as a key for [`StorageChangeSet`]

View File

@ -38,4 +38,6 @@ pub enum Error {
},
#[error("Block gas used {got} is different from expected gas used {expected}.")]
BlockGasUsed { got: u64, expected: u64 },
#[error("Provider error")]
ProviderError,
}

View File

@ -132,6 +132,7 @@ pub fn get_cumulative_tx_count_by_hash<'a, TX: DbTxMut<'a> + DbTx<'a>>(
pub fn insert_canonical_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx: &TX,
block: &BlockLocked,
has_block_reward: bool,
) -> Result<()> {
let block_num_hash = BlockNumHash((block.number, block.hash()));
tx.put::<tables::CanonicalHeaders>(block.number, block.hash())?;
@ -139,24 +140,28 @@ pub fn insert_canonical_block<'a, TX: DbTxMut<'a> + DbTx<'a>>(
tx.put::<tables::Headers>(block_num_hash, block.header.as_ref().clone())?;
tx.put::<tables::HeaderNumbers>(block.hash(), block.number)?;
let start_tx_number =
if block.number == 0 { 0 } else { get_cumulative_tx_count_by_hash(tx, block.parent_hash)? };
// insert body ommers data
tx.put::<tables::BlockOmmers>(
block_num_hash,
StoredBlockOmmers { ommers: block.ommers.iter().map(|h| h.as_ref().clone()).collect() },
)?;
let mut tx_number = start_tx_number;
for eth_tx in block.body.iter() {
let rec_tx = eth_tx.clone().into_ecrecovered().unwrap();
tx.put::<tables::TxSenders>(tx_number, rec_tx.signer())?;
tx.put::<tables::Transactions>(tx_number, rec_tx.as_ref().clone())?;
tx_number += 1;
}
if block.number == 0 {
tx.put::<tables::CumulativeTxCount>(block_num_hash, 0)?;
} else {
let mut tx_number = get_cumulative_tx_count_by_hash(tx, block.parent_hash)?;
tx.put::<tables::CumulativeTxCount>(block_num_hash, tx_number)?;
for eth_tx in block.body.iter() {
let rec_tx = eth_tx.clone().into_ecrecovered().unwrap();
tx.put::<tables::TxSenders>(tx_number, rec_tx.signer())?;
tx.put::<tables::Transactions>(tx_number, rec_tx.as_ref().clone())?;
tx_number += 1;
}
tx.put::<tables::CumulativeTxCount>(
block_num_hash,
tx_number + if has_block_reward { 1 } else { 0 },
)?;
}
Ok(())
}
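To make the numbering concrete (a hedged example based on the +1 above, not code from this commit): for a pre-merge block with three transactions whose parent cumulative count is 10, the transactions occupy tx numbers 10, 11 and 12, number 13 is reserved for the block reward changeset, and CumulativeTxCount is stored as 14.
fn cumulative_tx_count(parent_cumulative: u64, body_len: u64, has_block_reward: bool) -> u64 {
    // Mirrors the arithmetic in insert_canonical_block above (assumed helper).
    parent_cumulative + body_len + if has_block_reward { 1 } else { 0 }
}

fn example() {
    assert_eq!(cumulative_tx_count(10, 3, true), 14);  // the reward changeset sits at index 13 (count - 1)
    assert_eq!(cumulative_tx_count(10, 3, false), 13); // no reserved index once the reward is gone
}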

View File

@ -13,7 +13,9 @@ use crate::{
};
use futures::{Future, FutureExt, Stream};
use reth_eth_wire::BlockHeaders;
use reth_primitives::{BlockLocked, Header, HeadersDirection, SealedHeader, H256, U256};
use reth_primitives::{
BlockLocked, BlockNumber, Header, HeadersDirection, SealedHeader, H256, U256,
};
use reth_rpc_types::engine::ForkchoiceState;
use std::{
pin::Pin,
@ -291,4 +293,7 @@ impl Consensus for TestConsensus {
Ok(())
}
}
fn has_block_reward(&self, _block_num: BlockNumber) -> bool {
true
}
}

View File

@ -137,7 +137,13 @@ impl<DB: Database, D: BodyDownloader, C: Consensus> Stage<DB> for BodyStage<D, C
// Write block
let key = (block_number, header_hash).into();
tx_count_cursor.append(key, first_tx_id + block.body.len() as u64)?;
// The additional +1 increments the tx count to reserve an index for the ChangeSet that contains
// the block reward. It can't be folded into the last transaction's ChangeSet as that would
// break if the block is empty.
let this_tx_count = first_tx_id +
block.body.len() as u64 +
if self.consensus.has_block_reward(block_number) { 1 } else { 0 };
tx_count_cursor.append(key, this_tx_count)?;
ommers_cursor.append(
key,
StoredBlockOmmers {
@ -152,6 +158,7 @@ impl<DB: Database, D: BodyDownloader, C: Consensus> Stage<DB> for BodyStage<D, C
}
highest_block = block_number;
first_tx_id = this_tx_count;
}
// The stage is "done" if:
@ -189,6 +196,10 @@ impl<DB: Database, D: BodyDownloader, C: Consensus> Stage<DB> for BodyStage<D, C
let prev_count = entry.map(|(_, v)| v).unwrap_or_default();
for tx_id in prev_count..count {
// The block reward introduces gaps in the transaction table (the last tx number can be the gap),
// which is why we check whether the tx exists or not.
// NOTE: a more performant way is probably to use the `prev`/`next` fns and reduce the
// count by one if the block has a block reward.
if transaction_cursor.seek_exact(tx_id)?.is_some() {
transaction_cursor.delete_current()?;
}
@ -568,7 +579,8 @@ mod tests {
.last()?
.map(|(_, v)| v)
.unwrap_or_default();
let tx_count = last_count + progress.body.len() as u64;
// +1 for the block reward
let tx_count = last_count + progress.body.len() as u64 + 1;
tx.put::<tables::CumulativeTxCount>(key, tx_count)?;
tx.put::<tables::BlockOmmers>(key, StoredBlockOmmers { ommers: vec![] })?;
(last_count..tx_count).try_for_each(|idx| {
@ -620,9 +632,8 @@ mod tests {
self.db.insert_headers(std::iter::once(&header))?;
self.db.commit(|tx| {
let key = (0, GENESIS_HASH).into();
tx.put::<tables::CumulativeTxCount>(key, 1)?;
tx.put::<tables::BlockOmmers>(key, StoredBlockOmmers { ommers: vec![] })?;
tx.put::<tables::Transactions>(0, random_signed_tx())
tx.put::<tables::CumulativeTxCount>(key, 0)?;
tx.put::<tables::BlockOmmers>(key, StoredBlockOmmers { ommers: vec![] })
})?;
Ok(())
@ -678,7 +689,9 @@ mod tests {
// Validate that the block transactions exist
let first_tx_id = prev_entry.map(|(_, v)| v).unwrap_or_default();
for tx_id in first_tx_id..count {
// reduce by one for block_reward index
let tx_count = if count == 0 { 0 } else { count - 1 };
for tx_id in first_tx_id..tx_count {
assert_matches!(
transaction_cursor.seek_exact(tx_id),
Ok(Some(_)),

View File

@ -4,13 +4,13 @@ use crate::{
};
use reth_executor::{
config::SpecUpgrades,
executor::{AccountChangeSet, AccountInfoChangeSet},
executor::AccountChangeSet,
revm_wrap::{State, SubState},
Config,
};
use reth_interfaces::{
db::{
models::{AccountBeforeTx, BlockNumHash, TxNumberAddress},
models::{BlockNumHash, TxNumberAddress},
tables, Database, DbCursorRO, DbCursorRW, DbDupCursorRO, DbTx, DbTxMut,
},
provider::db::StateProviderImplRefLatest,
@ -48,7 +48,15 @@ const EXECUTION: StageId = StageId("Execution");
/// [tables::PlainAccountState] [tables::StorageHistory] to remove change set and apply old values
/// to [tables::PlainStorageState]
#[derive(Debug)]
pub struct ExecutionStage;
pub struct ExecutionStage {
config: Config,
}
impl Default for ExecutionStage {
fn default() -> Self {
Self { config: Config { chain_id: 1.into(), spec_upgrades: SpecUpgrades::new_ethereum() } }
}
}
/// Specify the batch size of blocks in execution
/// TODO: make this configurable
@ -115,7 +123,7 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
.collect::<Result<Vec<_>, _>>()?;
// get the last tx count so that we know the number of transactions in the block.
let mut last_tx_count = if last_block == 0 {
let mut last_tx_index = if last_block == 0 {
0u64
} else {
// headers_batch is not empty,
@ -130,7 +138,7 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
tx_cnt
};
let cumulative_tx_count_batch = canonical_batch
let tx_index_ranges = canonical_batch
.iter()
.map(|ch_index| {
// TODO see if walker next has better performance then seek_exact calls.
@ -147,8 +155,16 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
})
})
.map(|(_, cumulative_tx_count)| {
let ret = (last_tx_count, cumulative_tx_count);
last_tx_count = cumulative_tx_count;
let ret = if self.config.spec_upgrades.has_block_reward(ch_index.number()) {
// if there is a block reward, the cumulative tx count needs to exclude the block
// reward index. It is okay to subtract it, as the
// block reward index is calculated in the block stage.
(last_tx_index, cumulative_tx_count - 1, Some(cumulative_tx_count - 1))
} else {
// if there is no block reward we just need to use tx_count
(last_tx_index, cumulative_tx_count, None)
};
last_tx_index = cumulative_tx_count;
ret
})
})
@ -156,15 +172,15 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
// Fetch transactions, execute them and generate results
let mut block_change_patches = Vec::with_capacity(canonical_batch.len());
for (header, body_range) in headers_batch.iter().zip(cumulative_tx_count_batch.iter()) {
let start_tx_index = body_range.0;
let end_tx_index = body_range.1;
for (header, (start_tx_index, end_tx_index, block_reward_index)) in
headers_batch.iter().zip(tx_index_ranges.iter())
{
let body_tx_cnt = end_tx_index - start_tx_index;
// iterate over all transactions
let mut tx_walker = tx.walk(start_tx_index)?;
let mut tx_walker = tx.walk(*start_tx_index)?;
let mut transactions = Vec::with_capacity(body_tx_cnt as usize);
// get next N transactions.
for index in start_tx_index..end_tx_index {
for index in *start_tx_index..*end_tx_index {
let (tx_index, tx) =
tx_walker.next().ok_or(DatabaseIntegrityError::EndOfTransactionTable)??;
if tx_index != index {
@ -174,9 +190,9 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
}
// take signers
let mut tx_sender_walker = tx_sender.walk(start_tx_index)?;
let mut tx_sender_walker = tx_sender.walk(*start_tx_index)?;
let mut signers = Vec::with_capacity(body_tx_cnt as usize);
for index in start_tx_index..end_tx_index {
for index in *start_tx_index..*end_tx_index {
let (tx_index, tx) = tx_sender_walker
.next()
.ok_or(DatabaseIntegrityError::EndOfTransactionSenderTable)??;
@ -197,7 +213,6 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
.collect();
// for now use default eth config
let config = Config { chain_id: 1.into(), spec_upgrades: SpecUpgrades::new_ethereum() };
let mut state_provider =
SubState::new(State::new(StateProviderImplRefLatest::new(db_tx)));
@ -207,48 +222,26 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
reth_executor::executor::execute_and_verify_receipt(
header,
&recovered_transactions,
&config,
&self.config,
&mut state_provider,
)
.map_err(|error| StageError::ExecutionError { block: header.number, error })?,
start_tx_index,
block_reward_index,
));
}
// apply changes to plain database.
for (results, start_tx_index) in block_change_patches.into_iter() {
for (index, result) in results.into_iter().enumerate() {
for (results, start_tx_index, block_reward_index) in block_change_patches.into_iter() {
// insert state change set
for (index, result) in results.changeset.into_iter().enumerate() {
let tx_index = start_tx_index + index as u64;
// insert account change set
for (address, AccountChangeSet { account, wipe_storage, storage }) in
result.state_diff.into_iter()
{
match account {
AccountInfoChangeSet::Changed { old, new } => {
// insert old account in AccountChangeSet
// check for old != new was already done
db_tx.put::<tables::AccountChangeSet>(
tx_index,
AccountBeforeTx { address, info: old },
)?;
db_tx.put::<tables::PlainAccountState>(address, new)?;
}
AccountInfoChangeSet::Created { new } => {
// TODO put None accounts inside changeset when `AccountBeforeTx` get
// fixed
db_tx.put::<tables::PlainAccountState>(address, new)?;
}
AccountInfoChangeSet::Destroyed { old } => {
db_tx.delete::<tables::PlainAccountState>(address, None)?;
db_tx.put::<tables::AccountChangeSet>(
tx_index,
AccountBeforeTx { address, info: old },
)?;
}
AccountInfoChangeSet::NoChange => {
// do nothing storage account didn't change
}
}
// apply account change to db. Updates AccountChangeSet and PlainAccountState
// tables.
account.apply_to_db(address, tx_index, db_tx)?;
// wipe storage
if wipe_storage {
@ -293,6 +286,15 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
// table
}
}
// If there is a block reward we add the account changeset to the db
if let Some(block_reward_changeset) = results.block_reward {
// we are sure that the block reward index is present.
let block_reward_index = block_reward_index.unwrap();
for (address, changeset) in block_reward_changeset.into_iter() {
changeset.apply_to_db(address, block_reward_index, db_tx)?;
}
}
}
let last_block = last_block + canonical_batch.len() as u64;
@ -362,22 +364,24 @@ impl<DB: Database> Stage<DB> for ExecutionStage {
// Check if walk and walk_dup would do the same thing
let account_changeset_batch = account_changeset
.walk_dup(to_tx_number, Address::zero())?
.take(num_of_tx)
.take_while(|item| item.as_ref().map_or(false, |(num, _)| *num <= from_tx_number))
.collect::<Result<Vec<_>, _>>()?;
// revert all changes to PlainState
for (_, changeset) in account_changeset_batch.into_iter().rev() {
db_tx.put::<tables::PlainAccountState>(changeset.address, changeset.info)?;
// TODO remove account if none when `AccountBeforeTx` get fixed
// if account.is_none() {
// delete account from plain state. Storage will be cleaned it is own way.
//}
if let Some(account_info) = changeset.info {
db_tx.put::<tables::PlainAccountState>(changeset.address, account_info)?;
} else {
db_tx.delete::<tables::PlainAccountState>(changeset.address, None)?;
}
}
// get all batches for account change
// get all batches for storage change
let storage_chageset_batch = storage_changeset
.walk_dup(TxNumberAddress((to_tx_number, Address::zero())), H256::zero())?
.take(num_of_tx)
.take_while(|item| {
item.as_ref().map_or(false, |(TxNumberAddress((num, _)), _)| *num <= from_tx_number)
})
.collect::<Result<Vec<_>, _>>()?;
// revert all changes to PlainStorage
@ -440,8 +444,8 @@ mod tests {
let genesis = BlockLocked::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = BlockLocked::decode(&mut block_rlp).unwrap();
insert_canonical_block(db.deref_mut(), &genesis).unwrap();
insert_canonical_block(db.deref_mut(), &block).unwrap();
insert_canonical_block(db.deref_mut(), &genesis, true).unwrap();
insert_canonical_block(db.deref_mut(), &block, true).unwrap();
db.commit().unwrap();
// insert pre state
@ -465,7 +469,9 @@ mod tests {
db.commit().unwrap();
// execute
let output = ExecutionStage.execute(&mut db, input).await.unwrap();
let mut execution_stage = ExecutionStage::default();
execution_stage.config.spec_upgrades = SpecUpgrades::new_test_berlin();
let output = execution_stage.execute(&mut db, input).await.unwrap();
db.commit().unwrap();
assert_eq!(output, ExecOutput { stage_progress: 1, done: true, reached_tip: true });
let tx = db.deref_mut();
@ -474,12 +480,8 @@ mod tests {
let account1_info =
Account { balance: 0x00.into(), nonce: 0x00, bytecode_hash: Some(code_hash) };
let account2 = H160(hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"));
let account2_info = Account {
// TODO remove 2eth block reward
balance: (0x1bc16d674ece94bau128 - 0x1bc16d674ec80000u128).into(),
nonce: 0x00,
bytecode_hash: None,
};
let account2_info =
Account { balance: (0x1bc16d674ece94bau128).into(), nonce: 0x00, bytecode_hash: None };
let account3 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b"));
let account3_info =
Account { balance: 0x3635c9adc5de996b46u128.into(), nonce: 0x01, bytecode_hash: None };
@ -525,8 +527,8 @@ mod tests {
let genesis = BlockLocked::decode(&mut genesis_rlp).unwrap();
let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice();
let block = BlockLocked::decode(&mut block_rlp).unwrap();
insert_canonical_block(db.deref_mut(), &genesis).unwrap();
insert_canonical_block(db.deref_mut(), &block).unwrap();
insert_canonical_block(db.deref_mut(), &genesis, true).unwrap();
insert_canonical_block(db.deref_mut(), &block, true).unwrap();
db.commit().unwrap();
// variables
@ -546,10 +548,13 @@ mod tests {
db.commit().unwrap();
// execute
let _ = ExecutionStage.execute(&mut db, input).await.unwrap();
let mut execution_stage = ExecutionStage::default();
execution_stage.config.spec_upgrades = SpecUpgrades::new_test_berlin();
let _ = execution_stage.execute(&mut db, input).await.unwrap();
db.commit().unwrap();
let o = ExecutionStage
let o = ExecutionStage::default()
.unwind(&mut db, UnwindInput { stage_progress: 1, unwind_to: 0, bad_block: None })
.await
.unwrap();
@ -569,12 +574,11 @@ mod tests {
"Post changed of a account"
);
// TODO check this after Option is added to tables::
// let acc3 = H160(hex!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b"));
// assert_eq!(
// tx.get::<tables::PlainAccountState>(acc3),
// Ok(Some(Account { balance: 0.into(), nonce: 0, bytecode_hash: Some(KECCAK_EMPTY) })),
// "Post changed of a account"
// );
let miner_acc = H160(hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"));
assert_eq!(
tx.get::<tables::PlainAccountState>(miner_acc),
Ok(None),
"Third account should be unwinded"
);
}
}