diff --git a/bins/revme/src/cmd.rs b/bins/revme/src/cmd.rs index 2a7e13ba36..cd26d9bca0 100644 --- a/bins/revme/src/cmd.rs +++ b/bins/revme/src/cmd.rs @@ -12,7 +12,7 @@ use clap::Parser; pub enum MainCmd { /// Execute Ethereum state tests. Statetest(statetest::Cmd), - /// Execute eof validation tests. + /// Execute EOF validation tests. EofValidation(eofvalidation::Cmd), /// Run arbitrary EVM bytecode. Evm(evmrunner::Cmd), diff --git a/bins/revme/src/cmd/bench.rs b/bins/revme/src/cmd/bench.rs index d0ac5bc393..aab98b9c0e 100644 --- a/bins/revme/src/cmd/bench.rs +++ b/bins/revme/src/cmd/bench.rs @@ -13,7 +13,7 @@ pub enum BenchName { Transfer, } -/// `bytecode` subcommand. +/// `bytecode` subcommand #[derive(Parser, Debug)] pub struct Cmd { #[arg(value_enum)] @@ -21,7 +21,7 @@ pub struct Cmd { } impl Cmd { - /// Run bench command. + /// Runs bench command. pub fn run(&self) { match self.name { BenchName::Analysis => analysis::run(), diff --git a/bins/revme/src/cmd/bench/analysis.rs b/bins/revme/src/cmd/bench/analysis.rs index ed5b541c37..f90f53ae5e 100644 --- a/bins/revme/src/cmd/bench/analysis.rs +++ b/bins/revme/src/cmd/bench/analysis.rs @@ -16,7 +16,7 @@ pub fn run() { let context = Context::builder() .with_db(BenchmarkDB::new_bytecode(bytecode)) .modify_tx_chained(|tx| { - // execution globals block hash/gas_limit/coinbase/timestamp.. + // Execution globals block hash/gas_limit/coinbase/timestamp.. tx.caller = address!("1000000000000000000000000000000000000000"); tx.transact_to = TxKind::Call(address!("0000000000000000000000000000000000000000")); //evm.env.tx.data = Bytes::from(hex::decode("30627b7c").unwrap()); diff --git a/bins/revme/src/cmd/bench/burntpix.rs b/bins/revme/src/cmd/bench/burntpix.rs index 88f9201a13..fa0483a2d3 100644 --- a/bins/revme/src/cmd/bench/burntpix.rs +++ b/bins/revme/src/cmd/bench/burntpix.rs @@ -60,11 +60,11 @@ pub fn run() { _ => unreachable!("Execution failed: {:?}", tx_result), }; - // remove returndata offset and length from output + // Remove returndata offset and length from output let returndata_offset = 64; let data = &return_data[returndata_offset..]; - // remove trailing zeros + // Remove trailing zeros let trimmed_data = data .split_at(data.len() - data.iter().rev().filter(|&x| *x == 0).count()) .0; diff --git a/bins/revme/src/cmd/bench/snailtracer.rs b/bins/revme/src/cmd/bench/snailtracer.rs index 89a0c621c3..de55b49b36 100644 --- a/bins/revme/src/cmd/bench/snailtracer.rs +++ b/bins/revme/src/cmd/bench/snailtracer.rs @@ -10,7 +10,7 @@ pub fn simple_example(bytecode: Bytecode) { let context = Context::builder() .with_db(BenchmarkDB::new_bytecode(bytecode.clone())) .modify_tx_chained(|tx| { - // execution globals block hash/gas_limit/coinbase/timestamp.. + // Execution globals block hash/gas_limit/coinbase/timestamp.. tx.caller = address!("1000000000000000000000000000000000000000"); tx.transact_to = TxKind::Call(address!("0000000000000000000000000000000000000000")); tx.data = bytes!("30627b7c"); diff --git a/bins/revme/src/cmd/bench/transfer.rs b/bins/revme/src/cmd/bench/transfer.rs index e5a50b9167..f36889b0bc 100644 --- a/bins/revme/src/cmd/bench/transfer.rs +++ b/bins/revme/src/cmd/bench/transfer.rs @@ -11,7 +11,7 @@ pub fn run() { let context = Context::builder() .with_db(BenchmarkDB::new_bytecode(Bytecode::new())) .modify_tx_chained(|tx| { - // execution globals block hash/gas_limit/coinbase/timestamp.. + // Execution globals block hash/gas_limit/coinbase/timestamp.. 
tx.caller = "0x0000000000000000000000000000000000000001" .parse() .unwrap(); diff --git a/bins/revme/src/cmd/bytecode.rs b/bins/revme/src/cmd/bytecode.rs index 62dfa91d56..352e3519de 100644 --- a/bins/revme/src/cmd/bytecode.rs +++ b/bins/revme/src/cmd/bytecode.rs @@ -15,9 +15,11 @@ pub struct Cmd { /// Is EOF code in RUNTIME mode. #[arg(long)] eof_runtime: bool, - /// Bytecode in hex format. If bytes start with 0xFE it will be interpreted as a EOF. - /// Otherwise, it will be interpreted as a EOF bytecode. - /// If not provided, it will operate in interactive EOF validation mode. + /// Bytecode in hex format string. + /// + /// - If bytes start with 0xFE it will be interpreted as a EOF. + /// - Otherwise, it will be interpreted as a EOF bytecode. + /// - If not provided, it will operate in interactive EOF validation mode. #[arg()] bytes: Option, } @@ -34,7 +36,7 @@ fn trim_decode(input: &str) -> Option { } impl Cmd { - /// Run statetest command. + /// Runs statetest command. pub fn run(&self) { let container_kind = if self.eof_initcode { Some(CodeType::ReturnContract) @@ -64,12 +66,12 @@ impl Cmd { return; } - // else run command in loop. + // Else run command in loop. loop { let mut input = String::new(); io::stdin().read_line(&mut input).expect("Input Error"); if input.len() == 1 { - // just a newline, so exit + // Just a newline, so exit return; } let Some(bytes) = trim_decode(&input) else { diff --git a/bins/revme/src/cmd/eofvalidation.rs b/bins/revme/src/cmd/eofvalidation.rs index 40d0696225..1e15439b94 100644 --- a/bins/revme/src/cmd/eofvalidation.rs +++ b/bins/revme/src/cmd/eofvalidation.rs @@ -8,18 +8,18 @@ use revm::bytecode::eof::{validate_raw_eof_inner, CodeType, EofError}; use std::collections::BTreeMap; use std::path::{Path, PathBuf}; -/// `eof-validation` subcommand. +/// `eof-validation` subcommand #[derive(Parser, Debug)] pub struct Cmd { - /// Input paths to EOF validation tests. + /// Input paths to EOF validation tests #[arg(required = true, num_args = 1..)] paths: Vec, } impl Cmd { - /// Run statetest command. + /// Runs statetest command. pub fn run(&self) -> Result<(), Error> { - // check if path exists. + // Check if path exists. for path in &self.paths { if !path.exists() { return Err(Error::Custom("The specified path does not exist")); @@ -31,7 +31,7 @@ impl Cmd { } fn skip_test(name: &str) -> bool { - // embedded containers rules changed + // Embedded containers rules changed if name.starts_with("EOF1_embedded_container") { return true; } @@ -39,7 +39,7 @@ fn skip_test(name: &str) -> bool { name, "EOF1_undefined_opcodes_186" | "" - // truncated data is only allowed in embedded containers + // Truncated data is only allowed in embedded containers | "validInvalid_48" | "validInvalid_1" | "EOF1_truncated_section_3" diff --git a/bins/revme/src/cmd/evmrunner.rs b/bins/revme/src/cmd/evmrunner.rs index ce1c67a903..cb7d776e8a 100644 --- a/bins/revme/src/cmd/evmrunner.rs +++ b/bins/revme/src/cmd/evmrunner.rs @@ -28,38 +28,40 @@ pub enum Errors { BytecodeDecodeError(#[from] BytecodeDecodeError), } -/// Evm runner command allows running arbitrary evm bytecode. -/// Bytecode can be provided from cli or from file with --path option. +/// Evm runner command allows running arbitrary evm bytecode +/// +/// Bytecode can be provided from cli or from file with `--path` option. #[derive(Parser, Debug)] pub struct Cmd { - /// Hex-encoded EVM bytecode to be executed. 
+ /// Hex-encoded EVM bytecode to be executed #[arg(required_unless_present = "path")] bytecode: Option, - /// Path to a file containing the hex-encoded EVM bytecode to be executed. + /// Path to a file containing the hex-encoded EVM bytecode to be executed + /// /// Overrides the positional `bytecode` argument. #[arg(long)] path: Option, - /// Run in benchmarking mode. + /// Whether to run in benchmarking mode #[arg(long)] bench: bool, - /// Hex-encoded input/calldata bytes. + /// Hex-encoded input/calldata bytes #[arg(long, default_value = "")] input: String, - /// Print the state. + /// Whether to print the state #[arg(long)] state: bool, - /// Print the trace. + /// Whether to print the trace #[arg(long)] trace: bool, } impl Cmd { - /// Run evm runner command. + /// Runs evm runner command. pub fn run(&self) -> Result<(), Errors> { const CALLER: Address = address!("0000000000000000000000000000000000000001"); let bytecode_str: Cow<'_, str> = if let Some(path) = &self.path { - // check if path exists. + // Check if path exists. if !path.exists() { return Err(Errors::PathNotExists); } @@ -80,7 +82,7 @@ impl Cmd { let nonce = db.basic(CALLER).unwrap().map_or(0, |account| account.nonce); // BenchmarkDB is dummy state that implements Database trait. - // the bytecode is deployed at zero address. + // The bytecode is deployed at zero address. let mut evm = MainEvm::new( Context::builder().with_db(db).modify_tx_chained(|tx| { tx.caller = CALLER; diff --git a/bins/revme/src/cmd/statetest.rs b/bins/revme/src/cmd/statetest.rs index a14419ebd4..06adc2ed90 100644 --- a/bins/revme/src/cmd/statetest.rs +++ b/bins/revme/src/cmd/statetest.rs @@ -8,33 +8,38 @@ use clap::Parser; use runner::{find_all_json_tests, run, TestError}; use std::path::PathBuf; -/// `statetest` subcommand. +/// `statetest` subcommand #[derive(Parser, Debug)] pub struct Cmd { - /// Path to folder or file containing the tests. If multiple paths are specified - /// they will be run in sequence. + /// Path to folder or file containing the tests + /// + /// If multiple paths are specified they will be run in sequence. /// /// Folders will be searched recursively for files with the extension `.json`. #[clap(required = true, num_args = 1..)] paths: Vec, - /// Run tests in a single thread. + /// Run tests in a single thread #[clap(short = 's', long)] single_thread: bool, - /// Output results in JSON format. + /// Output results in JSON format + /// /// It will stop second run of evm on failure. #[clap(long)] json: bool, - /// Output outcome in JSON format. If `--json` is true, this is implied. + /// Output outcome in JSON format + /// + /// If `--json` is true, this is implied. + /// /// It will stop second run of EVM on failure. #[clap(short = 'o', long)] json_outcome: bool, - /// Keep going after a test failure. + /// Keep going after a test failure #[clap(long, alias = "no-fail-fast")] keep_going: bool, } impl Cmd { - /// Run statetest command. + /// Runs `statetest` command. 
pub fn run(&self) -> Result<(), TestError> { for path in &self.paths { println!("\nRunning tests in {}...", path.display()); diff --git a/bins/revme/src/cmd/statetest/runner.rs b/bins/revme/src/cmd/statetest/runner.rs index 65a36b4b43..bfdc5c0ae3 100644 --- a/bins/revme/src/cmd/statetest/runner.rs +++ b/bins/revme/src/cmd/statetest/runner.rs @@ -86,15 +86,15 @@ fn skip_test(path: &Path) -> bool { matches!( name, - // funky test with `bigint 0x00` value in json :) not possible to happen on mainnet and require + // Funky test with `bigint 0x00` value in json :) not possible to happen on mainnet and require // custom json parser. https://github.com/ethereum/tests/issues/971 |"ValueOverflow.json"| "ValueOverflowParis.json" - // precompiles having storage is not possible + // Precompiles having storage is not possible | "RevertPrecompiledTouch_storage.json" | "RevertPrecompiledTouch.json" - // txbyte is of type 02 and we don't parse tx bytes for this test to fail. + // `txbyte` is of type 02 and we don't parse tx bytes for this test to fail. | "typeTwoBerlin.json" // Need to handle Test errors @@ -132,7 +132,7 @@ fn skip_test(path: &Path) -> bool { | "loopMul.json" | "CALLBlake2f_MaxRounds.json" - // evmone statetest + // `evmone` statetest | "initcode_transaction_before_prague.json" | "invalid_tx_non_existing_sender.json" | "tx_non_existing_sender.json" @@ -196,9 +196,9 @@ fn check_evm_execution( // Test where this happens: `tests/GeneralStateTests/stTransactionTest/NoSrcAccountCreate.json` // and you can check that we have only two "hash" values for before and after state clear. match (&test.expect_exception, exec_result) { - // do nothing + // Do nothing (None, Ok(result)) => { - // check output + // Check output if let Some((expected_output, output)) = expected_output.zip(result.output()) { if expected_output != output { let kind = TestErrorKind::UnexpectedOutput { @@ -210,7 +210,7 @@ fn check_evm_execution( } } } - // return okay, exception is expected. + // Return okay, exception is expected. (Some(_), Err(_)) => return Ok(()), _ => { let kind = TestErrorKind::UnexpectedException { @@ -282,17 +282,17 @@ pub fn execute_test_suite( let mut cfg = CfgEnv::default(); let mut block = BlockEnv::default(); let mut tx = TxEnv::default(); - // for mainnet + // For mainnet cfg.chain_id = 1; - // block env + // Block env block.number = unit.env.current_number; block.beneficiary = unit.env.current_coinbase; block.timestamp = unit.env.current_timestamp; block.gas_limit = unit.env.current_gas_limit; block.basefee = unit.env.current_base_fee.unwrap_or_default(); block.difficulty = unit.env.current_difficulty; - // after the Merge prevrandao replaces mix_hash field in block and replaced difficulty opcode in EVM. + // After the Merge prevrandao replaces mix_hash field in block and replaced difficulty opcode in EVM. block.prevrandao = unit.env.current_random; // EIP-4844 if let Some(current_excess_blob_gas) = unit.env.current_excess_blob_gas { @@ -307,7 +307,7 @@ pub fn execute_test_suite( )); } - // tx env + // Tx env tx.caller = if let Some(address) = unit.transaction.sender { address } else { @@ -327,7 +327,7 @@ pub fn execute_test_suite( tx.blob_hashes = unit.transaction.blob_versioned_hashes.clone(); tx.max_fee_per_blob_gas = unit.transaction.max_fee_per_blob_gas; - // post and execution + // Post and execution for (spec_name, tests) in unit.post { // Constantinople was immediately extended by Petersburg. 
// There isn't any production Constantinople transaction @@ -344,12 +344,12 @@ pub fn execute_test_suite( }; if cfg.spec.is_enabled_in(SpecId::MERGE) && block.prevrandao.is_none() { - // if spec is merge and prevrandao is not set, set it to default + // If spec is merge and prevrandao is not set, set it to default block.prevrandao = Some(B256::default()); } for (index, test) in tests.into_iter().enumerate() { - // TODO TX TYPE needs to be set + // TODO : TX TYPE needs to be set let Some(tx_type) = unit.transaction.tx_type(test.indexes.data) else { if test.expect_exception.is_some() { continue; @@ -413,7 +413,7 @@ pub fn execute_test_suite( EthHandler::default(), ); - // do the deed + // Do the deed let (e, exec_result) = if trace { let mut evm = InspectorMainEvm::new( InspectorContext::new( @@ -433,7 +433,7 @@ pub fn execute_test_suite( let spec = cfg.spec(); let db = evm.context.inner.journaled_state.database; - // dump state and traces if test failed + // Dump state and traces if test failed let output = check_evm_execution( &test, unit.out.as_ref(), @@ -454,7 +454,7 @@ pub fn execute_test_suite( let spec = cfg.spec(); let db = evm.context.journaled_state.database; - // dump state and traces if test failed + // Dump state and traces if test failed let output = check_evm_execution( &test, unit.out.as_ref(), @@ -470,7 +470,7 @@ pub fn execute_test_suite( (e, res) }; - // print only once or + // Print only once or // if we are already in trace mode, just return error static FAILED: AtomicBool = AtomicBool::new(false); if trace || FAILED.swap(true, Ordering::SeqCst) { @@ -481,7 +481,7 @@ pub fn execute_test_suite( }); } - // re build to run with tracing + // Re-build to run with tracing let mut cache = cache_state.clone(); cache.set_state_clear_flag(cfg.spec.is_enabled_in(SpecId::SPURIOUS_DRAGON)); let mut state = database::State::builder() @@ -540,11 +540,11 @@ pub fn run( mut print_outcome: bool, keep_going: bool, ) -> Result<(), TestError> { - // trace implies print_outcome + // Trace implies print_outcome if trace { print_outcome = true; } - // print_outcome or trace implies single_thread + // `print_outcome` or trace implies single_thread if print_outcome { single_thread = true; } diff --git a/bins/revme/src/cmd/statetest/utils.rs b/bins/revme/src/cmd/statetest/utils.rs index c06310aaf8..3ecc89594a 100644 --- a/bins/revme/src/cmd/statetest/utils.rs +++ b/bins/revme/src/cmd/statetest/utils.rs @@ -1,7 +1,7 @@ use k256::ecdsa::SigningKey; use revm::primitives::Address; -/// Recover the address from a private key (SigningKey). +/// Recover the address from a private key ([SigningKey]). pub fn recover_address(private_key: &[u8]) -> Option
{ let key = SigningKey::from_slice(private_key).ok()?; let public_key = key.verifying_key().to_encoded_point(false); diff --git a/crates/bytecode/src/bytecode.rs b/crates/bytecode/src/bytecode.rs index 235c64eeb4..a2df02d447 100644 --- a/crates/bytecode/src/bytecode.rs +++ b/crates/bytecode/src/bytecode.rs @@ -7,7 +7,7 @@ use core::fmt::Debug; use primitives::{keccak256, Address, Bytes, B256, KECCAK_EMPTY}; use std::sync::Arc; -/// State of the [`Bytecode`] analysis. +/// State of the [`Bytecode`] analysis #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Bytecode { @@ -22,19 +22,18 @@ pub enum Bytecode { impl Default for Bytecode { #[inline] fn default() -> Self { - // Creates a new legacy analyzed [`Bytecode`] with exactly one STOP opcode. Self::new() } } impl Bytecode { - // Creates a new legacy analyzed [`Bytecode`] with exactly one STOP opcode. + /// Creates a new legacy analyzed [`Bytecode`] with exactly one STOP opcode. #[inline] pub fn new() -> Self { Self::LegacyAnalyzed(LegacyAnalyzedBytecode::default()) } - /// Return jump table if bytecode is analyzed + /// Returns jump table if bytecode is analyzed. #[inline] pub fn legacy_jump_table(&self) -> Option<&JumpTable> { match &self { @@ -43,7 +42,7 @@ impl Bytecode { } } - /// Calculate hash of the bytecode. + /// Calculates hash of the bytecode. pub fn hash_slow(&self) -> B256 { if self.is_empty() { KECCAK_EMPTY @@ -52,7 +51,7 @@ impl Bytecode { } } - /// Return reference to the EOF if bytecode is EOF. + /// Returns reference to the EOF if bytecode is EOF. #[inline] pub const fn eof(&self) -> Option<&Arc> { match self { @@ -61,13 +60,13 @@ impl Bytecode { } } - /// Returns true if bytecode is EOF. + /// Returns `true` if bytecode is EOF. #[inline] pub const fn is_eof(&self) -> bool { matches!(self, Self::Eof(_)) } - /// Returns true if bytecode is EIP-7702. + /// Returns `true` if bytecode is EIP-7702. pub const fn is_eip7702(&self) -> bool { matches!(self, Self::Eip7702(_)) } @@ -96,7 +95,7 @@ impl Bytecode { /// Creates a new raw [`Bytecode`]. /// - /// Returns an error on incorrect Bytecode format. + /// Returns an error on incorrect bytecode format. #[inline] pub fn new_raw_checked(bytes: Bytes) -> Result { let prefix = bytes.get(..2); @@ -117,7 +116,7 @@ impl Bytecode { /// /// # Safety /// - /// Bytecode needs to end with STOP (0x00) opcode as checked bytecode assumes + /// Bytecode needs to end with `STOP` (`0x00`) opcode as checked bytecode assumes /// that it is safe to iterate over bytecode without checking lengths. pub unsafe fn new_analyzed( bytecode: Bytes, @@ -143,7 +142,7 @@ impl Bytecode { } } - /// Returns bytes + /// Returns bytes. #[inline] pub fn bytes(&self) -> Bytes { match self { @@ -152,7 +151,7 @@ impl Bytecode { } } - /// Returns bytes slice + /// Returns bytes slice. #[inline] pub fn bytes_slice(&self) -> &[u8] { match self { diff --git a/crates/bytecode/src/decode_errors.rs b/crates/bytecode/src/decode_errors.rs index 719a9f76c5..bad811b473 100644 --- a/crates/bytecode/src/decode_errors.rs +++ b/crates/bytecode/src/decode_errors.rs @@ -2,7 +2,7 @@ use crate::{eip7702::Eip7702DecodeError, eof::EofDecodeError}; use core::fmt::Debug; use std::fmt; -/// EOF decode errors. 
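// Illustrative sketch, not part of this patch: `hash_slow` above special-cases empty
// bytecode, which hashes to the KECCAK_EMPTY constant; the branch elided by the hunk
// presumably hashes the original (unpadded) bytes. Using the same `primitives` imports
// as that file:
use primitives::{keccak256, B256, KECCAK_EMPTY};

fn code_hash(original_byte_slice: &[u8]) -> B256 {
    if original_byte_slice.is_empty() {
        // Hash of the empty byte string, cached as a constant.
        KECCAK_EMPTY
    } else {
        keccak256(original_byte_slice)
    }
}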
+/// EOF decode errors #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum BytecodeDecodeError { diff --git a/crates/bytecode/src/eip7702.rs b/crates/bytecode/src/eip7702.rs index 88a1ca5319..f7f15248bd 100644 --- a/crates/bytecode/src/eip7702.rs +++ b/crates/bytecode/src/eip7702.rs @@ -1,19 +1,19 @@ use core::fmt; use primitives::{bytes, Address, Bytes}; -/// EIP-7702 Version Magic in u16 form. +/// EIP-7702 Version Magic in u16 form pub const EIP7702_MAGIC: u16 = 0xEF01; -/// EIP-7702 magic number in array form. +/// EIP-7702 magic number in array form pub static EIP7702_MAGIC_BYTES: Bytes = bytes!("ef01"); -/// EIP-7702 first version of bytecode. +/// EIP-7702 first version of bytecode pub const EIP7702_VERSION: u8 = 0; /// Bytecode of delegated account, specified in EIP-7702 /// /// Format of EIP-7702 bytecode consist of: -/// 0xEF00 (MAGIC) + 0x00 (VERSION) + 20 bytes of address. +/// `0xEF00` (MAGIC) + `0x00` (VERSION) + 20 bytes of address. #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Eip7702Bytecode { @@ -57,28 +57,34 @@ impl Eip7702Bytecode { } } - /// Return the raw bytecode with version MAGIC number. + /// Returns the raw bytecode with version MAGIC number. #[inline] pub fn raw(&self) -> &Bytes { &self.raw } - /// Return the address of the delegated contract. + /// Returns the address of the delegated contract. #[inline] pub fn address(&self) -> Address { self.delegated_address } } -/// Bytecode errors. +/// Bytecode errors #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Eip7702DecodeError { - /// Invalid length of the raw bytecode. It should be 23 bytes. + /// Invalid length of the raw bytecode + /// + /// It should be 23 bytes. InvalidLength, + /// Invalid magic number + /// /// All Eip7702 bytecodes should start with the magic number 0xEF01. InvalidMagic, - /// Only supported version is version 0x00. + /// Unsupported version + /// + /// Only supported version is version 0x00 UnsupportedVersion, } diff --git a/crates/bytecode/src/eof.rs b/crates/bytecode/src/eof.rs index cb50c3963f..5ee155d844 100644 --- a/crates/bytecode/src/eof.rs +++ b/crates/bytecode/src/eof.rs @@ -14,17 +14,17 @@ use core::cmp::min; use primitives::{b256, bytes, Bytes, B256}; use std::{fmt, vec, vec::Vec}; -/// Hash of EF00 bytes that is used for EXTCODEHASH when called from legacy bytecode. +/// Hash of EF00 bytes that is used for EXTCODEHASH when called from legacy bytecode pub const EOF_MAGIC_HASH: B256 = b256!("9dbf3648db8210552e9c4f75c6a1c3057c0ca432043bd648be15fe7be05646f5"); -/// EOF Magic in u16 form. +/// EOF Magic in [u16] form pub const EOF_MAGIC: u16 = 0xEF00; -/// EOF magic number in array form. +/// EOF magic number in array form pub static EOF_MAGIC_BYTES: Bytes = bytes!("ef00"); -/// EVM Object Format (EOF) container. +/// EVM Object Format (EOF) container /// /// It consists of a header, body and the raw original bytes. #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] @@ -38,7 +38,7 @@ pub struct Eof { impl Default for Eof { fn default() -> Self { let body = EofBody { - // types section with zero inputs, zero outputs and zero max stack size. + // Types section with zero inputs, zero outputs and zero max stack size. 
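// Illustrative sketch, not part of this patch: per the constants above, an EIP-7702
// delegation designator is 23 bytes — the 2-byte EIP7702_MAGIC (0xEF01), the 1-byte
// EIP7702_VERSION (0x00), then the 20-byte delegated address. A std-only construction
// (function name is mine):
fn eip7702_raw_bytes(delegated_address: [u8; 20]) -> [u8; 23] {
    let mut raw = [0u8; 23];
    raw[0] = 0xEF; // first magic byte
    raw[1] = 0x01; // second magic byte (EIP7702_MAGIC = 0xEF01)
    raw[2] = 0x00; // EIP7702_VERSION
    raw[3..].copy_from_slice(&delegated_address);
    raw
}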
types_section: vec![TypesSection::default()], code_section: vec![1], // One code section with a STOP byte. @@ -74,7 +74,7 @@ impl Eof { self.header.size() + self.header.body_size() } - /// Return raw EOF bytes. + /// Returns raw EOF bytes. pub fn raw(&self) -> &Bytes { &self.raw } @@ -95,7 +95,7 @@ impl Eof { &self.body.data_section } - /// Slow encode EOF bytes. + /// Slow encodes EOF bytes. pub fn encode_slow(&self) -> Bytes { let mut buffer: Vec = Vec::with_capacity(self.size()); self.header.encode(&mut buffer); @@ -103,7 +103,8 @@ impl Eof { buffer.into() } - /// Decode EOF that have additional dangling bytes. + /// Decodes EOF that have additional dangling bytes. + /// /// Assume that data section is fully filled. pub fn decode_dangling(mut raw: Bytes) -> Result<(Self, Bytes), EofDecodeError> { let (header, _) = EofHeader::decode(&raw)?; @@ -116,7 +117,7 @@ impl Eof { Ok((Self { header, body, raw }, dangling_data)) } - /// Decode EOF from raw bytes. + /// Decodes EOF from raw bytes. pub fn decode(raw: Bytes) -> Result { let (header, _) = EofHeader::decode(&raw)?; let body = EofBody::decode(&raw, &header)?; @@ -124,23 +125,23 @@ impl Eof { } } -/// EOF decode errors. +/// EOF decode errors #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EofDecodeError { - /// Short input while processing EOF. + /// Short input while processing EOF MissingInput, - /// Short body while processing EOF. + /// Short body while processing EOF MissingBodyWithoutData, - /// Body size is more than specified in the header. + /// Body size is more than specified in the header DanglingData, - /// Invalid types section data. + /// Invalid types section data InvalidTypesSection, - /// Invalid types section size. + /// Invalid types section size InvalidTypesSectionSize, - /// Invalid EOF magic number. + /// Invalid EOF magic number InvalidEOFMagicNumber, - /// Invalid EOF version. + /// Invalid EOF version InvalidEOFVersion, /// Invalid number for types kind InvalidTypesKind, @@ -152,21 +153,21 @@ pub enum EofDecodeError { InvalidDataKind, /// Invalid kind after code InvalidKindAfterCode, - /// Mismatch of code and types sizes. + /// Mismatch of code and types sizes MismatchCodeAndTypesSize, - /// There should be at least one size. + /// There should be at least one size NonSizes, - /// Missing size. + /// Missing size ShortInputForSizes, /// Size cant be zero ZeroSize, - /// Invalid code number. + /// Invalid code number TooManyCodeSections, - /// Invalid number of code sections. + /// Invalid number of code sections ZeroCodeSections, - /// Invalid container number. + /// Invalid container number TooManyContainerSections, - /// Invalid initcode size. + /// Invalid initcode size InvalidEOFSize, } diff --git a/crates/bytecode/src/eof/body.rs b/crates/bytecode/src/eof/body.rs index 268136b1e8..67756c8544 100644 --- a/crates/bytecode/src/eof/body.rs +++ b/crates/bytecode/src/eof/body.rs @@ -2,7 +2,7 @@ use super::{Eof, EofDecodeError, EofHeader, TypesSection}; use primitives::Bytes; use std::vec::Vec; -/// EOF container body. +/// EOF container body /// /// Contains types, code, container and data sections. /// @@ -12,7 +12,7 @@ use std::vec::Vec; pub struct EofBody { /// Code information pub types_section: Vec, - /// Index of the last byte of each code section. 
+ /// Index of the last byte of each code section pub code_section: Vec, pub code: Bytes, pub container_section: Vec, @@ -34,7 +34,7 @@ impl EofBody { /// Creates an EOF container from this body. pub fn into_eof(self) -> Eof { - // TODO add bounds checks. + // TODO : Add bounds checks. let mut prev_value = 0; let header = EofHeader { types_size: self.types_section.len() as u16 * 4, @@ -70,7 +70,7 @@ impl EofBody { /// /// First code section starts at 0. pub fn eof_code_section_start(&self, idx: usize) -> Option { - // starting code section start with 0. + // Starting code section start with 0. if idx == 0 { return Some(0); } @@ -116,7 +116,7 @@ impl EofBody { body.types_section.push(types_section); } - // extract code section + // Extract code section let start = header_len + header.types_size as usize; let mut code_end = 0; for size in header.code_sizes.iter().map(|x| *x as usize) { @@ -125,7 +125,7 @@ impl EofBody { } body.code = input.slice(start..start + header.sum_code_sizes); - // extract container section + // Extract container section let mut start = start + header.sum_code_sizes; for size in header.container_sizes.iter().map(|x| *x as usize) { body.container_section diff --git a/crates/bytecode/src/eof/header.rs b/crates/bytecode/src/eof/header.rs index bbe3e364c0..37b40226bb 100644 --- a/crates/bytecode/src/eof/header.rs +++ b/crates/bytecode/src/eof/header.rs @@ -8,20 +8,23 @@ use std::vec::Vec; #[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct EofHeader { - /// Size of EOF types section. - /// types section includes num of input and outputs and max stack size. + /// Size of EOF types section + /// + /// Types section includes num of input and outputs and max stack size. pub types_size: u16, - /// Sizes of EOF code section. + /// Sizes of EOF code section + /// /// Code size can't be zero. pub code_sizes: Vec, - /// EOF Container size. + /// EOF Container size + /// /// Container size can be zero. pub container_sizes: Vec, - /// EOF data size. + /// EOF data size pub data_size: u16, - /// sum code sizes + /// Sum code sizes pub sum_code_sizes: usize, - /// sum container sizes + /// Sum container sizes pub sum_container_sizes: usize, } @@ -33,7 +36,7 @@ const KIND_DATA: u8 = 4; #[inline] fn consume_header_section_size(input: &[u8]) -> Result<(&[u8], Vec, usize), EofDecodeError> { - // num_sections 2 bytes 0x0001-0xFFFF + // `num_sections` 2 bytes 0x0001-0xFFFF // 16-bit unsigned big-endian integer denoting the number of the sections let (input, num_sections) = consume_u16(input)?; if num_sections == 0 { @@ -47,7 +50,7 @@ fn consume_header_section_size(input: &[u8]) -> Result<(&[u8], Vec, usize), let mut sizes = Vec::with_capacity(num_sections); let mut sum = 0; for i in 0..num_sections { - // size 2 bytes 0x0001-0xFFFF + // `code_size` 2 bytes 0x0001-0xFFFF // 16-bit unsigned big-endian integer denoting the length of the section content let code_size = u16::from_be_bytes([input[i * 2], input[i * 2 + 1]]); if code_size == 0 { @@ -61,21 +64,22 @@ fn consume_header_section_size(input: &[u8]) -> Result<(&[u8], Vec, usize), } impl EofHeader { - /// Length of the header in bytes. + /// Gets the length of the header in bytes. /// /// It is minimum 15 bytes (there is at least one code section). 
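// Illustrative sketch, not part of this patch: the "minimum 15 bytes" figure in the doc
// above falls directly out of the field layout that `EofHeader::size` sums below.
// Reproduced as standalone arithmetic (function name is mine):
fn header_size(num_code_sections: usize, num_container_sections: usize) -> usize {
    2                                  // magic (0xEF00)
    + 1                                // version (0x01)
    + 3                                // kind_types + types_size
    + 3                                // kind_code + number of code sections
    + 2 * num_code_sections            // one u16 size per code section
    + if num_container_sections == 0 {
        0
    } else {
        3 + 2 * num_container_sections // kind_container + count + one u16 per container
    }
    + 3                                // kind_data + data_size
    + 1                                // terminator
}
// header_size(1, 0) == 15, the smallest possible header.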
pub fn size(&self) -> usize { - 2 + // magic - 1 + // version - 3 + // types section - 3 + // code section - 2 * self.code_sizes.len() + // num_code_sections - if self.container_sizes.is_empty() { 0 } else { 3 + 2 * self.container_sizes.len() } + // container - 3 + // data section. - 1 // terminator + 2 + // Magic + 1 + // Version + 3 + // Types section + 3 + // Code section + 2 * self.code_sizes.len() + // `num_code_sections` + if self.container_sizes.is_empty() { 0 } else { 3 + 2 * self.container_sizes.len() } + // Container + 3 + // Data section. + 1 // Terminator } - /// Return index where data size starts. + /// Returns index where data size starts. + /// /// Data size is two bytes long. pub fn data_size_raw_i(&self) -> usize { // termination(1byte) + code size(2) bytes. @@ -87,7 +91,9 @@ impl EofHeader { self.types_size as usize / 4 } - /// Returns body size. It is sum of code sizes, container sizes and data size. + /// Returns body size. + /// + /// It is sum of code sizes, container sizes and data size. pub fn body_size(&self) -> usize { self.types_size as usize + self.sum_code_sizes @@ -102,37 +108,37 @@ impl EofHeader { /// Encodes EOF header into binary form. pub fn encode(&self, buffer: &mut Vec) { - // magic 2 bytes 0xEF00 EOF prefix + // `magic` 2 bytes 0xEF00 EOF prefix buffer.extend_from_slice(&0xEF00u16.to_be_bytes()); - // version 1 byte 0x01 EOF version + // `version` 1 byte 0x01 EOF version buffer.push(0x01); - // kind_types 1 byte 0x01 kind marker for types size section + // `kind_types` 1 byte 0x01 kind marker for types size section buffer.push(KIND_TYPES); - // types_size 2 bytes 0x0004-0xFFFF + // `types_size` 2 bytes 0x0004-0xFFFF buffer.extend_from_slice(&self.types_size.to_be_bytes()); - // kind_code 1 byte 0x02 kind marker for code size section + // `kind_code` 1 byte 0x02 kind marker for code size section buffer.push(KIND_CODE); - // code_sections_sizes + // `code_sections_sizes` buffer.extend_from_slice(&(self.code_sizes.len() as u16).to_be_bytes()); for size in &self.code_sizes { buffer.extend_from_slice(&size.to_be_bytes()); } - // kind_container_or_data 1 byte 0x03 or 0x04 kind marker for container size section or data size section + // `kind_container_or_data` 1 byte 0x03 or 0x04 kind marker for container size section or data size section if self.container_sizes.is_empty() { buffer.push(KIND_DATA); } else { buffer.push(KIND_CONTAINER); - // container_sections_sizes + // `container_sections_sizes` buffer.extend_from_slice(&(self.container_sizes.len() as u16).to_be_bytes()); for size in &self.container_sizes { buffer.extend_from_slice(&size.to_be_bytes()); } - // kind_data 1 byte 0x04 kind marker for data size section + // `kind_data` 1 byte 0x04 kind marker for data size section buffer.push(KIND_DATA); } - // data_size 2 bytes 0x0000-0xFFFF 16-bit unsigned big-endian integer denoting the length of the data section content + // `data_size` 2 bytes 0x0000-0xFFFF 16-bit unsigned big-endian integer denoting the length of the data section content buffer.extend_from_slice(&self.data_size.to_be_bytes()); - // terminator 1 byte 0x00 marks the end of the EofHeader + // `terminator` 1 byte 0x00 marks the end of the EofHeader buffer.push(KIND_TERMINAL); } @@ -140,25 +146,25 @@ impl EofHeader { pub fn decode(input: &[u8]) -> Result<(Self, &[u8]), EofDecodeError> { let mut header = EofHeader::default(); - // magic 2 bytes 0xEF00 EOF prefix + // `magic` 2 bytes 0xEF00 EOF prefix let (input, kind) = consume_u16(input)?; if kind != 0xEF00 { return 
Err(EofDecodeError::InvalidEOFMagicNumber); } - // version 1 byte 0x01 EOF version + // `version` 1 byte 0x01 EOF version let (input, version) = consume_u8(input)?; if version != 0x01 { return Err(EofDecodeError::InvalidEOFVersion); } - // kind_types 1 byte 0x01 kind marker for types size section + // `kind_types` 1 byte 0x01 kind marker for types size section let (input, kind_types) = consume_u8(input)?; if kind_types != KIND_TYPES { return Err(EofDecodeError::InvalidTypesKind); } - // types_size 2 bytes 0x0004-0xFFFF + // `types_size` 2 bytes 0x0004-0xFFFF // 16-bit unsigned big-endian integer denoting the length of the type section content let (input, types_size) = consume_u16(input)?; header.types_size = types_size; @@ -167,13 +173,13 @@ impl EofHeader { return Err(EofDecodeError::InvalidTypesSection); } - // kind_code 1 byte 0x02 kind marker for code size section + // `kind_code` 1 byte 0x02 kind marker for code size section let (input, kind_types) = consume_u8(input)?; if kind_types != KIND_CODE { return Err(EofDecodeError::InvalidCodeKind); } - // code_sections_sizes + // `code_sections_sizes` let (input, sizes, sum) = consume_header_section_size(input)?; // more than 1024 code sections are not allowed @@ -214,14 +220,14 @@ impl EofHeader { _ => return Err(EofDecodeError::InvalidKindAfterCode), }; - // data_size 2 bytes 0x0000-0xFFFF 16-bit + // `data_size` 2 bytes 0x0000-0xFFFF 16-bit // unsigned big-endian integer denoting the length // of the data section content (for not yet deployed // containers this can be more than the actual content, see Data Section Lifecycle) let (input, data_size) = consume_u16(input)?; header.data_size = data_size; - // terminator 1 byte 0x00 marks the end of the EofHeader + // `terminator` 1 byte 0x00 marks the end of the EofHeader let (input, terminator) = consume_u8(input)?; if terminator != KIND_TERMINAL { return Err(EofDecodeError::InvalidTerminalByte); diff --git a/crates/bytecode/src/eof/printer.rs b/crates/bytecode/src/eof/printer.rs index 9fc4096bf6..41fac035a3 100644 --- a/crates/bytecode/src/eof/printer.rs +++ b/crates/bytecode/src/eof/printer.rs @@ -17,7 +17,7 @@ pub fn print(code: &[u8]) { }; if opcode.immediate_size() != 0 { - // check if the opcode immediate are within the bounds of the code + // Check if the opcode immediate are within the bounds of the code if i + opcode.immediate_size() as usize >= code.len() { println!("Malformed code: immediate out of bounds"); break; @@ -38,7 +38,7 @@ pub fn print(code: &[u8]) { if op == RJUMPV { let max_index = code[i + 1] as usize; let len = max_index + 1; - // and max_index+1 is to get size of vtable as index starts from 0. + // And max_index+1 is to get size of vtable as index starts from 0. rjumpv_additional_immediates = len * 2; // +1 is for max_index byte diff --git a/crates/bytecode/src/eof/types_section.rs b/crates/bytecode/src/eof/types_section.rs index 87f828321c..c4c4ff50fa 100644 --- a/crates/bytecode/src/eof/types_section.rs +++ b/crates/bytecode/src/eof/types_section.rs @@ -4,21 +4,24 @@ use super::{ }; use std::vec::Vec; -/// Non returning function has a output 0x80. +/// Non returning function has a output `0x80` const EOF_NON_RETURNING_FUNCTION: u8 = 0x80; -/// Types section that contains stack information for matching code section. 
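// Illustrative sketch, not part of this patch: combining the header layout encoded and
// decoded above with the default body from eof.rs (one zero-input non-returning types
// entry, a single STOP-only code section, no subcontainers, empty data), the smallest
// container I would expect serializes to 20 bytes. The grouping follows the field order
// of `EofHeader::encode`; treat the constant as my own reconstruction, not a value taken
// from the crate.
const MINIMAL_EOF: [u8; 20] = [
    0xEF, 0x00,             // magic
    0x01,                   // version
    0x01, 0x00, 0x04,       // kind_types, types_size = 4
    0x02, 0x00, 0x01,       // kind_code, num_code_sections = 1
    0x00, 0x01,             // code section size = 1
    0x04, 0x00, 0x00,       // kind_data, data_size = 0
    0x00,                   // terminator
    0x00, 0x80, 0x00, 0x00, // types entry: 0 inputs, 0x80 outputs (non-returning), max stack 0
    0x00,                   // code section: STOP
];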
+/// Types section that contains stack information for matching code section #[derive(Debug, Clone, Default, Hash, PartialEq, Eq, Copy, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TypesSection { - /// inputs - 1 byte - `0x00-0x7F` - /// number of stack elements the code section consumes + /// `inputs` - 1 byte - `0x00-0x7F` + /// + /// Number of stack elements the code section consumes pub inputs: u8, - /// outputs - 1 byte - `0x00-0x80` - /// number of stack elements the code section returns or 0x80 for non-returning functions + /// `outputs` - 1 byte - `0x00-0x80` + /// + /// Number of stack elements the code section returns or 0x80 for non-returning functions pub outputs: u8, - /// max_stack_height - 2 bytes - `0x0000-0x03FF` - /// maximum number of elements ever placed onto the stack by the code section + /// `max_stack_height` - 2 bytes - `0x0000-0x03FF` + /// + /// Maximum number of elements ever placed onto the stack by the code section pub max_stack_size: u16, } @@ -32,7 +35,7 @@ impl TypesSection { } } - /// True if section is non-returning. + /// Returns `true` if section is non-returning. pub fn is_non_returning(&self) -> bool { self.outputs == EOF_NON_RETURNING_FUNCTION } @@ -43,7 +46,7 @@ impl TypesSection { self.outputs as i32 - self.inputs as i32 } - /// Encode the section into the buffer. + /// Encodes the section into the buffer. #[inline] pub fn encode(&self, buffer: &mut Vec) { buffer.push(self.inputs); @@ -51,7 +54,7 @@ impl TypesSection { buffer.extend_from_slice(&self.max_stack_size.to_be_bytes()); } - /// Decode the section from the input. + /// Decodes the section from the input. #[inline] pub fn decode(input: &[u8]) -> Result<(Self, &[u8]), EofDecodeError> { let (input, inputs) = consume_u8(input)?; @@ -66,7 +69,7 @@ impl TypesSection { Ok((section, input)) } - /// Validate the section. + /// Validates the section. pub fn validate(&self) -> Result<(), EofDecodeError> { if self.inputs > 0x7f || self.outputs > 0x80 || self.max_stack_size > 0x03FF { return Err(EofDecodeError::InvalidTypesSection); diff --git a/crates/bytecode/src/eof/verification.rs b/crates/bytecode/src/eof/verification.rs index d1c27be4e6..58bb953319 100644 --- a/crates/bytecode/src/eof/verification.rs +++ b/crates/bytecode/src/eof/verification.rs @@ -31,16 +31,17 @@ pub fn validate_raw_eof_inner( /// Fully validates an [`Eof`] container. /// /// Only place where validation happen is in Creating Transaction. -/// Because of that we are assuming CodeType is ReturnContract. /// -/// Note: If needed we can make a flag that would assume ReturnContract CodeType. +/// Because of that we are assuming [CodeType] is [ReturnContract][CodeType::ReturnContract]. +/// +/// Note: If needed we can make a flag that would assume [ReturnContract][CodeType::ReturnContract].. pub fn validate_eof(eof: &Eof) -> Result<(), EofError> { validate_eof_inner(eof, Some(CodeType::ReturnContract)) } #[inline] pub fn validate_eof_inner(eof: &Eof, first_code_type: Option) -> Result<(), EofError> { - // data needs to be filled first first container. + // Data needs to be filled first first container. if !eof.body.is_data_filled { return Err(EofError::Validation(EofValidationError::DataNotFilled)); } @@ -82,18 +83,18 @@ pub fn validate_eof_codes( } if eof.body.code_section.is_empty() { - // no code sections. This should be already checked in decode. + // No code sections. This should be already checked in decode. 
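// Illustrative sketch, not part of this patch: `TypesSection::encode` above writes a
// fixed 4-byte entry — inputs, outputs, then the big-endian max stack height. Standalone:
fn encode_types_entry(inputs: u8, outputs: u8, max_stack_size: u16) -> [u8; 4] {
    let height = max_stack_size.to_be_bytes();
    [inputs, outputs, height[0], height[1]]
}
// encode_types_entry(0, 0x80, 0) == [0x00, 0x80, 0x00, 0x00], the zero-input
// non-returning entry required of the first code section (checked a few hunks below).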
return Err(EofValidationError::NoCodeSections); } - // the first code section must have a type signature + // The first code section must have a type signature // (0, 0x80, max_stack_height) (0 inputs non-returning function) let first_types = &eof.body.types_section[0]; if first_types.inputs != 0 || !first_types.is_non_returning() { return Err(EofValidationError::InvalidTypesSection); } - // tracking access of code and sub containers. + // Tracking access of code and sub containers. let mut tracker: AccessTracker = AccessTracker::new( this_code_type, eof.body.code_section.len(), @@ -101,7 +102,7 @@ pub fn validate_eof_codes( ); while let Some(index) = tracker.processing_stack.pop() { - // assume index is correct. + // Assume `index` is correct. let code = eof.body.code(index).unwrap(); validate_eof_code( &code, @@ -113,11 +114,11 @@ pub fn validate_eof_codes( )?; } - // iterate over accessed codes and check if all are accessed. + // Iterate over accessed codes and check if all are accessed. if !tracker.codes.into_iter().all(identity) { return Err(EofValidationError::CodeSectionNotAccessed); } - // iterate over all accessed subcontainers and check if all are accessed. + // Iterate over all accessed subcontainers and check if all are accessed. if !tracker.subcontainers.iter().all(|i| i.is_some()) { return Err(EofValidationError::SubContainerNotAccessed); } @@ -135,7 +136,7 @@ pub fn validate_eof_codes( .collect()) } -/// EOF Error. +/// EOF Error #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] pub enum EofError { Decode(EofDecodeError), @@ -165,79 +166,85 @@ impl fmt::Display for EofError { impl core::error::Error for EofError {} +/// EOF Validation Error #[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] pub enum EofValidationError { FalsePositive, /// Opcode is not known. It is not defined in the opcode table. UnknownOpcode, - /// Opcode is disabled in EOF. For example JUMP, JUMPI, etc. + /// Opcode is disabled in EOF. For example JUMP, JUMPI, etc OpcodeDisabled, - /// Every instruction inside bytecode should be forward accessed. + /// Every instruction inside bytecode should be forward accessed + /// /// Forward access can be a jump or sequential opcode. + /// /// In case after terminal opcode there should be a forward jump. InstructionNotForwardAccessed, - /// Bytecode is too small and is missing immediate bytes for instruction. + /// Bytecode is too small and is missing immediate bytes for instruction MissingImmediateBytes, - /// Similar to [`EofValidationError::MissingImmediateBytes`] but for special case of RJUMPV immediate bytes. + /// Bytecode is too small and is missing immediate bytes for instruction + /// + /// Similar to [`MissingImmediateBytes`][EofValidationError::MissingImmediateBytes] but for special case of RJUMPV immediate bytes. MissingRJUMPVImmediateBytes, - /// Invalid jump into immediate bytes. + /// Invalid jump into immediate bytes JumpToImmediateBytes, - /// Invalid jump into immediate bytes. + /// Invalid jump into immediate bytes BackwardJumpToImmediateBytes, - /// MaxIndex in RJUMPV can't be zero. Zero max index makes it RJUMPI. + /// MaxIndex in RJUMPV can't be zero. Zero max index makes it RJUMPI RJUMPVZeroMaxIndex, - /// Jump with zero offset would make a jump to next opcode, it does not make sense. + /// Jump with zero offset would make a jump to next opcode, it does not make sense JumpZeroOffset, - /// EOFCREATE points to container out of bounds. 
+ /// EOFCREATE points to container out of bounds EOFCREATEInvalidIndex, - /// CALLF section out of bounds. + /// CALLF section out of bounds CodeSectionOutOfBounds, - /// CALLF to non returning function is not allowed. + /// CALLF to non returning function is not allowed CALLFNonReturningFunction, - /// CALLF stack overflow. + /// CALLF stack overflow StackOverflow, - /// JUMPF needs to have enough outputs. + /// JUMPF needs to have enough outputs JUMPFEnoughOutputs, /// JUMPF Stack JUMPFStackHigherThanOutputs, - /// DATA load out of bounds. + /// DATA load out of bounds DataLoadOutOfBounds, - /// RETF biggest stack num more then outputs. + /// RETF biggest stack num more then outputs RETFBiggestStackNumMoreThenOutputs, - /// Stack requirement is more than smallest stack items. + /// Stack requirement is more than smallest stack items StackUnderflow, - /// Smallest stack items is more than types output. + /// Smallest stack items is more than types output TypesStackUnderflow, - /// Jump out of bounds. + /// Jump out of bounds JumpUnderflow, - /// Jump to out of bounds. + /// Jump to out of bounds JumpOverflow, - /// Backward jump should have same smallest and biggest stack items. + /// Backward jump should have same smallest and biggest stack items BackwardJumpBiggestNumMismatch, - /// Backward jump should have same smallest and biggest stack items. + /// Backward jump should have same smallest and biggest stack items BackwardJumpSmallestNumMismatch, - /// Last instruction should be terminating. + /// Last instruction should be terminating LastInstructionNotTerminating, - /// Code section not accessed. + /// Code section not accessed CodeSectionNotAccessed, /// Types section invalid InvalidTypesSection, - /// First types section is invalid. - /// It should have inputs 0 and outputs 0x80. + /// First types section is invalid + /// It should have inputs 0 and outputs `0x80` InvalidFirstTypesSection, - /// Max stack element mismatch. + /// Max stack element mismatch MaxStackMismatch, /// No code sections present NoCodeSections, - /// Sub container called in two different modes. + /// Sub container called in two different modes + /// /// Check [`CodeType`] for more information. SubContainerCalledInTwoModes, - /// Sub container not accessed. + /// Sub container not accessed SubContainerNotAccessed, - /// Data size needs to be filled for ReturnContract type. + /// Data size needs to be filled for [ReturnContract][CodeType::ReturnContract] type DataNotFilled, /// Section is marked as non-returning but has either RETF or - /// JUMPF to returning section opcodes. + /// JUMPF to returning section opcodes NonReturningSectionIsReturning, } @@ -318,18 +325,20 @@ impl AccessTracker { } } -/// Types of code sections. It is a error if container to contain +/// Types of code sections +/// +/// It is a error if container to contain /// both RETURNCONTRACT and either of RETURN or STOP. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum CodeType { - /// Return contract code. + /// Return contract code ReturnContract, - /// Return or Stop opcodes. + /// Return or Stop opcodes ReturnOrStop, } impl CodeType { - /// Returns true of the code is initcode. + /// Returns `true` of the code is initcode. pub fn is_initcode(&self) -> bool { matches!(self, CodeType::ReturnContract) } @@ -439,7 +448,7 @@ pub fn validate_eof_code( } } - // all bytes that are intermediate. + // All bytes that are intermediate. 
let mut jumps = vec![InstructionInfo::default(); code.len()]; let mut is_after_termination = false; @@ -455,7 +464,7 @@ pub fn validate_eof_code( let opcode = &OPCODE_INFO[op as usize]; let Some(opcode) = opcode else { - // err unknown opcode. + // Err unknown opcode. return Err(EofValidationError::UnknownOpcode); }; @@ -466,7 +475,7 @@ pub fn validate_eof_code( let this_instruction = &mut jumps[i]; - // update biggest/smallest values for next instruction only if it is not after termination. + // Update biggest/smallest values for next instruction only if it is not after termination. if !is_after_termination { this_instruction.smallest = core::cmp::min(this_instruction.smallest, next_smallest); this_instruction.biggest = core::cmp::max(this_instruction.biggest, next_biggest); @@ -476,30 +485,30 @@ pub fn validate_eof_code( // Opcodes after termination should be accessed by forward jumps. if is_after_termination && !this_instruction.is_jumpdest { - // opcode after termination was not accessed. + // Opcode after termination was not accessed. return Err(EofValidationError::InstructionNotForwardAccessed); } is_after_termination = opcode.is_terminating(); - // mark immediate as non-jumpable. RJUMPV is special case covered later. + // Mark immediate as non-jumpable. RJUMPV is special case covered later. if opcode.immediate_size() != 0 { - // check if the opcode immediate are within the bounds of the code + // Check if the opcode immediate are within the bounds of the code if i + opcode.immediate_size() as usize >= code.len() { // Malfunctional code return Err(EofValidationError::MissingImmediateBytes); } - // mark immediate bytes as non-jumpable. + // Mark immediate bytes as non-jumpable. for imm in 1..opcode.immediate_size() as usize + 1 { - // SAFETY: immediate size is checked above. + // SAFETY: Immediate size is checked above. jumps[i + imm].mark_as_immediate()?; } } // IO diff used to generate next instruction smallest/biggest value. let mut stack_io_diff = opcode.io_diff() as i32; - // how many stack items are required for this opcode. + // How many stack items are required for this opcode. let mut stack_requirement = opcode.inputs() as i32; - // additional immediate bytes for RJUMPV, it has dynamic vtable. + // Additional immediate bytes for RJUMPV, it has dynamic vtable. let mut rjumpv_additional_immediates = 0; // If opcodes is RJUMP, RJUMPI or RJUMPV then this will have absolute jumpdest. let mut absolute_jumpdest = vec![]; @@ -510,10 +519,10 @@ pub fn validate_eof_code( // RJUMP is considered a terminating opcode. } opcode::RJUMPV => { - // code length for RJUMPV is checked with immediate size. + // Code length for RJUMPV is checked with immediate size. let max_index = code[i + 1] as usize; let len = max_index + 1; - // and max_index+1 is to get size of vtable as index starts from 0. + // And max_index+1 is to get size of vtable as index starts from 0. rjumpv_additional_immediates = len * 2; // +1 is for max_index byte @@ -524,7 +533,7 @@ pub fn validate_eof_code( // Mark vtable as immediate, max_index was already marked. for imm in 0..rjumpv_additional_immediates { - // SAFETY: immediate size is checked above. + // SAFETY: Immediate size is checked above. jumps[i + 2 + imm].mark_as_immediate()?; } @@ -539,7 +548,7 @@ pub fn validate_eof_code( opcode::CALLF => { let section_i: usize = unsafe { read_u16(code.as_ptr().add(i + 1)) } as usize; let Some(target_types) = types.get(section_i) else { - // code section out of bounds. + // Code section out of bounds. 
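// Illustrative sketch, not part of this patch: RJUMPV is the special case with a dynamic
// immediate, as handled above — a `max_index` byte followed by `max_index + 1` two-byte
// relative offsets. Total immediate length as a standalone helper (name is mine):
fn rjumpv_immediate_len(max_index: u8) -> usize {
    let vtable_entries = max_index as usize + 1; // index starts from 0
    1 + 2 * vtable_entries                       // max_index byte + jump table
}
// rjumpv_immediate_len(2) == 7: one max_index byte plus three two-byte offsets.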
return Err(EofValidationError::CodeSectionOutOfBounds); }; @@ -547,32 +556,32 @@ pub fn validate_eof_code( if target_types.is_non_returning() { return Err(EofValidationError::CALLFNonReturningFunction); } - // stack input for this opcode is the input of the called code. + // Stack input for this opcode is the input of the called code. stack_requirement = target_types.inputs as i32; - // stack diff depends on input/output of the called code. + // Stack diff depends on input/output of the called code. stack_io_diff = target_types.io_diff(); - // mark called code as accessed. + // Mark called code as accessed. tracker.access_code(section_i); - // we decrement by `types.inputs` as they are considered as send + // We decrement by `types.inputs` as they are considered as send // to the called code and included in types.max_stack_size. if this_instruction.biggest - stack_requirement + target_types.max_stack_size as i32 > STACK_LIMIT as i32 { - // if stack max items + called code max stack size + // If stack max items + called code max stack size return Err(EofValidationError::StackOverflow); } } opcode::JUMPF => { let target_index = unsafe { read_u16(code.as_ptr().add(i + 1)) } as usize; - // targeted code needs to have zero outputs (be non returning). + // Targeted code needs to have zero outputs (be non returning). let Some(target_types) = types.get(target_index) else { - // code section out of bounds. + // Code section out of bounds. return Err(EofValidationError::CodeSectionOutOfBounds); }; - // we decrement types.inputs as they are considered send to the called code. - // and included in types.max_stack_size. + // We decrement types.inputs as they are considered send to the called code. + // And included in types.max_stack_size. if this_instruction.biggest - target_types.inputs as i32 + target_types.max_stack_size as i32 > STACK_LIMIT as i32 @@ -583,11 +592,11 @@ pub fn validate_eof_code( tracker.access_code(target_index); if target_types.is_non_returning() { - // if it is not returning + // If it is not returning stack_requirement = target_types.inputs as i32; } else { is_returning = true; - // check if target code produces enough outputs. + // Check if target code produces enough outputs. if this_types.outputs < target_types.outputs { return Err(EofValidationError::JUMPFEnoughOutputs); } @@ -600,7 +609,7 @@ pub fn validate_eof_code( return Err(EofValidationError::JUMPFStackHigherThanOutputs); } - // if this instruction max + target_types max is more then stack limit. + // If this instruction max + target_types max is more then stack limit. if this_instruction.biggest + stack_requirement > STACK_LIMIT as i32 { return Err(EofValidationError::StackOverflow); } @@ -609,7 +618,7 @@ pub fn validate_eof_code( opcode::EOFCREATE => { let index = code[i + 1] as usize; if index >= num_of_containers { - // code section out of bounds. + // Code section out of bounds. return Err(EofValidationError::EOFCREATEInvalidIndex); } tracker.set_subcontainer_type(index, CodeType::ReturnContract)?; @@ -617,8 +626,8 @@ pub fn validate_eof_code( opcode::RETURNCONTRACT => { let index = code[i + 1] as usize; if index >= num_of_containers { - // code section out of bounds. - // TODO custom error + // Code section out of bounds. 
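// Illustrative sketch, not part of this patch: the CALLF/JUMPF overflow checks above boil
// down to one inequality — the caller's largest observed stack height, minus the inputs
// handed over to the callee, plus the callee's declared max stack height, must stay
// within the 1024-item EVM stack limit.
const STACK_LIMIT: i32 = 1024;

fn call_would_overflow(caller_biggest: i32, callee_inputs: i32, callee_max_stack: i32) -> bool {
    caller_biggest - callee_inputs + callee_max_stack > STACK_LIMIT
}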
+ // TODO : Custom error return Err(EofValidationError::EOFCREATEInvalidIndex); } if *tracker @@ -626,7 +635,7 @@ pub fn validate_eof_code( .get_or_insert(CodeType::ReturnContract) != CodeType::ReturnContract { - // TODO make custom error + // TODO : Make custom error return Err(EofValidationError::SubContainerCalledInTwoModes); } tracker.set_subcontainer_type(index, CodeType::ReturnOrStop)?; @@ -643,13 +652,13 @@ pub fn validate_eof_code( opcode::DATALOADN => { let index = unsafe { read_u16(code.as_ptr().add(i + 1)) } as isize; if data_size < 32 || index > data_size as isize - 32 { - // data load out of bounds. + // Data load out of bounds. return Err(EofValidationError::DataLoadOutOfBounds); } } opcode::RETF => { stack_requirement = this_types.outputs as i32; - // mark section as returning. + // Mark section as returning. is_returning = true; if this_instruction.biggest > stack_requirement { @@ -670,26 +679,26 @@ pub fn validate_eof_code( } _ => {} } - // check if stack requirement is more than smallest stack items. + // Check if stack requirement is more than smallest stack items. if stack_requirement > this_instruction.smallest { - // opcode requirement is more than smallest stack items. + // Opcode requirement is more than smallest stack items. return Err(EofValidationError::StackUnderflow); } next_smallest = this_instruction.smallest + stack_io_diff; next_biggest = this_instruction.biggest + stack_io_diff; - // check if jumpdest are correct and mark forward jumps. + // Check if jumpdest are correct and mark forward jumps. for absolute_jump in absolute_jumpdest { if absolute_jump < 0 { - // jump out of bounds. + // Jump out of bounds. return Err(EofValidationError::JumpUnderflow); } if absolute_jump >= code.len() as isize { - // jump to out of bounds + // Jump to out of bounds return Err(EofValidationError::JumpOverflow); } - // fine to cast as bounds are checked. + // Fine to cast as bounds are checked. let absolute_jump = absolute_jump as usize; let target_jump = &mut jumps[absolute_jump]; @@ -698,50 +707,50 @@ pub fn validate_eof_code( return Err(EofValidationError::BackwardJumpToImmediateBytes); } - // needed to mark forward jumps. It does not do anything for backward jumps. + // Needed to mark forward jumps. It does not do anything for backward jumps. target_jump.is_jumpdest = true; if absolute_jump <= i { - // backward jumps should have same smallest and biggest stack items. + // Backward jumps should have same smallest and biggest stack items. if target_jump.biggest != next_biggest { - // wrong jumpdest. + // Wrong jumpdest. return Err(EofValidationError::BackwardJumpBiggestNumMismatch); } if target_jump.smallest != next_smallest { - // wrong jumpdest. + // Wrong jumpdest. return Err(EofValidationError::BackwardJumpSmallestNumMismatch); } } else { - // forward jumps can make min even smallest size - // while biggest num is needed to check stack overflow + // Forward jumps can make min even smallest size + // While biggest num is needed to check stack overflow target_jump.smallest = core::cmp::min(target_jump.smallest, next_smallest); target_jump.biggest = core::cmp::max(target_jump.biggest, next_biggest); } } - // additional immediate are from RJUMPV vtable. + // Additional immediate are from RJUMPV vtable. i += 1 + opcode.immediate_size() as usize + rjumpv_additional_immediates; } - // error if section is returning but marked as non-returning. + // Error if section is returning but marked as non-returning. 
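// Illustrative sketch, not part of this patch: DATALOADN reads a full 32-byte word at a
// static offset, so the bounds check above rejects any offset whose 32-byte window would
// run past the data section:
fn dataloadn_in_bounds(data_size: usize, offset: usize) -> bool {
    data_size >= 32 && offset <= data_size - 32
}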
if is_returning == this_types.is_non_returning() { - // wrong termination. + // Wrong termination. return Err(EofValidationError::NonReturningSectionIsReturning); } - // last opcode should be terminating + // Last opcode should be terminating if !is_after_termination { - // wrong termination. + // Wrong termination. return Err(EofValidationError::LastInstructionNotTerminating); } - // TODO integrate max so we dont need to iterate again + // TODO : Integrate max so we dont need to iterate again let mut max_stack_requirement = 0; for opcode in jumps { max_stack_requirement = core::cmp::max(opcode.biggest, max_stack_requirement); } if max_stack_requirement != types[this_types_index].max_stack_size as i32 { - // stack overflow + // Stack overflow return Err(EofValidationError::MaxStackMismatch); } diff --git a/crates/bytecode/src/legacy/analyzed.rs b/crates/bytecode/src/legacy/analyzed.rs index e16dfc8a64..c326142aff 100644 --- a/crates/bytecode/src/legacy/analyzed.rs +++ b/crates/bytecode/src/legacy/analyzed.rs @@ -7,11 +7,11 @@ use std::sync::Arc; #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct LegacyAnalyzedBytecode { - /// Bytecode with 32 zero bytes padding. + /// Bytecode with 32 zero bytes padding bytecode: Bytes, - /// Original bytes length. + /// Original bytes length original_len: usize, - /// Jump table. + /// Jump table jump_table: JumpTable, } @@ -27,7 +27,7 @@ impl Default for LegacyAnalyzedBytecode { } impl LegacyAnalyzedBytecode { - /// Create new analyzed bytecode. + /// Creates new analyzed bytecode. pub fn new(bytecode: Bytes, original_len: usize, jump_table: JumpTable) -> Self { Self { bytecode, @@ -43,22 +43,22 @@ impl LegacyAnalyzedBytecode { &self.bytecode } - /// Original bytes length. + /// Returns original bytes length. pub fn original_len(&self) -> usize { self.original_len } - /// Original bytes without padding. + /// Returns original bytes without padding. pub fn original_bytes(&self) -> Bytes { self.bytecode.slice(..self.original_len) } - /// Original bytes without padding. + /// Returns original bytes without padding. pub fn original_byte_slice(&self) -> &[u8] { &self.bytecode[..self.original_len] } - /// Jumptable of analyzed bytes. + /// Returns [JumpTable] of analyzed bytes. pub fn jump_table(&self) -> &JumpTable { &self.jump_table } diff --git a/crates/bytecode/src/legacy/jump_map.rs b/crates/bytecode/src/legacy/jump_map.rs index a3ee673472..49e3639052 100644 --- a/crates/bytecode/src/legacy/jump_map.rs +++ b/crates/bytecode/src/legacy/jump_map.rs @@ -2,7 +2,7 @@ use bitvec::vec::BitVec; use primitives::hex; use std::{fmt::Debug, sync::Arc}; -/// A map of valid `jump` destinations. +/// A map of valid `jump` destinations #[derive(Clone, Default, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct JumpTable(pub Arc>); @@ -16,19 +16,19 @@ impl Debug for JumpTable { } impl JumpTable { - /// Get the raw bytes of the jump map + /// Gets the raw bytes of the jump map. #[inline] pub fn as_slice(&self) -> &[u8] { self.0.as_raw_slice() } - /// Construct a jump map from raw bytes + /// Constructs a jump map from raw bytes. #[inline] pub fn from_slice(slice: &[u8]) -> Self { Self(Arc::new(BitVec::from_slice(slice))) } - /// Check if `pc` is a valid jump destination. + /// Checks if `pc` is a valid jump destination. 
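To make the padding scheme documented above concrete, here is a sketch (not the crate's constructor) of what "bytecode with 32 zero bytes padding" plus the `original_*` accessors amount to; zero bytes decode as STOP, so execution that runs past the original code halts harmlessly.

```rust
/// Appends the 32-byte zero pad and remembers the original length.
fn pad_for_analysis(original: &[u8]) -> (Vec<u8>, usize) {
    let original_len = original.len();
    let mut padded = Vec::with_capacity(original_len + 32);
    padded.extend_from_slice(original);
    padded.extend(core::iter::repeat(0u8).take(32));
    (padded, original_len)
}

/// Mirrors `original_byte_slice`: the unpadded prefix of the stored bytes.
fn original_byte_slice(padded: &[u8], original_len: usize) -> &[u8] {
    &padded[..original_len]
}
```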
#[inline] pub fn is_valid(&self, pc: usize) -> bool { pc < self.0.len() && unsafe { *self.0.get_unchecked(pc) } diff --git a/crates/bytecode/src/legacy/raw.rs b/crates/bytecode/src/legacy/raw.rs index b55ea2845a..d8be15e157 100644 --- a/crates/bytecode/src/legacy/raw.rs +++ b/crates/bytecode/src/legacy/raw.rs @@ -55,16 +55,16 @@ pub fn analyze_legacy(bytetecode: &[u8]) -> JumpTable { while iterator < end { let opcode = unsafe { *iterator }; if opcode::JUMPDEST == opcode { - // SAFETY: jumps are max length of the code + // SAFETY: Jumps are max length of the code unsafe { jumps.set_unchecked(iterator.offset_from(start) as usize, true) } iterator = unsafe { iterator.offset(1) }; } else { let push_offset = opcode.wrapping_sub(opcode::PUSH1); if push_offset < 32 { - // SAFETY: iterator access range is checked in the while loop + // SAFETY: Iterator access range is checked in the while loop iterator = unsafe { iterator.offset((push_offset + 2) as isize) }; } else { - // SAFETY: iterator access range is checked in the while loop + // SAFETY: Iterator access range is checked in the while loop iterator = unsafe { iterator.offset(1) }; } } diff --git a/crates/bytecode/src/opcode.rs b/crates/bytecode/src/opcode.rs index d96cf5ea49..e33f3eb349 100644 --- a/crates/bytecode/src/opcode.rs +++ b/crates/bytecode/src/opcode.rs @@ -5,7 +5,7 @@ pub mod parse; use core::{fmt, ptr::NonNull}; -/// An EVM opcode. +/// An EVM opcode /// /// This is always a valid opcode, as declared in the [`opcode`][self] module or the /// [`OPCODE_INFO`] constant. @@ -25,7 +25,7 @@ impl fmt::Display for OpCode { } impl OpCode { - /// Instantiate a new opcode from a u8. + /// Instantiates a new opcode from a u8. #[inline] pub const fn new(opcode: u8) -> Option { match OPCODE_INFO[opcode as usize] { @@ -82,7 +82,7 @@ impl OpCode { } } - /// Instantiate a new opcode from a u8 without checking if it is valid. + /// Instantiates a new opcode from a u8 without checking if it is valid. /// /// # Safety /// @@ -169,7 +169,9 @@ impl OpCode { } /// Returns true if the opcode modifies memory. + /// /// + /// /// #[inline] pub const fn modifies_memory(&self) -> bool { @@ -197,23 +199,27 @@ impl OpCode { } } -/// Information about opcode, such as name, and stack inputs and outputs. +/// Information about opcode, such as name, and stack inputs and outputs #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct OpCodeInfo { - /// Invariant: `(name_ptr, name_len)` is a `&'static str`. It is a shorted variant of `str` as + /// Invariant: `(name_ptr, name_len)` is a [`&'static str`][str] + /// + /// It is a shorted variant of [`str`] as /// the name length is always less than 256 characters. name_ptr: NonNull, name_len: u8, - /// Stack inputs. + /// Stack inputs inputs: u8, - /// Stack outputs. + /// Stack outputs outputs: u8, - /// Number of intermediate bytes. + /// Number of intermediate bytes /// /// RJUMPV is a special case where the bytes len depends on bytecode value, /// for RJUMV size will be set to one byte as it is the minimum immediate size. immediate_size: u8, - /// Used by EOF verification. All not EOF opcodes are marked false. + /// Used by EOF verification + /// + /// All not EOF opcodes are marked false. not_eof: bool, /// If the opcode stops execution. aka STOP, RETURN, .. terminating: bool, @@ -252,7 +258,7 @@ impl OpCodeInfo { pub const fn name(&self) -> &'static str { // SAFETY: `self.name_*` can only be initialized with a valid `&'static str`. unsafe { - // TODO: Use `str::from_raw_parts` when it's stable. 
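The pointer-based analysis loop above can be hard to follow at a glance. An equivalent safe-Rust sketch of the same pass, with a `Vec<bool>` standing in for the bit-backed `JumpTable`:

```rust
const JUMPDEST: u8 = 0x5B;
const PUSH1: u8 = 0x60;

/// Marks every JUMPDEST and skips the immediate bytes of PUSH1..=PUSH32 so
/// push data is never mistaken for an opcode.
fn analyze_legacy_sketch(code: &[u8]) -> Vec<bool> {
    let mut jumps = vec![false; code.len()];
    let mut i = 0;
    while i < code.len() {
        let opcode = code[i];
        if opcode == JUMPDEST {
            jumps[i] = true;
            i += 1;
        } else {
            let push_offset = opcode.wrapping_sub(PUSH1);
            if push_offset < 32 {
                // PUSH1..=PUSH32 carry 1..=32 immediate bytes after the opcode.
                i += push_offset as usize + 2;
            } else {
                i += 1;
            }
        }
    }
    jumps
}
```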
+ // TODO : Use `str::from_raw_parts` when it's stable. let slice = core::slice::from_raw_parts(self.name_ptr.as_ptr(), self.name_len as usize); core::str::from_utf8_unchecked(slice) } @@ -327,7 +333,7 @@ pub const fn stack_io(mut op: OpCodeInfo, inputs: u8, outputs: u8) -> OpCodeInfo op } -/// Alias for the [`JUMPDEST`] opcode. +/// Alias for the [`JUMPDEST`] opcode pub const NOP: u8 = JUMPDEST; macro_rules! opcodes { @@ -378,7 +384,8 @@ macro_rules! phf_map_cb { } /// Stringifies identifiers with `paste` so that they are available as literals. -/// This doesn't work with `stringify!` because it cannot be expanded inside of another macro. +/// +/// This doesn't work with [`stringify!`] because it cannot be expanded inside of another macro. #[cfg(feature = "parse")] macro_rules! stringify_with_cb { ($callback:ident; $($id:ident)*) => { paste::paste! { diff --git a/crates/bytecode/src/opcode/parse.rs b/crates/bytecode/src/opcode/parse.rs index d172ae4a3a..b9fea29274 100644 --- a/crates/bytecode/src/opcode/parse.rs +++ b/crates/bytecode/src/opcode/parse.rs @@ -2,7 +2,7 @@ use super::OpCode; use crate::opcode::NAME_TO_OPCODE; use core::fmt; -/// An error indicating that an opcode is invalid. +/// An error indicating that an opcode is invalid #[derive(Debug, PartialEq, Eq)] pub struct OpCodeError(()); @@ -24,7 +24,9 @@ impl core::str::FromStr for OpCode { } impl OpCode { - /// Parses an opcode from a string. This is the inverse of [`as_str`](Self::as_str). + /// Parses an opcode from a string. + /// + /// This is the inverse of [`as_str`](Self::as_str). #[inline] pub fn parse(s: &str) -> Option { NAME_TO_OPCODE.get(s).copied() diff --git a/crates/bytecode/src/utils.rs b/crates/bytecode/src/utils.rs index 6b90c5e394..7a8d7d47fc 100644 --- a/crates/bytecode/src/utils.rs +++ b/crates/bytecode/src/utils.rs @@ -1,4 +1,4 @@ -/// Read big-endian i16 from u8 pointer +/// Reads big-endian i16 from u8 pointer. /// /// # Safety /// @@ -7,7 +7,7 @@ pub unsafe fn read_i16(ptr: *const u8) -> i16 { i16::from_be_bytes(core::slice::from_raw_parts(ptr, 2).try_into().unwrap()) } -/// Read big-endian u16 from u8 pointer +/// Reads big-endian u16 from u8 pointer. /// /// # Safety /// diff --git a/crates/context/interface/src/block.rs b/crates/context/interface/src/block.rs index 57dfaae712..fb8d996d01 100644 --- a/crates/context/interface/src/block.rs +++ b/crates/context/interface/src/block.rs @@ -5,7 +5,7 @@ pub use blob::{calc_blob_gasprice, calc_excess_blob_gas, BlobExcessGasAndPrice}; use auto_impl::auto_impl; use primitives::{Address, B256, U256}; -/// Trait for retrieving block information required for execution.\ +/// Trait for retrieving block information required for execution. #[auto_impl(&, &mut, Box, Arc)] pub trait Block { /// The number of ancestor blocks of this block (block height). @@ -36,7 +36,7 @@ pub trait Block { /// /// Replaces `difficulty` after the Paris (AKA the merge) upgrade with [EIP-4399]. /// - /// NOTE: `prevrandao` can be found in a block in place of `mix_hash`. + /// Note: `prevrandao` can be found in a block in place of `mix_hash`. 
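For reference, a bounds-checked counterpart of the raw-pointer `read_u16` helper above; this sketch is not part of the crate, which keeps the `unsafe` pointer version for speed on the hot path.

```rust
/// Reads a big-endian u16 at `at`, returning None if fewer than two bytes remain.
fn read_u16_checked(bytes: &[u8], at: usize) -> Option<u16> {
    let imm: [u8; 2] = bytes.get(at..at + 2)?.try_into().ok()?;
    Some(u16::from_be_bytes(imm))
}
```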
/// /// [EIP-4399]: https://eips.ethereum.org/EIPS/eip-4399 fn prevrandao(&self) -> Option<&B256>; diff --git a/crates/context/interface/src/block/blob.rs b/crates/context/interface/src/block/blob.rs index fcf86659d5..2ee4b7fac9 100644 --- a/crates/context/interface/src/block/blob.rs +++ b/crates/context/interface/src/block/blob.rs @@ -2,7 +2,7 @@ use specification::eip4844::{ BLOB_GASPRICE_UPDATE_FRACTION, MIN_BLOB_GASPRICE, TARGET_BLOB_GAS_PER_BLOCK, }; -/// Structure holding block blob excess gas and it calculates blob fee. +/// Structure holding block blob excess gas and it calculates blob fee /// /// Incorporated as part of the Cancun upgrade via [EIP-4844]. /// @@ -10,9 +10,11 @@ use specification::eip4844::{ #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlobExcessGasAndPrice { - /// The excess blob gas of the block. + /// The excess blob gas of the block pub excess_blob_gas: u64, - /// The calculated blob gas price based on the `excess_blob_gas`, See [calc_blob_gasprice] + /// The calculated blob gas price based on the `excess_blob_gas` + /// + /// See [calc_blob_gasprice] pub blob_gasprice: u128, } @@ -146,7 +148,7 @@ mod tests { (2314057, 1), (2314058, 2), (10 * 1024 * 1024, 23), - // calc_blob_gasprice approximates `e ** (excess_blob_gas / BLOB_GASPRICE_UPDATE_FRACTION)` using Taylor expansion + // `calc_blob_gasprice` approximates `e ** (excess_blob_gas / BLOB_GASPRICE_UPDATE_FRACTION)` using Taylor expansion // // to roughly find where boundaries will be hit: // 2 ** bits = e ** (excess_blob_gas / BLOB_GASPRICE_UPDATE_FRACTION) diff --git a/crates/context/interface/src/cfg.rs b/crates/context/interface/src/cfg.rs index b099c52c95..5f2221d9c1 100644 --- a/crates/context/interface/src/cfg.rs +++ b/crates/context/interface/src/cfg.rs @@ -10,7 +10,7 @@ pub trait Cfg { fn chain_id(&self) -> u64; - // TODO Make SpecId a associated type but for faster development we use impl Into. + // TODO : Make SpecId a associated type but for faster development we use impl Into. fn spec(&self) -> Self::Spec; fn max_code_size(&self) -> usize; @@ -28,13 +28,13 @@ pub trait Cfg { fn is_base_fee_check_disabled(&self) -> bool; } -/// What bytecode analysis to perform. +/// What bytecode analysis to perform #[derive(Clone, Default, Debug, Eq, PartialEq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum AnalysisKind { - /// Do not perform bytecode analysis. + /// Do not perform bytecode analysis Raw, - /// Perform bytecode analysis. + /// Perform bytecode analysis #[default] Analyse, } @@ -42,15 +42,15 @@ pub enum AnalysisKind { /// Transaction destination pub type TransactTo = TxKind; -/// Create scheme. +/// Create scheme #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum CreateScheme { - /// Legacy create scheme of `CREATE`. + /// Legacy create scheme of `CREATE` Create, - /// Create scheme of `CREATE2`. + /// Create scheme of `CREATE2` Create2 { - /// Salt. 
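The Taylor-expansion note above refers to the integer approximation that EIP-4844 calls `fake_exponential`. A direct Rust transcription of the EIP's pseudocode (ignoring the overflow handling a production version needs); per the EIP, the blob gas price is this function evaluated with `MIN_BLOB_GASPRICE` as the factor and `excess_blob_gas / BLOB_GASPRICE_UPDATE_FRACTION` as the exponent.

```rust
/// Integer approximation of `factor * e^(numerator / denominator)`.
fn fake_exponential(factor: u128, numerator: u128, denominator: u128) -> u128 {
    let mut i = 1u128;
    let mut output = 0u128;
    let mut numerator_accum = factor * denominator;
    while numerator_accum > 0 {
        output += numerator_accum;
        // Next Taylor term: multiply by numerator/(denominator * i).
        numerator_accum = numerator_accum * numerator / (denominator * i);
        i += 1;
    }
    output / denominator
}
```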
+ /// Salt salt: U256, }, } diff --git a/crates/context/interface/src/errors.rs b/crates/context/interface/src/errors.rs index 731f438f61..ff4c078dc7 100644 --- a/crates/context/interface/src/errors.rs +++ b/crates/context/interface/src/errors.rs @@ -1,4 +1,4 @@ -/// TODO change name of the trait +// TODO : Change the name of the trait pub trait ErrorGetter { type Error; diff --git a/crates/context/interface/src/host.rs b/crates/context/interface/src/host.rs index 81f6277864..de2a2d127d 100644 --- a/crates/context/interface/src/host.rs +++ b/crates/context/interface/src/host.rs @@ -10,9 +10,9 @@ use crate::{ use primitives::{Address, Bytes, Log, B256, U256}; /// EVM context host. -/// TODO move to context-interface +// TODO : Move to context-interface pub trait Host { - /// Chain specification. + /// Chain specification type BLOCK: Block; type TX: Transaction; type CFG: Cfg; @@ -23,28 +23,28 @@ pub trait Host { /// Returns a mutable reference to the environment. fn block(&self) -> &Self::BLOCK; - /// TODO make it generic in future + // TODO : Make it generic in future fn cfg(&self) -> &Self::CFG; - /// Load an account code. + /// Loads an account code. fn load_account_delegated(&mut self, address: Address) -> Option; - /// Get the block hash of the given block `number`. + /// Gets the block hash of the given block `number`. fn block_hash(&mut self, number: u64) -> Option; - /// Get balance of `address` and if the account is cold. + /// Gets balance of `address` and if the account is cold. fn balance(&mut self, address: Address) -> Option>; - /// Get code of `address` and if the account is cold. + /// Gets code of `address` and if the account is cold. fn code(&mut self, address: Address) -> Option>; - /// Get code hash of `address` and if the account is cold. + /// Gets code hash of `address` and if the account is cold. fn code_hash(&mut self, address: Address) -> Option>; - /// Get storage value of `address` at `index` and if the account is cold. + /// Gets storage value of `address` at `index` and if the account is cold. fn sload(&mut self, address: Address, index: U256) -> Option>; - /// Set storage value of account address at index. + /// Sets storage value of account address at index. /// /// Returns [`StateLoad`] with [`SStoreResult`] that contains original/new/old storage value. fn sstore( @@ -54,16 +54,16 @@ pub trait Host { value: U256, ) -> Option>; - /// Get the transient storage value of `address` at `index`. + /// Gets the transient storage value of `address` at `index`. fn tload(&mut self, address: Address, index: U256) -> U256; - /// Set the transient storage value of `address` at `index`. + /// Sets the transient storage value of `address` at `index`. fn tstore(&mut self, address: Address, index: U256, value: U256); - /// Emit a log owned by `address` with given `LogData`. + /// Emits a log owned by `address` with given `LogData`. fn log(&mut self, log: Log); - /// Mark `address` to be deleted, with funds transferred to `target`. + /// Marks `address` to be deleted, with funds transferred to `target`. fn selfdestruct( &mut self, address: Address, @@ -121,7 +121,7 @@ impl SStoreResult { } } -/// Result of a selfdestruct action. +/// Result of a selfdestruct action /// /// Value returned are needed to calculate the gas spent. 
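Since `tload`/`tstore` above are easy to confuse with persistent storage, here is a std-only sketch of the EIP-1153 semantics they expose. Types are simplified stand-ins; the real implementation additionally journals writes so reverted call frames can undo them.

```rust
use std::collections::HashMap;

type Address = [u8; 20]; // stand-in for the real Address type
type Word = u128;        // stand-in for U256

/// Per-transaction storage: unset slots read as zero and the whole map is
/// discarded when the transaction ends.
#[derive(Default)]
struct TransientStorage(HashMap<(Address, Word), Word>);

impl TransientStorage {
    fn tload(&self, address: Address, index: Word) -> Word {
        self.0.get(&(address, index)).copied().unwrap_or(0)
    }

    fn tstore(&mut self, address: Address, index: Word, value: Word) {
        self.0.insert((address, index), value);
    }
}
```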
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] diff --git a/crates/context/interface/src/journaled_state.rs b/crates/context/interface/src/journaled_state.rs index b6cec51aac..2841298a9a 100644 --- a/crates/context/interface/src/journaled_state.rs +++ b/crates/context/interface/src/journaled_state.rs @@ -67,7 +67,7 @@ pub trait Journal { fn touch_account(&mut self, address: Address); - /// TODO instruction result is not known + // TODO : Instruction result is not known fn transfer( &mut self, from: &Address, @@ -95,10 +95,12 @@ pub trait Journal { address: Address, ) -> Result::Error>; - /// Set bytecode with hash. Assume that account is warm. + /// Sets bytecode with hash. Assume that account is warm. fn set_code_with_hash(&mut self, address: Address, code: Bytecode, hash: B256); - /// Assume account is warm + /// Sets bytecode and calculates hash. + /// + /// Assume account is warm. #[inline] fn set_code(&mut self, address: Address, code: Bytecode) { let hash = code.hash_slow(); @@ -130,12 +132,12 @@ pub trait Journal { fn finalize(&mut self) -> Result::Error>; } -/// Transfer and creation result. +/// Transfer and creation result #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum TransferError { /// Caller does not have enough funds OutOfFunds, - /// Overflow in target account. + /// Overflow in target account OverflowPayment, /// Create collision. CreateCollision, @@ -149,13 +151,13 @@ pub struct JournalCheckpoint { pub journal_i: usize, } -/// State load information that contains the data and if the account or storage is cold loaded. +/// State load information that contains the data and if the account or storage is cold loaded #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct StateLoad { - /// returned data + /// Returned data pub data: T, - /// True if account is cold loaded. + /// Is account is cold loaded pub is_cold: bool, } @@ -190,13 +192,13 @@ impl StateLoad { } } -/// Result of the account load from Journal state. +/// Result of the account load from Journal state #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct AccountLoad { /// Is account and delegate code are loaded pub load: Eip7702CodeLoad<()>, - /// Is account empty, if true account is not created. + /// Is account empty, if true account is not created pub is_empty: bool, } @@ -214,15 +216,15 @@ impl DerefMut for AccountLoad { } } -/// EIP-7702 code load result that contains optional delegation is_cold information. +/// EIP-7702 code load result that contains optional delegation is_cold information /// -/// [`Self::is_delegate_account_cold`] will be [`Some`] if account has delegation. +/// [`is_delegate_account_cold`][Self::is_delegate_account_cold] will be [`Some`] if account has delegation. #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Eip7702CodeLoad { - /// returned data + /// Returned data pub state_load: StateLoad, - /// True if account has delegate code and delegated account is cold loaded. + /// Does account have delegate code and delegated account is cold loaded pub is_delegate_account_cold: Option, } @@ -284,7 +286,7 @@ impl Eip7702CodeLoad { } } -/// Helper that extracts database error from [`JournalStateGetter`]. 
+/// Helper that extracts database error from [`JournalStateGetter`] pub type JournalStateGetterDBError = <<::Journal as Journal>::Database as Database>::Error; diff --git a/crates/context/interface/src/result.rs b/crates/context/interface/src/result.rs index e3569cbfd6..ea79bdfb78 100644 --- a/crates/context/interface/src/result.rs +++ b/crates/context/interface/src/result.rs @@ -22,7 +22,7 @@ pub struct ResultAndState { pub state: EvmState, } -/// Result of a transaction execution. +/// Result of a transaction execution #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum ExecutionResult { @@ -34,9 +34,9 @@ pub enum ExecutionResult { logs: Vec, output: Output, }, - /// Reverted by `REVERT` opcode that doesn't spend all gas. + /// Reverted by `REVERT` opcode that doesn't spend all gas Revert { gas_used: u64, output: Bytes }, - /// Reverted for various reasons and spend all gas. + /// Reverted for various reasons and spend all gas Halt { reason: HaltReasonT, /// Halting will spend all the gas, and will be equal to gas_limit. @@ -46,7 +46,9 @@ pub enum ExecutionResult { impl ExecutionResult { /// Returns if transaction execution is successful. + /// /// 1 indicates success, 0 indicates revert. + /// /// pub fn is_success(&self) -> bool { matches!(self, Self::Success { .. }) @@ -59,7 +61,7 @@ impl ExecutionResult { /// Returns the output data of the execution. /// - /// Returns `None` if the execution was halted. + /// Returns [`None`] if the execution was halted. pub fn output(&self) -> Option<&Bytes> { match self { Self::Success { output, .. } => Some(output.data()), @@ -70,7 +72,7 @@ impl ExecutionResult { /// Consumes the type and returns the output data of the execution. /// - /// Returns `None` if the execution was halted. + /// Returns [`None`] if the execution was halted. pub fn into_output(self) -> Option { match self { Self::Success { output, .. } => Some(output.into_data()), @@ -87,7 +89,7 @@ impl ExecutionResult { } } - /// Consumes `self` and returns the logs if execution is successful, or an empty list otherwise. + /// Consumes [`self`] and returns the logs if execution is successful, or an empty list otherwise. pub fn into_logs(self) -> Vec { match self { Self::Success { logs, .. } => logs, @@ -105,7 +107,7 @@ impl ExecutionResult { } } -/// Output of a transaction execution. +/// Output of a transaction execution #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Output { @@ -139,21 +141,21 @@ impl Output { } } -/// Main EVM error. +/// Main EVM error #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EVMError { - /// Transaction validation error. + /// Transaction validation error Transaction(TransactionError), - /// Header validation error. + /// Header validation error Header(InvalidHeader), - /// Database error. + /// Database error Database(DBError), - /// Custom error. + /// Custom error /// /// Useful for handler registers where custom logic would want to return their own custom error. Custom(String), - /// Precompile error. + /// Precompile error Precompile(String), } @@ -289,6 +291,7 @@ pub enum InvalidTransaction { /// There should be at least one blob in Blob transaction. EmptyBlobs, /// Blob transaction can't be a create transaction. 
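To make the Success/Revert/Halt split above concrete, a small stand-in enum that mirrors the shape (not the exact definition) of `ExecutionResult`, together with the `output` accessor's logic:

```rust
enum Outcome {
    Success { gas_used: u64, output: Vec<u8> },
    Revert { gas_used: u64, output: Vec<u8> },
    Halt { gas_used: u64 },
}

/// Success and revert both carry return data; an exceptional halt consumes
/// all gas and returns nothing, which is why its output is `None`.
fn output(outcome: &Outcome) -> Option<&[u8]> {
    match outcome {
        Outcome::Success { output, .. } | Outcome::Revert { output, .. } => {
            Some(output.as_slice())
        }
        Outcome::Halt { .. } => None,
    }
}
```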
+ /// /// `to` must be present BlobCreateTransaction, /// Transaction has more then [`specification::eip4844::MAX_BLOB_NUMBER_PER_BLOCK`] blobs @@ -427,8 +430,9 @@ pub enum SuccessReason { EofReturnContract, } -/// Indicates that the EVM has experienced an exceptional halt. This causes execution to -/// immediately end with all gas being consumed. +/// Indicates that the EVM has experienced an exceptional halt. +/// +/// This causes execution to immediately end with all gas being consumed. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum HaltReason { @@ -457,7 +461,7 @@ pub enum HaltReason { OutOfFunds, CallTooDeep, - /// Aux data overflow, new aux data is larger than u16 max size. + /// Aux data overflow, new aux data is larger than [u16] max size. EofAuxDataOverflow, /// Aud data is smaller then already present data size. EofAuxDataTooSmall, diff --git a/crates/context/interface/src/transaction.rs b/crates/context/interface/src/transaction.rs index 2f44399e9b..db59b2bd17 100644 --- a/crates/context/interface/src/transaction.rs +++ b/crates/context/interface/src/transaction.rs @@ -24,19 +24,19 @@ use primitives::{TxKind, U256}; /// Transaction validity error types. pub trait TransactionError: Debug + core::error::Error {} -/// Main Transaction trait that abstracts and specifies all transaction currently supported by Ethereum. +/// Main Transaction trait that abstracts and specifies all transaction currently supported by Ethereum /// -/// Access to any associated type is gaited behind `tx_type` function. +/// Access to any associated type is gaited behind [`tx_type`][Transaction::tx_type] function. /// /// It can be extended to support new transaction types and only transaction types can be /// deprecated by not returning tx_type. #[auto_impl(&, Box, Arc, Rc)] pub trait Transaction { - /// An error that occurs when validating a transaction. + /// An error that occurs when validating a transaction type TransactionError: TransactionError; - /// Transaction type. + /// Transaction type type TransactionType: Into; - /// Access list type. + /// Access list type type AccessList: AccessListTrait; type Legacy: LegacyTx; @@ -45,36 +45,39 @@ pub trait Transaction { type Eip4844: Eip4844Tx; type Eip7702: Eip7702Tx; - /// Transaction type. Depending on this field other functions should be called. - /// If transaction is Legacy, then `legacy()` should be called. + /// Returns the transaction type. + /// + /// Depending on this field other functions should be called. + /// + /// If transaction is Legacy, then [`legacy()`][Transaction::legacy] should be called. fn tx_type(&self) -> Self::TransactionType; - /// Legacy transaction. + /// Returns the legacy transaction. fn legacy(&self) -> &Self::Legacy { unimplemented!("legacy tx not supported") } - /// EIP-2930 transaction. + /// Returns EIP-2930 transaction. fn eip2930(&self) -> &Self::Eip2930 { unimplemented!("Eip2930 tx not supported") } - /// EIP-1559 transaction. + /// Returns EIP-1559 transaction. fn eip1559(&self) -> &Self::Eip1559 { unimplemented!("Eip1559 tx not supported") } - /// EIP-4844 transaction. + /// Returns EIP-4844 transaction. fn eip4844(&self) -> &Self::Eip4844 { unimplemented!("Eip4844 tx not supported") } - /// EIP-7702 transaction. + /// Returns EIP-7702 transaction. fn eip7702(&self) -> &Self::Eip7702 { unimplemented!("Eip7702 tx not supported") } - /// Common fields for all transactions. + /// Returns common fields for all transactions. 
fn common_fields(&self) -> &dyn CommonTxFields { match self.tx_type().into() { TransactionType::Legacy => self.legacy(), @@ -86,7 +89,7 @@ pub trait Transaction { } } - /// Maximum fee that can be paid for the transaction. + /// Returns maximum fee that can be paid for the transaction. fn max_fee(&self) -> u128 { match self.tx_type().into() { TransactionType::Legacy => self.legacy().gas_price(), @@ -98,8 +101,9 @@ pub trait Transaction { } } - /// Effective gas price is gas price field for Legacy and Eip2930 transaction - /// While for transactions after Eip1559 it is minimum of max_fee and base+max_priority_fee. + /// Returns effective gas price is gas price field for Legacy and Eip2930 transaction. + /// + /// While for transactions after Eip1559 it is minimum of max_fee and `base + max_priority_fee`. fn effective_gas_price(&self, base_fee: U256) -> U256 { let tx_type = self.tx_type().into(); let (max_fee, max_priority_fee) = match tx_type { @@ -123,7 +127,7 @@ pub trait Transaction { min(U256::from(max_fee), base_fee + U256::from(max_priority_fee)) } - /// Transaction kind. + /// Returns transaction kind. fn kind(&self) -> TxKind { let tx_type = self.tx_type().into(); match tx_type { diff --git a/crates/context/interface/src/transaction/access_list.rs b/crates/context/interface/src/transaction/access_list.rs index 578f355cb4..c12b27cd17 100644 --- a/crates/context/interface/src/transaction/access_list.rs +++ b/crates/context/interface/src/transaction/access_list.rs @@ -4,9 +4,7 @@ use primitives::{Address, B256}; /// Access list type is introduced in EIP-2930, and every /// transaction after it contains access list. /// -/// Note -/// -/// Iterator over access list returns account address and storage slot keys that +/// **Note**: Iterator over access list returns account address and storage slot keys that /// are warm loaded before transaction execution. /// /// Number of account and storage slots is used to calculate initial tx gas cost. @@ -24,7 +22,7 @@ pub trait AccessListTrait: Clone { } } -// TODO move to default context +// TODO : Move to default context use specification::eip2930::AccessList; impl AccessListTrait for AccessList { diff --git a/crates/context/interface/src/transaction/common.rs b/crates/context/interface/src/transaction/common.rs index 7bce4ece0d..f4bfe7a91a 100644 --- a/crates/context/interface/src/transaction/common.rs +++ b/crates/context/interface/src/transaction/common.rs @@ -2,6 +2,7 @@ use auto_impl::auto_impl; use primitives::{Address, Bytes, U256}; /// Trait that contains all common field that are shared by all transactions. +/// /// This trait is base for Legacy, EIp2930 and Eip1559 transactions. #[auto_impl(&, Box, Arc, Rc)] pub trait CommonTxFields { @@ -11,7 +12,7 @@ pub trait CommonTxFields { /// The maximum amount of gas the transaction can use. fn gas_limit(&self) -> u64; - /// The value sent to the receiver of `TxKind::Call`. + /// The value sent to the receiver of [`TxKind::Call`][primitives::TxKind::Call]. fn value(&self) -> U256; /// Returns the input data of the transaction. diff --git a/crates/context/interface/src/transaction/eip7702.rs b/crates/context/interface/src/transaction/eip7702.rs index 0e50632975..4f58b81f16 100644 --- a/crates/context/interface/src/transaction/eip7702.rs +++ b/crates/context/interface/src/transaction/eip7702.rs @@ -2,7 +2,8 @@ use super::Eip1559Tx; use auto_impl::auto_impl; use primitives::Address; -/// EIP-7702 transaction, TODO set Trait for AuthorizationList. 
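The `effective_gas_price` rule above condenses to a one-liner. A simplified sketch with plain integers (the real method works on `U256` and reads the fields off the per-type transaction views):

```rust
/// Legacy and EIP-2930 transactions pay their fixed gas price; EIP-1559-style
/// transactions pay `min(max_fee, base_fee + max_priority_fee)`.
fn effective_gas_price(base_fee: u128, max_fee: u128, max_priority_fee: Option<u128>) -> u128 {
    match max_priority_fee {
        None => max_fee, // pre-1559: `max_fee` holds the plain gas price
        Some(priority_fee) => max_fee.min(base_fee + priority_fee),
    }
}
```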
+/// EIP-7702 transaction +// TODO : Set Trait for AuthorizationList. #[auto_impl(&, Box, Arc, Rc)] pub trait Eip7702Tx: Eip1559Tx { /// Destination address of the call. @@ -11,7 +12,6 @@ pub trait Eip7702Tx: Eip1559Tx { /// Returns length of the authorization list. /// /// # Note - /// /// Transaction is considered invalid if list is empty. fn authorization_list_len(&self) -> usize; @@ -27,10 +27,9 @@ pub trait Eip7702Tx: Eip1559Tx { /// Authorization trait. #[auto_impl(&, Arc)] pub trait Authorization: Clone { - /// Authority address. + /// Authority address /// /// # Note - /// /// Authority signature can be invalid, so this method returns None if the authority /// could not be recovered. /// @@ -44,7 +43,6 @@ pub trait Authorization: Clone { /// Returns the nonce. /// /// # Note - /// /// If nonce is not same as the nonce of the signer account, /// the authorization is skipped. fn nonce(&self) -> u64; @@ -59,7 +57,7 @@ pub trait Authorization: Clone { fn is_invalid(&self) -> bool; } -// TODO move to default context +// TODO : Move to default context use specification::eip7702::RecoveredAuthorization; impl Authorization for RecoveredAuthorization { @@ -70,14 +68,13 @@ impl Authorization for RecoveredAuthorization { /// Returns authorization the chain id. fn chain_id(&self) -> u64 { - // TODO chain_id is set as u64 in newest EIP-7702 spec + // TODO : `chain_id` is set as u64 in newest EIP-7702 spec self.inner().chain_id().try_into().unwrap() } /// Returns the nonce. /// /// # Note - /// /// If nonce is not same as the nonce of the signer account, /// authorization is skipped and considered invalidated. fn nonce(&self) -> u64 { diff --git a/crates/context/interface/src/transaction/legacy.rs b/crates/context/interface/src/transaction/legacy.rs index 8a1c1fd0b0..b60b8e4efc 100644 --- a/crates/context/interface/src/transaction/legacy.rs +++ b/crates/context/interface/src/transaction/legacy.rs @@ -2,13 +2,14 @@ use super::CommonTxFields; use auto_impl::auto_impl; use primitives::TxKind; -/// Legacy transaction trait before introduction of EIP-2929 +/// Legacy transaction trait before introduction of EIP-2929. #[auto_impl(&, Box, Arc, Rc)] pub trait LegacyTx: CommonTxFields { /// Transaction kind. fn kind(&self) -> TxKind; - /// Chain Id is optional for legacy transactions + /// Chain Id is optional for legacy transactions. + /// /// As it was introduced in EIP-155. fn chain_id(&self) -> Option; diff --git a/crates/context/interface/src/transaction/transaction_type.rs b/crates/context/interface/src/transaction/transaction_type.rs index ef1d270d43..e3ec55910f 100644 --- a/crates/context/interface/src/transaction/transaction_type.rs +++ b/crates/context/interface/src/transaction/transaction_type.rs @@ -1,18 +1,17 @@ -/// Transaction types of all Ethereum transaction. - +/// Transaction types of all Ethereum transaction #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum TransactionType { - /// Legacy transaction type. + /// Legacy transaction type Legacy, - /// EIP-2930 Access List transaction type. + /// EIP-2930 Access List transaction type Eip2930, - /// EIP-1559 Fee market change transaction type. + /// EIP-1559 Fee market change transaction type Eip1559, - /// EIP-4844 Blob transaction type. + /// EIP-4844 Blob transaction type Eip4844, - /// EIP-7702 Set EOA account code transaction type. 
+ /// EIP-7702 Set EOA account code transaction type Eip7702, - /// Custom type means that transaction trait was extend and have custom types. + /// Custom type means that transaction trait was extend and have custom types Custom, } diff --git a/crates/context/src/block.rs b/crates/context/src/block.rs index dadf6873f6..48cfad02ee 100644 --- a/crates/context/src/block.rs +++ b/crates/context/src/block.rs @@ -1,40 +1,41 @@ use context_interface::block::{BlobExcessGasAndPrice, Block}; use primitives::{Address, B256, U256}; -/// The block environment. +/// The block environment #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockEnv { - /// The number of ancestor blocks of this block (block height). + /// The number of ancestor blocks of this block (block height) pub number: U256, - /// Beneficiary (Coinbase or miner) is a address that have signed the block. + /// Beneficiary (Coinbase or miner) is a address that have signed the block /// /// This is the receiver address of all the gas spent in the block. pub beneficiary: Address, - /// The timestamp of the block in seconds since the UNIX epoch. + /// The timestamp of the block in seconds since the UNIX epoch pub timestamp: U256, - /// The gas limit of the block. + /// The gas limit of the block pub gas_limit: U256, - /// The base fee per gas, added in the London upgrade with [EIP-1559]. + /// The base fee per gas, added in the London upgrade with [EIP-1559] /// /// [EIP-1559]: https://eips.ethereum.org/EIPS/eip-1559 pub basefee: U256, - /// The difficulty of the block. + /// The difficulty of the block /// /// Unused after the Paris (AKA the merge) upgrade, and replaced by `prevrandao`. pub difficulty: U256, - /// The output of the randomness beacon provided by the beacon chain. + /// The output of the randomness beacon provided by the beacon chain /// /// Replaces `difficulty` after the Paris (AKA the merge) upgrade with [EIP-4399]. /// - /// NOTE: `prevrandao` can be found in a block in place of `mix_hash`. + /// Note: `prevrandao` can be found in a block in place of `mix_hash`. /// /// [EIP-4399]: https://eips.ethereum.org/EIPS/eip-4399 pub prevrandao: Option, - /// Excess blob gas and blob gasprice. - /// See also [`context_interface::block::calc_excess_blob_gas`] - /// and [`context_interface::block::blob::calc_blob_gasprice`]. + /// Excess blob gas and blob gasprice + /// + /// See also [`calc_excess_blob_gas`][context_interface::block::calc_excess_blob_gas] + /// and [`calc_blob_gasprice`][context_interface::block::blob::calc_blob_gasprice]. /// /// Incorporated as part of the Cancun upgrade via [EIP-4844]. /// diff --git a/crates/context/src/cfg.rs b/crates/context/src/cfg.rs index 9176a04cf4..e5695ca306 100644 --- a/crates/context/src/cfg.rs +++ b/crates/context/src/cfg.rs @@ -3,48 +3,70 @@ pub use context_interface::Cfg; use interpreter::MAX_CODE_SIZE; use specification::hardfork::SpecId; -/// EVM configuration. +/// EVM configuration #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, Eq, PartialEq)] #[non_exhaustive] pub struct CfgEnv = SpecId> { - /// Chain ID of the EVM, it will be compared to the transaction's Chain ID. - /// Chain ID is introduced EIP-155 + /// Chain ID of the EVM + /// + /// `chain_id` will be compared to the transaction's Chain ID. + /// + /// Chain ID is introduced EIP-155. pub chain_id: u64, - /// Specification for EVM represent the hardfork. 
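For orientation, the variants above line up with the EIP-2718 envelope type bytes as sketched below; this mapping is not part of the diff, and `Custom` is the trait's extension point with no wire type.

```rust
#[derive(Debug, PartialEq, Eq)]
enum TxType {
    Legacy,
    Eip2930,
    Eip1559,
    Eip4844,
    Eip7702,
}

/// Untyped legacy transactions are conventionally treated as type 0.
fn from_type_byte(byte: u8) -> Option<TxType> {
    Some(match byte {
        0x00 => TxType::Legacy,
        0x01 => TxType::Eip2930,
        0x02 => TxType::Eip1559,
        0x03 => TxType::Eip4844,
        0x04 => TxType::Eip7702,
        _ => return None,
    })
}
```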
+ /// Specification for EVM represent the hardfork pub spec: SPEC, - /// If some it will effects EIP-170: Contract code size limit. Useful to increase this because of tests. - /// By default it is 0x6000 (~25kb). + /// If some it will effects EIP-170: Contract code size limit. + /// + /// Useful to increase this because of tests. + /// + /// By default it is `0x6000` (~25kb). pub limit_contract_code_size: Option, - /// Skips the nonce validation against the account's nonce. + /// Skips the nonce validation against the account's nonce pub disable_nonce_check: bool, - /// A hard memory limit in bytes beyond which [context_interface::result::OutOfGasError::Memory] cannot be resized. + /// A hard memory limit in bytes beyond which + /// [OutOfGasError::Memory][context_interface::result::OutOfGasError::Memory] cannot be resized. /// /// In cases where the gas limit may be extraordinarily high, it is recommended to set this to - /// a sane value to prevent memory allocation panics. Defaults to `2^32 - 1` bytes per - /// EIP-1985. + /// a sane value to prevent memory allocation panics. + /// + /// Defaults to `2^32 - 1` bytes per EIP-1985. #[cfg(feature = "memory_limit")] pub memory_limit: u64, - /// Skip balance checks if true. Adds transaction cost to balance to ensure execution doesn't fail. + /// Skip balance checks if `true` + /// + /// Adds transaction cost to balance to ensure execution doesn't fail. + /// + /// By default, it is set to `false`. #[cfg(feature = "optional_balance_check")] pub disable_balance_check: bool, - /// There are use cases where it's allowed to provide a gas limit that's higher than a block's gas limit. To that - /// end, you can disable the block gas limit validation. + /// There are use cases where it's allowed to provide a gas limit that's higher than a block's gas limit. + /// + /// To that end, you can disable the block gas limit validation. + /// /// By default, it is set to `false`. #[cfg(feature = "optional_block_gas_limit")] pub disable_block_gas_limit: bool, - /// EIP-3607 rejects transactions from senders with deployed code. In development, it can be desirable to simulate - /// calls from contracts, which this setting allows. + /// EIP-3607 rejects transactions from senders with deployed code + /// + /// In development, it can be desirable to simulate calls from contracts, which this setting allows. + /// /// By default, it is set to `false`. #[cfg(feature = "optional_eip3607")] pub disable_eip3607: bool, - /// Disables all gas refunds. This is useful when using chains that have gas refunds disabled e.g. Avalanche. + /// Disables all gas refunds + /// + /// This is useful when using chains that have gas refunds disabled, e.g. Avalanche. + /// /// Reasoning behind removing gas refunds can be found in EIP-3298. + /// /// By default, it is set to `false`. #[cfg(feature = "optional_gas_refund")] pub disable_gas_refund: bool, - /// Disables base fee checks for EIP-1559 transactions. + /// Disables base fee checks for EIP-1559 transactions + /// /// This is useful for testing method calls with zero gas price. + /// /// By default, it is set to `false`. #[cfg(feature = "optional_no_base_fee")] pub disable_base_fee: bool, diff --git a/crates/context/src/context.rs b/crates/context/src/context.rs index 7a356695bb..125e34706e 100644 --- a/crates/context/src/context.rs +++ b/crates/context/src/context.rs @@ -24,17 +24,17 @@ pub struct Context< JOURNAL: Journal = JournaledState, CHAIN = (), > { - /// Transaction information. 
+ /// Transaction information pub tx: TX, - /// Block information. + /// Block information pub block: BLOCK, - /// Configurations. + /// Configurations pub cfg: CFG, - /// EVM State with journaling support and database. + /// EVM State with journaling support and database pub journaled_state: JOURNAL, - /// Inner context. + /// Inner context pub chain: CHAIN, - /// Error that happened during execution. + /// Error that happened during execution pub error: Result<(), ::Error>, } @@ -83,18 +83,18 @@ where DB: Database, JOURNAL: Journal, { - /// Return account code bytes and if address is cold loaded. + /// Returns account code bytes and if address is cold loaded. /// /// In case of EOF account it will return `EOF_MAGIC` (0xEF00) as code. /// - /// TODO move this in Journaled state + // TODO : Move this in Journaled state #[inline] pub fn code( &mut self, address: Address, ) -> Result, ::Error> { let a = self.journaled_state.load_account_code(address)?; - // SAFETY: safe to unwrap as load_code will insert code if it is empty. + // SAFETY: Safe to unwrap as load_code will insert code if it is empty. let code = a.info.code.as_ref().unwrap(); if code.is_eof() { return Ok(Eip7702CodeLoad::new_not_delegated( @@ -109,7 +109,7 @@ where let delegated_account = self.journaled_state.load_account_code(address)?; - // SAFETY: safe to unwrap as load_code will insert code if it is empty. + // SAFETY: Safe to unwrap as load_code will insert code if it is empty. let delegated_code = delegated_account.info.code.as_ref().unwrap(); let bytes = if delegated_code.is_eof() { @@ -145,7 +145,7 @@ where } } - /// Create a new context with a new database type. + /// Creates a new context with a new database type. pub fn with_db( self, db: ODB, @@ -163,7 +163,7 @@ where } } - /// Create a new context with a new block type. + /// Creates a new context with a new block type. pub fn with_block(self, block: OB) -> Context { Context { tx: self.tx, @@ -175,7 +175,7 @@ where } } - /// Create a new context with a new transaction type. + /// Creates a new context with a new transaction type. pub fn with_tx( self, tx: OTX, @@ -190,7 +190,7 @@ where } } - /// Create a new context with a new chain type. + /// Creates a new context with a new chain type. pub fn with_chain(self, chain: OC) -> Context { Context { tx: self.tx, @@ -202,7 +202,7 @@ where } } - /// Create a new context with a new chain type. + /// Creates a new context with a new chain type. pub fn with_cfg( mut self, cfg: OCFG, @@ -218,7 +218,7 @@ where } } - /// Modify the context configuration. + /// Modifies the context configuration. #[must_use] pub fn modify_cfg_chained(mut self, f: F) -> Self where @@ -229,7 +229,7 @@ where self } - /// Modify the context block. + /// Modifies the context block. #[must_use] pub fn modify_block_chained(mut self, f: F) -> Self where @@ -239,7 +239,7 @@ where self } - /// Modify the context transaction. + /// Modifies the context transaction. #[must_use] pub fn modify_tx_chained(mut self, f: F) -> Self where @@ -249,7 +249,7 @@ where self } - /// Modify the context chain. + /// Modifies the context chain. #[must_use] pub fn modify_chain_chained(mut self, f: F) -> Self where @@ -259,7 +259,7 @@ where self } - /// Modify the context database. + /// Modifies the context database. #[must_use] pub fn modify_db_chained(mut self, f: F) -> Self where @@ -269,7 +269,7 @@ where self } - /// Modify the context journal. + /// Modifies the context journal. 
#[must_use] pub fn modify_journal_chained(mut self, f: F) -> Self where @@ -279,7 +279,7 @@ where self } - /// Modify the context block. + /// Modifies the context block. pub fn modify_block(&mut self, f: F) where F: FnOnce(&mut BLOCK), @@ -323,12 +323,12 @@ where f(&mut self.journaled_state); } - /// Get code hash of address. + /// Gets code hash of address. /// /// In case of EOF account it will return `EOF_MAGIC_HASH` /// (the hash of `0xEF00`). /// - /// TODO move this in Journaled state + // TODO : Move this in Journaled state #[inline] pub fn code_hash( &mut self, @@ -338,7 +338,7 @@ where if acc.is_empty() { return Ok(Eip7702CodeLoad::new_not_delegated(B256::ZERO, acc.is_cold)); } - // SAFETY: safe to unwrap as load_code will insert code if it is empty. + // SAFETY: Safe to unwrap as load_code will insert code if it is empty. let code = acc.info.code.as_ref().unwrap(); // If bytecode is EIP-7702 then we need to load the delegated account. diff --git a/crates/context/src/journaled_state.rs b/crates/context/src/journaled_state.rs index 0f8bb77eca..7badf33c8c 100644 --- a/crates/context/src/journaled_state.rs +++ b/crates/context/src/journaled_state.rs @@ -13,27 +13,29 @@ use state::{Account, EvmState, EvmStorageSlot, TransientStorage}; use core::mem; use std::{vec, vec::Vec}; -/// A journal of state changes internal to the EVM. +/// A journal of state changes internal to the EVM /// -/// On each additional call, the depth of the journaled state is increased (`depth`) and a new journal is added. The journal contains every state change that happens within that call, making it possible to revert changes made in a specific call. +/// On each additional call, the depth of the journaled state is increased (`depth`) and a new journal is added. +/// +/// The journal contains every state change that happens within that call, making it possible to revert changes made in a specific call. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct JournaledState { /// Database pub database: DB, - /// The current state. + /// The current state pub state: EvmState, /// Transient storage that is discarded after every transaction. /// /// See [EIP-1153](https://eips.ethereum.org/EIPS/eip-1153). pub transient_storage: TransientStorage, - /// Emitted logs. + /// Emitted logs pub logs: Vec, - /// The current call stack depth. + /// The current call stack depth pub depth: usize, - /// The journal of state changes, one for each call. + /// The journal of state changes, one for each call pub journal: Vec>, - /// The spec ID for the EVM. + /// The spec ID for the EVM /// /// This spec is used for two things: /// @@ -54,7 +56,7 @@ pub struct JournaledState { impl Journal for JournaledState { type Database = DB; - // TODO make a struck here. + // TODO : Make a struck here. type FinalOutput = (EvmState, Vec); fn new(database: DB) -> JournaledState { @@ -135,7 +137,7 @@ impl Journal for JournaledState { to: &Address, balance: U256, ) -> Result, DB::Error> { - // TODO handle instruction result + // TODO : Handle instruction result self.transfer(from, to, balance) } @@ -195,7 +197,7 @@ impl Journal for JournaledState { balance: U256, spec_id: SpecId, ) -> Result { - // ignore error. + // Ignore error. self.create_account_checkpoint(caller, address, balance, spec_id) } @@ -223,13 +225,12 @@ impl Journal for JournaledState { } impl JournaledState { - /// Create new JournaledState. + /// Creates new JournaledState. 
/// - /// warm_preloaded_addresses is used to determine if address is considered warm loaded. + /// `warm_preloaded_addresses` is used to determine if address is considered warm loaded. /// In ordinary case this is precompile or beneficiary. /// /// # Note - /// /// This function will journal state after Spurious Dragon fork. /// And will not take into account if account is not existing or empty. pub fn new(spec: SpecId, database: DB) -> JournaledState { @@ -307,8 +308,9 @@ impl JournaledState { account.info.code = Some(code); } - /// use it only if you know that acc is warm - /// Assume account is warm + /// Use it only if you know that acc is warm. + /// + /// Assume account is warm. #[inline] pub fn set_code(&mut self, address: Address, code: Bytecode) { let hash = code.hash_slow(); @@ -384,7 +386,7 @@ impl JournaledState { Ok(None) } - /// Create account or return false if collision is detected. + /// Creates account or returns false if collision is detected. /// /// There are few steps done: /// 1. Make created account warm loaded (AccessList) and this should @@ -469,7 +471,7 @@ impl JournaledState { Ok(checkpoint) } - /// Revert all changes that happened in given journal entries. + /// Reverts all changes that happened in given journal entries. #[inline] fn journal_revert( state: &mut EvmState, @@ -588,7 +590,7 @@ impl JournaledState { checkpoint } - /// Commit the checkpoint. + /// Commits the checkpoint. #[inline] pub fn checkpoint_commit(&mut self) { self.depth -= 1; @@ -620,14 +622,14 @@ impl JournaledState { self.journal.truncate(checkpoint.journal_i); } - /// Performances selfdestruct action. + /// Performs selfdestruct action. /// Transfers balance from address to target. Check if target exist/is_cold /// - /// Note: balance will be lost if address and target are the same BUT when + /// Note: Balance will be lost if address and target are the same BUT when /// current spec enables Cancun, this happens only when the account associated to address /// is created in the same tx /// - /// references: + /// # References: /// * /// * /// * @@ -723,7 +725,7 @@ impl JournaledState { Ok(account) } - /// load account into memory. return if it is cold or warm accessed + /// Loads account into memory. return if it is cold or warm accessed #[inline] pub fn load_account(&mut self, address: Address) -> Result, DB::Error> { self.load_account_optional(address, false) @@ -755,7 +757,7 @@ impl JournaledState { self.load_account_optional(address, true) } - /// Loads code. + /// Loads code #[inline] pub fn load_account_optional( &mut self, @@ -810,7 +812,7 @@ impl JournaledState { Ok(load) } - /// Load storage slot + /// Loads storage slot. /// /// # Panics /// @@ -853,11 +855,10 @@ impl JournaledState { } /// Stores storage slot. - /// And returns (original,present,new) slot value. /// - /// Note: + /// And returns (original,present,new) slot value. /// - /// account should already be present in our state. + /// **Note**: Account should already be present in our state. #[inline] pub fn sstore( &mut self, @@ -957,7 +958,7 @@ impl JournaledState { } } - /// push log into subroutine + /// Pushes log into subroutine. #[inline] pub fn log(&mut self, log: Log) { self.logs.push(log); diff --git a/crates/context/src/tx.rs b/crates/context/src/tx.rs index 75474e6865..97ef568741 100644 --- a/crates/context/src/tx.rs +++ b/crates/context/src/tx.rs @@ -12,69 +12,74 @@ use specification::eip2930::AccessList; use specification::eip7702::AuthorizationList; use std::vec::Vec; -/// The transaction environment. 
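The checkpoint/revert machinery above follows a simple pattern: a checkpoint is just a journal length, and reverting pops everything recorded after it. A std-only sketch of the idea, with simplified stand-in entry types:

```rust
/// Simplified stand-ins for the real per-call journal entries.
#[derive(Debug)]
enum Entry {
    NonceChange { account: [u8; 20], previous: u64 },
    StorageChange { account: [u8; 20], slot: u128, previous: u128 },
}

/// Remembers how long the journal was when the checkpoint was taken.
struct Checkpoint {
    journal_len: usize,
}

#[derive(Default)]
struct MiniJournal {
    entries: Vec<Entry>,
}

impl MiniJournal {
    fn checkpoint(&self) -> Checkpoint {
        Checkpoint { journal_len: self.entries.len() }
    }

    /// Pops every entry recorded after the checkpoint, newest first, so the
    /// caller can undo them in reverse order; committing is simply dropping
    /// the checkpoint and keeping the entries.
    fn revert(&mut self, checkpoint: Checkpoint) -> Vec<Entry> {
        let mut undone = self.entries.split_off(checkpoint.journal_len);
        undone.reverse();
        undone
    }
}
```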
+/// The transaction environment #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TxEnv { pub tx_type: TransactionType, - /// Caller aka Author aka transaction signer. + /// Caller aka Author aka transaction signer pub caller: Address, - /// The gas limit of the transaction. + /// The gas limit of the transaction pub gas_limit: u64, - /// The gas price of the transaction. + /// The gas price of the transaction pub gas_price: U256, - /// The destination of the transaction. + /// The destination of the transaction pub transact_to: TxKind, - /// The value sent to `transact_to`. + /// The value sent to `transact_to` pub value: U256, - /// The data of the transaction. + /// The data of the transaction pub data: Bytes, - /// The nonce of the transaction. + /// The nonce of the transaction pub nonce: u64, - /// The chain ID of the transaction. If set to `None`, no checks are performed. + /// The chain ID of the transaction + /// + /// If set to [`None`], no checks are performed. /// /// Incorporated as part of the Spurious Dragon upgrade via [EIP-155]. /// /// [EIP-155]: https://eips.ethereum.org/EIPS/eip-155 pub chain_id: Option, - /// A list of addresses and storage keys that the transaction plans to access. + /// A list of addresses and storage keys that the transaction plans to access /// /// Added in [EIP-2930]. /// /// [EIP-2930]: https://eips.ethereum.org/EIPS/eip-2930 pub access_list: AccessList, - /// The priority fee per gas. + /// The priority fee per gas /// /// Incorporated as part of the London upgrade via [EIP-1559]. /// /// [EIP-1559]: https://eips.ethereum.org/EIPS/eip-1559 pub gas_priority_fee: Option, - /// The list of blob versioned hashes. Per EIP there should be at least - /// one blob present if [`Self::max_fee_per_blob_gas`] is `Some`. + /// The list of blob versioned hashes + /// + /// Per EIP there should be at least one blob present if [`max_fee_per_blob_gas`][Self::max_fee_per_blob_gas] is [`Some`]. /// /// Incorporated as part of the Cancun upgrade via [EIP-4844]. /// /// [EIP-4844]: https://eips.ethereum.org/EIPS/eip-4844 pub blob_hashes: Vec, - /// The max fee per blob gas. + /// The max fee per blob gas /// /// Incorporated as part of the Cancun upgrade via [EIP-4844]. /// /// [EIP-4844]: https://eips.ethereum.org/EIPS/eip-4844 pub max_fee_per_blob_gas: Option, - /// List of authorizations, that contains the signature that authorizes this + /// List of authorizations + /// + /// `authorization_list` contains the signature that authorizes this /// caller to place the code to signer account. /// - /// Set EOA account code for one transaction + /// Set EOA account code for one transaction via [EIP-7702]. /// - /// [EIP-Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702) + /// [EIP-7702]: https://eips.ethereum.org/EIPS/eip-7702 pub authorization_list: AuthorizationList, } diff --git a/crates/database/interface/src/async_db.rs b/crates/database/interface/src/async_db.rs index 1332bcc15a..2b99a6bff2 100644 --- a/crates/database/interface/src/async_db.rs +++ b/crates/database/interface/src/async_db.rs @@ -6,70 +6,70 @@ use tokio::runtime::{Handle, Runtime}; use crate::{DBErrorMarker, Database, DatabaseRef}; -/// The async EVM database interface. +/// The async EVM database interface /// /// Contains the same methods as [Database], but it returns [Future] type instead. 
/// /// Use [WrapDatabaseAsync] to provide [Database] implementation for a type that only implements this trait. pub trait DatabaseAsync { - /// The database error type. + /// The database error type type Error: Send + DBErrorMarker; - /// Get basic account information. + /// Gets basic account information. fn basic_async( &mut self, address: Address, ) -> impl Future, Self::Error>> + Send; - /// Get account code by its hash. + /// Gets account code by its hash. fn code_by_hash_async( &mut self, code_hash: B256, ) -> impl Future> + Send; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage_async( &mut self, address: Address, index: U256, ) -> impl Future> + Send; - /// Get block hash by block number. + /// Gets block hash by block number. fn block_hash_async( &mut self, number: u64, ) -> impl Future> + Send; } -/// The async EVM database interface. +/// The async EVM database interface /// /// Contains the same methods as [DatabaseRef], but it returns [Future] type instead. /// /// Use [WrapDatabaseAsync] to provide [DatabaseRef] implementation for a type that only implements this trait. pub trait DatabaseAsyncRef { - /// The database error type. + /// The database error type type Error: Send + DBErrorMarker; - /// Get basic account information. + /// Gets basic account information. fn basic_async_ref( &self, address: Address, ) -> impl Future, Self::Error>> + Send; - /// Get account code by its hash. + /// Gets account code by its hash. fn code_by_hash_async_ref( &self, code_hash: B256, ) -> impl Future> + Send; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage_async_ref( &self, address: Address, index: U256, ) -> impl Future> + Send; - /// Get block hash by block number. + /// Gets block hash by block number. fn block_hash_async_ref( &self, number: u64, @@ -84,7 +84,7 @@ pub struct WrapDatabaseAsync { } impl WrapDatabaseAsync { - /// Wrap a [DatabaseAsync] or [DatabaseAsyncRef] instance. + /// Wraps a [DatabaseAsync] or [DatabaseAsyncRef] instance. /// /// Returns `None` if no tokio runtime is available or if the current runtime is a current-thread runtime. pub fn new(db: T) -> Option { @@ -98,20 +98,22 @@ impl WrapDatabaseAsync { Some(Self { db, rt }) } - /// Wrap a [DatabaseAsync] or [DatabaseAsyncRef] instance, with a runtime. + /// Wraps a [DatabaseAsync] or [DatabaseAsyncRef] instance, with a runtime. /// /// Refer to [tokio::runtime::Builder] on how to create a runtime if you are in synchronous world. - /// If you are already using something like [tokio::main], call [WrapDatabaseAsync::new] instead. + /// + /// If you are already using something like [tokio::main], call [`WrapDatabaseAsync::new`] instead. pub fn with_runtime(db: T, runtime: Runtime) -> Self { let rt = HandleOrRuntime::Runtime(runtime); Self { db, rt } } - /// Wrap a [DatabaseAsync] or [DatabaseAsyncRef] instance, with a runtime handle. + /// Wraps a [DatabaseAsync] or [DatabaseAsyncRef] instance, with a runtime handle. /// /// This generally allows you to pass any valid runtime handle, refer to [tokio::runtime::Handle] on how - /// to obtain a handle. If you are already in asynchronous world, like [tokio::main], use [WrapDatabaseAsync::new] - /// instead. + /// to obtain a handle. + /// + /// If you are already in asynchronous world, like [tokio::main], use [`WrapDatabaseAsync::new`] instead. 
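The wrapper above bridges async data sources into the synchronous database interface by blocking on a runtime handle. A minimal sketch of that bridging pattern; the async fetcher here is hypothetical, not a revm API, and it assumes a tokio dependency with the `rt` feature.

```rust
use tokio::runtime::Handle;

/// Hypothetical async data source standing in for a `DatabaseAsync` method.
async fn fetch_block_hash(number: u64) -> [u8; 32] {
    let _ = number; // a real implementation would await a network or DB call
    [0u8; 32]
}

/// Synchronous front-end: drive the future to completion on an existing
/// runtime handle. `Handle::block_on` must not be called from inside an async
/// context, and a current-thread runtime cannot drive the future from its own
/// handle alone, which is why the wrapper's constructor checks the runtime flavor.
fn block_hash_blocking(handle: &Handle, number: u64) -> [u8; 32] {
    handle.block_on(fetch_block_hash(number))
}
```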
pub fn with_handle(db: T, handle: Handle) -> Self { let rt = HandleOrRuntime::Handle(handle); Self { db, rt } diff --git a/crates/database/interface/src/empty_db.rs b/crates/database/interface/src/empty_db.rs index b58cbeac9a..ec373139bd 100644 --- a/crates/database/interface/src/empty_db.rs +++ b/crates/database/interface/src/empty_db.rs @@ -4,10 +4,10 @@ use primitives::{keccak256, Address, B256, U256}; use state::{AccountInfo, Bytecode}; use std::string::ToString; -/// An empty database that always returns default values when queried. +/// An empty database that always returns default values when queried pub type EmptyDB = EmptyDBTyped; -/// An empty database that always returns default values when queried. +/// An empty database that always returns default values when queried /// /// This is generic over a type which is used as the database error type. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/database/interface/src/lib.rs b/crates/database/interface/src/lib.rs index 9548827b60..9e2fc53fa3 100644 --- a/crates/database/interface/src/lib.rs +++ b/crates/database/interface/src/lib.rs @@ -36,16 +36,16 @@ pub trait Database { type Error: DBErrorMarker; //type Bytecode: BytecodeTrait; - /// Get basic account information. + /// Gets basic account information. fn basic(&mut self, address: Address) -> Result, Self::Error>; - /// Get account code by its hash. + /// Gets account code by its hash. fn code_by_hash(&mut self, code_hash: B256) -> Result; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage(&mut self, address: Address, index: U256) -> Result; - /// Get block hash by block number. + /// Gets block hash by block number. fn block_hash(&mut self, number: u64) -> Result; } @@ -67,16 +67,16 @@ pub trait DatabaseRef { /// The database error type. type Error: DBErrorMarker; - /// Get basic account information. + /// Gets basic account information. fn basic_ref(&self, address: Address) -> Result, Self::Error>; - /// Get account code by its hash. + /// Gets account code by its hash. fn code_by_hash_ref(&self, code_hash: B256) -> Result; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage_ref(&self, address: Address, index: U256) -> Result; - /// Get block hash by block number. + /// Gets block hash by block number. fn block_hash_ref(&self, number: u64) -> Result; } diff --git a/crates/database/src/alloydb.rs b/crates/database/src/alloydb.rs index 60fcdd7887..0ae8531ffb 100644 --- a/crates/database/src/alloydb.rs +++ b/crates/database/src/alloydb.rs @@ -22,7 +22,7 @@ impl From for DBTransportError { } } -/// An alloy-powered REVM [database_interface::Database]. +/// An alloy-powered REVM [Database][database_interface::Database]. /// /// When accessing the database, it'll use the given provider to fetch the corresponding account's data. #[derive(Debug)] @@ -35,7 +35,7 @@ pub struct AlloyDB> { } impl> AlloyDB { - /// Create a new AlloyDB instance, with a [Provider] and a block. + /// Creates a new AlloyDB instance, with a [Provider] and a block. pub fn new(provider: P, block_number: BlockId) -> Self { Self { provider, @@ -44,7 +44,7 @@ impl> AlloyDB { } } - /// Set the block number on which the queries will be based on. + /// Sets the block number on which the queries will be based on. 
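The `Database` trait changes above are wording-only, but the four required methods make the contract easy to see. Below is a sketch of a do-nothing implementation in the spirit of `EmptyDB`; the crate paths, the exact return types, and `DBErrorMarker` being a plain marker trait are assumptions taken from this diff.

    use database_interface::{DBErrorMarker, Database}; // paths assumed
    use primitives::{Address, B256, U256};
    use state::{AccountInfo, Bytecode};

    /// Hypothetical error type; `DBErrorMarker` is treated as an empty marker trait here.
    #[derive(Debug)]
    struct NullDbError;
    impl DBErrorMarker for NullDbError {}

    /// Answers every query with a default value, similar to `EmptyDB`.
    struct NullDb;

    impl Database for NullDb {
        type Error = NullDbError;

        fn basic(&mut self, _address: Address) -> Result<Option<AccountInfo>, Self::Error> {
            Ok(None) // account unknown
        }

        fn code_by_hash(&mut self, _code_hash: B256) -> Result<Bytecode, Self::Error> {
            Ok(Bytecode::default()) // empty bytecode
        }

        fn storage(&mut self, _address: Address, _index: U256) -> Result<U256, Self::Error> {
            Ok(U256::ZERO) // empty slot
        }

        fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> {
            Ok(B256::ZERO) // placeholder hash
        }
    }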
pub fn set_block_number(&mut self, block_number: BlockId) { self.block_number = block_number; } diff --git a/crates/database/src/in_memory_db.rs b/crates/database/src/in_memory_db.rs index 5d66a16597..447498a75f 100644 --- a/crates/database/src/in_memory_db.rs +++ b/crates/database/src/in_memory_db.rs @@ -28,7 +28,7 @@ pub struct CacheDB { pub block_hashes: HashMap, /// The underlying database ([DatabaseRef]) that is used to load data. /// - /// Note: this is read-only, data is never written to this database. + /// Note: This is read-only, data is never written to this database. pub db: ExtDB, } @@ -39,7 +39,7 @@ impl Default for CacheDB { } impl CacheDB> { - /// Flatten a nested cache by applying the outer cache to the inner cache. + /// Flattens a nested cache by applying the outer cache to the inner cache. /// /// The behavior is as follows: /// - Accounts are overridden with outer accounts @@ -62,14 +62,14 @@ impl CacheDB> { inner } - /// Discard the outer cache and return the inner cache. + /// Discards the outer cache and return the inner cache. pub fn discard_outer(self) -> CacheDB { self.db } } impl CacheDB { - /// Create a new cache with the given external database. + /// Creates a new cache with the given external database. pub fn new(db: ExtDB) -> Self { let mut contracts = HashMap::default(); contracts.insert(KECCAK_EMPTY, Bytecode::default()); @@ -104,13 +104,13 @@ impl CacheDB { } } - /// Insert account info but not override storage + /// Inserts account info but not override storage pub fn insert_account_info(&mut self, address: Address, mut info: AccountInfo) { self.insert_contract(&mut info); self.accounts.entry(address).or_default().info = info; } - /// Wrap the cache in a [CacheDB], creating a nested cache. + /// Wraps the cache in a [CacheDB], creating a nested cache. pub fn nest(self) -> CacheDB { CacheDB::new(self) } @@ -135,7 +135,7 @@ impl CacheDB { } } - /// insert account storage without overriding account info + /// Inserts account storage without overriding account info pub fn insert_account_storage( &mut self, address: Address, @@ -147,7 +147,7 @@ impl CacheDB { Ok(()) } - /// replace account storage without overriding account info + /// Replaces account storage without overriding account info pub fn replace_account_storage( &mut self, address: Address, @@ -221,7 +221,7 @@ impl Database for CacheDB { match self.contracts.entry(code_hash) { Entry::Occupied(entry) => Ok(entry.get().clone()), Entry::Vacant(entry) => { - // if you return code bytes when basic fn is called this function is not needed. + // If you return code bytes when basic fn is called this function is not needed. Ok(entry.insert(self.db.code_by_hash_ref(code_hash)?).clone()) } } @@ -251,7 +251,7 @@ impl Database for CacheDB { } } Entry::Vacant(acc_entry) => { - // acc needs to be loaded for us to access slots. + // Acc needs to be loaded for us to access slots. let info = self.db.basic_ref(address)?; let (account, value) = if info.is_some() { let value = self.db.storage_ref(address, index)?; @@ -329,7 +329,7 @@ pub struct DbAccount { pub info: AccountInfo, /// If account is selfdestructed or newly created, storage will be cleared. 
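A sketch of the nesting flow documented for `CacheDB::nest`/`discard_outer` above: seed an inner cache, stack a speculative layer on top, then throw the layer away. Crate paths, `AccountInfo::default()`, and the `(address, slot, value)` parameter order of `insert_account_storage` are assumptions.

    use database::CacheDB;           // path assumed
    use database_interface::EmptyDB; // path assumed
    use primitives::{address, U256};
    use state::AccountInfo;

    fn speculative_cache() -> CacheDB<EmptyDB> {
        let acc = address!("1000000000000000000000000000000000000001");

        // Inner cache over the empty database, seeded with one account.
        let mut inner = CacheDB::new(EmptyDB::default());
        let mut info = AccountInfo::default(); // assumed Default impl
        info.balance = U256::from(1_000u64);
        inner.insert_account_info(acc, info);

        // Outer, speculative layer: writes land here, not in `inner`.
        let mut outer = inner.nest();
        outer
            .insert_account_storage(acc, U256::from(1), U256::from(42)) // (address, slot, value) assumed
            .expect("inner cache can always be read");

        // Dropping the outer layer discards the speculative storage write.
        outer.discard_outer()
    }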
pub account_state: AccountState, - /// storage slots + /// Storage slots pub storage: HashMap, } diff --git a/crates/database/src/states/account_status.rs b/crates/database/src/states/account_status.rs index 4de97c8824..20bc0cf91f 100644 --- a/crates/database/src/states/account_status.rs +++ b/crates/database/src/states/account_status.rs @@ -72,11 +72,11 @@ impl AccountStatus { /// Returns the next account status on creation. pub fn on_created(&self) -> AccountStatus { match self { - // if account was destroyed previously just copy new info to it. + // If account was destroyed previously just copy new info to it. AccountStatus::DestroyedAgain | AccountStatus::Destroyed | AccountStatus::DestroyedChanged => AccountStatus::DestroyedChanged, - // if account is loaded from db. + // If account is loaded from db. AccountStatus::LoadedNotExisting // Loaded empty eip161 to creates is not possible as CREATE2 was added after EIP-161 | AccountStatus::LoadedEmptyEIP161 @@ -156,10 +156,10 @@ impl AccountStatus { // The account was loaded as existing. AccountStatus::Loaded => { if had_no_nonce_and_code { - // account is fully in memory + // Account is fully in memory AccountStatus::InMemoryChange } else { - // can be contract and some of storage slots can be present inside db. + // Can be contract and some of storage slots can be present inside db. AccountStatus::Changed } } @@ -184,7 +184,7 @@ impl AccountStatus { // Non existing account can't be destroyed. AccountStatus::LoadedNotExisting => AccountStatus::LoadedNotExisting, // If account is created and selfdestructed in the same block, mark it as destroyed again. - // Note: there is no big difference between Destroyed and DestroyedAgain in this case, + // Note: There is no big difference between Destroyed and DestroyedAgain in this case, // but was added for clarity. AccountStatus::DestroyedChanged | AccountStatus::DestroyedAgain @@ -198,15 +198,15 @@ impl AccountStatus { /// Transition to other state while preserving invariance of this state. /// /// It this account was Destroyed and other account is not: - /// we should mark extended account as destroyed too. - /// and as other account had some changes, extended account - /// should be marked as DestroyedChanged. + /// - We should mark extended account as destroyed too. + /// - And as other account had some changes, extended account + /// should be marked as DestroyedChanged. /// /// If both account are not destroyed and if this account is in memory: - /// this means that extended account is in memory too. + /// - This means that extended account is in memory too. /// /// Otherwise, if both are destroyed or other is destroyed: - /// set other status to extended account. + /// - Sets other status to extended account. 
pub fn transition(&mut self, other: Self) { *self = match (self.was_destroyed(), other.was_destroyed()) { (true, false) => Self::DestroyedChanged, @@ -223,7 +223,7 @@ mod test { #[test] fn test_account_status() { - // account not modified + // Account not modified assert!(AccountStatus::Loaded.is_not_modified()); assert!(AccountStatus::LoadedEmptyEIP161.is_not_modified()); assert!(AccountStatus::LoadedNotExisting.is_not_modified()); @@ -233,7 +233,7 @@ mod test { assert!(!AccountStatus::DestroyedChanged.is_not_modified()); assert!(!AccountStatus::DestroyedAgain.is_not_modified()); - // we know full storage + // We know full storage assert!(!AccountStatus::LoadedEmptyEIP161.is_storage_known()); assert!(AccountStatus::LoadedNotExisting.is_storage_known()); assert!(AccountStatus::InMemoryChange.is_storage_known()); @@ -243,7 +243,7 @@ mod test { assert!(!AccountStatus::Loaded.is_storage_known()); assert!(!AccountStatus::Changed.is_storage_known()); - // account was destroyed + // Account was destroyed assert!(!AccountStatus::LoadedEmptyEIP161.was_destroyed()); assert!(!AccountStatus::LoadedNotExisting.was_destroyed()); assert!(!AccountStatus::InMemoryChange.was_destroyed()); @@ -253,7 +253,7 @@ mod test { assert!(!AccountStatus::Loaded.was_destroyed()); assert!(!AccountStatus::Changed.was_destroyed()); - // account modified but not destroyed + // Account modified but not destroyed assert!(AccountStatus::Changed.is_modified_and_not_destroyed()); assert!(AccountStatus::InMemoryChange.is_modified_and_not_destroyed()); assert!(!AccountStatus::Loaded.is_modified_and_not_destroyed()); diff --git a/crates/database/src/states/bundle_account.rs b/crates/database/src/states/bundle_account.rs index 801646c624..d1379dec99 100644 --- a/crates/database/src/states/bundle_account.rs +++ b/crates/database/src/states/bundle_account.rs @@ -11,6 +11,7 @@ use state::AccountInfo; /// Status is needed as to know from what state we are applying the TransitionAccount. /// /// Original account info is needed to know if there was a change. +/// /// Same thing for storage with original value. /// /// On selfdestruct storage original value is ignored. @@ -46,6 +47,7 @@ impl BundleAccount { } /// The approximate size of changes needed to store this account. + /// /// `1 + storage_len` pub fn size_hint(&self) -> usize { 1 + self.storage.len() @@ -97,7 +99,7 @@ impl BundleAccount { self.storage = HashMap::default(); return true; } else { - // set all storage to zero but preserve original values. + // Set all storage to zero but preserve original values. self.storage.iter_mut().for_each(|(_, v)| { v.present_value = U256::ZERO; }); @@ -106,7 +108,7 @@ impl BundleAccount { } AccountInfoRevert::RevertTo(info) => self.info = Some(info), }; - // revert storage + // Revert storage for (key, slot) in revert.storage { match slot { RevertToSlot::Some(value) => { @@ -118,7 +120,7 @@ impl BundleAccount { .present_value = value; } RevertToSlot::Destroyed => { - // if it was destroyed this means that storage was created and we need to remove it. + // If it was destroyed this means that storage was created and we need to remove it. self.storage.remove(&key); } } @@ -127,7 +129,9 @@ impl BundleAccount { } /// Update to new state and generate AccountRevert that if applied to new state will - /// revert it to previous state. If no revert is present, update is noop. + /// revert it to previous state. + /// + /// If no revert is present, update is noop. 
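The `transition` rules spelled out above can be exercised directly; this sketch mirrors the `(true, false) => DestroyedChanged` arm visible in the code and the "other destroyed wins" rule from the doc comment. The import path is assumed.

    use database::states::AccountStatus; // path assumed

    fn transition_examples() {
        // Destroyed account extended with a non-destroyed update => DestroyedChanged.
        let mut status = AccountStatus::Destroyed;
        status.transition(AccountStatus::Changed);
        assert!(matches!(status, AccountStatus::DestroyedChanged));

        // If the other side was destroyed, its status is taken over.
        let mut status = AccountStatus::Changed;
        status.transition(AccountStatus::DestroyedAgain);
        assert!(matches!(status, AccountStatus::DestroyedAgain));
    }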
pub fn update_and_create_revert( &mut self, transition: TransitionAccount, @@ -136,7 +140,7 @@ impl BundleAccount { let updated_storage = transition.storage; let updated_status = transition.status; - // the helper that extends this storage but preserves original value. + // The helper that extends this storage but preserves original value. let extend_storage = |this_storage: &mut StorageWithOriginalValues, storage_update: StorageWithOriginalValues| { @@ -168,7 +172,7 @@ impl BundleAccount { let previous_storage = previous_storage_from_update(&updated_storage); match self.status { AccountStatus::Changed | AccountStatus::Loaded => { - // extend the storage. original values is not used inside bundle. + // Extend the storage. original values is not used inside bundle. extend_storage(&mut self.storage, updated_storage); } AccountStatus::LoadedEmptyEIP161 => { @@ -192,7 +196,7 @@ impl BundleAccount { let previous_storage = previous_storage_from_update(&updated_storage); let in_memory_info_revert = match self.status { AccountStatus::Loaded | AccountStatus::InMemoryChange => { - // from loaded (Or LoadedEmpty) to InMemoryChange can happen if there is balance change + // From loaded (Or LoadedEmpty) to InMemoryChange can happen if there is balance change // or new created account but Loaded didn't have contract. extend_storage(&mut self.storage, updated_storage); info_revert @@ -225,7 +229,7 @@ impl BundleAccount { None } AccountStatus::Destroyed => { - // clear this storage and move it to the Revert. + // Clear this storage and move it to the Revert. let this_storage = self.storage.drain().collect(); let ret = match self.status { AccountStatus::InMemoryChange | AccountStatus::Changed | AccountStatus::Loaded | AccountStatus::LoadedEmptyEIP161 => { @@ -243,21 +247,21 @@ impl BundleAccount { self.info = None; } - // set present to destroyed. + // Set present to destroyed. ret } AccountStatus::DestroyedChanged => { // Previous block created account or changed. // (It was destroyed on previous block or one before). - // check common pre destroy paths. + // Check common pre destroy paths. // If common path is there it will drain the storage. if let Some(revert_state) = AccountRevert::new_selfdestructed_from_bundle( info_revert.clone(), self, &updated_storage, ) { - // set to destroyed and revert state. + // Set to destroyed and revert state. self.status = AccountStatus::DestroyedChanged; self.info = updated_info; self.storage = updated_storage; @@ -266,7 +270,7 @@ impl BundleAccount { } else { let ret = match self.status { AccountStatus::Destroyed | AccountStatus::LoadedNotExisting => { - // from destroyed state new account is made + // From destroyed state new account is made Some(AccountRevert { account: AccountInfoRevert::DeleteIt, storage: previous_storage_from_update(&updated_storage), @@ -284,7 +288,7 @@ impl BundleAccount { .map(|t| (t.0, RevertToSlot::Some(t.1.present_value))) .collect::>(); for key in updated_storage.keys() { - // as it is not existing inside Destroyed storage this means + // As it is not existing inside Destroyed storage this means // that previous values must be zero storage.entry(*key).or_insert(RevertToSlot::Destroyed); } @@ -302,7 +306,7 @@ impl BundleAccount { } AccountStatus::DestroyedAgain => { Some(AccountRevert::new_selfdestructed_again( - // destroyed again will set empty account. + // Destroyed again will set empty account. 
AccountStatus::DestroyedAgain, AccountInfoRevert::DeleteIt, HashMap::default(), @@ -313,7 +317,7 @@ impl BundleAccount { }; self.status = AccountStatus::DestroyedChanged; self.info = updated_info; - // extends current storage. + // Extends current storage. extend_storage(&mut self.storage, updated_storage); ret @@ -323,7 +327,7 @@ impl BundleAccount { // Previous block created account // (It was destroyed on previous block or one before). - // check common pre destroy paths. + // Check common pre destroy paths. // This will drain the storage if it is common transition. let ret = if let Some(revert_state) = AccountRevert::new_selfdestructed_from_bundle( info_revert, @@ -336,18 +340,18 @@ impl BundleAccount { AccountStatus::Destroyed | AccountStatus::DestroyedAgain | AccountStatus::LoadedNotExisting => { - // From destroyed to destroyed again. is noop + // From destroyed to destroyed again is noop // - // DestroyedAgain to DestroyedAgain is noop + // From DestroyedAgain to DestroyedAgain is noop // - // From LoadedNotExisting to DestroyedAgain - // is noop as account is destroyed again + // From LoadedNotExisting to DestroyedAgain is noop + // as account is destroyed again None } AccountStatus::DestroyedChanged => { // From destroyed changed to destroyed again. Some(AccountRevert::new_selfdestructed_again( - // destroyed again will set empty account. + // Destroyed again will set empty account. AccountStatus::DestroyedChanged, AccountInfoRevert::RevertTo(self.info.clone().unwrap_or_default()), self.storage.drain().collect(), @@ -357,7 +361,7 @@ impl BundleAccount { _ => unreachable!("Invalid state to DestroyedAgain from {self:?}"), } }; - // set to destroyed and revert state. + // Set to destroyed and revert state. self.status = AccountStatus::DestroyedAgain; self.info = None; self.storage.clear(); diff --git a/crates/database/src/states/bundle_state.rs b/crates/database/src/states/bundle_state.rs index 13cec018ac..53ce74780b 100644 --- a/crates/database/src/states/bundle_state.rs +++ b/crates/database/src/states/bundle_state.rs @@ -35,12 +35,12 @@ pub struct BundleBuilder { /// Option for [`BundleState`] when converting it to the plain state. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum OriginalValuesKnown { - /// Check changed with original values that [BundleState] has. + /// Checks changed with original values that [BundleState] has. /// /// If we don't expect parent blocks to be committed or unwinded from database, this option /// should be used. Yes, - /// Don't check original values, see the implementation of [BundleState::into_plain_state] for + /// Doesn't check original values, see the implementation of [BundleState::into_plain_state] for /// more info. /// /// If the Bundle can be split or extended, we would not be sure about original values, in that @@ -71,9 +71,9 @@ impl Default for BundleBuilder { } impl BundleBuilder { - /// Create builder instance + /// Creates builder instance. /// - /// `revert_range` indicates the size of BundleState `reverts` field + /// `revert_range` indicates the size of BundleState `reverts` field. pub fn new(revert_range: RangeInclusive) -> Self { BundleBuilder { revert_range, @@ -81,7 +81,7 @@ impl BundleBuilder { } } - /// Apply a transformation to the builder. + /// Applies a transformation to the builder. pub fn apply(self, f: F) -> Self where F: FnOnce(Self) -> Self, @@ -89,7 +89,7 @@ impl BundleBuilder { f(self) } - /// Apply a mutable transformation to the builder. 
+ /// Applies a mutable transformation to the builder. pub fn apply_mut(&mut self, f: F) -> &mut Self where F: FnOnce(&mut Self), @@ -98,43 +98,43 @@ impl BundleBuilder { self } - /// Collect address info of BundleState state + /// Collects address info of BundleState state. pub fn state_address(mut self, address: Address) -> Self { self.set_state_address(address); self } - /// Collect account info of BundleState state + /// Collects account info of BundleState state. pub fn state_original_account_info(mut self, address: Address, original: AccountInfo) -> Self { self.set_state_original_account_info(address, original); self } - /// Collect account info of BundleState state + /// Collects account info of BundleState state. pub fn state_present_account_info(mut self, address: Address, present: AccountInfo) -> Self { self.set_state_present_account_info(address, present); self } - /// Collect storage info of BundleState state + /// Collects storage info of BundleState state. pub fn state_storage(mut self, address: Address, storage: HashMap) -> Self { self.set_state_storage(address, storage); self } - /// Collect address info of BundleState reverts + /// Collects address info of BundleState reverts. /// /// `block_number` must respect `revert_range`, or the input - /// will be ignored during the final build process + /// will be ignored during the final build process. pub fn revert_address(mut self, block_number: u64, address: Address) -> Self { self.set_revert_address(block_number, address); self } - /// Collect account info of BundleState reverts + /// Collects account info of BundleState reverts. /// /// `block_number` must respect `revert_range`, or the input - /// will be ignored during the final build process + /// will be ignored during the final build process. pub fn revert_account_info( mut self, block_number: u64, @@ -145,10 +145,10 @@ impl BundleBuilder { self } - /// Collect storage info of BundleState reverts + /// Collects storage info of BundleState reverts. /// /// `block_number` must respect `revert_range`, or the input - /// will be ignored during the final build process + /// will be ignored during the final build process. pub fn revert_storage( mut self, block_number: u64, @@ -159,19 +159,19 @@ impl BundleBuilder { self } - /// Collect contracts info + /// Collects contracts info. pub fn contract(mut self, address: B256, bytecode: Bytecode) -> Self { self.set_contract(address, bytecode); self } - /// Set address info of BundleState state. + /// Sets address info of BundleState state. pub fn set_state_address(&mut self, address: Address) -> &mut Self { self.states.insert(address); self } - /// Set original account info of BundleState state. + /// Sets original account info of BundleState state. pub fn set_state_original_account_info( &mut self, address: Address, @@ -182,7 +182,7 @@ impl BundleBuilder { self } - /// Set present account info of BundleState state. + /// Sets present account info of BundleState state. pub fn set_state_present_account_info( &mut self, address: Address, @@ -193,7 +193,7 @@ impl BundleBuilder { self } - /// Set storage info of BundleState state. + /// Sets storage info of BundleState state. pub fn set_state_storage( &mut self, address: Address, @@ -204,13 +204,13 @@ impl BundleBuilder { self } - /// Set address info of BundleState reverts. + /// Sets address info of BundleState reverts. 
pub fn set_revert_address(&mut self, block_number: u64, address: Address) -> &mut Self { self.reverts.insert((block_number, address)); self } - /// Set account info of BundleState reverts. + /// Sets account info of BundleState reverts. pub fn set_revert_account_info( &mut self, block_number: u64, @@ -222,7 +222,7 @@ impl BundleBuilder { self } - /// Set storage info of BundleState reverts. + /// Sets storage info of BundleState reverts. pub fn set_revert_storage( &mut self, block_number: u64, @@ -234,13 +234,13 @@ impl BundleBuilder { self } - /// Set contracts info. + /// Sets contracts info. pub fn set_contract(&mut self, address: B256, bytecode: Bytecode) -> &mut Self { self.contracts.insert(address, bytecode); self } - /// Create `BundleState` instance based on collected information + /// Creates `BundleState` instance based on collected information. pub fn build(mut self) -> BundleState { let mut state_size = 0; let state = self @@ -390,35 +390,38 @@ impl BundleRetention { /// Bundle state contain only values that got changed /// /// For every account it contains both original and present state. +/// /// This is needed to decide if there were any changes to the account. /// /// Reverts and created when TransitionState is applied to BundleState. +/// /// And can be used to revert BundleState to the state before transition. #[derive(Default, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BundleState { - /// Account state. + /// Account state pub state: HashMap, /// All created contracts in this block. pub contracts: HashMap, - /// Changes to revert. + /// Changes to revert + /// + /// **Note**: Inside vector is *not* sorted by address. /// - /// Note: Inside vector is *not* sorted by address. /// But it is unique by address. pub reverts: Reverts, - /// The size of the plain state in the bundle state. + /// The size of the plain state in the bundle state pub state_size: usize, - /// The size of reverts in the bundle state. + /// The size of reverts in the bundle state pub reverts_size: usize, } impl BundleState { - /// Return builder instance for further manipulation + /// Returns builder instance for further manipulation. pub fn builder(revert_range: RangeInclusive) -> BundleBuilder { BundleBuilder::new(revert_range) } - /// Create it with new and old values of both Storage and AccountInfo. + /// Creates it with new and old values of both Storage and AccountInfo. pub fn new( state: impl IntoIterator< Item = ( @@ -497,18 +500,19 @@ impl BundleState { } /// Returns the approximate size of changes in the bundle state. + /// /// The estimation is not precise, because the information about the number of /// destroyed entries that need to be removed is not accessible to the bundle state. pub fn size_hint(&self) -> usize { self.state_size + self.reverts_size + self.contracts.len() } - /// Return reference to the state. + /// Returns reference to the state. pub fn state(&self) -> &HashMap { &self.state } - /// Is bundle state empty. + /// Checks whether bundle state is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } @@ -518,17 +522,17 @@ impl BundleState { self.state.len() } - /// Get account from state + /// Gets account from state. pub fn account(&self, address: &Address) -> Option<&BundleAccount> { self.state.get(address) } - /// Get bytecode from state + /// Gets bytecode from state. 
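The builder methods above compose fluently, and the `test_bundle3`/`test_bundle4` fixtures further down in this diff use the same pattern. A small sketch; the import paths are assumptions, and `AccountInfo` is passed in so no field construction is needed here.

    use database::states::BundleState; // path assumed
    use primitives::Address;
    use state::AccountInfo;

    /// One-block bundle: the 0..=0 revert range keeps only reverts recorded for block 0.
    fn single_block_bundle(addr: Address, info: AccountInfo) -> BundleState {
        BundleState::builder(0..=0)
            .state_address(addr)
            .state_present_account_info(addr, info)
            .revert_address(0, addr)
            .build()
    }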
pub fn bytecode(&self, hash: &B256) -> Option { self.contracts.get(hash).cloned() } - /// Consume [`TransitionState`] by applying the changes and creating the + /// Consumes [`TransitionState`] by applying the changes and creating the /// reverts. /// /// If [BundleRetention::includes_reverts] is `true`, then the reverts will @@ -539,7 +543,7 @@ impl BundleState { retention: BundleRetention, ) { let include_reverts = retention.includes_reverts(); - // pessimistically pre-allocate assuming _all_ accounts changed. + // Pessimistically pre-allocate assuming _all_ accounts changed. let reverts_capacity = if include_reverts { transitions.transitions.len() } else { @@ -548,23 +552,23 @@ impl BundleState { let mut reverts = Vec::with_capacity(reverts_capacity); for (address, transition) in transitions.transitions.into_iter() { - // add new contract if it was created/changed. + // Add new contract if it was created/changed. if let Some((hash, new_bytecode)) = transition.has_new_contract() { self.contracts.insert(hash, new_bytecode.clone()); } - // update state and create revert. + // Update state and create revert. let revert = match self.state.entry(address) { hash_map::Entry::Occupied(mut entry) => { let entry = entry.get_mut(); self.state_size -= entry.size_hint(); - // update and create revert if it is present + // Update and create revert if it is present let revert = entry.update_and_create_revert(transition); - // update the state size + // Update the state size self.state_size += entry.size_hint(); revert } hash_map::Entry::Vacant(entry) => { - // make revert from transition account + // Make revert from transition account let present_bundle = transition.present_bundle_account(); let revert = transition.create_revert(); if revert.is_some() { @@ -575,7 +579,7 @@ impl BundleState { } }; - // append revert if present. + // Append revert if present. if let Some(revert) = revert.filter(|_| include_reverts) { self.reverts_size += revert.size_hint(); reverts.push((address, revert)); @@ -588,22 +592,22 @@ impl BundleState { /// Generate a [`StateChangeset`] from the bundle state without consuming /// it. pub fn to_plain_state(&self, is_value_known: OriginalValuesKnown) -> StateChangeset { - // pessimistically pre-allocate assuming _all_ accounts changed. + // Pessimistically pre-allocate assuming _all_ accounts changed. let state_len = self.state.len(); let mut accounts = Vec::with_capacity(state_len); let mut storage = Vec::with_capacity(state_len); for (address, account) in self.state.iter() { - // append account info if it is changed. + // Append account info if it is changed. let was_destroyed = account.was_destroyed(); if is_value_known.is_not_known() || account.is_info_changed() { let info = account.info.as_ref().map(AccountInfo::copy_without_code); accounts.push((*address, info)); } - // append storage changes + // Append storage changes - // NOTE: Assumption is that revert is going to remove whole plain storage from + // Note: Assumption is that revert is going to remove whole plain storage from // database so we can check if plain state was wiped or not. let mut account_storage_changed = Vec::with_capacity(account.storage.len()); @@ -625,7 +629,7 @@ impl BundleState { } if !account_storage_changed.is_empty() || was_destroyed { - // append storage changes to account. + // Append storage changes to account. 
storage.push(PlainStorageChangeset { address: *address, wipe_storage: was_destroyed, @@ -637,7 +641,7 @@ impl BundleState { let contracts = self .contracts .iter() - // remove empty bytecodes + // Remove empty bytecodes .filter(|(b, _)| **b != KECCAK_EMPTY) .map(|(b, code)| (*b, code.clone())) .collect::>(); @@ -648,13 +652,13 @@ impl BundleState { } } - /// Convert the bundle state into a [`StateChangeset`]. + /// Converts the bundle state into a [`StateChangeset`]. #[deprecated = "Use `to_plain_state` instead"] pub fn into_plain_state(self, is_value_known: OriginalValuesKnown) -> StateChangeset { self.to_plain_state(is_value_known) } - /// Generate a [`StateChangeset`] and [`PlainStateReverts`] from the bundle + /// Generates a [`StateChangeset`] and [`PlainStateReverts`] from the bundle /// state. pub fn to_plain_state_and_reverts( &self, @@ -666,7 +670,7 @@ impl BundleState { ) } - /// Consume the bundle state and split it into a [`StateChangeset`] and a + /// Consumes the bundle state and split it into a [`StateChangeset`] and a /// [`PlainStateReverts`]. #[deprecated = "Use `to_plain_state_and_reverts` instead"] pub fn into_plain_state_and_reverts( @@ -676,9 +680,9 @@ impl BundleState { self.to_plain_state_and_reverts(is_value_known) } - /// Extend the bundle with other state + /// Extends the bundle with other state. /// - /// Update the `other` state only if `other` is not flagged as destroyed. + /// Updates the `other` state only if `other` is not flagged as destroyed. pub fn extend_state(&mut self, other_state: HashMap) { for (address, other_account) in other_state { match self.state.entry(address) { @@ -686,14 +690,14 @@ impl BundleState { let this = entry.get_mut(); self.state_size -= this.size_hint(); - // if other was destroyed. replace `this` storage with + // If other was destroyed. replace `this` storage with // the `other one. if other_account.was_destroyed() { this.storage = other_account.storage; } else { - // otherwise extend this storage with other + // Otherwise extend this storage with other for (key, storage_slot) in other_account.storage { - // update present value or insert storage slot. + // Update present value or insert storage slot. this.storage .entry(key) .or_insert(storage_slot) @@ -707,23 +711,25 @@ impl BundleState { self.state_size += this.size_hint(); } hash_map::Entry::Vacant(entry) => { - // just insert if empty + // Just insert if empty self.state_size += other_account.size_hint(); entry.insert(other_account); } } } } - /// Extend the state with state that is build on top of it. + /// Extends the state with state that is build on top of it. /// /// If storage was wiped in `other` state, copy `this` plain state /// and put it inside `other` revert (if there is no duplicates of course). /// /// If `this` and `other` accounts were both destroyed invalidate second - /// wipe flag (from `other`). As wiping from database should be done only once + /// wipe flag (from `other`). + /// + /// As wiping from database should be done only once /// and we already transferred all potentially missing storages to the `other` revert. pub fn extend(&mut self, mut other: Self) { - // iterate over reverts and if its storage is wiped try to add previous bundle + // Iterate over reverts and if its storage is wiped try to add previous bundle // state as there is potential missing slots. for (address, revert) in other.reverts.iter_mut().flatten() { if revert.wipe_storage { @@ -731,7 +737,7 @@ impl BundleState { // we need to move storage from present state. 
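`to_plain_state` above is the bridge from the in-memory bundle to flat database writes. A sketch, with import paths assumed; `OriginalValuesKnown::Yes` is the appropriate variant when the bundle was neither split nor extended, per its docs above.

    use database::states::{BundleState, OriginalValuesKnown, StateChangeset}; // paths assumed

    /// Flattens a finished bundle into account/storage/contract write lists.
    fn flatten(bundle: &BundleState) -> StateChangeset {
        bundle.to_plain_state(OriginalValuesKnown::Yes)
    }

The deprecated `into_plain_state` does the same thing by value, as the diff above notes.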
if let Some(this_account) = self.state.get_mut(address) { // As this account was destroyed inside `other` bundle. - // we are fine to wipe/drain this storage and put it inside revert. + // We are fine to wipe/drain this storage and put it inside revert. for (key, value) in this_account.storage.drain() { revert .storage @@ -739,7 +745,7 @@ impl BundleState { .or_insert(RevertToSlot::Some(value.present_value)); } - // nullify `other` wipe as primary database wipe is done in `this`. + // Nullify `other` wipe as primary database wipe is done in `this`. if this_account.was_destroyed() { revert.wipe_storage = false; } @@ -757,9 +763,9 @@ impl BundleState { self.reverts.extend(other.reverts); } - /// Take first N raw reverts from the [BundleState]. + /// Takes first N raw reverts from the [BundleState]. pub fn take_n_reverts(&mut self, reverts_to_take: usize) -> Reverts { - // split is done as [0, num) and [num, len]. + // Split is done as [0, num) and [num, len]. if reverts_to_take > self.reverts.len() { return self.take_all_reverts(); } @@ -773,19 +779,19 @@ impl BundleState { detached_reverts } - /// Return and clear all reverts from [BundleState] + /// Returns and clears all reverts from [BundleState]. pub fn take_all_reverts(&mut self) -> Reverts { self.reverts_size = 0; core::mem::take(&mut self.reverts) } - /// Reverts the state changes of the latest transition + /// Reverts the state changes of the latest transition. /// - /// Note: This is the same as `BundleState::revert(1)` + /// **Note**: This is the same as `BundleState::revert(1)` /// /// Returns true if the state was reverted. pub fn revert_latest(&mut self) -> bool { - // revert the latest recorded state + // Revert the latest recorded state if let Some(reverts) = self.reverts.pop() { for (address, revert_account) in reverts.into_iter() { self.reverts_size -= revert_account.size_hint(); @@ -800,7 +806,7 @@ impl BundleState { } } Entry::Vacant(entry) => { - // create empty account that we will revert on. + // Create empty account that we will revert on. // Only place where this account is not existing is if revert is DeleteIt. let mut account = BundleAccount::new( None, @@ -832,24 +838,25 @@ impl BundleState { while self.revert_latest() { num_transitions -= 1; if num_transitions == 0 { - // break the loop. + // Break the loop. break; } } } - /// Prepends present the state with the given BundleState. + /// Prepends present the state with the given [BundleState]. + /// /// It adds changes from the given state but does not override any existing changes. /// /// Reverts are not updated. 
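A sketch tying together the revert-management calls above: peel off the oldest recorded reverts (for example to persist them) and roll the newest transition back in memory. Import paths are assumed.

    use database::states::{BundleState, Reverts}; // paths assumed

    fn trim_and_rollback(bundle: &mut BundleState) -> Reverts {
        // Detach the oldest block's reverts; asking for more than exist
        // falls back to `take_all_reverts`, as documented above.
        let oldest = bundle.take_n_reverts(1);

        // Undo the most recent transition; returns false if nothing was left to revert.
        let _reverted = bundle.revert_latest();

        oldest
    }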
pub fn prepend_state(&mut self, mut other: BundleState) { - // take this bundle + // Take this bundle let this_bundle = mem::take(self); - // extend other bundle state with this + // Extend other bundle state with this other.extend_state(this_bundle.state); - // extend other contracts + // Extend other contracts other.contracts.extend(this_bundle.contracts); - // swap bundles + // Swap bundles mem::swap(self, &mut other) } } @@ -861,7 +868,7 @@ mod tests { #[test] fn transition_states() { - // dummy data + // Dummy data let address = Address::new([0x01; 20]); let acc1 = AccountInfo { balance: U256::from(10), @@ -872,7 +879,7 @@ mod tests { let mut bundle_state = BundleState::default(); - // have transition from loaded to all other states + // Have transition from loaded to all other states let transition = TransitionAccount { info: Some(acc1), @@ -883,7 +890,7 @@ mod tests { storage_was_destroyed: false, }; - // apply first transition + // Apply first transition bundle_state.apply_transitions_and_create_reverts( TransitionState::single(address, transition.clone()), BundleRetention::Reverts, @@ -906,9 +913,9 @@ mod tests { U256::from(7) } - /// Test bundle one + /// Tests bundle one. fn test_bundle1() -> BundleState { - // block changes + // Block changes BundleState::new( vec![ ( @@ -949,9 +956,9 @@ mod tests { ) } - /// Test bundle two + /// Tests bundle two. fn test_bundle2() -> BundleState { - // block changes + // Block changes BundleState::new( vec![( account1(), @@ -978,7 +985,7 @@ mod tests { ) } - /// Test bundle three + /// Tests bundle three. fn test_bundle3() -> BundleState { BundleState::builder(0..=0) .state_present_account_info( @@ -1011,7 +1018,7 @@ mod tests { .build() } - /// Test bundle four + /// Tests bundle four. fn test_bundle4() -> BundleState { BundleState::builder(0..=0) .state_present_account_info( @@ -1047,21 +1054,21 @@ mod tests { extended.extend(bundle2.clone()); let mut reverted = extended.clone(); - // revert zero does nothing. + // Revert zero does nothing. reverted.revert(0); assert_eq!(reverted, extended); - // revert by one gives us bundle one. + // Revert by one gives us bundle one. reverted.revert(1); assert_eq!(reverted, bundle1); - // reverted by additional one gives us empty bundle. + // Reverted by additional one gives us empty bundle. 
reverted.revert(1); assert_eq!(reverted, BundleState::default()); let mut reverted = extended.clone(); - // reverted by bigger number gives us empty bundle + // Reverted by bigger number gives us empty bundle reverted.revert(10); assert_eq!(reverted, BundleState::default()); } @@ -1145,11 +1152,11 @@ mod tests { .build(); state.revert_latest(); - // state for account one was deleted + // State for account one was deleted assert_eq!(state.state.get(&account1()), None); state.revert_latest(); - // state is set to + // State is set to assert_eq!( state.state.get(&account1()), Some(&BundleAccount::new( @@ -1194,27 +1201,27 @@ mod tests { let mut extended = bundle1.clone(); extended.extend(bundle2.clone()); - // check that we have two reverts + // Check that we have two reverts assert_eq!(extended.reverts.len(), 2); - // take all by big N + // Take all by big N let mut extended2 = extended.clone(); assert_eq!(extended2.take_n_reverts(100), extended.reverts); - // take all reverts + // Take all reverts let mut extended2 = extended.clone(); assert_eq!(extended2.take_all_reverts(), extended.reverts); - // take zero revert + // Take zero revert let taken_reverts = extended.take_n_reverts(0); assert_eq!(taken_reverts, Reverts::default()); assert_eq!(extended.reverts.len(), 2); - // take one revert + // Take one revert let taken_reverts = extended.take_n_reverts(1); assert_eq!(taken_reverts, bundle1.reverts); - // take last revert + // Take last revert let taken_reverts = extended.take_n_reverts(1); assert_eq!(taken_reverts, bundle2.reverts); } @@ -1252,14 +1259,14 @@ mod tests { test.prepend_state(previous_state); assert_eq!(test.state.len(), 2); - // reverts num should stay the same. + // Reverts num should stay the same. assert_eq!(test.reverts.len(), 1); - // account1 is not overwritten. + // Account1 is not overwritten. assert_eq!( test.state.get(&address1).unwrap().info, Some(account1_changed) ); - // account2 got inserted + // Account2 got inserted assert_eq!(test.state.get(&address2).unwrap().info, Some(account2)); } diff --git a/crates/database/src/states/cache.rs b/crates/database/src/states/cache.rs index eae3031e38..2b9f29413c 100644 --- a/crates/database/src/states/cache.rs +++ b/crates/database/src/states/cache.rs @@ -6,20 +6,22 @@ use primitives::{Address, HashMap, B256}; use state::{Account, AccountInfo, EvmState}; use std::vec::Vec; -/// Cache state contains both modified and original values. +/// Cache state contains both modified and original values /// +/// # Note /// Cache state is main state that revm uses to access state. +/// /// It loads all accounts from database and applies revm output to it. /// /// It generates transitions that is used to build BundleState. #[derive(Clone, Debug, PartialEq, Eq)] pub struct CacheState { - /// Block state account with account state. + /// Block state account with account state pub accounts: HashMap, - /// Created contracts. - // TODO add bytecode counter for number of bytecodes added/removed. + /// Created contracts + // TODO : Add bytecode counter for number of bytecodes added/removed. pub contracts: HashMap, - /// Has EIP-161 state clear enabled (Spurious Dragon hardfork). + /// Has EIP-161 state clear enabled (Spurious Dragon hardfork) pub has_state_clear: bool, } @@ -30,7 +32,7 @@ impl Default for CacheState { } impl CacheState { - /// New default state. + /// Creates a new default state. pub fn new(has_state_clear: bool) -> Self { Self { accounts: HashMap::default(), @@ -39,7 +41,7 @@ impl CacheState { } } - /// Set state clear flag. 
EIP-161. + /// Sets state clear flag. EIP-161. pub fn set_state_clear_flag(&mut self, has_state_clear: bool) { self.has_state_clear = has_state_clear; } @@ -56,13 +58,13 @@ impl CacheState { }) } - /// Insert not existing account. + /// Inserts not existing account. pub fn insert_not_existing(&mut self, address: Address) { self.accounts .insert(address, CacheAccount::new_loaded_not_existing()); } - /// Insert Loaded (Or LoadedEmptyEip161 if account is empty) account. + /// Inserts Loaded (Or LoadedEmptyEip161 if account is empty) account. pub fn insert_account(&mut self, address: Address, info: AccountInfo) { let account = if !info.is_empty() { CacheAccount::new_loaded(info, HashMap::default()) @@ -87,7 +89,7 @@ impl CacheState { self.accounts.insert(address, account); } - /// Apply output of revm execution and create account transitions that are used to build BundleState. + /// Applies output of revm execution and create account transitions that are used to build BundleState. pub fn apply_evm_state(&mut self, evm_state: EvmState) -> Vec<(Address, TransitionAccount)> { let mut transitions = Vec::with_capacity(evm_state.len()); for (address, account) in evm_state { @@ -98,14 +100,15 @@ impl CacheState { transitions } - /// Apply updated account state to the cached account. + /// Applies updated account state to the cached account. + /// /// Returns account transition if applicable. fn apply_account_state( &mut self, address: Address, account: Account, ) -> Option { - // not touched account are never changed. + // Not touched account are never changed. if !account.is_touched() { return None; } @@ -124,7 +127,7 @@ impl CacheState { let is_created = account.is_created(); let is_empty = account.is_empty(); - // transform evm storage to storage with previous value. + // Transform evm storage to storage with previous value. let changed_storage = account .storage .into_iter() @@ -132,7 +135,7 @@ impl CacheState { .map(|(key, slot)| (key, slot.into())) .collect(); - // Note: it can happen that created contract get selfdestructed in same block + // Note: It can happen that created contract get selfdestructed in same block // that is why is_created is checked after selfdestructed // // Note: Create2 opcode (Petersburg) was after state clear EIP (Spurious Dragon) @@ -150,10 +153,10 @@ impl CacheState { // EIP-161 state clear if is_empty { if self.has_state_clear { - // touch empty account. + // Touch empty account. this_account.touch_empty_eip161() } else { - // if account is empty and state clear is not enabled we should save + // If account is empty and state clear is not enabled we should save // empty account. this_account.touch_create_pre_eip161(changed_storage) } diff --git a/crates/database/src/states/cache_account.rs b/crates/database/src/states/cache_account.rs index e3c1bc88a1..822ecc31fb 100644 --- a/crates/database/src/states/cache_account.rs +++ b/crates/database/src/states/cache_account.rs @@ -31,7 +31,7 @@ impl From for CacheAccount { } impl CacheAccount { - /// Create new account that is loaded from database. + /// Creates new account that is loaded from database. pub fn new_loaded(info: AccountInfo, storage: PlainStorage) -> Self { Self { account: Some(PlainAccount { info, storage }), @@ -39,7 +39,7 @@ impl CacheAccount { } } - /// Create new account that is loaded empty from database. + /// Creates new account that is loaded empty from database. 
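A sketch of seeding `CacheState` before execution, using the constructors shown above. Import paths and `AccountInfo::default()` are assumptions; `true` enables the EIP-161 state-clear behaviour.

    use database::states::CacheState; // path assumed
    use primitives::address;
    use state::AccountInfo;

    fn seeded_cache() -> CacheState {
        // State clear (EIP-161 / Spurious Dragon) enabled.
        let mut cache = CacheState::new(true);

        // Preload a known account so execution does not hit the database for it.
        cache.insert_account(
            address!("2000000000000000000000000000000000000002"),
            AccountInfo::default(), // assumed Default impl
        );

        // Record that this address is known to be absent.
        cache.insert_not_existing(address!("3000000000000000000000000000000000000003"));

        cache
    }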
pub fn new_loaded_empty_eip161(storage: PlainStorage) -> Self { Self { account: Some(PlainAccount::new_empty_with_storage(storage)), @@ -55,7 +55,7 @@ impl CacheAccount { } } - /// Create new account that is newly created + /// Creates new account that is newly created. pub fn new_newly_created(info: AccountInfo, storage: PlainStorage) -> Self { Self { account: Some(PlainAccount { info, storage }), @@ -63,7 +63,7 @@ impl CacheAccount { } } - /// Create account that is destroyed. + /// Creates account that is destroyed. pub fn new_destroyed() -> Self { Self { account: None, @@ -71,7 +71,7 @@ impl CacheAccount { } } - /// Create changed account + /// Creates changed account. pub fn new_changed(info: AccountInfo, storage: PlainStorage) -> Self { Self { account: Some(PlainAccount { info, storage }), @@ -79,7 +79,7 @@ impl CacheAccount { } } - /// Return true if account is some + /// Returns true if account is some. pub fn is_some(&self) -> bool { matches!( self.status, @@ -91,19 +91,19 @@ impl CacheAccount { ) } - /// Return storage slot if it exist. + /// Returns storage slot if it exist. pub fn storage_slot(&self, slot: U256) -> Option { self.account .as_ref() .and_then(|a| a.storage.get(&slot).cloned()) } - /// Fetch account info if it exist. + /// Fetches account info if it exist. pub fn account_info(&self) -> Option { self.account.as_ref().map(|a| a.info.clone()) } - /// Dissolve account into components. + /// Dissolves account into components. pub fn into_components(self) -> (Option<(AccountInfo, PlainStorage)>, AccountStatus) { (self.account.map(|a| a.into_components()), self.status) } @@ -168,11 +168,11 @@ impl CacheAccount { } } - /// Consume self and make account as destroyed. + /// Consumes self and make account as destroyed. /// - /// Set account as None and set status to Destroyer or DestroyedAgain. + /// Sets account as None and set status to Destroyer or DestroyedAgain. pub fn selfdestruct(&mut self) -> Option { - // account should be None after selfdestruct so we can take it. + // Account should be None after selfdestruct so we can take it. let previous_info = self.account.take().map(|a| a.info); let previous_status = self.status; @@ -225,7 +225,7 @@ impl CacheAccount { /// Increment balance by `balance` amount. Assume that balance will not /// overflow or be zero. /// - /// Note: only if balance is zero we would return None as no transition would be made. + /// Note: Only if balance is zero we would return None as no transition would be made. pub fn increment_balance(&mut self, balance: u128) -> Option { if balance == 0 { return None; diff --git a/crates/database/src/states/changes.rs b/crates/database/src/states/changes.rs index 81eae46fd2..b03c110896 100644 --- a/crates/database/src/states/changes.rs +++ b/crates/database/src/states/changes.rs @@ -4,11 +4,12 @@ use primitives::{Address, B256, U256}; use state::AccountInfo; use std::vec::Vec; -/// accounts/storages/contracts for inclusion into database. +/// `accounts`/`storages`/`contracts` for inclusion into database. +/// /// Structure is made so it is easier to apply directly to database -/// that mostly have separate tables to store account/storage/contract data. +/// that mostly have separate tables to store `accounts`/`storages`/`contracts` data. /// -/// Note: that data is **not** sorted. Some database benefit of faster inclusion +/// **Note**: That data is **not** sorted. Some database benefit of faster inclusion /// and smaller footprint if data is inserted in sorted order. 
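The `CacheAccount` constructors and `increment_balance` above can be sketched as follows; import paths are assumed, and the zero-increment behaviour is taken from the note above.

    use database::states::CacheAccount; // path assumed
    use primitives::HashMap;
    use state::AccountInfo;

    fn bump_balance(info: AccountInfo) -> bool {
        let mut account = CacheAccount::new_loaded(info, HashMap::default());

        // A zero increment is a no-op and produces no transition.
        assert!(account.increment_balance(0).is_none());

        // A non-zero increment yields a transition to fold into the bundle.
        account.increment_balance(1_000).is_some()
    }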
#[derive(Clone, Debug, Default)] pub struct StateChangeset { @@ -20,29 +21,34 @@ pub struct StateChangeset { pub contracts: Vec<(B256, Bytecode)>, } -/// Plain storage changeset. Used to apply storage changes of plain state to -/// the database. +/// Plain storage changeset. +/// +/// Used to apply storage changes of plain state to the database. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct PlainStorageChangeset { /// Address of account pub address: Address, - /// Wipe storage, + /// Wipe storage pub wipe_storage: bool, - /// Storage key value pairs. + /// Storage key value pairs pub storage: Vec<(U256, U256)>, } -/// Plain Storage Revert. Containing old values of changed storage. +/// Plain Storage Revert. +/// +/// [`PlainStorageRevert`] contains old values of changed storage. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct PlainStorageRevert { /// Address of account pub address: Address, - /// Is storage wiped in this revert. Wiped flag is set on - /// first known selfdestruct and would require clearing the + /// Whether storage is wiped in this revert + /// + /// **Note**: Wiped flag is set on first known selfdestruct and would require clearing the /// state of this storage from database (And moving it to revert). pub wiped: bool, - /// Contains the storage key and old values of that storage. - /// Reverts are **not** sorted. + /// Contains the storage key and old values of that storage + /// + /// **Note**: Reverts are **not** sorted. pub storage_revert: Vec<(U256, RevertToSlot)>, } @@ -51,16 +57,16 @@ pub struct PlainStorageRevert { /// Note that accounts are assumed **not** sorted. #[derive(Clone, Debug, Default)] pub struct PlainStateReverts { - /// Vector of account with removed contracts bytecode + /// Vector of account with removed contracts bytecode. /// - /// Note: If AccountInfo is None means that account needs to be removed. + /// **Note**: If AccountInfo is None means that account needs to be removed. pub accounts: Vec)>>, /// Vector of storage with its address. pub storage: Vec>, } impl PlainStateReverts { - /// Constructs new [PlainStateReverts] with pre-allocated capacity. + /// Constructs new [`PlainStateReverts`] with pre-allocated capacity. pub fn with_capacity(capacity: usize) -> Self { Self { accounts: Vec::with_capacity(capacity), diff --git a/crates/database/src/states/plain_account.rs b/crates/database/src/states/plain_account.rs index 1d5520ef0f..547896e148 100644 --- a/crates/database/src/states/plain_account.rs +++ b/crates/database/src/states/plain_account.rs @@ -1,7 +1,7 @@ use primitives::{HashMap, U256}; use state::{AccountInfo, EvmStorageSlot}; -// TODO rename this to BundleAccount. As for the block level we have original state. +// TODO : Rename this to BundleAccount. As for the block level we have original state. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct PlainAccount { pub info: AccountInfo, diff --git a/crates/database/src/states/reverts.rs b/crates/database/src/states/reverts.rs index 259739259e..4dc2e495e8 100644 --- a/crates/database/src/states/reverts.rs +++ b/crates/database/src/states/reverts.rs @@ -29,36 +29,36 @@ impl DerefMut for Reverts { } impl Reverts { - /// Create new reverts + /// Creates new reverts. pub fn new(reverts: Vec>) -> Self { Self(reverts) } - /// Sort account inside transition by their address. + /// Sorts account inside transition by their address. 
pub fn sort(&mut self) { for revert in &mut self.0 { revert.sort_by_key(|(address, _)| *address); } } - /// Extend reverts with other reverts. + /// Extends reverts with other reverts. pub fn extend(&mut self, other: Reverts) { self.0.extend(other.0); } - /// Generate a [`PlainStateReverts`]. + /// Generates a [`PlainStateReverts`]. /// /// Note that account are sorted by address. pub fn to_plain_state_reverts(&self) -> PlainStateReverts { let mut state_reverts = PlainStateReverts::with_capacity(self.0.len()); for reverts in &self.0 { - // pessimistically pre-allocate assuming _all_ accounts changed. + // Pessimistically pre-allocate assuming _all_ accounts changed. let mut accounts = Vec::with_capacity(reverts.len()); let mut storage = Vec::with_capacity(reverts.len()); for (address, revert_account) in reverts { match &revert_account.account { AccountInfoRevert::RevertTo(acc) => { - // cloning is cheap, because account info has 3 small + // Cloning is cheap, because account info has 3 small // fields and a Bytes accounts.push((*address, Some(acc.clone()))) } @@ -130,10 +130,13 @@ impl PartialEq for Reverts { /// Assumption is that Revert can return full state from any future state to any past state. /// +/// # Note /// It is created when new account state is applied to old account state. +/// /// And it is used to revert new account state to the old account state. /// -/// AccountRevert is structured in this way as we need to save it inside database. +/// [AccountRevert] is structured in this way as we need to save it inside database. +/// /// And we need to be able to read it from database. #[derive(Clone, Default, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -146,12 +149,13 @@ pub struct AccountRevert { impl AccountRevert { /// The approximate size of changes needed to store this account revert. + /// /// `1 + storage_reverts_len` pub fn size_hint(&self) -> usize { 1 + self.storage.len() } - /// Very similar to new_selfdestructed but it will add additional zeros (RevertToSlot::Destroyed) + /// Very similar to new_selfdestructed but it will add additional zeros ([RevertToSlot::Destroyed]) /// for the storage that are set if account is again created. pub fn new_selfdestructed_again( status: AccountStatus, @@ -178,7 +182,7 @@ impl AccountRevert { } } - /// Create revert for states that were before selfdestruct. + /// Creates revert for states that were before selfdestruct. pub fn new_selfdestructed_from_bundle( account_info_revert: AccountInfoRevert, bundle_account: &mut BundleAccount, @@ -212,7 +216,7 @@ impl AccountRevert { let previous_storage = storage .iter_mut() .map(|(key, value)| { - // take previous value and set ZERO as storage got destroyed. + // Take previous value and set ZERO as storage got destroyed. (*key, RevertToSlot::Some(value.present_value)) }) .collect(); @@ -301,8 +305,10 @@ pub enum AccountInfoRevert { /// * Value, on revert set this value /// * Destroyed, should be removed on revert but on Revert set it as zero. /// -/// Note: It is completely different state if Storage is Zero or Some or if Storage was -/// Destroyed. Because if it is destroyed, previous values can be found in database or it can be zero. +/// **Note**: It is completely different state if Storage is Zero or Some or if Storage was +/// Destroyed. +/// +/// Because if it is destroyed, previous values can be found in database or it can be zero. 
#[derive(Clone, Debug, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum RevertToSlot { diff --git a/crates/database/src/states/state.rs b/crates/database/src/states/state.rs index 1d870ac889..b7f521d960 100644 --- a/crates/database/src/states/state.rs +++ b/crates/database/src/states/state.rs @@ -12,47 +12,55 @@ use std::{ vec::Vec, }; -/// Database boxed with a lifetime and Send. +/// Database boxed with a lifetime and Send pub type DBBox<'a, E> = Box + Send + 'a>; -/// More constrained version of State that uses Boxed database with a lifetime. +/// More constrained version of State that uses Boxed database with a lifetime /// /// This is used to make it easier to use State. pub type StateDBBox<'a, E> = State>; -/// State of blockchain. +/// State of blockchain /// /// State clear flag is set inside CacheState and by default it is enabled. +/// /// If you want to disable it use `set_state_clear_flag` function. #[derive(Debug)] pub struct State { /// Cached state contains both changed from evm execution and cached/loaded account/storages - /// from database. This allows us to have only one layer of cache where we can fetch data. + /// from database + /// + /// This allows us to have only one layer of cache where we can fetch data. + /// /// Additionally we can introduce some preloading of data from database. pub cache: CacheState, - /// Optional database that we use to fetch data from. If database is not present, we will - /// return not existing account and storage. + /// Optional database that we use to fetch data from + /// + /// If database is not present, we will return not existing account and storage. /// - /// Note: It is marked as Send so database can be shared between threads. + /// **Note**: It is marked as Send so database can be shared between threads. pub database: DB, - /// Block state, it aggregates transactions transitions into one state. + /// Block state, it aggregates transactions transitions into one state /// /// Build reverts and state that gets applied to the state. pub transition_state: Option, - /// After block is finishes we merge those changes inside bundle. + /// After block is finishes we merge those changes inside bundle + /// /// Bundle is used to update database and create changesets. + /// /// Bundle state can be set on initialization if we want to use preloaded bundle. pub bundle_state: BundleState, /// Addition layer that is going to be used to fetched values before fetching values - /// from database. + /// from database /// /// Bundle is the main output of the state execution and this allows setting previous bundle /// and using its values for execution. pub use_preloaded_bundle: bool, - /// If EVM asks for block hash we will first check if they are found here. - /// and then ask the database. + /// If EVM asks for block hash, we will first check if they are found here, + /// then ask the database + /// + /// This map can be used to give different values for block hashes if in case. /// - /// This map can be used to give different values for block hashes if in case /// The fork block is different or some blocks are not saved inside database. pub block_hashes: BTreeMap, } @@ -67,24 +75,27 @@ impl State { impl State { /// Returns the size hint for the inner bundle state. + /// /// See [BundleState::size_hint] for more info. pub fn bundle_size_hint(&self) -> usize { self.bundle_state.size_hint() } - /// Iterate over received balances and increment all account balances. 
- /// If account is not found inside cache state it will be loaded from database. + /// Iterates over received balances and increment all account balances. + /// + /// **Note**: If account is not found inside cache state it will be loaded from database. /// /// Update will create transitions for all accounts that are updated. /// /// Like [CacheAccount::increment_balance], this assumes that incremented balances are not - /// zero, and will not overflow once incremented. If using this to implement withdrawals, zero - /// balances must be filtered out before calling this function. + /// zero, and will not overflow once incremented. + /// + /// If using this to implement withdrawals, zero balances must be filtered out before calling this function. pub fn increment_balances( &mut self, balances: impl IntoIterator, ) -> Result<(), DB::Error> { - // make transition and update cache state + // Make transition and update cache state let mut transitions = Vec::new(); for (address, balance) in balances { if balance == 0 { @@ -98,21 +109,21 @@ impl State { .expect("Balance is not zero"), )) } - // append transition + // Append transition if let Some(s) = self.transition_state.as_mut() { s.add_transitions(transitions) } Ok(()) } - /// Drain balances from given account and return those values. + /// Drains balances from given account and return those values. /// /// It is used for DAO hardfork state change to move values from given accounts. pub fn drain_balances( &mut self, addresses: impl IntoIterator, ) -> Result, DB::Error> { - // make transition and update cache state + // Make transition and update cache state let mut transitions = Vec::new(); let mut balances = Vec::new(); for address in addresses { @@ -121,7 +132,7 @@ impl State { balances.push(balance); transitions.push((address, transition)) } - // append transition + // Append transition if let Some(s) = self.transition_state.as_mut() { s.add_transitions(transitions) } @@ -151,15 +162,16 @@ impl State { .insert_account_with_storage(address, info, storage) } - /// Apply evm transitions to transition state. + /// Applies evm transitions to transition state. pub fn apply_transition(&mut self, transitions: Vec<(Address, TransitionAccount)>) { - // add transition to transition state. + // Add transition to transition state. if let Some(s) = self.transition_state.as_mut() { s.add_transitions(transitions) } } /// Take all transitions and merge them inside bundle state. + /// /// This action will create final post state and all reverts so that /// we at any time revert state of bundle to the state before transition /// is applied. @@ -171,20 +183,21 @@ impl State { } /// Get a mutable reference to the [`CacheAccount`] for the given address. + /// /// If the account is not found in the cache, it will be loaded from the /// database and inserted into the cache. 
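A hedged usage sketch of the balance-increment path documented above, assuming `state` is an already-built `State<DB>` and `withdrawals` yields `(Address, u128)` pairs; the zero filter follows the requirement stated in the doc comment.

```rust
// Zero amounts must be filtered out before calling, per the doc comment above.
let credits = withdrawals
    .into_iter()
    .filter(|(_, amount): &(Address, u128)| *amount != 0);
state.increment_balances(credits)?;
```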
pub fn load_cache_account(&mut self, address: Address) -> Result<&mut CacheAccount, DB::Error> { match self.cache.accounts.entry(address) { hash_map::Entry::Vacant(entry) => { if self.use_preloaded_bundle { - // load account from bundle state + // Load account from bundle state if let Some(account) = self.bundle_state.account(&address).cloned().map(Into::into) { return Ok(entry.insert(account)); } } - // if not found in bundle, load it from database + // If not found in bundle, load it from database let info = self.database.basic(address)?; let account = match info { None => CacheAccount::new_loaded_not_existing(), @@ -199,12 +212,13 @@ impl State { } } - // TODO make cache aware of transitions dropping by having global transition counter. - /// Takes the [`BundleState`] changeset from the [`State`], replacing it + // TODO : Make cache aware of transitions dropping by having global transition counter. + /// Takess the [`BundleState`] changeset from the [`State`], replacing it /// with an empty one. /// - /// This will not apply any pending [`TransitionState`]. It is recommended - /// to call [`State::merge_transitions`] before taking the bundle. + /// This will not apply any pending [`TransitionState`]. + /// + /// It is recommended to call [`State::merge_transitions`] before taking the bundle. /// /// If the `State` has been built with the /// [`StateBuilder::with_bundle_prestate`] option, the pre-state will be @@ -231,7 +245,7 @@ impl Database for State { return Ok(code.clone()); } } - // if not found in bundle ask database + // If not found in bundle ask database let code = self.database.code_by_hash(code_hash)?; entry.insert(code.clone()); Ok(code) @@ -244,7 +258,7 @@ impl Database for State { // Account is guaranteed to be loaded. // Note that storage from bundle is already loaded with account. if let Some(account) = self.cache.accounts.get_mut(&address) { - // account will always be some, but if it is not, U256::ZERO will be returned. + // Account will always be some, but if it is not, U256::ZERO will be returned. let is_storage_known = account.status.is_storage_known(); Ok(account .account @@ -252,7 +266,7 @@ impl Database for State { .map(|account| match account.storage.entry(index) { hash_map::Entry::Occupied(entry) => Ok(*entry.get()), hash_map::Entry::Vacant(entry) => { - // if account was destroyed or account is newly built + // If account was destroyed or account is newly built // we return zero and don't ask database. let value = if is_storage_known { U256::ZERO @@ -276,7 +290,7 @@ impl Database for State { btree_map::Entry::Vacant(entry) => { let ret = *entry.insert(self.database.block_hash(number)?); - // prune all hashes that are older then BLOCK_HASH_HISTORY + // Prune all hashes that are older then BLOCK_HASH_HISTORY let last_block = number.saturating_sub(BLOCK_HASH_HISTORY); while let Some(entry) = self.block_hashes.first_entry() { if *entry.key() < last_block { diff --git a/crates/database/src/states/state_builder.rs b/crates/database/src/states/state_builder.rs index ed67c253a9..4ae5292d8d 100644 --- a/crates/database/src/states/state_builder.rs +++ b/crates/database/src/states/state_builder.rs @@ -6,29 +6,33 @@ use std::collections::BTreeMap; /// Allows building of State and initializing it with different options. #[derive(Clone, Debug, PartialEq, Eq)] pub struct StateBuilder { - /// Database that we use to fetch data from. + /// Database that we use to fetch data from database: DB, - /// Enabled state clear flag that is introduced in Spurious Dragon hardfork. 
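The end-of-block flow implied by `merge_transitions` and the bundle-taking method above, as a hedged sketch; `BundleRetention::Reverts` as the retention argument is an assumption based on current revm conventions.

```rust
// After executing all transactions in a block:
state.merge_transitions(BundleRetention::Reverts); // fold transitions, keep reverts
let bundle = state.take_bundle();                  // leaves an empty bundle behind
// `bundle` now holds the plain state changes plus per-block reverts,
// ready to be written to the database.
```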
+ /// Enabled state clear flag that is introduced in Spurious Dragon hardfork + /// /// Default is true as spurious dragon happened long time ago. with_state_clear: bool, - /// if there is prestate that we want to use. - /// This would mean that we have additional state layer between evm and disk/database. + /// If there is prestate that we want to use, + /// this would mean that we have additional state layer between evm and disk/database. with_bundle_prestate: Option, /// This will initialize cache to this state. with_cache_prestate: Option, - /// Do we want to create reverts and update bundle state. + /// Do we want to create reverts and update bundle state? + /// /// Default is false. with_bundle_update: bool, - /// Do we want to merge transitions in background. + /// Do we want to merge transitions in background? + /// /// This will allows evm to continue executing. + /// /// Default is false. with_background_transition_merge: bool, - /// If we want to set different block hashes + /// If we want to set different block hashes, with_block_hashes: BTreeMap, } impl StateBuilder { - /// Create a new builder with an empty database. + /// Creates a new builder with an empty database. /// /// If you want to instantiate it with a specific database, use /// [`new_with_database`](Self::new_with_database). @@ -59,7 +63,7 @@ impl StateBuilder { /// Set the database. pub fn with_database(self, database: ODB) -> StateBuilder { - // cast to the different database, + // Cast to the different database. // Note that we return different type depending of the database NewDBError. StateBuilder { with_state_clear: self.with_state_clear, @@ -98,8 +102,11 @@ impl StateBuilder { } /// Allows setting prestate that is going to be used for execution. + /// + /// # Note /// This bundle state will act as additional layer of cache. - /// and State after not finding data inside StateCache will try to find it inside BundleState. + /// + /// And State after not finding data inside StateCache will try to find it inside BundleState. /// /// On update Bundle state will be changed and updated. pub fn with_bundle_prestate(self, bundle: BundleState) -> Self { @@ -109,7 +116,7 @@ impl StateBuilder { } } - /// Make transitions and update bundle state. + /// Makes transitions and update bundle state. /// /// This is needed option if we want to create reverts /// and getting output of changed states. @@ -120,8 +127,11 @@ impl StateBuilder { } } - /// It will use different cache for the state. If set, it will ignore bundle prestate. - /// and will ignore `without_state_clear` flag as cache contains its own state_clear flag. + /// It will use different cache for the state. + /// + /// **Note**: If set, it will ignore bundle prestate. + /// + /// And will ignore `without_state_clear` flag as cache contains its own state_clear flag. /// /// This is useful for testing. pub fn with_cached_prestate(self, cache: CacheState) -> Self { diff --git a/crates/database/src/states/transition_account.rs b/crates/database/src/states/transition_account.rs index a2838ad8cd..a247b5a64f 100644 --- a/crates/database/src/states/transition_account.rs +++ b/crates/database/src/states/transition_account.rs @@ -76,7 +76,7 @@ impl TransitionAccount { self.info = other.info; self.status = other.status; - // if transition is from some to destroyed drop the storage. + // If transition is from some to destroyed drop the storage. // This need to be done here as it is one increment of the state. 
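A hedged construction sketch tying the builder options above together; `State::builder()` and `build()` are assumed entry points, and `db` stands for any backend implementing the `Database` trait.

```rust
let mut state = State::builder()
    .with_database(db)        // fetch cache misses from this backend
    .with_bundle_update()     // create reverts and maintain the bundle state
    .build();
```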
if matches!( other.status, @@ -85,7 +85,7 @@ impl TransitionAccount { self.storage = other.storage; self.storage_was_destroyed = true; } else { - // update changed values to this transition. + // Update changed values to this transition. for (key, slot) in other.storage.into_iter() { match self.storage.entry(key) { hash_map::Entry::Vacant(entry) => { @@ -93,11 +93,11 @@ impl TransitionAccount { } hash_map::Entry::Occupied(mut entry) => { let value = entry.get_mut(); - // if new value is same as original value. Remove storage entry. + // If new value is same as original value. Remove storage entry. if value.original_value() == slot.present_value() { entry.remove(); } else { - // if value is different, update transition present value; + // If value is different, update transition present value; value.present_value = slot.present_value; } } diff --git a/crates/database/src/states/transition_state.rs b/crates/database/src/states/transition_state.rs index 83be623a6e..ec1455b753 100644 --- a/crates/database/src/states/transition_state.rs +++ b/crates/database/src/states/transition_state.rs @@ -17,14 +17,17 @@ impl TransitionState { } /// Take the contents of this [`TransitionState`] and replace it with an - /// empty one. See [`core::mem::take`]. + /// empty one. + /// + /// See [core::mem::take]. pub fn take(&mut self) -> TransitionState { core::mem::take(self) } - /// Add transitions to the transition state. This will insert new - /// [`TransitionAccount`]s, or update existing ones via - /// [`TransitionAccount::update`]. + /// Add transitions to the transition state. + /// + /// This will insert new [`TransitionAccount`]s, or update existing ones via + /// [`update`][TransitionAccount::update]. pub fn add_transitions(&mut self, transitions: Vec<(Address, TransitionAccount)>) { for (address, account) in transitions { match self.transitions.entry(address) { diff --git a/crates/handler/interface/src/execution.rs b/crates/handler/interface/src/execution.rs index 6b1c9cdaa1..5a15b118ea 100644 --- a/crates/handler/interface/src/execution.rs +++ b/crates/handler/interface/src/execution.rs @@ -38,11 +38,11 @@ pub trait ExecutionHandler { frame_stack.push(new_frame); continue; } - // dont pop the frame as new frame was not created. + // Dont pop the frame as new frame was not created. FrameOrResultGen::Result(result) => result, }, FrameOrResultGen::Result(result) => { - // pop frame that returned result + // Pop frame that returned result frame_stack.pop(); result } diff --git a/crates/handler/interface/src/frame.rs b/crates/handler/interface/src/frame.rs index ba0af0217a..9452c91eec 100644 --- a/crates/handler/interface/src/frame.rs +++ b/crates/handler/interface/src/frame.rs @@ -1,6 +1,6 @@ use crate::FrameOrResultGen; -/// Call frame trait. +/// Call frame trait pub trait Frame: Sized { type Context; type FrameInit; diff --git a/crates/handler/interface/src/post_execution.rs b/crates/handler/interface/src/post_execution.rs index 64d2f08a1d..b031573273 100644 --- a/crates/handler/interface/src/post_execution.rs +++ b/crates/handler/interface/src/post_execution.rs @@ -4,7 +4,7 @@ pub trait PostExecutionHandler { type ExecResult; type Output; - /// Calculate final refund + /// Calculate final refund. fn refund( &self, context: &mut Self::Context, @@ -26,7 +26,7 @@ pub trait PostExecutionHandler { exec_result: &mut Self::ExecResult, ) -> Result<(), Self::Error>; - /// Main return handle, takes state from journal and transforms internal result to [`PostExecutionHandler::Output`]. 
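The storage-merge rule in `TransitionAccount::update` above is easy to miss, so here is a simplified, self-contained sketch of it; plain integers stand in for the crate's slot types.

```rust
use std::collections::{hash_map::Entry, HashMap};

/// Per-slot (original_value, present_value), simplified.
type Slots = HashMap<u64, (u64, u64)>;

/// Mirrors the rule above: if the newest present value equals the slot's
/// original value, the slot is a net no-op and its entry is dropped;
/// otherwise only the present value is advanced.
fn merge_storage(current: &mut Slots, newer: Slots) {
    for (key, (original, present)) in newer {
        match current.entry(key) {
            Entry::Vacant(e) => {
                e.insert((original, present));
            }
            Entry::Occupied(mut e) => {
                if e.get().0 == present {
                    e.remove();
                } else {
                    e.get_mut().1 = present;
                }
            }
        }
    }
}
```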
+ /// Main return handle, takes state from journal and transforms internal result to [`Output`][PostExecutionHandler::Output]. fn output( &self, context: &mut Self::Context, @@ -36,7 +36,8 @@ pub trait PostExecutionHandler { /// Called when execution ends. /// /// End handle in comparison to output handle will be called every time after execution. - /// While [`PostExecutionHandler::output`] will be omitted in case of the error. + /// + /// While [`output`][PostExecutionHandler::output] will be omitted in case of the error. fn end( &self, _context: &mut Self::Context, @@ -45,7 +46,8 @@ pub trait PostExecutionHandler { end_output } - /// Clean handler. This handle is called every time regardless - /// of the result of the transaction. + /// Clean handler. + /// + /// This handle is called every time regardless of the result of the transaction. fn clear(&self, context: &mut Self::Context); } diff --git a/crates/handler/src/execution.rs b/crates/handler/src/execution.rs index 0238feabed..2eab8c1a1c 100644 --- a/crates/handler/src/execution.rs +++ b/crates/handler/src/execution.rs @@ -65,7 +65,7 @@ where return_memory_offset: 0..0, })), TxKind::Create => { - // if first byte of data is magic 0xEF00, then it is EOFCreate. + // If first byte of data is magic 0xEF00, then it is EOFCreate. if spec.is_enabled_in(SpecId::OSAKA) && input.starts_with(&EOF_MAGIC_BYTES) { FrameInput::EOFCreate(Box::new(EOFCreateInputs::new( tx.common_fields().caller(), diff --git a/crates/handler/src/frame.rs b/crates/handler/src/frame.rs index 6bbac9354d..0d4ace11d0 100644 --- a/crates/handler/src/frame.rs +++ b/crates/handler/src/frame.rs @@ -29,7 +29,7 @@ use std::{rc::Rc, sync::Arc}; pub struct EthFrame { _phantom: core::marker::PhantomData (CTX, ERROR)>, data: FrameData, - // TODO include this + // TODO : Include this depth: usize, /// Journal checkpoint. pub checkpoint: JournalCheckpoint, @@ -147,7 +147,7 @@ where .journal() .load_account_code(inputs.bytecode_address)?; - // TODO Request from foundry to get bytecode hash. + // TODO : Request from foundry to get bytecode hash. let _code_hash = account.info.code_hash(); let mut bytecode = account.info.code.clone().unwrap_or_default(); @@ -257,7 +257,7 @@ where } // Create address - // TODO incorporating code hash inside interpreter. It was a request by foundry. + // TODO : Incorporating code hash inside interpreter. It was a request by foundry. let mut _init_code_hash = B256::ZERO; let created_address = match inputs.scheme { CreateScheme::Create => inputs.caller.create(old_nonce), @@ -270,7 +270,7 @@ where // warm load account. context.journal().load_account(created_address)?; - // create account, transfer funds and make the journal checkpoint. + // Create account, transfer funds and make the journal checkpoint. let checkpoint = match context.journal().create_account_checkpoint( inputs.caller, created_address, @@ -340,15 +340,15 @@ where created_address, } => (input.clone(), initcode.clone(), Some(*created_address)), EOFCreateKind::Tx { initdata } => { - // decode eof and init code. - // TODO handle inc_nonce handling more gracefully. + // Decode eof and init code. + // TODO : Handle inc_nonce handling more gracefully. let Ok((eof, input)) = Eof::decode_dangling(initdata.clone()) else { context.journal().inc_account_nonce(inputs.caller)?; return return_error(InstructionResult::InvalidEOFInitCode); }; if eof.validate().is_err() { - // TODO (EOF) new error type. + // TODO : (EOF) New error type. 
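The create-vs-EOFCreate dispatch above keys off a two-byte magic; a minimal standalone sketch of that check, mirroring the `EOF_MAGIC_BYTES` comparison in the hunk:

```rust
/// EOF containers start with the magic bytes 0xEF00.
const EOF_MAGIC: [u8; 2] = [0xEF, 0x00];

/// Returns true when create-transaction input should be treated as EOFCreate.
fn is_eofcreate_input(input: &[u8]) -> bool {
    input.starts_with(&EOF_MAGIC)
}
```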
context.journal().inc_account_nonce(inputs.caller)?; return return_error(InstructionResult::InvalidEOFInitCode); } @@ -379,7 +379,7 @@ where // Increase nonce of caller and check if it overflows let Some(nonce) = context.journal().inc_account_nonce(inputs.caller)? else { - // can't happen on mainnet. + // Can't happen on mainnet. return return_error(InstructionResult::Return); }; let old_nonce = nonce - 1; @@ -389,7 +389,7 @@ where // Load account so it needs to be marked as warm for access list. context.journal().load_account(created_address)?; - // create account, transfer funds and make the journal checkpoint. + // Create account, transfer funds and make the journal checkpoint. let checkpoint = match context.journal().create_account_checkpoint( inputs.caller, created_address, @@ -474,7 +474,7 @@ where let precompiles = PRECOMPILE::new(context); let instructions = INSTRUCTION::new(context); - // load precompiles addresses as warm. + // Load precompiles addresses as warm. for address in precompiles.warm_addresses() { context.journal().warm_account(address); } @@ -505,7 +505,7 @@ where ) -> Result, Self::Error> { let spec = context.cfg().spec().into(); - // run interpreter + // Run interpreter let next_action = self.interpreter.run(self.instructions.table(), context); let mut interpreter_result = match next_action { @@ -520,7 +520,7 @@ where let result = match &self.data { FrameData::Call(frame) => { // return_call - // revert changes or not. + // Revert changes or not. if interpreter_result.result.is_ok() { context.journal().checkpoint_commit(); } else { @@ -609,7 +609,7 @@ where // Safe to push without stack limit check let _ = interpreter.stack.push(item); - // return unspend gas. + // Return unspend gas. if ins_result.is_ok_or_revert() { interpreter.control.gas().erase_cost(out_gas.remaining()); self.memory @@ -701,13 +701,13 @@ pub fn return_create( max_code_size: usize, spec_id: SpecId, ) { - // if return is not ok revert and return. + // If return is not ok revert and return. if !interpreter_result.result.is_ok() { journal.checkpoint_revert(checkpoint); return; } // Host error if present on execution - // if ok, check contract creation limit and calculate gas deduction on output len. + // If ok, check contract creation limit and calculate gas deduction on output len. // // EIP-3541: Reject new contract code starting with the 0xEF byte if spec_id.is_enabled_in(LONDON) && interpreter_result.output.first() == Some(&0xEF) { @@ -725,10 +725,10 @@ pub fn return_create( } let gas_for_code = interpreter_result.output.len() as u64 * gas::CODEDEPOSIT; if !interpreter_result.gas.record_cost(gas_for_code) { - // record code deposit gas cost and check if we are out of gas. + // Record code deposit gas cost and check if we are out of gas. // EIP-2 point 3: If contract creation does not have enough gas to pay for the // final gas fee for adding the contract code to the state, the contract - // creation fails (i.e. goes out-of-gas) rather than leaving an empty contract. + // creation fails (i.e. goes out-of-gas) rather than leaving an empty contract. if spec_id.is_enabled_in(HOMESTEAD) { journal.checkpoint_revert(checkpoint); interpreter_result.result = InstructionResult::OutOfGas; @@ -737,13 +737,13 @@ pub fn return_create( interpreter_result.output = Bytes::new(); } } - // if we have enough gas we can commit changes. + // If we have enough gas we can commit changes. journal.checkpoint_commit(); // Do analysis of bytecode straight away. 
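For the code-deposit charge applied in `return_create` above, a worked standalone sketch; 200 gas per byte is the standard `CODEDEPOSIT` constant.

```rust
/// Gas charged for storing the returned runtime code.
const CODEDEPOSIT: u64 = 200;

fn code_deposit_gas(runtime_code_len: usize) -> u64 {
    runtime_code_len as u64 * CODEDEPOSIT
}

// A 1_000-byte contract pays an extra 200_000 gas at deployment; under EIP-2
// (Homestead) running out here fails the create rather than silently
// deploying an empty contract.
```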
let bytecode = Bytecode::new_legacy(interpreter_result.output.clone()); - // set code + // Set code journal.set_code(address, bytecode); interpreter_result.result = InstructionResult::Return; @@ -774,7 +774,7 @@ pub fn return_eofcreate( return; } - // deduct gas for code deployment. + // Deduct gas for code deployment. let gas_for_code = interpreter_result.output.len() as u64 * gas::CODEDEPOSIT; if !interpreter_result.gas.record_cost(gas_for_code) { journal.checkpoint_revert(checkpoint); @@ -784,10 +784,10 @@ pub fn return_eofcreate( journal.checkpoint_commit(); - // decode bytecode has a performance hit, but it has reasonable restrains. + // Decode bytecode has a performance hit, but it has reasonable restrains. let bytecode = Eof::decode(interpreter_result.output.clone()).expect("Eof is already verified"); - // eof bytecode is going to be hashed. + // Eof bytecode is going to be hashed. journal.set_code(address, Bytecode::Eof(Arc::new(bytecode))); } diff --git a/crates/handler/src/frame_data.rs b/crates/handler/src/frame_data.rs index 1ad312e1ae..3e0f193bf8 100644 --- a/crates/handler/src/frame_data.rs +++ b/crates/handler/src/frame_data.rs @@ -3,7 +3,7 @@ use core::ops::Range; use interpreter::{CallOutcome, CreateOutcome, Gas, InstructionResult, InterpreterResult}; use primitives::Address; -/// Call CallStackFrame. +/// Call Frame //#[derive(Debug)] //#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CallFrame { @@ -11,6 +11,7 @@ pub struct CallFrame { pub return_memory_range: Range, } +/// Create Frame //#[derive(Debug)] //#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CreateFrame { @@ -18,14 +19,16 @@ pub struct CreateFrame { pub created_address: Address, } -/// Eof Create Frame. +/// Eof Create Frame //#[derive(Debug)] //#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct EOFCreateFrame { pub created_address: Address, } -/// Call stack frame. +/// Frame Data +/// +/// [`FrameData`] bundles different types of frames. //#[derive(Debug)] //#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum FrameData { @@ -34,6 +37,7 @@ pub enum FrameData { EOFCreate(EOFCreateFrame), } +/// Frame Result #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Debug)] pub enum FrameResult { diff --git a/crates/handler/src/post_execution.rs b/crates/handler/src/post_execution.rs index 69dabe9e83..cb988b586b 100644 --- a/crates/handler/src/post_execution.rs +++ b/crates/handler/src/post_execution.rs @@ -70,7 +70,7 @@ where let effective_gas_price = context.tx().effective_gas_price(basefee); let gas = exec_result.gas(); - // return balance of not spend gas. + // Return balance of not spend gas. let caller_account = context.journal().load_account(caller)?; let reimbursed = effective_gas_price * U256::from(gas.remaining() + gas.refunded() as u64); @@ -92,7 +92,7 @@ where let effective_gas_price = tx.effective_gas_price(basefee); let gas = exec_result.gas(); - // transfer fee to coinbase/beneficiary. + // Transfer fee to coinbase/beneficiary. // EIP-1559 discard basefee for coinbase transfer. Basefee amount of gas is discarded. let coinbase_gas_price = if context.cfg().spec().into().is_enabled_in(SpecId::LONDON) { effective_gas_price.saturating_sub(basefee) @@ -118,13 +118,13 @@ where ) -> Result { context.take_error()?; - // used gas with refund calculated. + // Used gas with refund calculated. 
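The beneficiary reward above discards the base fee after London; a standalone sketch of that price split:

```rust
/// Portion of the gas price that reaches the coinbase.
/// After EIP-1559 (London) the base fee is burned, so only the tip is paid out.
fn coinbase_gas_price(effective_gas_price: u128, basefee: u128, is_london: bool) -> u128 {
    if is_london {
        effective_gas_price.saturating_sub(basefee)
    } else {
        effective_gas_price
    }
}
```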
let gas_refunded = result.gas().refunded() as u64; let final_gas_used = result.gas().spent() - gas_refunded; let output = result.output(); let instruction_result = result.into_interpreter_result(); - // reset journal and return present state. + // Reset journal and return present state. let (state, logs) = context.journal().finalize()?; let result = match SuccessOrHalt::::from(instruction_result.result) { @@ -156,8 +156,8 @@ where } fn clear(&self, context: &mut Self::Context) { - // clear error and journaled state. - // TODO check effects of removal of take_error + // Clear error and journaled state. + // TODO : Check effects of removal of take_error // let _ = context.evm.take_error(); context.journal().clear(); } @@ -165,7 +165,7 @@ where /// Trait for post execution context. /// -/// TODO Generalize FinalOutput. +// TODO : Generalize FinalOutput. pub trait EthPostExecutionContext: TransactionGetter + ErrorGetter diff --git a/crates/handler/src/pre_execution.rs b/crates/handler/src/pre_execution.rs index daa22a2529..665827328f 100644 --- a/crates/handler/src/pre_execution.rs +++ b/crates/handler/src/pre_execution.rs @@ -44,10 +44,10 @@ where fn load_accounts(&self, context: &mut Self::Context) -> Result<(), Self::Error> { let spec = context.cfg().spec().into(); - // set journaling state flag. + // Set journaling state flag. context.journal().set_spec_id(spec); - // load coinbase + // Load coinbase // EIP-3651: Warm COINBASE. Starts the `COINBASE` address warm if spec.is_enabled_in(SpecId::SHANGHAI) { let coinbase = *context.block().beneficiary(); @@ -101,18 +101,18 @@ where let is_call = context.tx().kind().is_call(); let caller = context.tx().common_fields().caller(); - // load caller's account. + // Load caller's account. let caller_account = context.journal().load_account(caller)?.data; - // set new caller account balance. + // Set new caller account balance. caller_account.info.balance = caller_account.info.balance.saturating_sub(gas_cost); - // bump the nonce for calls. Nonce for CREATE will be bumped in `handle_create`. + // Bump the nonce for calls. Nonce for CREATE will be bumped in `handle_create`. if is_call { // Nonce is already checked caller_account.info.nonce = caller_account.info.nonce.saturating_add(1); } - // touch account so we know it is changed. + // Touch account so we know it is changed. caller_account.mark_touch(); Ok(()) } @@ -126,7 +126,7 @@ pub fn apply_eip7702_auth_list< >( context: &mut CTX, ) -> Result { - // return if there is no auth list. + // Return if there is no auth list. let tx = context.tx(); if tx.tx_type().into() != TransactionType::Eip7702 { return Ok(0); @@ -153,7 +153,7 @@ pub fn apply_eip7702_auth_list< let mut refunded_accounts = 0; for authorization in authorization_list { - // 1. recover authority and authorized addresses. + // 1. Recover authority and authorized addresses. // authority = ecrecover(keccak(MAGIC || rlp([chain_id, address, nonce])), y_parity, r, s] let Some(authority) = authorization.authority else { continue; @@ -164,13 +164,13 @@ pub fn apply_eip7702_auth_list< continue; } - // warm authority account and check nonce. + // Warm authority account and check nonce. // 3. Add authority to accessed_addresses (as defined in EIP-2929.) let mut authority_acc = context.journal().load_account_code(authority)?; // 4. Verify the code of authority is either empty or already delegated. 
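Step 4 above checks whether the authority's code is an EIP-7702 delegation designation; a minimal sketch of that code shape (0xef0100 followed by a 20-byte address, 23 bytes total):

```rust
/// EIP-7702 delegation designation prefix.
const EIP7702_PREFIX: [u8; 3] = [0xEF, 0x01, 0x00];

/// True when `code` is a delegation designation rather than ordinary bytecode.
fn is_delegation_designation(code: &[u8]) -> bool {
    code.len() == 23 && code.starts_with(&EIP7702_PREFIX)
}
```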
if let Some(bytecode) = &authority_acc.info.code { - // if it is not empty and it is not eip7702 + // If it is not empty and it is not eip7702 if !bytecode.is_empty() && !bytecode.is_eip7702() { continue; } diff --git a/crates/handler/src/validation.rs b/crates/handler/src/validation.rs index 0e24809e4b..6ac4d52848 100644 --- a/crates/handler/src/validation.rs +++ b/crates/handler/src/validation.rs @@ -66,7 +66,7 @@ where fn validate_tx_against_state(&self, context: &mut Self::Context) -> Result<(), Self::Error> { let tx_caller = context.tx().common_fields().caller(); - // load acc + // Load acc let account = &mut context.journal().load_account_code(tx_caller)?; let account = account.data.clone(); @@ -87,11 +87,11 @@ pub fn validate_priority_fee_tx( base_fee: Option, ) -> Result<(), InvalidTransaction> { if max_priority_fee > max_fee { - // or gas_max_fee for eip1559 + // Or gas_max_fee for eip1559 return Err(InvalidTransaction::PriorityFeeGreaterThanMaxFee); } - // check minimal cost against basefee + // Check minimal cost against basefee if let Some(base_fee) = base_fee { let effective_gas_price = cmp::min( U256::from(max_fee), @@ -111,24 +111,24 @@ pub fn validate_eip4844_tx( max_blob_fee: u128, block_blob_gas_price: u128, ) -> Result<(), InvalidTransaction> { - // ensure that the user was willing to at least pay the current blob gasprice + // Ensure that the user was willing to at least pay the current blob gasprice if block_blob_gas_price > max_blob_fee { return Err(InvalidTransaction::BlobGasPriceGreaterThanMax); } - // there must be at least one blob + // There must be at least one blob if blobs.is_empty() { return Err(InvalidTransaction::EmptyBlobs); } - // all versioned blob hashes must start with VERSIONED_HASH_VERSION_KZG + // All versioned blob hashes must start with VERSIONED_HASH_VERSION_KZG for blob in blobs { if blob[0] != eip4844::VERSIONED_HASH_VERSION_KZG { return Err(InvalidTransaction::BlobVersionNotSupported); } } - // ensure the total blob gas spent is at most equal to the limit + // Ensure the total blob gas spent is at most equal to the limit // assert blob_gas_used <= MAX_BLOB_GAS_PER_BLOCK if blobs.len() > eip4844::MAX_BLOB_NUMBER_PER_BLOCK as usize { return Err(InvalidTransaction::TooManyBlobs { @@ -160,14 +160,14 @@ where match tx_type { TransactionType::Legacy => { let tx = context.tx().legacy(); - // check chain_id only if it is present in the legacy transaction. + // Check chain_id only if it is present in the legacy transaction. // EIP-155: Simple replay attack protection if let Some(chain_id) = tx.chain_id() { if chain_id != context.cfg().chain_id() { return Err(InvalidTransaction::InvalidChainId.into()); } } - // gas price must be at least the basefee. + // Gas price must be at least the basefee. if let Some(base_fee) = base_fee { if U256::from(tx.gas_price()) < base_fee { return Err(InvalidTransaction::GasPriceLessThanBasefee.into()); @@ -175,7 +175,7 @@ where } } TransactionType::Eip2930 => { - // enabled in BERLIN hardfork + // Enabled in BERLIN hardfork if !spec_id.is_enabled_in(SpecId::BERLIN) { return Err(InvalidTransaction::Eip2930NotSupported.into()); } @@ -185,7 +185,7 @@ where return Err(InvalidTransaction::InvalidChainId.into()); } - // gas price must be at least the basefee. + // Gas price must be at least the basefee. 
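The priority-fee validation above relies on the EIP-1559 effective price; a worked sketch of that formula:

```rust
/// EIP-1559 effective gas price: capped by max_fee, otherwise base fee plus tip.
fn effective_gas_price(max_fee: u128, max_priority_fee: u128, base_fee: u128) -> u128 {
    core::cmp::min(max_fee, base_fee.saturating_add(max_priority_fee))
}

// With base_fee = 30 gwei, max_priority_fee = 2 gwei and max_fee = 31 gwei,
// the sender pays 31 gwei per gas: 30 is burned and 1 reaches the coinbase.
```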
if let Some(base_fee) = base_fee { if U256::from(tx.gas_price()) < base_fee { return Err(InvalidTransaction::GasPriceLessThanBasefee.into()); @@ -231,7 +231,7 @@ where )?; } TransactionType::Eip7702 => { - // check if EIP-7702 transaction is enabled. + // Check if EIP-7702 transaction is enabled. if !spec_id.is_enabled_in(SpecId::PRAGUE) { return Err(InvalidTransaction::Eip7702NotSupported.into()); } @@ -253,7 +253,7 @@ where return Err(InvalidTransaction::EmptyAuthorizationList.into()); } - // TODO temporary here as newest EIP have removed this check. + // TODO : Temporary here as newest EIP have removed this check. for auth in tx.authorization_list_iter() { if auth.is_invalid() { return Err(InvalidTransaction::Eip7702NotSupported.into()); @@ -261,7 +261,7 @@ where } } TransactionType::Custom => { - // custom transaction type check is not done here. + // Custom transaction type check is not done here. } }; @@ -298,7 +298,7 @@ where // so we can leave it enabled always if !context.cfg().is_eip3607_disabled() { let bytecode = &account.info.code.as_ref().unwrap(); - // allow EOAs whose code is a valid delegation designation, + // Allow EOAs whose code is a valid delegation designation, // i.e. 0xef0100 || address, to continue to originate transactions. if !bytecode.is_empty() && !bytecode.is_eip7702() { return Err(InvalidTransaction::RejectCallerWithCode.into()); diff --git a/crates/inspector/src/eip3155.rs b/crates/inspector/src/eip3155.rs index aa19fac330..dfc225e9c0 100644 --- a/crates/inspector/src/eip3155.rs +++ b/crates/inspector/src/eip3155.rs @@ -262,7 +262,7 @@ where if context.journal().depth() == 0 { self.print_summary(&outcome.result, context); - // clear the state if we are at the top level + // Clear the state if we are at the top level self.clear(); } } @@ -273,7 +273,7 @@ where if context.journal().depth() == 0 { self.print_summary(&outcome.result, context); - // clear the state if we are at the top level + // Clear the state if we are at the top level self.clear(); } } diff --git a/crates/inspector/src/gas.rs b/crates/inspector/src/gas.rs index dcc4133f79..303d4f52f5 100644 --- a/crates/inspector/src/gas.rs +++ b/crates/inspector/src/gas.rs @@ -185,12 +185,12 @@ impl GasInspector { // .append_handler_register(inspector_handle_register) // .build(); -// // run evm. +// // Run evm. // evm.transact().unwrap(); // let inspector = evm.into_context().external; -// // starting from 100gas +// // Starting from 100gas // let steps = vec![ // // push1 -3 // (0, 97), diff --git a/crates/inspector/src/inspector.rs b/crates/inspector/src/inspector.rs index a1d41a5a19..e4affa64a0 100644 --- a/crates/inspector/src/inspector.rs +++ b/crates/inspector/src/inspector.rs @@ -297,7 +297,7 @@ impl< } fn code(&mut self, address: Address) -> Option> { - // TODO remove duplicated function name. + // TODO : Remove duplicated function name. as Host>::code(&mut self.inner, address) } @@ -535,7 +535,7 @@ where type Host = HOST; fn exec(&self, interpreter: &mut Interpreter, host: &mut Self::Host) { - // SAFETY: as the PC was already incremented we need to subtract 1 to preserve the + // SAFETY: As the PC was already incremented we need to subtract 1 to preserve the // old Inspector behavior. 
interpreter.bytecode.relative_jump(-1); @@ -625,8 +625,7 @@ where >(table) }; - // inspector log wrapper - + // Inspector log wrapper fn inspector_log( interpreter: &mut Interpreter<::IT>, context: &mut CTX, @@ -705,8 +704,7 @@ pub struct InspectorEthFrame where CTX: Host, { - /// TODO for now hardcode the InstructionProvider. But in future this should be configurable - /// as generic parameter. + // TODO : For now, hardcode the InstructionProvider. But in future this should be configurable as generic parameter. pub eth_frame: EthFrame< CTX, ERROR, @@ -774,7 +772,7 @@ where context.initialize_interp(&mut frame.eth_frame.interpreter); } - // TODO handle last frame_end. MAKE a separate function for `last_return_result`. + // TODO : Handle last frame_end. MAKE a separate function for `last_return_result`. ret } diff --git a/crates/interpreter/Cargo.toml b/crates/interpreter/Cargo.toml index 231d892843..983157e7d8 100644 --- a/crates/interpreter/Cargo.toml +++ b/crates/interpreter/Cargo.toml @@ -51,5 +51,5 @@ serde = [ "context-interface/serde", ] arbitrary = ["std", "primitives/arbitrary"] -# TODO Should be set from Context or from crate that consumes this PR. +# TODO : Should be set from Context or from crate that consumes this PR. memory_limit = [] diff --git a/crates/interpreter/src/gas.rs b/crates/interpreter/src/gas.rs index 308e917425..e3295cfe2b 100644 --- a/crates/interpreter/src/gas.rs +++ b/crates/interpreter/src/gas.rs @@ -187,8 +187,8 @@ impl MemoryGas { self.words_num = new_num; let mut cost = crate::gas::calc::memory_gas(new_num); core::mem::swap(&mut self.expansion_cost, &mut cost); - // safe to subtract because we know that new_len > length - // notice the swap above. + // Safe to subtract because we know that new_len > length + // Notice the swap above. Some(self.expansion_cost - cost) } } diff --git a/crates/interpreter/src/gas/calc.rs b/crates/interpreter/src/gas/calc.rs index eccbff365f..8bdee00324 100644 --- a/crates/interpreter/src/gas/calc.rs +++ b/crates/interpreter/src/gas/calc.rs @@ -296,16 +296,16 @@ pub const fn call_cost(spec_id: SpecId, transfers_value: bool, account_load: Acc 40 }; - // transfer value cost + // Transfer value cost if transfers_value { gas += CALLVALUE; } - // new account cost + // New account cost if account_load.is_empty { // EIP-161: State trie clearing (invariant-preserving alternative) if spec_id.is_enabled_in(SpecId::SPURIOUS_DRAGON) { - // account only if there is value transferred. + // Account only if there is value transferred. if transfers_value { gas += NEWACCOUNT; } @@ -361,7 +361,7 @@ pub fn validate_initial_tx_gas( let zero_data_len = input.iter().filter(|v| **v == 0).count() as u64; let non_zero_data_len = input.len() as u64 - zero_data_len; - // initdate stipend + // Initdate stipend initial_gas += zero_data_len * TRANSACTION_ZERO_DATA; // EIP-2028: Transaction data gas cost reduction initial_gas += non_zero_data_len @@ -371,14 +371,14 @@ pub fn validate_initial_tx_gas( 68 }; - // get number of access list account and storages. + // Get number of access list account and storages. 
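A standalone sketch of the intrinsic-gas pieces computed in `validate_initial_tx_gas` above for the non-create case: 21000 base, 4 gas per zero calldata byte, 16 per non-zero byte after EIP-2028 (68 before), plus the Berlin access-list constants that appear in the following hunk.

```rust
const TX_BASE: u64 = 21_000;
const ZERO_BYTE: u64 = 4;
const NONZERO_BYTE_ISTANBUL: u64 = 16; // EIP-2028
const NONZERO_BYTE_FRONTIER: u64 = 68;
const ACCESS_LIST_ADDRESS: u64 = 2_400; // EIP-2930
const ACCESS_LIST_STORAGE_KEY: u64 = 1_900;

fn intrinsic_gas(input: &[u8], is_istanbul: bool, accounts: u64, storage_keys: u64) -> u64 {
    let zero = input.iter().filter(|b| **b == 0).count() as u64;
    let non_zero = input.len() as u64 - zero;
    let non_zero_cost = if is_istanbul { NONZERO_BYTE_ISTANBUL } else { NONZERO_BYTE_FRONTIER };
    TX_BASE
        + zero * ZERO_BYTE
        + non_zero * non_zero_cost
        + accounts * ACCESS_LIST_ADDRESS
        + storage_keys * ACCESS_LIST_STORAGE_KEY
}
```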
if let Some(access_list) = access_list { let (account_num, storage_num) = access_list.num_account_storages(); initial_gas += account_num as u64 * ACCESS_LIST_ADDRESS; initial_gas += storage_num as u64 * ACCESS_LIST_STORAGE_KEY; } - // base stipend + // Base stipend initial_gas += if is_create { if spec_id.is_enabled_in(SpecId::HOMESTEAD) { // EIP-2: Homestead Hard-fork Changes diff --git a/crates/interpreter/src/gas/constants.rs b/crates/interpreter/src/gas/constants.rs index 85d0f4abbc..8f99987244 100644 --- a/crates/interpreter/src/gas/constants.rs +++ b/crates/interpreter/src/gas/constants.rs @@ -39,7 +39,7 @@ pub const TRANSACTION_NON_ZERO_DATA_FRONTIER: u64 = 68; pub const EOF_CREATE_GAS: u64 = 32000; -// berlin eip2929 constants +// Berlin eip2929 constants pub const ACCESS_LIST_ADDRESS: u64 = 2400; pub const ACCESS_LIST_STORAGE_KEY: u64 = 1900; pub const COLD_SLOAD_COST: u64 = 2100; diff --git a/crates/interpreter/src/instruction_result.rs b/crates/interpreter/src/instruction_result.rs index ab04bc26eb..8e4b02dfdf 100644 --- a/crates/interpreter/src/instruction_result.rs +++ b/crates/interpreter/src/instruction_result.rs @@ -370,7 +370,7 @@ impl From for SuccessOrHalt { Self::Halt(HaltReason::CreateInitCodeSizeLimit.into()) } - // TODO (EOF) add proper Revert subtype. + // TODO : (EOF) Add proper Revert subtype. InstructionResult::InvalidEOFInitCode => Self::Revert, InstructionResult::FatalExternalError => Self::FatalExternalError, InstructionResult::EOFOpcodeDisabledInLegacy => { diff --git a/crates/interpreter/src/instructions.rs b/crates/interpreter/src/instructions.rs index 2b7250b150..baed840373 100644 --- a/crates/interpreter/src/instructions.rs +++ b/crates/interpreter/src/instructions.rs @@ -226,7 +226,7 @@ mod tests { // use crate::DummyHost; // use bytecode::opcode::*; - // TODO define EthEthereumWire + // TODO : Define EthEthereumWire // #[test] // fn all_instructions_and_opcodes_used() { // // known unknown instruction we compare it with other instructions from table. diff --git a/crates/interpreter/src/instructions/arithmetic.rs b/crates/interpreter/src/instructions/arithmetic.rs index e599920d3a..4ffcc9e65b 100644 --- a/crates/interpreter/src/instructions/arithmetic.rs +++ b/crates/interpreter/src/instructions/arithmetic.rs @@ -105,20 +105,32 @@ pub fn exp( /// Implements the `SIGNEXTEND` opcode as defined in the Ethereum Yellow Paper. /// /// In the yellow paper `SIGNEXTEND` is defined to take two inputs, we will call them -/// `x` and `y`, and produce one output. The first `t` bits of the output (numbering from the -/// left, starting from 0) are equal to the `t`-th bit of `y`, where `t` is equal to -/// `256 - 8(x + 1)`. The remaining bits of the output are equal to the corresponding bits of `y`. -/// Note: if `x >= 32` then the output is equal to `y` since `t <= 0`. To efficiently implement -/// this algorithm in the case `x < 32` we do the following. Let `b` be equal to the `t`-th bit -/// of `y` and let `s = 255 - t = 8x + 7` (this is effectively the same index as `t`, but -/// numbering the bits from the right instead of the left). We can create a bit mask which is all -/// zeros up to and including the `t`-th bit, and all ones afterwards by computing the quantity -/// `2^s - 1`. We can use this mask to compute the output depending on the value of `b`. +/// `x` and `y`, and produce one output. +/// +/// The first `t` bits of the output (numbering from the left, starting from 0) are +/// equal to the `t`-th bit of `y`, where `t` is equal to `256 - 8(x + 1)`. 
+/// +/// The remaining bits of the output are equal to the corresponding bits of `y`. +/// +/// **Note**: If `x >= 32` then the output is equal to `y` since `t <= 0`. +/// +/// To efficiently implement this algorithm in the case `x < 32` we do the following. +/// +/// Let `b` be equal to the `t`-th bit of `y` and let `s = 255 - t = 8x + 7` +/// (this is effectively the same index as `t`, but numbering the bits from the +/// right instead of the left). +/// +/// We can create a bit mask which is all zeros up to and including the `t`-th bit, +/// and all ones afterwards by computing the quantity `2^s - 1`. +/// +/// We can use this mask to compute the output depending on the value of `b`. +/// /// If `b == 1` then the yellow paper says the output should be all ones up to /// and including the `t`-th bit, followed by the remaining bits of `y`; this is equal to -/// `y | !mask` where `|` is the bitwise `OR` and `!` is bitwise negation. Similarly, if -/// `b == 0` then the yellow paper says the output should start with all zeros, then end with -/// bits from `b`; this is equal to `y & mask` where `&` is bitwise `AND`. +/// `y | !mask` where `|` is the bitwise `OR` and `!` is bitwise negation. +/// +/// Similarly, if `b == 0` then the yellow paper says the output should start with all zeros, +/// then end with bits from `b`; this is equal to `y & mask` where `&` is bitwise `AND`. pub fn signextend( interpreter: &mut Interpreter, _host: &mut H, diff --git a/crates/interpreter/src/instructions/bitwise.rs b/crates/interpreter/src/instructions/bitwise.rs index 2e6dc68a30..ecfa1e387a 100644 --- a/crates/interpreter/src/instructions/bitwise.rs +++ b/crates/interpreter/src/instructions/bitwise.rs @@ -174,9 +174,8 @@ pub fn sar( }; } +// TODO : Tests /* -TODO TESTS - #[cfg(test)] mod tests { use crate::instructions::bitwise::{byte, sar, shl, shr}; diff --git a/crates/interpreter/src/instructions/contract.rs b/crates/interpreter/src/instructions/contract.rs index ee29298680..31c3b87f3d 100644 --- a/crates/interpreter/src/instructions/contract.rs +++ b/crates/interpreter/src/instructions/contract.rs @@ -39,7 +39,7 @@ pub fn eofcreate( .expect("valid container") .clone(); - // resize memory and get return range. + // Resize memory and get return range. let Some(input_range) = resize_memory(interpreter, data_offset, data_size) else { return; }; @@ -53,11 +53,11 @@ pub fn eofcreate( let eof = Eof::decode(container.clone()).expect("Subcontainer is verified"); if !eof.body.is_data_filled { - // should be always false as it is verified by eof verification. + // Should be always false as it is verified by eof verification. panic!("Panic if data section is not full"); } - // deduct gas for hash that is needed to calculate address. + // Deduct gas for hash that is needed to calculate address. gas_or_fail!(interpreter, cost_per_word(container.len(), KECCAK256WORD)); let created_address = interpreter @@ -104,12 +104,12 @@ pub fn return_contract( .expect("valid container") .clone(); - // convert to EOF so we can check data section size. + // Convert to EOF so we can check data section size. 
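A self-contained sketch of the mask trick spelled out in the `signextend` doc comment above, using `primitives::U256`; it mirrors the described algorithm rather than the crate's exact code.

```rust
use primitives::U256;

/// SIGNEXTEND(x, y): sign-extend `y` from the byte at index `x` (counted from the low end).
fn signextend(x: U256, y: U256) -> U256 {
    if x >= U256::from(32) {
        return y; // t <= 0, so the output equals y
    }
    let s = 8 * (x.as_limbs()[0] as usize) + 7; // sign-bit index, counted from the right
    let mask = (U256::from(1) << s) - U256::from(1); // ones below the sign bit
    if y.bit(s) {
        y | !mask // sign bit set: fill the upper bits with ones
    } else {
        y & mask // sign bit clear: zero the upper bits
    }
}
```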
let (eof_header, _) = EofHeader::decode(&container).expect("valid EOF header"); let static_aux_size = eof_header.eof_size() - container.len(); - // important: offset must be ignored if len is zeros + // Important: Offset must be ignored if len is zeros let mut output = if aux_data_size != 0 { let aux_data_offset = as_usize_or_fail!(interpreter, aux_data_offset); resize_memory!(interpreter, aux_data_offset, aux_data_size); @@ -121,18 +121,18 @@ pub fn return_contract( container.to_vec() }; - // data_size - static_aux_size give us current data `container` size. - // and with aux_slice len we can calculate new data size. + // `data_size - static_aux_size` give us current data `container` size. + // And with `aux_slice` len we can calculate new data size. let new_data_size = eof_header.data_size as usize - static_aux_size + aux_data_size; if new_data_size > 0xFFFF { - // aux data is too big + // Aux data is too big interpreter .control .set_instruction_result(InstructionResult::EofAuxDataOverflow); return; } if new_data_size < eof_header.data_size as usize { - // aux data is too small + // Aux data is too small interpreter .control .set_instruction_result(InstructionResult::EofAuxDataTooSmall); @@ -140,7 +140,7 @@ pub fn return_contract( } let new_data_size = (new_data_size as u16).to_be_bytes(); - // set new data size in eof bytes as we know exact index. + // Set new data size in eof bytes as we know exact index. output[eof_header.data_size_raw_i()..][..2].clone_from_slice(&new_data_size); let output: Bytes = output.into(); @@ -195,7 +195,7 @@ pub fn extcall_gas_calc( ); gas!(interpreter, call_cost, None); - // 7. Calculate the gas available to callee as caller’s + // Calculate the gas available to callee as caller’s // remaining gas reduced by max(ceil(gas/64), MIN_RETAINED_GAS) (MIN_RETAINED_GAS is 5000). let gas_reduce = max(interpreter.control.gas().remaining() / 64, 5000); let gas_limit = interpreter @@ -238,7 +238,7 @@ pub fn pop_extcall_target_address( .set_instruction_result(InstructionResult::InvalidEXTCALLTarget); return None; } - // discard first 12 bytes. + // Discard first 12 bytes. Some(Address::from_word(target_address)) } @@ -248,12 +248,12 @@ pub fn extcall( ) { require_eof!(interpreter); - // pop target address + // Pop target address let Some(target_address) = pop_extcall_target_address(interpreter) else { return; }; - // input call + // Input call let Some(input) = extcall_input(interpreter) else { return; }; @@ -295,12 +295,12 @@ pub fn extdelegatecall( ) { require_eof!(interpreter); - // pop target address + // Pop target address let Some(target_address) = pop_extcall_target_address(interpreter) else { return; }; - // input call + // Input call let Some(input) = extcall_input(interpreter) else { return; }; @@ -333,12 +333,12 @@ pub fn extstaticcall( ) { require_eof!(interpreter); - // pop target address + // Pop target address let Some(target_address) = pop_extcall_target_address(interpreter) else { return; }; - // input call + // Input call let Some(input) = extcall_input(interpreter) else { return; }; @@ -406,7 +406,7 @@ pub fn create( // EIP-1014: Skinny CREATE2 let scheme = if IS_CREATE2 { popn!([salt], interpreter); - // SAFETY: len is reasonable in size as gas for it is already deducted. + // SAFETY: `len` is reasonable in size as gas for it is already deducted. 
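The EXT*CALL gas rule computed in `extcall_gas_calc` above, as a standalone sketch:

```rust
/// EXT*CALL retains max(remaining / 64, MIN_RETAINED_GAS) for the caller
/// and forwards the rest to the callee.
const MIN_RETAINED_GAS: u64 = 5_000;

fn extcall_gas_limit(remaining: u64) -> u64 {
    remaining.saturating_sub(core::cmp::max(remaining / 64, MIN_RETAINED_GAS))
}

// With 1_000_000 gas remaining: retain max(15_625, 5_000) = 15_625, forward 984_375.
```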
gas_or_fail!(interpreter, gas::create2_cost(len)); CreateScheme::Create2 { salt } } else { @@ -422,7 +422,7 @@ pub fn create( .spec_id() .is_enabled_in(SpecId::TANGERINE) { - // take remaining gas and deduce l64 part of it. + // Take remaining gas and deduce l64 part of it. gas_limit -= gas_limit / 64 } gas!(interpreter, gas_limit); @@ -446,7 +446,7 @@ pub fn call( ) { popn!([local_gas_limit, to, value], interpreter); let to = to.into_address(); - // max gas limit is not possible in real ethereum situation. + // Max gas limit is not possible in real ethereum situation. let local_gas_limit = u64::try_from(local_gas_limit).unwrap_or(u64::MAX); let has_transfer = !value.is_zero(); @@ -475,7 +475,7 @@ pub fn call( gas!(interpreter, gas_limit); - // add call stipend if there is value to be transferred. + // Add call stipend if there is value to be transferred. if has_transfer { gas_limit = gas_limit.saturating_add(gas::CALL_STIPEND); } @@ -504,7 +504,7 @@ pub fn call_code( ) { popn!([local_gas_limit, to, value], interpreter); let to = Address::from_word(B256::from(to)); - // max gas limit is not possible in real ethereum situation. + // Max gas limit is not possible in real ethereum situation. let local_gas_limit = u64::try_from(local_gas_limit).unwrap_or(u64::MAX); //pop!(interpreter, value); @@ -518,7 +518,7 @@ pub fn call_code( .set_instruction_result(InstructionResult::FatalExternalError); return; }; - // set is_empty to false as we are not creating this account. + // Set `is_empty` to false as we are not creating this account. load.is_empty = false; let Some(mut gas_limit) = calc_call_gas(interpreter, load, !value.is_zero(), local_gas_limit) else { @@ -527,7 +527,7 @@ pub fn call_code( gas!(interpreter, gas_limit); - // add call stipend if there is value to be transferred. + // Add call stipend if there is value to be transferred. if !value.is_zero() { gas_limit = gas_limit.saturating_add(gas::CALL_STIPEND); } @@ -557,7 +557,7 @@ pub fn delegate_call( check!(interpreter, HOMESTEAD); popn!([local_gas_limit, to], interpreter); let to = Address::from_word(B256::from(to)); - // max gas limit is not possible in real ethereum situation. + // Max gas limit is not possible in real ethereum situation. let local_gas_limit = u64::try_from(local_gas_limit).unwrap_or(u64::MAX); let Some((input, return_memory_offset)) = get_memory_input_and_out_ranges(interpreter) else { @@ -570,7 +570,7 @@ pub fn delegate_call( .set_instruction_result(InstructionResult::FatalExternalError); return; }; - // set is_empty to false as we are not creating this account. + // Set is_empty to false as we are not creating this account. load.is_empty = false; let Some(gas_limit) = calc_call_gas(interpreter, load, false, local_gas_limit) else { return; @@ -603,7 +603,7 @@ pub fn static_call( check!(interpreter, BYZANTIUM); popn!([local_gas_limit, to], interpreter); let to = Address::from_word(B256::from(to)); - // max gas limit is not possible in real ethereum situation. + // Max gas limit is not possible in real ethereum situation. let local_gas_limit = u64::try_from(local_gas_limit).unwrap_or(u64::MAX); let Some((input, return_memory_offset)) = get_memory_input_and_out_ranges(interpreter) else { @@ -616,7 +616,7 @@ pub fn static_call( .set_instruction_result(InstructionResult::FatalExternalError); return; }; - // set is_empty to false as we are not creating this account. + // Set `is_empty` to false as we are not creating this account. 
load.is_empty = false; let Some(gas_limit) = calc_call_gas(interpreter, load, false, local_gas_limit) else { return; diff --git a/crates/interpreter/src/instructions/contract/call_helpers.rs b/crates/interpreter/src/instructions/contract/call_helpers.rs index 320ee9faf9..760e37d9df 100644 --- a/crates/interpreter/src/instructions/contract/call_helpers.rs +++ b/crates/interpreter/src/instructions/contract/call_helpers.rs @@ -60,7 +60,7 @@ pub fn calc_call_gas( // EIP-150: Gas cost changes for IO-heavy operations let gas_limit = if interpreter.runtime_flag.spec_id().is_enabled_in(TANGERINE) { - // take l64 part of gas_limit + // Take l64 part of gas_limit min( interpreter.control.gas().remaining_63_of_64_parts(), local_gas_limit, diff --git a/crates/interpreter/src/instructions/control.rs b/crates/interpreter/src/instructions/control.rs index 1759dabda3..0d3aa6226e 100644 --- a/crates/interpreter/src/instructions/control.rs +++ b/crates/interpreter/src/instructions/control.rs @@ -48,7 +48,7 @@ pub fn rjumpv( let case = as_isize_saturated!(case); let max_index = interpreter.bytecode.read_u8() as isize; - // for number of items we are adding 1 to max_index, multiply by 2 as each offset is 2 bytes + // For number of items we are adding 1 to max_index, multiply by 2 as each offset is 2 bytes // and add 1 for max_index itself. Note that revm already incremented the instruction pointer let mut offset = (max_index + 1) * 2 + 1; @@ -108,13 +108,13 @@ pub fn callf( let idx = interpreter.bytecode.read_u16() as usize; - // get target types + // Get target types let Some(types) = interpreter.bytecode.code_section_info(idx) else { panic!("Invalid EOF in execution, expecting correct intermediate in callf") }; // Check max stack height for target code section. - // safe to subtract as max_stack_height is always more than inputs. + // Safe to subtract as max_stack_height is always more than inputs. if interpreter.stack.len() + (types.max_stack_size - types.inputs as u16) as usize > 1024 { interpreter .control @@ -122,7 +122,7 @@ pub fn callf( return; } - // push current idx and PC to the callf stack. + // Push current idx and PC to the callf stack. // PC is incremented by 2 to point to the next instruction after callf. if !(interpreter .sub_routine @@ -163,14 +163,14 @@ pub fn jumpf( let idx = interpreter.bytecode.read_u16() as usize; - // get target types + // Get target types let types = interpreter .bytecode .code_section_info(idx) .expect("Invalid code section index"); // Check max stack height for target code section. - // safe to subtract as max_stack_height is always more than inputs. + // Safe to subtract as max_stack_height is always more than inputs. 
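The legacy call-gas cap applied in `calc_call_gas` above, shown as a standalone sketch of the EIP-150 63/64 rule:

```rust
/// EIP-150: a call may forward at most 63/64 of the gas remaining after the
/// call's upfront cost; the requested limit is honored only below that cap.
fn forwarded_gas(remaining_after_call_cost: u64, requested: u64) -> u64 {
    let all_but_one_64th = remaining_after_call_cost - remaining_after_call_cost / 64;
    core::cmp::min(all_but_one_64th, requested)
}

// With 640_000 gas remaining and a very large requested limit, the callee gets 630_000.
```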
if interpreter.stack.len() + (types.max_stack_size - types.inputs as u16) as usize > 1024 { interpreter .control @@ -199,11 +199,11 @@ fn return_inner( interpreter: &mut Interpreter, instruction_result: InstructionResult, ) { - // zero gas cost + // Zero gas cost // gas!(interpreter, gas::ZERO) popn!([offset, len], interpreter); let len = as_usize_or_fail!(interpreter, len); - // important: offset must be ignored if len is zeros + // Important: Offset must be ignored if len is zeros let mut output = Bytes::default(); if len != 0 { let offset = as_usize_or_fail!(interpreter, offset); @@ -270,8 +270,8 @@ pub fn unknown( .set_instruction_result(InstructionResult::OpcodeNotFound); } +// TODO : Test /* -TODO TEST #[cfg(test)] mod test { use super::*; @@ -313,10 +313,10 @@ mod test { interp.gas = Gas::new(10000); interp.spec_id = SpecId::PRAGUE; - // dont jump + // Dont jump interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 3); - // jumps to last opcode + // Jumps to last opcode interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 7); } @@ -347,30 +347,30 @@ mod test { interp.gas = Gas::new(1000); interp.spec_id = SpecId::PRAGUE; - // more then max_index + // More then max_index interp.stack.push(U256::from(10)).unwrap(); interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 6); - // cleanup + // Cleanup interp.step(&table, &mut host); interp.step(&table, &mut host); interp.step(&table, &mut host); interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 0); - // jump to first index of vtable + // Jump to first index of vtable interp.stack.push(U256::from(0)).unwrap(); interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 7); - // cleanup + // Cleanup interp.step(&table, &mut host); interp.step(&table, &mut host); interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 0); - // jump to second index of vtable + // Jump to second index of vtable interp.stack.push(U256::from(1)).unwrap(); interp.step(&table, &mut host); assert_eq!(interp.program_counter(), 8); @@ -477,7 +477,7 @@ mod test { // CALLF interp.step(&table, &mut host); - // stack overflow + // Stack overflow assert_eq!(interp.instruction_result, InstructionResult::StackOverflow); } @@ -515,7 +515,7 @@ mod test { // JUMPF interp.step(&table, &mut host); - // stack overflow + // Stack overflow assert_eq!(interp.instruction_result, InstructionResult::StackOverflow); } } diff --git a/crates/interpreter/src/instructions/data.rs b/crates/interpreter/src/instructions/data.rs index 38426b3fd1..ffaa0d8a77 100644 --- a/crates/interpreter/src/instructions/data.rs +++ b/crates/interpreter/src/instructions/data.rs @@ -42,7 +42,7 @@ pub fn data_loadn( push!(interpreter, B256::new(word).into()); - // add +2 to the instruction pointer to skip the offset + // Add +2 to the instruction pointer to skip the offset interpreter.bytecode.relative_jump(2); } @@ -64,13 +64,13 @@ pub fn data_copy( gas!(interpreter, VERYLOW); popn!([mem_offset, offset, size], interpreter); - // sizes more than u64::MAX will spend all the gas in memory resize. + // Sizes more than u64::MAX will spend all the gas in memory resize. 
let size = as_usize_or_fail!(interpreter, size); - // size of zero should not change the memory + // Size of zero should not change the memory if size == 0 { return; } - // fail if mem offset is big as it will spend all the gas + // Fail if mem offset is big as it will spend all the gas let mem_offset = as_usize_or_fail!(interpreter, mem_offset); resize_memory!(interpreter, mem_offset, size); @@ -79,11 +79,12 @@ pub fn data_copy( let offset = as_usize_saturated!(offset); let data = interpreter.bytecode.data(); - // set data from the eof to the shared memory. Padded it with zeros. + // Set data from the eof to the shared memory. Padded it with zeros. interpreter.memory.set_data(mem_offset, offset, size, data); } + +// TODO : Test /* -TODO test #[cfg(test)] mod test { use bytecode::{Bytecode, Eof}; diff --git a/crates/interpreter/src/instructions/host.rs b/crates/interpreter/src/instructions/host.rs index f7128d7f80..b3b8a69d3e 100644 --- a/crates/interpreter/src/instructions/host.rs +++ b/crates/interpreter/src/instructions/host.rs @@ -133,7 +133,7 @@ pub fn extcodecopy( let code_offset = min(as_usize_saturated!(code_offset), code.len()); resize_memory!(interpreter, memory_offset, len); - // Note: this can't panic because we resized memory to fit. + // Note: This can't panic because we resized memory to fit. interpreter .memory .set_data(memory_offset, code_offset, len, &code); diff --git a/crates/interpreter/src/instructions/i256.rs b/crates/interpreter/src/instructions/i256.rs index 92145b4bdb..cd5034b539 100644 --- a/crates/interpreter/src/instructions/i256.rs +++ b/crates/interpreter/src/instructions/i256.rs @@ -4,7 +4,7 @@ use primitives::U256; #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(i8)] pub enum Sign { - // same as `cmp::Ordering` + // Same as `cmp::Ordering` Minus = -1, Zero = 0, #[allow(dead_code)] // "constructed" with `mem::transmute` in `i256_sign` below @@ -69,7 +69,7 @@ pub fn i256_cmp(first: &U256, second: &U256) -> Ordering { let first_sign = i256_sign(first); let second_sign = i256_sign(second); match first_sign.cmp(&second_sign) { - // note: adding `if first_sign != Sign::Zero` to short circuit zero comparisons performs + // Note: Adding `if first_sign != Sign::Zero` to short circuit zero comparisons performs // slower on average, as of #582 Ordering::Equal => first.cmp(second), o => o, @@ -88,14 +88,14 @@ pub fn i256_div(mut first: U256, mut second: U256) -> U256 { return two_compl(MIN_NEGATIVE_VALUE); } - // necessary overflow checks are done above, perform the division + // Necessary overflow checks are done above, perform the division let mut d = first / second; - // set sign bit to zero + // Set sign bit to zero u256_remove_sign(&mut d); - // two's complement only if the signs are different - // note: this condition has better codegen than an exhaustive match, as of #582 + // Two's complement only if the signs are different + // Note: This condition has better codegen than an exhaustive match, as of #582 if (first_sign == Sign::Minus && second_sign != Sign::Minus) || (second_sign == Sign::Minus && first_sign != Sign::Minus) { @@ -119,7 +119,7 @@ pub fn i256_mod(mut first: U256, mut second: U256) -> U256 { let mut r = first % second; - // set sign bit to zero + // Set sign bit to zero u256_remove_sign(&mut r); if first_sign == Sign::Minus { diff --git a/crates/interpreter/src/instructions/macros.rs b/crates/interpreter/src/instructions/macros.rs index 0f78221037..c290ba797b 100644 --- a/crates/interpreter/src/instructions/macros.rs 
+++ b/crates/interpreter/src/instructions/macros.rs @@ -185,7 +185,7 @@ macro_rules! as_usize_saturated { macro_rules! as_isize_saturated { ($v:expr) => { // `isize_try_from(u64::MAX)`` will fail and return isize::MAX - // this is expected behavior as we are saturating the value. + // This is expected behavior as we are saturating the value. isize::try_from($crate::as_u64_saturated!($v)).unwrap_or(isize::MAX) }; } diff --git a/crates/interpreter/src/instructions/memory.rs b/crates/interpreter/src/instructions/memory.rs index ccad3632e6..febfa77c99 100644 --- a/crates/interpreter/src/instructions/memory.rs +++ b/crates/interpreter/src/instructions/memory.rs @@ -56,9 +56,9 @@ pub fn mcopy( check!(interpreter, CANCUN); popn!([dst, src, len], interpreter); - // into usize or fail + // Into usize or fail let len = as_usize_or_fail!(interpreter, len); - // deduce gas + // Deduce gas gas_or_fail!(interpreter, gas::copy_cost_verylow(len)); if len == 0 { return; @@ -66,8 +66,8 @@ pub fn mcopy( let dst = as_usize_or_fail!(interpreter, dst); let src = as_usize_or_fail!(interpreter, src); - // resize memory + // Resize memory resize_memory!(interpreter, max(dst, src), len); - // copy memory in place + // Copy memory in place interpreter.memory.copy(dst, src, len); } diff --git a/crates/interpreter/src/instructions/stack.rs b/crates/interpreter/src/instructions/stack.rs index 6ac0e4977c..cd881fdd8a 100644 --- a/crates/interpreter/src/instructions/stack.rs +++ b/crates/interpreter/src/instructions/stack.rs @@ -14,7 +14,7 @@ pub fn pop( _host: &mut H, ) { gas!(interpreter, gas::BASE); - // can ignore return. as relative N jump is safe operation. + // Can ignore return. as relative N jump is safe operation. popn!([_i], interpreter); } @@ -35,14 +35,14 @@ pub fn push( _host: &mut H, ) { gas!(interpreter, gas::VERYLOW); - // TODO check performance degradation. + // TODO : Check performance degradation. push!(interpreter, U256::ZERO); popn_top!([], top, interpreter); let imm = interpreter.bytecode.read_slice(N); cast_slice_to_u256(imm, top); - // can ignore return. as relative N jump is safe operation + // Can ignore return. as relative N jump is safe operation interpreter.bytecode.relative_jump(N as isize); } @@ -117,9 +117,9 @@ pub fn exchange( } interpreter.bytecode.relative_jump(1); } -/* -TODO TESTS +// TODO : Tests +/* #[cfg(test)] mod test { diff --git a/crates/interpreter/src/instructions/system.rs b/crates/interpreter/src/instructions/system.rs index f35711e3a8..bde1f893d7 100644 --- a/crates/interpreter/src/instructions/system.rs +++ b/crates/interpreter/src/instructions/system.rs @@ -68,7 +68,7 @@ pub fn codecopy( }; let code_offset = as_usize_saturated!(code_offset); - // Note: this can't panic because we resized memory to fit. + // Note: This can't panic because we resized memory to fit. interpreter.memory.set_data( memory_offset, code_offset, @@ -90,7 +90,7 @@ pub fn calldataload( let input_len = input.len(); if offset < input_len { let count = 32.min(input_len - offset); - // SAFETY: count is bounded by the calldata length. + // SAFETY: `count` is bounded by the calldata length. // This is `word[..count].copy_from_slice(input[offset..offset + count])`, written using // raw pointers as apparently the compiler cannot optimize the slice version, and using // `get_unchecked` twice is uglier. @@ -127,7 +127,7 @@ pub fn calldatacopy( }; let data_offset = as_usize_saturated!(data_offset); - // Note: this can't panic because we resized memory to fit. 
+ // Note: This can't panic because we resized memory to fit. interpreter .memory .set_data(memory_offset, data_offset, len, interpreter.input.input()); @@ -171,7 +171,7 @@ pub fn returndatacopy( return; }; - // Note: this can't panic because we resized memory to fit. + // Note: This can't panic because we resized memory to fit. interpreter.memory.set_data( memory_offset, data_offset, @@ -223,7 +223,7 @@ pub fn memory_resize( memory_offset: U256, len: usize, ) -> Option { - // safe to cast usize to u64 + // Safe to cast usize to u64 gas_or_fail!(interpreter, gas::copy_cost_verylow(len), None); if len == 0 { return None; @@ -234,8 +234,8 @@ pub fn memory_resize( Some(memory_offset) } +// TODO : Tests /* -TODO tests #[cfg(test)] mod test { use super::*; diff --git a/crates/interpreter/src/instructions/utility.rs b/crates/interpreter/src/instructions/utility.rs index f6273c8f2d..9fe9683864 100644 --- a/crates/interpreter/src/instructions/utility.rs +++ b/crates/interpreter/src/instructions/utility.rs @@ -17,7 +17,7 @@ pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { let n_words = (slice.len() + 31) / 32; - // SAFETY: length checked above. + // SAFETY: Length checked above. unsafe { //let dst = self.data.as_mut_ptr().add(self.data.len()).cast::(); //self.data.set_len(new_len); @@ -25,11 +25,11 @@ pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { let mut i = 0; - // write full words + // Write full words let words = slice.chunks_exact(32); let partial_last_word = words.remainder(); for word in words { - // Note: we unroll `U256::from_be_bytes` here to write directly into the buffer, + // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer, // instead of creating a 32 byte array on the stack and then copying it over. for l in word.rchunks_exact(8) { dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap())); @@ -41,7 +41,7 @@ pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { return; } - // write limbs of partial last word + // Write limbs of partial last word let limbs = partial_last_word.rchunks_exact(8); let partial_last_limb = limbs.remainder(); for l in limbs { @@ -49,7 +49,7 @@ pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { i += 1; } - // write partial last limb by padding with zeros + // Write partial last limb by padding with zeros if !partial_last_limb.is_empty() { let mut tmp = [0u8; 8]; tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb); @@ -59,7 +59,7 @@ pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { debug_assert_eq!((i + 3) / 4, n_words, "wrote too much"); - // zero out upper bytes of last word + // Zero out upper bytes of last word let m = i % 4; // 32 / 8 if m != 0 { dst.add(i).write_bytes(0, 4 - m); diff --git a/crates/interpreter/src/interpreter.rs b/crates/interpreter/src/interpreter.rs index d7d38c05b9..c43216df3b 100644 --- a/crates/interpreter/src/interpreter.rs +++ b/crates/interpreter/src/interpreter.rs @@ -127,7 +127,7 @@ where } } - // TODO make impl a associate type. With this associate type we can implement + // TODO : Make impl a associate type. With this associate type we can implement. // InspectorInstructionProvider over generic type. fn table(&mut self) -> &[impl CustomInstruction; 256] { self.instruction_table.as_ref() @@ -166,7 +166,7 @@ impl Interpreter { // it will do noop and just stop execution of this contract self.bytecode.relative_jump(1); - // execute instruction. + // Execute instruction. 
instruction_table[opcode as usize].exec(self, host) } @@ -182,7 +182,7 @@ impl Interpreter { self.control .set_next_action(InterpreterAction::None, InstructionResult::Continue); - // main loop + // Main loop while self.control.instruction_result().is_continue() { self.step(instruction_table, host); } @@ -196,7 +196,7 @@ impl Interpreter { InterpreterAction::Return { result: InterpreterResult { result: self.control.instruction_result(), - // return empty bytecode + // Return empty bytecode output: Bytes::new(), gas: *self.control.gas(), }, diff --git a/crates/interpreter/src/interpreter/ext_bytecode.rs b/crates/interpreter/src/interpreter/ext_bytecode.rs index b19f25d159..0bed1ecafc 100644 --- a/crates/interpreter/src/interpreter/ext_bytecode.rs +++ b/crates/interpreter/src/interpreter/ext_bytecode.rs @@ -97,7 +97,7 @@ impl Immediates for ExtBytecode { unsafe { read_i16( self.instruction_pointer - // offset for max_index that is one byte + // Offset for max_index that is one byte .offset(offset), ) } @@ -108,7 +108,7 @@ impl Immediates for ExtBytecode { unsafe { read_u16( self.instruction_pointer - // offset for max_index that is one byte + // Offset for max_index that is one byte .offset(offset), ) } diff --git a/crates/interpreter/src/interpreter/loop_control.rs b/crates/interpreter/src/interpreter/loop_control.rs index 52c1fd00cb..7ff91285cd 100644 --- a/crates/interpreter/src/interpreter/loop_control.rs +++ b/crates/interpreter/src/interpreter/loop_control.rs @@ -5,13 +5,17 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct LoopControl { - /// The execution control flag. If this is not set to `Continue`, the interpreter will stop - /// execution. + /// The execution control flag. + /// + /// If this is not set to [`Continue`][InstructionResult::Continue], the interpreter will stop execution. pub instruction_result: InstructionResult, /// Actions that the EVM should do. /// - /// Set inside CALL or CREATE instructions and RETURN or REVERT instructions. Additionally those instructions will set - /// InstructionResult to CallOrCreate/Return/Revert so we know the reason. + /// Set inside `CALL` or `CREATE` instructions and `RETURN` or `REVERT` instructions. + /// + /// Additionally those instructions will set [`InstructionResult`] to + /// [`CallOrCreate`][InstructionResult::CallOrCreate]/[`Return`][InstructionResult::Return]/[`Revert`][InstructionResult::Revert] + /// so we know the reason. 
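The `LoopControl` docs and the `run` hunk above both revolve around the same pattern: keep stepping while the control flag says `Continue`. A stripped-down model of that dispatch loop may help; all names below are illustrative and deliberately simpler than revm's real types.

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
enum InstructionResult {
    Continue,
    Stop,
}

struct Interp {
    pc: usize,
    result: InstructionResult,
}

/// One instruction: may advance the pc or change the control flag.
type Instruction = fn(&mut Interp);

fn nop(_i: &mut Interp) {}

fn stop(i: &mut Interp) {
    i.result = InstructionResult::Stop;
}

fn step(interp: &mut Interp, table: &[Instruction; 256], code: &[u8]) {
    // Reading past the end of the bytecode behaves like STOP (opcode 0x00).
    let opcode = code.get(interp.pc).copied().unwrap_or(0x00);
    interp.pc += 1;
    table[opcode as usize](interp);
}

fn run(interp: &mut Interp, table: &[Instruction; 256], code: &[u8]) {
    // Main loop: keep stepping while the control flag says `Continue`.
    while interp.result == InstructionResult::Continue {
        step(interp, table, code);
    }
}

fn main() {
    let mut table: [Instruction; 256] = [nop; 256];
    table[0x00] = stop; // STOP
    let code = [0x5b, 0x5b, 0x00]; // two no-ops, then STOP
    let mut interp = Interp { pc: 0, result: InstructionResult::Continue };
    run(&mut interp, &table, &code);
    assert_eq!(interp.pc, 3);
}
```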
pub next_action: InterpreterAction, pub gas: Gas, } diff --git a/crates/interpreter/src/interpreter/serde.rs b/crates/interpreter/src/interpreter/serde.rs index cf980d2b93..ebbfd19956 100644 --- a/crates/interpreter/src/interpreter/serde.rs +++ b/crates/interpreter/src/interpreter/serde.rs @@ -94,7 +94,7 @@ // return Err(serde::de::Error::custom("program_counter out of bounds")); // } -// // SAFETY: range of program_counter checked above +// // SAFETY: Range of program_counter checked above // let instruction_pointer = unsafe { bytecode.as_ptr().add(program_counter) }; // Ok(Interpreter { diff --git a/crates/interpreter/src/interpreter/shared_memory.rs b/crates/interpreter/src/interpreter/shared_memory.rs index 324d4acd22..99f7a67799 100644 --- a/crates/interpreter/src/interpreter/shared_memory.rs +++ b/crates/interpreter/src/interpreter/shared_memory.rs @@ -158,7 +158,7 @@ impl SharedMemory { pub fn free_context(&mut self) { if let Some(old_checkpoint) = self.checkpoints.pop() { self.last_checkpoint = self.checkpoints.last().cloned().unwrap_or_default(); - // SAFETY: buffer length is less than or equal `old_checkpoint` + // SAFETY: `buffer` length is less than or equal `old_checkpoint` unsafe { self.buffer.set_len(old_checkpoint) }; } } @@ -307,7 +307,7 @@ impl SharedMemory { #[cfg_attr(debug_assertions, track_caller)] pub fn set_data(&mut self, memory_offset: usize, data_offset: usize, len: usize, data: &[u8]) { if data_offset >= data.len() { - // nullify all memory slots + // Nullify all memory slots self.slice_mut(memory_offset, len).fill(0); return; } @@ -318,7 +318,7 @@ impl SharedMemory { self.slice_mut(memory_offset, data_len) .copy_from_slice(data); - // nullify rest of memory slots + // Nullify rest of memory slots // SAFETY: Memory is assumed to be valid, and it is commented where this assumption is made. self.slice_mut(memory_offset + data_len, len - data_len) .fill(0); @@ -338,7 +338,7 @@ impl SharedMemory { /// Returns a reference to the memory of the current context, the active memory. #[inline] pub fn context_memory(&self) -> &[u8] { - // SAFETY: access bounded by buffer length + // SAFETY: Access bounded by buffer length unsafe { self.buffer .get_unchecked(self.last_checkpoint..self.buffer.len()) @@ -349,7 +349,7 @@ impl SharedMemory { #[inline] pub fn context_memory_mut(&mut self) -> &mut [u8] { let buf_len = self.buffer.len(); - // SAFETY: access bounded by buffer length + // SAFETY: Access bounded by buffer length unsafe { self.buffer.get_unchecked_mut(self.last_checkpoint..buf_len) } } } @@ -405,7 +405,7 @@ mod tests { assert_eq!(shared_memory.last_checkpoint, 96); assert_eq!(shared_memory.len(), 0); - // free contexts + // Free contexts shared_memory.free_context(); assert_eq!(shared_memory.buffer.len(), 96); assert_eq!(shared_memory.checkpoints.len(), 2); diff --git a/crates/interpreter/src/interpreter/stack.rs b/crates/interpreter/src/interpreter/stack.rs index 9ae250aeaa..db4d1ff731 100644 --- a/crates/interpreter/src/interpreter/stack.rs +++ b/crates/interpreter/src/interpreter/stack.rs @@ -57,7 +57,7 @@ impl StackTrait for Stack { if self.len() < N { return None; } - // SAFETY: stack length is checked above. + // SAFETY: Stack length is checked above. Some(unsafe { self.popn::() }) } @@ -66,7 +66,7 @@ impl StackTrait for Stack { if self.len() < POPN + 1 { return None; } - // SAFETY: stack length is checked above. + // SAFETY: Stack length is checked above. 
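The `set_data` hunks above, and the recurring "this can't panic because we resized memory to fit" notes, all rely on the same copy-then-zero-pad contract. Below is a safe, simplified model of that behaviour, assuming the destination window is already in bounds; it is a sketch, not revm's `SharedMemory` implementation.

```rust
/// Copies up to `len` bytes from `data[data_offset..]` into
/// `memory[memory_offset..memory_offset + len]`, zero-filling whatever the
/// source cannot cover. Assumes memory was already resized to fit the window.
fn set_data(memory: &mut [u8], memory_offset: usize, data_offset: usize, len: usize, data: &[u8]) {
    let dst = &mut memory[memory_offset..memory_offset + len];
    if data_offset >= data.len() {
        // Nothing to copy: nullify all memory slots in the window.
        dst.fill(0);
        return;
    }
    let available = len.min(data.len() - data_offset);
    dst[..available].copy_from_slice(&data[data_offset..data_offset + available]);
    // Nullify the rest of the window.
    dst[available..].fill(0);
}

fn main() {
    let mut memory = [0xFFu8; 8];
    set_data(&mut memory, 0, 2, 6, &[1, 2, 3, 4]);
    assert_eq!(memory, [3, 4, 0, 0, 0, 0, 0xFF, 0xFF]);
}
```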
Some(unsafe { self.popn_top::() }) } @@ -88,7 +88,7 @@ impl Stack { #[inline] pub fn new() -> Self { Self { - // SAFETY: expansion functions assume that capacity is `STACK_LIMIT`. + // SAFETY: Expansion functions assume that capacity is `STACK_LIMIT`. data: Vec::with_capacity(STACK_LIMIT), } } @@ -228,7 +228,7 @@ impl Stack { if len < n || len + 1 > STACK_LIMIT { false } else { - // SAFETY: check for out of bounds is done above and it makes this safe to do. + // SAFETY: Check for out of bounds is done above and it makes this safe to do. unsafe { let ptr = self.data.as_mut_ptr().add(len); ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1); @@ -267,7 +267,7 @@ impl Stack { } // SAFETY: `n` and `n_m` are checked to be within bounds, and they don't overlap. unsafe { - // NOTE: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap` + // Note: `ptr::swap_nonoverlapping` is more efficient than `slice::swap` or `ptr::swap` // because it operates under the assumption that the pointers do not overlap, // eliminating an intemediate copy, // which is a condition we know to be true in this context. @@ -291,18 +291,18 @@ impl Stack { return Err(InstructionResult::StackOverflow); } - // SAFETY: length checked above. + // SAFETY: Length checked above. unsafe { let dst = self.data.as_mut_ptr().add(self.data.len()).cast::(); self.data.set_len(new_len); let mut i = 0; - // write full words + // Write full words let words = slice.chunks_exact(32); let partial_last_word = words.remainder(); for word in words { - // Note: we unroll `U256::from_be_bytes` here to write directly into the buffer, + // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer, // instead of creating a 32 byte array on the stack and then copying it over. for l in word.rchunks_exact(8) { dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap())); @@ -314,7 +314,7 @@ impl Stack { return Ok(()); } - // write limbs of partial last word + // Write limbs of partial last word let limbs = partial_last_word.rchunks_exact(8); let partial_last_limb = limbs.remainder(); for l in limbs { @@ -322,7 +322,7 @@ impl Stack { i += 1; } - // write partial last limb by padding with zeros + // Write partial last limb by padding with zeros if !partial_last_limb.is_empty() { let mut tmp = [0u8; 8]; tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb); @@ -332,7 +332,7 @@ impl Stack { debug_assert_eq!((i + 3) / 4, n_words, "wrote too much"); - // zero out upper bytes of last word + // Zero out upper bytes of last word let m = i % 4; // 32 / 8 if m != 0 { dst.add(i).write_bytes(0, 4 - m); @@ -382,7 +382,7 @@ mod tests { fn run(f: impl FnOnce(&mut Stack)) { let mut stack = Stack::new(); - // fill capacity with non-zero values + // Fill capacity with non-zero values unsafe { stack.data.set_len(STACK_LIMIT); stack.data.fill(U256::MAX); @@ -393,13 +393,13 @@ mod tests { #[test] fn push_slices() { - // no-op + // No-op run(|stack| { stack.push_slice(b"").unwrap(); assert_eq!(stack.data, []); }); - // one word + // One word run(|stack| { stack.push_slice(&[42]).unwrap(); assert_eq!(stack.data, [U256::from(42)]); @@ -411,7 +411,7 @@ mod tests { assert_eq!(stack.data, [U256::from(n)]); }); - // more than one word + // More than one word run(|stack| { let b = [U256::from(n).to_be_bytes::<32>(); 2].concat(); stack.push_slice(&b).unwrap(); diff --git a/crates/interpreter/src/interpreter/subroutine_stack.rs b/crates/interpreter/src/interpreter/subroutine_stack.rs index f6b3470041..5ea9acd787 100644 --- 
a/crates/interpreter/src/interpreter/subroutine_stack.rs +++ b/crates/interpreter/src/interpreter/subroutine_stack.rs @@ -2,7 +2,8 @@ use std::vec::Vec; use crate::interpreter_types::SubRoutineStack; -/// Function return frame. +/// Function(Sub Routine) return frame in eof +/// /// Needed information for returning from a function. #[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/interpreter/src/interpreter_action.rs b/crates/interpreter/src/interpreter_action.rs index eca98dc4e7..9fb4a91b36 100644 --- a/crates/interpreter/src/interpreter_action.rs +++ b/crates/interpreter/src/interpreter_action.rs @@ -16,12 +16,12 @@ use std::boxed::Box; #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum FrameInput { - /// CALL, CALLCODE, DELEGATECALL, STATICCALL - /// or EOF EXT*CALL instruction called. + /// `CALL`, `CALLCODE`, `DELEGATECALL`, `STATICCALL` + /// or EOF `EXTCALL`, `EXTDELEGATECALL`, `EXTSTATICCALL` instruction called. Call(Box), - /// CREATE or CREATE2 instruction called. + /// `CREATE` or `CREATE2` instruction called. Create(Box), - /// EOF CREATE instruction called. + /// EOF `CREATE` instruction called. EOFCreate(Box), } @@ -44,32 +44,34 @@ pub enum InterpreterAction { } impl InterpreterAction { - /// Returns true if action is call. + /// Returns `true` if action is call. pub fn is_call(&self) -> bool { matches!(self, InterpreterAction::NewFrame(FrameInput::Call(..))) } - /// Returns true if action is create. + /// Returns `true` if action is create. pub fn is_create(&self) -> bool { matches!(self, InterpreterAction::NewFrame(FrameInput::Create(..))) } - /// Returns true if action is return. + /// Returns `true` if action is return. pub fn is_return(&self) -> bool { matches!(self, InterpreterAction::Return { .. }) } - /// Returns true if action is none. + /// Returns `true` if action is none. pub fn is_none(&self) -> bool { matches!(self, InterpreterAction::None) } - /// Returns true if action is some. + /// Returns `true` if action is some. pub fn is_some(&self) -> bool { !self.is_none() } - /// Returns result if action is return. + /// Returns [`InterpreterResult`] if action is return. + /// + /// Else it returns [None]. pub fn into_result_return(self) -> Option { match self { InterpreterAction::Return { result } => Some(result), diff --git a/crates/interpreter/src/interpreter_action/call_inputs.rs b/crates/interpreter/src/interpreter_action/call_inputs.rs index 3976e80be3..2b6761f050 100644 --- a/crates/interpreter/src/interpreter_action/call_inputs.rs +++ b/crates/interpreter/src/interpreter_action/call_inputs.rs @@ -27,7 +27,7 @@ pub struct CallInputs { pub caller: Address, /// Call value. /// - /// NOTE: This value may not necessarily be transferred from caller to callee, see [`CallValue`]. + /// **Note**: This value may not necessarily be transferred from caller to callee, see [`CallValue`]. /// /// Previously `transfer.value` or `context.apparent_value`. pub value: CallValue, @@ -82,7 +82,7 @@ impl CallInputs { /// Returns the call value, regardless of the transfer value type. /// - /// NOTE: this value may not necessarily be transferred from caller to callee, see [`CallValue`]. + /// **Note**: This value may not necessarily be transferred from caller to callee, see [`CallValue`]. 
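The `CallInputs::value` note above distinguishes value that is actually moved between accounts from value that is only reported to the callee (the DELEGATECALL-style case). A hedged sketch of that distinction, using `u128` instead of `U256` for brevity; the variant and method names are illustrative.

```rust
/// Call value semantics: either really transferred, or only apparent.
#[derive(Clone, Copy, Debug)]
enum CallValue {
    /// Value moved from caller to callee balance.
    Transfer(u128),
    /// Value only observable via CALLVALUE; no balance change happens.
    Apparent(u128),
}

impl CallValue {
    /// Returns the numeric value regardless of the variant, like the
    /// `call_value` accessor documented above.
    fn get(self) -> u128 {
        match self {
            CallValue::Transfer(v) | CallValue::Apparent(v) => v,
        }
    }

    /// Value that should actually be debited/credited; zero for apparent value.
    fn transferred(self) -> u128 {
        match self {
            CallValue::Transfer(v) => v,
            CallValue::Apparent(_) => 0,
        }
    }
}

fn main() {
    assert_eq!(CallValue::Apparent(7).get(), 7);
    assert_eq!(CallValue::Apparent(7).transferred(), 0);
}
```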
#[inline] pub const fn call_value(&self) -> U256 { self.value.get() diff --git a/crates/interpreter/src/interpreter_action/call_outcome.rs b/crates/interpreter/src/interpreter_action/call_outcome.rs index 3fc9f4b6b2..cd2471f2cc 100644 --- a/crates/interpreter/src/interpreter_action/call_outcome.rs +++ b/crates/interpreter/src/interpreter_action/call_outcome.rs @@ -19,9 +19,9 @@ pub struct CallOutcome { } impl CallOutcome { - /// Constructs a new `CallOutcome`. + /// Constructs a new [`CallOutcome`]. /// - /// Creates an instance of `CallOutcome` with the given interpreter result and memory offset. + /// Creates an instance of [`CallOutcome`] with the given interpreter result and memory offset. /// /// # Arguments /// @@ -40,7 +40,7 @@ impl CallOutcome { /// /// # Returns /// - /// A reference to the `InstructionResult`. + /// A reference to the [`InstructionResult`]. pub fn instruction_result(&self) -> &InstructionResult { &self.result.result } @@ -51,7 +51,7 @@ impl CallOutcome { /// /// # Returns /// - /// An instance of `Gas` representing the gas usage. + /// An instance of [`Gas`] representing the gas usage. pub fn gas(&self) -> Gas { self.result.gas } @@ -62,7 +62,7 @@ impl CallOutcome { /// /// # Returns /// - /// A reference to the output data as `Bytes`. + /// A reference to the output data as [`Bytes`]. pub fn output(&self) -> &Bytes { &self.result.output } @@ -73,7 +73,7 @@ impl CallOutcome { /// /// # Returns /// - /// The starting index of the memory offset as `usize`. + /// The starting index of the memory offset as [`usize`]. pub fn memory_start(&self) -> usize { self.memory_offset.start } @@ -84,7 +84,7 @@ impl CallOutcome { /// /// # Returns /// - /// The length of the memory range as `usize`. + /// The length of the memory range as [`usize`]. pub fn memory_length(&self) -> usize { self.memory_offset.len() } diff --git a/crates/interpreter/src/interpreter_action/create_inputs.rs b/crates/interpreter/src/interpreter_action/create_inputs.rs index 241facba6d..8d4d650477 100644 --- a/crates/interpreter/src/interpreter_action/create_inputs.rs +++ b/crates/interpreter/src/interpreter_action/create_inputs.rs @@ -1,19 +1,19 @@ use context_interface::CreateScheme; use primitives::{Address, Bytes, U256}; -/// Inputs for a create call. +/// Inputs for a create call #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CreateInputs { - /// Caller address of the EVM. + /// Caller address of the EVM pub caller: Address, - /// The create scheme. + /// The create scheme pub scheme: CreateScheme, - /// The value to transfer. + /// The value to transfer pub value: U256, - /// The init code of the contract. + /// The init code of the contract pub init_code: Bytes, - /// The gas limit of the call. + /// The gas limit of the call pub gas_limit: u64, } diff --git a/crates/interpreter/src/interpreter_action/create_outcome.rs b/crates/interpreter/src/interpreter_action/create_outcome.rs index 76d4b44ddb..11d7204e66 100644 --- a/crates/interpreter/src/interpreter_action/create_outcome.rs +++ b/crates/interpreter/src/interpreter_action/create_outcome.rs @@ -4,66 +4,71 @@ use primitives::{Address, Bytes}; /// Represents the outcome of a create operation in an interpreter. /// /// This struct holds the result of the operation along with an optional address. +/// /// It provides methods to determine the next action based on the result of the operation. 
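`memory_start` and `memory_length` above describe where a call's return data lands in the caller's memory. The sketch below assumes the usual EVM rule that the output is truncated to the reserved region (anything longer stays available to RETURNDATACOPY); the function name is hypothetical.

```rust
/// Writes `output` into the caller's memory window, copying at most
/// `memory_len` bytes.
fn insert_call_output(memory: &mut [u8], memory_start: usize, memory_len: usize, output: &[u8]) {
    let n = memory_len.min(output.len());
    memory[memory_start..memory_start + n].copy_from_slice(&output[..n]);
}

fn main() {
    let mut memory = [0u8; 4];
    insert_call_output(&mut memory, 1, 2, &[9, 9, 9]);
    assert_eq!(memory, [0, 9, 9, 0]);
}
```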
#[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct CreateOutcome { - // The result of the interpreter operation. + /// The result of the interpreter operation pub result: InterpreterResult, - // An optional address associated with the create operation. + /// An optional address associated with the create operation pub address: Option<Address>,
} impl CreateOutcome { - /// Constructs a new `CreateOutcome`. + /// Constructs a new [`CreateOutcome`]. /// /// # Arguments /// - /// * `result` - An `InterpreterResult` representing the result of the interpreter operation. - /// * `address` - An optional `Address` associated with the create operation. + /// * `result` - An [`InterpreterResult`] representing the result of the interpreter operation. + /// * `address` - An optional [`Address`] associated with the create operation. /// /// # Returns /// - /// A new `CreateOutcome` instance. + /// A new [`CreateOutcome`] instance. pub fn new(result: InterpreterResult, address: Option<Address>
) -> Self { Self { result, address } } - /// Retrieves a reference to the `InstructionResult` from the `InterpreterResult`. + /// Retrieves a reference to the [`InstructionResult`] from the [`InterpreterResult`]. + /// + /// This method provides access to the [`InstructionResult`] which represents the + /// outcome of the instruction execution. /// - /// This method provides access to the `InstructionResult` which represents the - /// outcome of the instruction execution. It encapsulates the result information - /// such as whether the instruction was executed successfully, resulted in a revert, - /// or encountered a fatal error. + /// It encapsulates the result information such as whether the instruction was + /// executed successfully, resulted in a revert, or encountered a fatal error. /// /// # Returns /// - /// A reference to the `InstructionResult`. + /// A reference to the [`InstructionResult`]. pub fn instruction_result(&self) -> &InstructionResult { &self.result.result } - /// Retrieves a reference to the output bytes from the `InterpreterResult`. + /// Retrieves a reference to the output bytes from the [`InterpreterResult`]. /// - /// This method returns the output of the interpreted operation. The output is - /// typically used when the operation successfully completes and returns data. + /// This method returns the output of the interpreted operation. + /// + /// The output is typically used when the operation successfully completes and + /// returns data. /// /// # Returns /// - /// A reference to the output `Bytes`. + /// A reference to the output [`Bytes`]. pub fn output(&self) -> &Bytes { &self.result.output } - /// Retrieves a reference to the `Gas` details from the `InterpreterResult`. + /// Retrieves a reference to the [`Gas`] details from the [`InterpreterResult`]. /// /// This method provides access to the gas details of the operation, which includes - /// information about gas used, remaining, and refunded. It is essential for - /// understanding the gas consumption of the operation. + /// information about gas used, remaining, and refunded. + /// + /// It is essential for understanding the gas consumption of the operation. /// /// # Returns /// - /// A reference to the `Gas` details. + /// A reference to the [`Gas`] details. pub fn gas(&self) -> &Gas { &self.result.gas } diff --git a/crates/interpreter/src/interpreter_action/eof_create_inputs.rs b/crates/interpreter/src/interpreter_action/eof_create_inputs.rs index 54ed29e150..dba52448df 100644 --- a/crates/interpreter/src/interpreter_action/eof_create_inputs.rs +++ b/crates/interpreter/src/interpreter_action/eof_create_inputs.rs @@ -5,10 +5,11 @@ use primitives::{Address, Bytes, U256}; /// * EOFCREATE opcode /// * Creation transaction. /// -/// Creation transaction uses initdata and packs EOF and initdata inside it. -/// This eof bytecode needs to be validated. +/// Creation transaction uses initdata and packs EOF and initdata inside it, +/// and this eof bytecode needs to be validated. /// /// Opcode creation uses already validated EOF bytecode, and input from Interpreter memory. +/// /// Address is already known and is passed as an argument. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -45,22 +46,22 @@ impl Default for EOFCreateKind { } } -/// Inputs for EOF create call. 
+/// Inputs for EOF Create call #[derive(Debug, Default, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct EOFCreateInputs { - /// Caller of Eof Craate + /// Caller of EOF Create pub caller: Address, /// Values of ether transferred pub value: U256, - /// Gas limit for the create call. + /// Gas limit for the create call pub gas_limit: u64, /// EOF Create kind pub kind: EOFCreateKind, } impl EOFCreateInputs { - /// Create new EOF crate input from transaction that has concatenated eof init code and calldata. + /// Creates new EOF Create input from transaction that has concatenated eof init code and calldata. /// /// Legacy transaction still have optional nonce so we need to obtain it. pub fn new(caller: Address, value: U256, gas_limit: u64, kind: EOFCreateKind) -> Self { diff --git a/crates/interpreter/src/interpreter_types.rs b/crates/interpreter/src/interpreter_types.rs index c9cee736fb..b95ecf8cc1 100644 --- a/crates/interpreter/src/interpreter_types.rs +++ b/crates/interpreter/src/interpreter_types.rs @@ -5,7 +5,7 @@ use crate::{Gas, InstructionResult, InterpreterAction}; use core::ops::{Deref, Range}; use primitives::{Address, Bytes, B256, U256}; -/// Helper function to read immediates data from the bytecode. +/// Helper function to read immediates data from the bytecode pub trait Immediates { fn read_i16(&self) -> i16; fn read_u16(&self) -> u16; @@ -31,18 +31,18 @@ pub trait LegacyBytecode { fn bytecode_slice(&self) -> &[u8]; } -/// Trait for interpreter to be able to jump. +/// Trait for interpreter to be able to jump pub trait Jumps { - /// Relative jumps does not require checking for overflow + /// Relative jumps does not require checking for overflow. fn relative_jump(&mut self, offset: isize); /// Absolute jumps require checking for overflow and if target is a jump destination /// from jump table. fn absolute_jump(&mut self, offset: usize); /// Check legacy jump destination from jump table. fn is_valid_legacy_jump(&mut self, offset: usize) -> bool; - /// Return current program counter. + /// Returns current program counter. fn pc(&self) -> usize; - /// Instruction opcode + /// Returns instruction opcode. fn opcode(&self) -> u8; } @@ -53,24 +53,22 @@ pub trait MemoryTrait { fn size(&self) -> usize; fn copy(&mut self, destination: usize, source: usize, len: usize); - /// Memory slice with range. + /// Memory slice with range /// /// # Panics - /// /// Panics if range is out of scope of allocated memory. fn slice(&self, range: Range) -> impl Deref + '_; /// Memory slice len /// - /// Uses [`MemoryTrait::slice`] internally. + /// Uses [`slice`][MemoryTrait::slice] internally. fn slice_len(&self, offset: usize, len: usize) -> impl Deref + '_ { self.slice(offset..offset + len) } - /// Resize memory to new size. + /// Resizes memory to new size /// /// # Note - /// /// It checks memory limits. fn resize(&mut self, new_size: usize) -> bool; } @@ -97,7 +95,7 @@ pub trait SubRoutineStack { /// Pops previous subroutine, sets previous code index and returns program counter. fn pop(&mut self) -> Option; - // /// Return code info from EOF body. + // /// Returns code info from EOF body. // fn eof_code_info(&self, idx: usize) -> Option<&TypesSection>; } @@ -110,11 +108,11 @@ pub trait StackTrait { self.len() == 0 } - /// Pushes values to the stack - /// Return `true` if push was successful, `false` if stack overflow. + /// Pushes values to the stack. 
/// - /// # Note + /// Returns `true` if push was successful, `false` if stack overflow. /// + /// # Note /// Error is internally set in interpreter. #[must_use] fn push(&mut self, value: U256) -> bool; @@ -124,7 +122,7 @@ pub trait StackTrait { self.push(value.into()) } - /// Pop value from the stack. + /// Pops value from the stack. #[must_use] fn popn(&mut self) -> Option<[U256; N]>; @@ -132,13 +130,13 @@ pub trait StackTrait { #[must_use] fn popn_top(&mut self) -> Option<([U256; POPN], &mut U256)>; - /// Return top value from the stack. + /// Returns top value from the stack. #[must_use] fn top(&mut self) -> Option<&mut U256> { self.popn_top::<0>().map(|(_, top)| top) } - /// Pop one value from the stack. + /// Pops one value from the stack. #[must_use] fn pop(&mut self) -> Option { self.popn::<1>().map(|[value]| value) @@ -149,11 +147,11 @@ pub trait StackTrait { self.pop().map(|value| Address::from(value.to_be_bytes())) } - /// Exchange two values on the stack. + /// Exchanges two values on the stack. /// /// Indexes are based from the top of the stack. /// - /// Return `true` if swap was successful, `false` if stack underflow. + /// Returns `true` if swap was successful, `false` if stack underflow. #[must_use] fn exchange(&mut self, n: usize, m: usize) -> bool; @@ -161,7 +159,7 @@ pub trait StackTrait { /// /// Index is based from the top of the stack. /// - /// Return `true` if duplicate was successful, `false` if stack underflow. + /// Returns `true` if duplicate was successful, `false` if stack underflow. #[must_use] fn dup(&mut self, n: usize) -> bool; } diff --git a/crates/interpreter/src/table.rs b/crates/interpreter/src/table.rs index 506d3f9ee5..ae311fea8c 100644 --- a/crates/interpreter/src/table.rs +++ b/crates/interpreter/src/table.rs @@ -151,7 +151,7 @@ where // H: Host + ?Sized, // F: Fn(&DynInstruction, &mut W, &mut H), // { -// // NOTE: This first allocation gets elided by the compiler. +// // Note: This first allocation gets elided by the compiler. // let prev = core::mem::replace(instruction, Box::new(|_, _| {})); // *instruction = Box::new(move |i, h| f(&prev, i, h)); // } diff --git a/crates/optimism/src/bn128.rs b/crates/optimism/src/bn128.rs index 70549f8427..02dbf0c6c1 100644 --- a/crates/optimism/src/bn128.rs +++ b/crates/optimism/src/bn128.rs @@ -54,7 +54,7 @@ mod tests { let outcome = pair::run_pair(&input, 260_000).unwrap(); assert_eq!(outcome.bytes, expected); - // invalid input length + // Invalid input length let input = hex::decode( "\ 1111111111111111111111111111111111111111111111111111111111111111\ @@ -70,7 +70,7 @@ mod tests { Err(PrecompileErrors::Error(PrecompileError::Bn128PairLength)) )); - // valid input length shorter than 112687 + // Valid input length shorter than 112687 let input = vec![1u8; 586 * bn128::PAIR_ELEMENT_LEN]; let res = pair::run_pair(&input, 260_000); assert!(matches!( @@ -78,7 +78,7 @@ mod tests { Err(PrecompileErrors::Error(PrecompileError::OutOfGas)) )); - // input length longer than 112687 + // Input length longer than 112687 let input = vec![1u8; 587 * bn128::PAIR_ELEMENT_LEN]; let res = pair::run_pair(&input, 260_000); assert!(matches!( diff --git a/crates/optimism/src/evm.rs b/crates/optimism/src/evm.rs index 424889b1a0..01d0148447 100644 --- a/crates/optimism/src/evm.rs +++ b/crates/optimism/src/evm.rs @@ -12,13 +12,13 @@ use revm::{ Evm, JournaledState, }; -/// Optimism Error. +/// Optimism Error pub type OpError = EVMError<::Error, InvalidTransaction>; -/// Optimism Context. 
+/// Optimism Context pub type OpContext = Context, CfgEnv, DB, L1BlockInfo>; -/// Optimism EVM type. +/// Optimism EVM type pub type OpEvm = Evm, OpContext, OpHandler, OpError>>; pub type InspCtxType = diff --git a/crates/optimism/src/fast_lz.rs b/crates/optimism/src/fast_lz.rs index 235172ddd7..65a23f75ef 100644 --- a/crates/optimism/src/fast_lz.rs +++ b/crates/optimism/src/fast_lz.rs @@ -1,5 +1,6 @@ /// Returns the length of the data after compression through FastLZ, based on /// +/// /// The u32s match op-geth's Go port: /// pub(crate) fn flz_compress_len(input: &[u8]) -> u32 { diff --git a/crates/optimism/src/handler.rs b/crates/optimism/src/handler.rs index f01dc6cef3..692b1ca5b7 100644 --- a/crates/optimism/src/handler.rs +++ b/crates/optimism/src/handler.rs @@ -70,7 +70,7 @@ where if tx_type == OpTransactionType::Deposit { let tx = context.op_tx().deposit(); // Do not allow for a system transaction to be processed if Regolith is enabled. - // TODO check if this is correct. + // TODO : Check if this is correct. if tx.is_system_transaction() && context.cfg().spec().is_enabled_in(OpSpecId::REGOLITH) { return Err(OpTransactionError::DepositSystemTxPostRegolith.into()); @@ -109,13 +109,13 @@ where type Error = ERROR; fn load_accounts(&self, context: &mut Self::Context) -> Result<(), Self::Error> { - // the L1-cost fee is only computed for Optimism non-deposit transactions. + // The L1-cost fee is only computed for Optimism non-deposit transactions. let spec = context.cfg().spec(); if context.tx().tx_type() != OpTransactionType::Deposit { let l1_block_info: crate::L1BlockInfo = super::L1BlockInfo::try_fetch(context.db(), spec)?; - // storage l1 block info for later use. + // Storage L1 block info for later use. *context.l1_block_info_mut() = l1_block_info; } @@ -152,7 +152,7 @@ where } // We deduct caller max balance after minting and before deducing the - // l1 cost, max values is already checked in pre_validate but l1 cost wasn't. + // L1 cost, max values is already checked in pre_validate but L1 cost wasn't. self.eth.deduct_caller(context)?; // If the transaction is not a deposit transaction, subtract the L1 data fee from the @@ -335,7 +335,7 @@ where let is_deposit = context.tx().tx_type() == OpTransactionType::Deposit; - // transfer fee to coinbase/beneficiary. + // Transfer fee to coinbase/beneficiary. if !is_deposit { self.eth.reward_beneficiary(context, exec_result)?; let basefee = *context.block().basefee(); diff --git a/crates/optimism/src/handler/precompiles.rs b/crates/optimism/src/handler/precompiles.rs index 8629b13b20..576f2a9449 100644 --- a/crates/optimism/src/handler/precompiles.rs +++ b/crates/optimism/src/handler/precompiles.rs @@ -65,7 +65,7 @@ where fn new(context: &mut Self::Context) -> Self { let spec = context.cfg().spec(); match spec { - // no changes + // No changes spec @ (OpSpec::Eth( SpecId::FRONTIER | SpecId::FRONTIER_THAWING diff --git a/crates/optimism/src/l1block.rs b/crates/optimism/src/l1block.rs index b6c909cec1..4ea6f47449 100644 --- a/crates/optimism/src/l1block.rs +++ b/crates/optimism/src/l1block.rs @@ -115,8 +115,8 @@ impl L1BlockInfo { .as_ref(), ); - // Check if the L1 fee scalars are empty. If so, we use the Bedrock cost function. The L1 fee overhead is - // only necessary if `empty_scalars` is true, as it was deprecated in Ecotone. + // Check if the L1 fee scalars are empty. If so, we use the Bedrock cost function. + // The L1 fee overhead is only necessary if `empty_scalars` is true, as it was deprecated in Ecotone. 
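The `L1BlockInfo` hunk above keys the choice of L1 cost function on whether the Ecotone fee scalars are "empty". A simplified version of that check is sketched below; the real code compares a specific slice of the packed scalar bytes against a constant, so this is only the shape of the logic, not the exact offsets.

```rust
/// Returns `true` when the Ecotone fee scalars look unset, in which case the
/// pre-Ecotone (Bedrock) L1 cost function and the L1 fee overhead are used.
fn use_bedrock_cost_fn(l1_blob_base_fee: u128, packed_fee_scalars: &[u8]) -> bool {
    l1_blob_base_fee == 0 && packed_fee_scalars.iter().all(|b| *b == 0)
}

fn main() {
    assert!(use_bedrock_cost_fn(0, &[0u8; 8]));
    assert!(!use_bedrock_cost_fn(1, &[0u8; 8]));
    assert!(!use_bedrock_cost_fn(0, &[0, 1, 0, 0]));
}
```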
let empty_scalars = l1_blob_base_fee.is_zero() && l1_fee_scalars[BASE_FEE_SCALAR_OFFSET..BLOB_BASE_FEE_SCALAR_OFFSET + 4] == EMPTY_SCALARS; @@ -503,14 +503,14 @@ mod tests { l1_base_fee_scalar: U256::from(5227), l1_blob_base_fee_scalar: Some(U256::from(1014213)), l1_blob_base_fee: Some(U256::from(1)), - ..Default::default() // l1 fee overhead (l1 gas used) deprecated since Fjord + ..Default::default() // L1 fee overhead (l1 gas used) deprecated since Fjord }; - // second tx in OP mainnet Fjord block 124665056 + // Second tx in OP mainnet Fjord block 124665056 // const TX: &[u8] = &hex!("02f904940a8303fba78401d6d2798401db2b6d830493e0943e6f4f7866654c18f536170780344aa8772950b680b904246a761202000000000000000000000000087000a300de7200382b55d40045000000e5d60e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000022482ad56cb0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000120000000000000000000000000dc6ff44d5d932cbd77b52e5612ba0529dc6226f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044095ea7b300000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c0000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000000000000000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024b6b55f250000000000000000000000000000000000000000000000049b9ca9a694340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000415ec214a3950bea839a7e6fbb0ba1540ac2076acd50820e2d5ef83d0902cdffb24a47aff7de5190290769c4f0a9c6fabf63012986a0d590b1b571547a8c7050ea1b00000000000000000000000000000000000000000000000000000000000000c080a06db770e6e25a617fe9652f0958bd9bd6e49281a53036906386ed39ec48eadf63a07f47cf51a4a40b4494cf26efc686709a9b03939e20ee27e59682f5faa536667e"); - // l1 gas used for tx and l1 fee for tx, from OP mainnet block scanner + // L1 gas used for tx and L1 fee for tx, from OP mainnet block scanner // https://optimistic.etherscan.io/tx/0x1059e8004daff32caa1f1b1ef97fe3a07a8cf40508f5b835b66d9420d87c4a4a let expected_data_gas = U256::from(4471); let expected_l1_fee = U256::from_be_bytes(hex!( diff --git a/crates/optimism/src/spec.rs b/crates/optimism/src/spec.rs index 82530d3855..77e7a6ad8c 100644 --- a/crates/optimism/src/spec.rs +++ b/crates/optimism/src/spec.rs @@ -22,7 +22,7 @@ pub enum OpSpecId { } impl OpSpecId { - /// Converts the `OpSpec` into 
a `SpecId`. + /// Converts the [`OpSpec`] into a [`SpecId`]. pub const fn into_eth_spec(self) -> SpecId { match self { Self::BEDROCK | Self::REGOLITH => SpecId::MERGE, @@ -77,7 +77,7 @@ impl From for &'static str { } } -/// String identifiers for Optimism hardforks. +/// String identifiers for Optimism hardforks pub mod name { pub const BEDROCK: &str = "Bedrock"; pub const REGOLITH: &str = "Regolith"; @@ -99,7 +99,7 @@ impl OpSpec { } } - /// Converts the `OpSpec` into a `SpecId`. + /// Converts the [`OpSpec`] into a [`SpecId`]. pub const fn into_eth_spec(self) -> SpecId { match self { OpSpec::Eth(spec) => spec, diff --git a/crates/optimism/src/transaction/abstraction.rs b/crates/optimism/src/transaction/abstraction.rs index 7b04c14f42..c39a3a22a7 100644 --- a/crates/optimism/src/transaction/abstraction.rs +++ b/crates/optimism/src/transaction/abstraction.rs @@ -57,8 +57,9 @@ impl From for TransactionType { pub enum OpTransaction { Base { tx: T, - /// An enveloped EIP-2718 typed transaction. This is used - /// to compute the L1 tx cost using the L1 block info, as + /// An enveloped EIP-2718 typed transaction + /// + /// This is used to compute the L1 tx cost using the L1 block info, as /// opposed to requiring downstream apps to compute the cost /// externally. enveloped_tx: Option, diff --git a/crates/optimism/src/transaction/deposit.rs b/crates/optimism/src/transaction/deposit.rs index 72ce082346..e6726733c4 100644 --- a/crates/optimism/src/transaction/deposit.rs +++ b/crates/optimism/src/transaction/deposit.rs @@ -16,23 +16,23 @@ pub trait DepositTransaction: CommonTxFields { #[derive(Clone, Default, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TxDeposit { - /// Hash that uniquely identifies the source of the deposit. + /// Hash that uniquely identifies the source of the deposit pub source_hash: B256, - /// The address of the sender account. + /// The address of the sender account pub from: Address, /// The address of the recipient account, or the null (zero-length) address if the deposited - /// transaction is a contract creation. + /// transaction is a contract creation pub to: TxKind, - /// The ETH value to mint on L2. + /// The ETH value to mint on L2 pub mint: Option, - /// The ETH value to send to the recipient account. + /// The ETH value to send to the recipient account pub value: U256, - /// The gas limit for the L2 transaction. + /// The gas limit for the L2 transaction pub gas_limit: u64, - /// Field indicating if this transaction is exempt from the L2 gas limit. + /// Field indicating if this transaction is exempt from the L2 gas limit pub is_system_transaction: bool, - /// Input has two uses depending if transaction is Create or Call (if `to` field is None or - /// Some). + /// Input has two uses depending if transaction is Create or Call (if `to` field is [None] + /// or [Some]) pub input: Bytes, } diff --git a/crates/optimism/src/transaction/error.rs b/crates/optimism/src/transaction/error.rs index 0b3cd09187..6ea61a69f9 100644 --- a/crates/optimism/src/transaction/error.rs +++ b/crates/optimism/src/transaction/error.rs @@ -16,27 +16,27 @@ pub enum OpTransactionError { /// was deprecated in the Regolith hardfork, and this error is thrown if a `Deposit` transaction /// is found with this field set to `true` after the hardfork activation. 
/// - /// In addition, this error is internal, and bubbles up into a [crate::OptimismHaltReason::FailedDeposit] error + /// In addition, this error is internal, and bubbles up into a [OptimismHaltReason::FailedDeposit][crate::OptimismHaltReason::FailedDeposit] error /// in the `revm` handler for the consumer to easily handle. This is due to a state transition /// rule on OP Stack chains where, if for any reason a deposit transaction fails, the transaction /// must still be included in the block, the sender nonce is bumped, the `mint` value persists, and /// special gas accounting rules are applied. Normally on L1, [EVMError::Transaction] errors - /// are cause for non-inclusion, so a special [crate::OptimismHaltReason] variant was introduced to handle this + /// are cause for non-inclusion, so a special [OptimismHaltReason][crate::OptimismHaltReason] variant was introduced to handle this /// case for failed deposit transactions. DepositSystemTxPostRegolith, /// Deposit transaction haults bubble up to the global main return handler, wiping state and /// only increasing the nonce + persisting the mint value. /// - /// This is a catch-all error for any deposit transaction that is results in a [crate::OptimismHaltReason] error + /// This is a catch-all error for any deposit transaction that is results in a [OptimismHaltReason][crate::OptimismHaltReason] error /// post-regolith hardfork. This allows for a consumer to easily handle special cases where /// a deposit transaction fails during validation, but must still be included in the block. /// - /// In addition, this error is internal, and bubbles up into a [crate::OptimismHaltReason::FailedDeposit] error + /// In addition, this error is internal, and bubbles up into a [OptimismHaltReason::FailedDeposit][crate::OptimismHaltReason::FailedDeposit] error /// in the `revm` handler for the consumer to easily handle. This is due to a state transition /// rule on OP Stack chains where, if for any reason a deposit transaction fails, the transaction /// must still be included in the block, the sender nonce is bumped, the `mint` value persists, and /// special gas accounting rules are applied. Normally on L1, [EVMError::Transaction] errors - /// are cause for non-inclusion, so a special [crate::OptimismHaltReason] variant was introduced to handle this + /// are cause for non-inclusion, so a special [OptimismHaltReason][crate::OptimismHaltReason] variant was introduced to handle this /// case for failed deposit transactions. 
HaltedDepositPostRegolith, } diff --git a/crates/precompile/benches/bench.rs b/crates/precompile/benches/bench.rs index 38513a68fd..02e3a58574 100644 --- a/crates/precompile/benches/bench.rs +++ b/crates/precompile/benches/bench.rs @@ -21,7 +21,7 @@ pub fn benchmark_crypto_precompiles(c: &mut Criterion) { // === ECPAIRING === - // set up ecpairing input + // Set up ecpairing input let input = hex::decode( "\ 1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\ @@ -68,7 +68,7 @@ pub fn benchmark_crypto_precompiles(c: &mut Criterion) { // === ECRECOVER === - // generate secp256k1 signature + // Generate secp256k1 signature let data = hex::decode("1337133713371337").unwrap(); let hash = keccak256(data); let secret_key = SecretKey::new(&mut rand::thread_rng()); @@ -83,7 +83,7 @@ pub fn benchmark_crypto_precompiles(c: &mut Criterion) { let mut message_and_signature = [0u8; 128]; message_and_signature[0..32].copy_from_slice(&hash[..]); - // fit signature into format the precompile expects + // Fit signature into format the precompile expects let rec_id = U256::from(rec_id as u64); message_and_signature[32..64].copy_from_slice(&rec_id.to_be_bytes::<32>()); message_and_signature[64..128].copy_from_slice(&data); @@ -96,7 +96,7 @@ pub fn benchmark_crypto_precompiles(c: &mut Criterion) { // === POINT_EVALUATION === - // now check kzg precompile gas + // Now check kzg precompile gas let commitment = hex!("8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7").to_vec(); let mut versioned_hash = Sha256::digest(&commitment).to_vec(); versioned_hash[0] = VERSIONED_HASH_VERSION_KZG; diff --git a/crates/precompile/src/blake2.rs b/crates/precompile/src/blake2.rs index 16b5bce77b..2dac62fa21 100644 --- a/crates/precompile/src/blake2.rs +++ b/crates/precompile/src/blake2.rs @@ -16,7 +16,7 @@ pub fn run(input: &Bytes, gas_limit: u64) -> PrecompileResult { return Err(PrecompileError::Blake2WrongLength.into()); } - // rounds 4 bytes + // Rounds 4 bytes let rounds = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; let gas_used = rounds as u64 * F_ROUND; if gas_used > gas_limit { diff --git a/crates/precompile/src/bls12_381.rs b/crates/precompile/src/bls12_381.rs index b329829864..46bd3feb6c 100644 --- a/crates/precompile/src/bls12_381.rs +++ b/crates/precompile/src/bls12_381.rs @@ -49,7 +49,7 @@ mod test { use serde_derive::{Deserialize, Serialize}; use std::{fs, path::Path}; - /// Test vector structure for BLS12-381 precompile tests. + /// Test vector structure for BLS12-381 precompile tests #[derive(Serialize, Deserialize, Debug)] #[serde(rename_all = "PascalCase")] struct TestVector { diff --git a/crates/precompile/src/bls12_381/g1.rs b/crates/precompile/src/bls12_381/g1.rs index dc6ac01e70..912141f7e0 100644 --- a/crates/precompile/src/bls12_381/g1.rs +++ b/crates/precompile/src/bls12_381/g1.rs @@ -12,7 +12,7 @@ const G1_OUTPUT_LENGTH: usize = 128; /// Encodes a G1 point in affine format into byte slice with padded elements. pub(super) fn encode_g1_point(input: *const blst_p1_affine) -> Bytes { let mut out = vec![0u8; G1_OUTPUT_LENGTH]; - // SAFETY: out comes from fixed length array, input is a blst value. + // SAFETY: Out comes from fixed length array, input is a blst value. unsafe { fp_to_bytes(&mut out[..PADDED_FP_LENGTH], &(*input).x); fp_to_bytes(&mut out[PADDED_FP_LENGTH..], &(*input).y); @@ -40,7 +40,7 @@ pub(super) fn decode_and_check_g1( /// Extracts a G1 point in Affine format from a 128 byte slice representation. 
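The BLAKE2 `F` hunk above derives gas directly from the round count encoded in the first four big-endian input bytes. A standalone sketch of that pricing step, assuming the EIP-152 fixed 213-byte input and a price of 1 gas per round (`F_ROUND`); the helper name is illustrative.

```rust
const F_ROUND: u64 = 1;
const INPUT_LENGTH: usize = 213;

/// Gas for the BLAKE2 compression precompile: rounds (first 4 bytes, big
/// endian) times the per-round price. Returns `None` for a wrong input length.
fn blake2_gas(input: &[u8]) -> Option<u64> {
    if input.len() != INPUT_LENGTH {
        return None;
    }
    let rounds = u32::from_be_bytes(input[..4].try_into().unwrap()) as u64;
    Some(rounds * F_ROUND)
}

fn main() {
    let mut input = [0u8; INPUT_LENGTH];
    input[..4].copy_from_slice(&12u32.to_be_bytes());
    assert_eq!(blake2_gas(&input), Some(12));
    assert_eq!(blake2_gas(&[0u8; 10]), None);
}
```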
/// -/// NOTE: This function will perform a G1 subgroup check if `subgroup_check` is set to `true`. +/// **Note**: This function will perform a G1 subgroup check if `subgroup_check` is set to `true`. pub(super) fn extract_g1_input( input: &[u8], subgroup_check: bool, @@ -84,7 +84,7 @@ pub(super) fn extract_g1_input( // We use blst_p1_affine_on_curve instead of blst_p1_affine_in_g1 because the latter performs // the subgroup check. // - // SAFETY: out is a blst value. + // SAFETY: Out is a blst value. if unsafe { !blst_p1_affine_on_curve(&out) } { return Err(PrecompileError::Other( "Element not on G1 curve".to_string(), diff --git a/crates/precompile/src/bls12_381/g1_add.rs b/crates/precompile/src/bls12_381/g1_add.rs index d746cc0d78..84b831797d 100644 --- a/crates/precompile/src/bls12_381/g1_add.rs +++ b/crates/precompile/src/bls12_381/g1_add.rs @@ -42,15 +42,15 @@ pub(super) fn g1_add(input: &Bytes, gas_limit: u64) -> PrecompileResult { let b_aff = &extract_g1_input(&input[G1_INPUT_ITEM_LENGTH..], false)?; let mut b = blst_p1::default(); - // SAFETY: b and b_aff are blst values. + // SAFETY: `b` and `b_aff` are blst values. unsafe { blst_p1_from_affine(&mut b, b_aff) }; let mut p = blst_p1::default(); - // SAFETY: p, b and a_aff are blst values. + // SAFETY: `p`, `b` and `a_aff` are blst values. unsafe { blst_p1_add_or_double_affine(&mut p, &b, a_aff) }; let mut p_aff = blst_p1_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p`` are blst values. unsafe { blst_p1_to_affine(&mut p_aff, &p) }; let out = encode_g1_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index 4573394bd6..0b3023ac3d 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -59,7 +59,7 @@ pub(super) fn g1_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { let p0_aff = &extract_g1_input(slice, true)?; let mut p0 = blst_p1::default(); - // SAFETY: p0 and p0_aff are blst values. + // SAFETY: `p0` and `p0_aff` are blst values. unsafe { blst_p1_from_affine(&mut p0, p0_aff) }; g1_points.push(p0); @@ -72,7 +72,7 @@ pub(super) fn g1_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { ); } - // return infinity point if all points are infinity + // Return infinity point if all points are infinity if g1_points.is_empty() { return Ok(PrecompileOutput::new(required_gas, [0; 128].into())); } @@ -81,7 +81,7 @@ pub(super) fn g1_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { let multiexp = points.mult(&scalars, NBITS); let mut multiexp_aff = blst_p1_affine::default(); - // SAFETY: multiexp_aff and multiexp are blst values. + // SAFETY: `multiexp_aff` and `multiexp` are blst values. unsafe { blst_p1_to_affine(&mut multiexp_aff, &multiexp) }; let out = encode_g1_point(&multiexp_aff); diff --git a/crates/precompile/src/bls12_381/g1_mul.rs b/crates/precompile/src/bls12_381/g1_mul.rs index 2458de7c37..46d9863303 100644 --- a/crates/precompile/src/bls12_381/g1_mul.rs +++ b/crates/precompile/src/bls12_381/g1_mul.rs @@ -44,16 +44,16 @@ pub(super) fn g1_mul(input: &Bytes, gas_limit: u64) -> PrecompileResult { let mut p0 = blst_p1::default(); - // SAFETY: p0 and p0_aff are blst values. + // SAFETY: `p0` and `p0_aff` are blst values. unsafe { blst_p1_from_affine(&mut p0, p0_aff) }; let input_scalar0 = extract_scalar_input(&input[G1_INPUT_ITEM_LENGTH..])?; let mut p = blst_p1::default(); - // SAFETY: input_scalar0.b has fixed size, p and p0 are blst values. 
+ // SAFETY: `input_scalar0.b` has fixed size, `p` and `p0` are blst values. unsafe { blst_p1_mult(&mut p, &p0, input_scalar0.b.as_ptr(), NBITS) }; let mut p_aff = blst_p1_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p` are blst values. unsafe { blst_p1_to_affine(&mut p_aff, &p) }; let out = encode_g1_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/g2.rs b/crates/precompile/src/bls12_381/g2.rs index 53fedbe071..5a75808b28 100644 --- a/crates/precompile/src/bls12_381/g2.rs +++ b/crates/precompile/src/bls12_381/g2.rs @@ -57,7 +57,7 @@ pub(super) fn check_canonical_fp2( /// Extracts a G2 point in Affine format from a 256 byte slice representation. /// -/// NOTE: This function will perform a G2 subgroup check if `subgroup_check` is set to `true`. +/// **Note**: This function will perform a G2 subgroup check if `subgroup_check` is set to `true`. pub(super) fn extract_g2_input( input: &[u8], subgroup_check: bool, @@ -104,7 +104,7 @@ pub(super) fn extract_g2_input( // We use blst_p2_affine_on_curve instead of blst_p2_affine_in_g2 because the latter performs // the subgroup check. // - // SAFETY: out is a blst value. + // SAFETY: Out is a blst value. if unsafe { !blst_p2_affine_on_curve(&out) } { return Err(PrecompileError::Other( "Element not on G2 curve".to_string(), diff --git a/crates/precompile/src/bls12_381/g2_add.rs b/crates/precompile/src/bls12_381/g2_add.rs index f98f7345b7..a2772a2dbc 100644 --- a/crates/precompile/src/bls12_381/g2_add.rs +++ b/crates/precompile/src/bls12_381/g2_add.rs @@ -43,15 +43,15 @@ pub(super) fn g2_add(input: &Bytes, gas_limit: u64) -> PrecompileResult { let b_aff = &extract_g2_input(&input[G2_INPUT_ITEM_LENGTH..], false)?; let mut b = blst_p2::default(); - // SAFETY: b and b_aff are blst values. + // SAFETY: `b` and `b_aff` are blst values. unsafe { blst_p2_from_affine(&mut b, b_aff) }; let mut p = blst_p2::default(); - // SAFETY: p, b and a_aff are blst values. + // SAFETY: `p`, `b` and `a_aff` are blst values. unsafe { blst_p2_add_or_double_affine(&mut p, &b, a_aff) }; let mut p_aff = blst_p2_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p` are blst values. unsafe { blst_p2_to_affine(&mut p_aff, &p) }; let out = encode_g2_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 675bf540f4..86c9a4be50 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ -58,7 +58,7 @@ pub(super) fn g2_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { let p0_aff = &extract_g2_input(slice, true)?; let mut p0 = blst_p2::default(); - // SAFETY: p0 and p0_aff are blst values. + // SAFETY: `p0` and `p0_aff` are blst values. unsafe { blst_p2_from_affine(&mut p0, p0_aff) }; g2_points.push(p0); @@ -72,7 +72,7 @@ pub(super) fn g2_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { ); } - // return infinity point if all points are infinity + // Return infinity point if all points are infinity if g2_points.is_empty() { return Ok(PrecompileOutput::new(required_gas, [0; 256].into())); } @@ -81,7 +81,7 @@ pub(super) fn g2_msm(input: &Bytes, gas_limit: u64) -> PrecompileResult { let multiexp = points.mult(&scalars, NBITS); let mut multiexp_aff = blst_p2_affine::default(); - // SAFETY: multiexp_aff and multiexp are blst values. + // SAFETY: `multiexp_aff` and `multiexp` are blst values. 
unsafe { blst_p2_to_affine(&mut multiexp_aff, &multiexp) }; let out = encode_g2_point(&multiexp_aff); diff --git a/crates/precompile/src/bls12_381/g2_mul.rs b/crates/precompile/src/bls12_381/g2_mul.rs index b6a5fdf4b2..97f2f87c2d 100644 --- a/crates/precompile/src/bls12_381/g2_mul.rs +++ b/crates/precompile/src/bls12_381/g2_mul.rs @@ -41,16 +41,16 @@ pub(super) fn g2_mul(input: &Bytes, gas_limit: u64) -> PrecompileResult { // So we set the subgroup_check flag to `true` let p0_aff = &extract_g2_input(&input[..G2_INPUT_ITEM_LENGTH], true)?; let mut p0 = blst_p2::default(); - // SAFETY: p0 and p0_aff are blst values. + // SAFETY: `p0` and `p0_aff` are blst values. unsafe { blst_p2_from_affine(&mut p0, p0_aff) }; let input_scalar0 = extract_scalar_input(&input[G2_INPUT_ITEM_LENGTH..])?; let mut p = blst_p2::default(); - // SAFETY: input_scalar0.b has fixed size, p and p0 are blst values. + // SAFETY: `input_scalar0.b` has fixed size, `p` and `p0` are blst values. unsafe { blst_p2_mult(&mut p, &p0, input_scalar0.b.as_ptr(), NBITS) }; let mut p_aff = blst_p2_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p` are blst values. unsafe { blst_p2_to_affine(&mut p_aff, &p) }; let out = encode_g2_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs index 803cd25d74..84da0b518a 100644 --- a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs +++ b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs @@ -40,12 +40,12 @@ pub(super) fn map_fp2_to_g2(input: &Bytes, gas_limit: u64) -> PrecompileResult { let fp2 = check_canonical_fp2(input_p0_x, input_p0_y)?; let mut p = blst_p2::default(); - // SAFETY: p and fp2 are blst values. - // third argument is unused if null. + // SAFETY: `p` and `fp2` are blst values. + // Third argument is unused if null. unsafe { blst_map_to_g2(&mut p, &fp2, core::ptr::null()) }; let mut p_aff = blst_p2_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p` are blst values. unsafe { blst_p2_to_affine(&mut p_aff, &p) }; let out = encode_g2_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/map_fp_to_g1.rs b/crates/precompile/src/bls12_381/map_fp_to_g1.rs index 7bc66c829f..1037f684a6 100644 --- a/crates/precompile/src/bls12_381/map_fp_to_g1.rs +++ b/crates/precompile/src/bls12_381/map_fp_to_g1.rs @@ -37,12 +37,12 @@ pub(super) fn map_fp_to_g1(input: &Bytes, gas_limit: u64) -> PrecompileResult { let fp = fp_from_bendian(input_p0)?; let mut p = blst_p1::default(); - // SAFETY: p and fp are blst values. - // third argument is unused if null. + // SAFETY: `p` and `fp` are blst values. + // Third argument is unused if null. unsafe { blst_map_to_g1(&mut p, &fp, core::ptr::null()) }; let mut p_aff = blst_p1_affine::default(); - // SAFETY: p_aff and p are blst values. + // SAFETY: `p_aff` and `p` are blst values. unsafe { blst_p1_to_affine(&mut p_aff, &p) }; let out = encode_g1_point(&p_aff); diff --git a/crates/precompile/src/bls12_381/msm.rs b/crates/precompile/src/bls12_381/msm.rs index 9ddeedc015..5faed49057 100644 --- a/crates/precompile/src/bls12_381/msm.rs +++ b/crates/precompile/src/bls12_381/msm.rs @@ -1,7 +1,7 @@ -/// Amount used to calculate the multi-scalar-multiplication discount. +/// Amount used to calculate the multi-scalar-multiplication discount const MSM_MULTIPLIER: u64 = 1000; -/// Table of gas discounts for multi-scalar-multiplication operations. 
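All of the `SAFETY` comments touched in these hunks annotate one recurring blst pattern: decode to affine, lift to a projective point, perform the group operation, then convert back to affine for encoding. A small sketch of that round-trip on G1, assuming the `blst` crate as a dependency; `blst_p1_generator` and the call shapes follow blst's C API, so treat this as illustrative rather than revm code:

```rust
// Assumed dependency: the `blst` crate (the same bindings used by these precompiles).
use blst::{
    blst_p1, blst_p1_add_or_double_affine, blst_p1_affine, blst_p1_from_affine,
    blst_p1_generator, blst_p1_to_affine,
};

fn main() {
    // Fetch the G1 generator in affine form.
    let mut g_aff = blst_p1_affine::default();
    // SAFETY: `g_aff` is a blst value and the generator pointer is valid and static.
    unsafe { blst_p1_to_affine(&mut g_aff, blst_p1_generator()) };

    // Affine -> projective, so the group operation can be applied.
    let mut g = blst_p1::default();
    // SAFETY: `g` and `g_aff` are blst values.
    unsafe { blst_p1_from_affine(&mut g, &g_aff) };

    // Add the affine generator to its projective form, i.e. compute 2*G.
    let mut sum = blst_p1::default();
    // SAFETY: `sum`, `g` and `g_aff` are blst values.
    unsafe { blst_p1_add_or_double_affine(&mut sum, &g, &g_aff) };

    // Projective -> affine, ready to be encoded into the 128-byte output format.
    let mut sum_aff = blst_p1_affine::default();
    // SAFETY: `sum_aff` and `sum` are blst values.
    unsafe { blst_p1_to_affine(&mut sum_aff, &sum) };

    let _two_g = sum_aff;
    println!("computed 2*G on G1 via the affine add path");
}
```

The `SAFETY` comments hold because every pointer handed to blst points at a stack-allocated, default-initialized blst struct of the expected type.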
+/// Table of gas discounts for multi-scalar-multiplication operations static MSM_DISCOUNT_TABLE: [u16; 128] = [ 1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, diff --git a/crates/precompile/src/bls12_381/pairing.rs b/crates/precompile/src/bls12_381/pairing.rs index becdd23009..928b6978e7 100644 --- a/crates/precompile/src/bls12_381/pairing.rs +++ b/crates/precompile/src/bls12_381/pairing.rs @@ -74,7 +74,7 @@ pub(super) fn pairing(input: &Bytes, gas_limit: u64) -> PrecompileResult { // multiplication. let mut cur_ml = blst_fp12::default(); let mut res = blst_fp12::default(); - // SAFETY: res, acc, cur_ml, p1_aff and p2_aff are blst values. + // SAFETY: `res`, `acc`, `cur_ml`, `p1_aff` and `p2_aff` are blst values. unsafe { blst_miller_loop(&mut cur_ml, p2_aff, p1_aff); blst_fp12_mul(&mut res, &acc, &cur_ml); @@ -83,21 +83,21 @@ pub(super) fn pairing(input: &Bytes, gas_limit: u64) -> PrecompileResult { } else { // On the first slice (i==0) there is no previous results and no need // to accumulate. - // SAFETY: acc, p1_aff and p2_aff are blst values. + // SAFETY: `acc`, `p1_aff` and `p2_aff` are blst values. unsafe { blst_miller_loop(&mut acc, p2_aff, p1_aff); } } } - // SAFETY: ret and acc are blst values. + // SAFETY: `ret` and `acc` are blst values. let mut ret = blst_fp12::default(); unsafe { blst_final_exp(&mut ret, &acc); } let mut result: u8 = 0; - // SAFETY: ret is a blst value. + // SAFETY: `ret` is a blst value. unsafe { if blst_fp12_is_one(&ret) { result = 1; diff --git a/crates/precompile/src/bls12_381/utils.rs b/crates/precompile/src/bls12_381/utils.rs index 6cb5a865af..2487a2a2d8 100644 --- a/crates/precompile/src/bls12_381/utils.rs +++ b/crates/precompile/src/bls12_381/utils.rs @@ -30,7 +30,7 @@ pub(super) fn fp_to_bytes(out: &mut [u8], input: *const blst_fp) { } let (padding, rest) = out.split_at_mut(PADDING_LENGTH); padding.fill(0); - // SAFETY: out length is checked previously, input is a blst value. + // SAFETY: Out length is checked previously, `input` is a blst value. unsafe { blst_bendian_from_fp(rest.as_mut_ptr(), input) }; } @@ -70,9 +70,9 @@ pub(super) fn extract_scalar_input(input: &[u8]) -> Result bool { Ordering::Equal => continue, } } - // false if matching the modulus + // Return false if matching the modulus false } @@ -102,7 +102,7 @@ pub(super) fn fp_from_bendian(input: &[u8; 48]) -> Result PrecompileResult { } } -/// Computes the RIPEMD-160 hash of the input data. +/// Computes the RIPEMD-160 hash of the input data /// /// This function follows specifications defined in the following references: /// - [Ethereum Yellow Paper](https://ethereum.github.io/yellowpaper/paper.pdf) diff --git a/crates/precompile/src/identity.rs b/crates/precompile/src/identity.rs index a4b5d2a9c8..339d68a9b0 100644 --- a/crates/precompile/src/identity.rs +++ b/crates/precompile/src/identity.rs @@ -5,14 +5,15 @@ use primitives::Bytes; pub const FUN: PrecompileWithAddress = PrecompileWithAddress(crate::u64_to_address(4), identity_run); -/// The base cost of the operation. +/// The base cost of the operation pub const IDENTITY_BASE: u64 = 15; -/// The cost per word. +/// The cost per word pub const IDENTITY_PER_WORD: u64 = 3; /// Takes the input bytes, copies them, and returns it as the output. 
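The discount table above feeds the EIP-2537 multi-scalar-multiplication pricing rule: the per-multiplication cost is scaled by `discount(k) / MSM_MULTIPLIER`, where `k` is the number of point/scalar pairs and anything beyond 128 pairs reuses the last table entry. A sketch of that formula with a toy table and a placeholder per-multiplication cost (the real constants live next to each MSM precompile):

```rust
// Illustrative EIP-2537 MSM pricing; the table contents here are placeholders.
const MSM_MULTIPLIER: u64 = 1000;

fn msm_required_gas(k: usize, discount_table: &[u16; 128], multiplication_cost: u64) -> u64 {
    if k == 0 {
        return 0;
    }
    // Pairs beyond the table length all get the final (deepest) discount.
    let index = k.min(discount_table.len()) - 1;
    let discount = discount_table[index] as u64;
    (k as u64 * discount * multiplication_cost) / MSM_MULTIPLIER
}

fn main() {
    // Toy table: the real one starts at 1200 for a single pair and shrinks as k grows.
    let mut table = [174u16; 128];
    table[0] = 1200;

    // One pair gets no bulk discount; a large batch uses the last entry.
    assert_eq!(msm_required_gas(1, &table, 12_000), 12_000 * 1200 / 1000);
    assert_eq!(msm_required_gas(200, &table, 12_000), 200 * 174 * 12_000 / 1000);
}
```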
/// /// See: +/// /// See: pub fn identity_run(input: &Bytes, gas_limit: u64) -> PrecompileResult { let gas_used = calc_linear_cost_u32(input.len(), IDENTITY_BASE, IDENTITY_PER_WORD); diff --git a/crates/precompile/src/interface.rs b/crates/precompile/src/interface.rs index 1a418b76d5..16e268b5d3 100644 --- a/crates/precompile/src/interface.rs +++ b/crates/precompile/src/interface.rs @@ -3,7 +3,7 @@ use core::fmt; use primitives::Bytes; use std::string::{String, ToString}; -/// A precompile operation result. +/// A precompile operation result type /// /// Returns either `Ok((gas_used, return_bytes))` or `Err(error)`. pub type PrecompileResult = Result; @@ -11,9 +11,9 @@ pub type PrecompileResult = Result; /// Precompile execution output #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct PrecompileOutput { - /// Gas used by the precompile. + /// Gas used by the precompile pub gas_used: u64, - /// Output bytes. + /// Output bytes pub bytes: Bytes, } @@ -66,13 +66,13 @@ pub enum PrecompileError { Bn128AffineGFailedToCreate, Bn128PairLength, // Blob errors - /// The input length is not exactly 192 bytes. + /// The input length is not exactly 192 bytes BlobInvalidInputLength, - /// The commitment does not match the versioned hash. + /// The commitment does not match the versioned hash BlobMismatchedVersion, - /// The proof verification failed. + /// The proof verification failed BlobVerifyKzgProofFailed, - /// Catch-all variant for other errors. + /// Catch-all variant for other errors Other(String), } @@ -82,7 +82,7 @@ impl PrecompileError { Self::Other(err.into()) } - /// Returns true if the error is out of gas. + /// Returns `true` if the error is out of gas. pub fn is_oog(&self) -> bool { matches!(self, Self::OutOfGas) } diff --git a/crates/precompile/src/kzg_point_evaluation.rs b/crates/precompile/src/kzg_point_evaluation.rs index 0f11062c0d..d96b7e0910 100644 --- a/crates/precompile/src/kzg_point_evaluation.rs +++ b/crates/precompile/src/kzg_point_evaluation.rs @@ -106,7 +106,7 @@ mod tests { #[test] fn basic_test() { - // test data from: https://github.com/ethereum/c-kzg-4844/blob/main/tests/verify_kzg_proof/kzg-mainnet/verify_kzg_proof_case_correct_proof_31ebd010e6098750/data.yaml + // Test data from: https://github.com/ethereum/c-kzg-4844/blob/main/tests/verify_kzg_proof/kzg-mainnet/verify_kzg_proof_case_correct_proof_31ebd010e6098750/data.yaml let commitment = hex!("8f59a8d2a1a625a17f3fea0fe5eb8c896db3764f3185481bc22f91b4aaffcca25f26936857bc3a7c2539ea8ec3a952b7").to_vec(); let mut versioned_hash = Sha256::digest(&commitment).to_vec(); diff --git a/crates/precompile/src/lib.rs b/crates/precompile/src/lib.rs index 2f7c232bfe..91053c921d 100644 --- a/crates/precompile/src/lib.rs +++ b/crates/precompile/src/lib.rs @@ -43,9 +43,9 @@ pub fn calc_linear_cost_u32(len: usize, base: u64, word: u64) -> u64 { #[derive(Clone, Default, Debug)] pub struct Precompiles { - /// Precompiles. + /// Precompiles inner: HashMap, - /// Addresses of precompile. + /// Addresses of precompile addresses: HashSet
, } @@ -303,8 +303,9 @@ impl PrecompileSpecId { /// Const function for making an address by concatenating the bytes from two given numbers. /// -/// Note that 32 + 128 = 160 = 20 bytes (the length of an address). This function is used -/// as a convenience for specifying the addresses of the various precompiles. +/// Note that 32 + 128 = 160 = 20 bytes (the length of an address). +/// +/// This function is used as a convenience for specifying the addresses of the various precompiles. #[inline] pub const fn u64_to_address(x: u64) -> Address { let x = x.to_be_bytes(); diff --git a/crates/precompile/src/modexp.rs b/crates/precompile/src/modexp.rs index 42468052f7..c563ddd0a9 100644 --- a/crates/precompile/src/modexp.rs +++ b/crates/precompile/src/modexp.rs @@ -54,15 +54,15 @@ where // The format of input is: // // Where every length is a 32-byte left-padded integer representing the number of bytes - // to be taken up by the next value + // to be taken up by the next value. const HEADER_LENGTH: usize = 96; - // Extract the header. + // Extract the header let base_len = U256::from_be_bytes(right_pad_with_offset::<32>(input, 0).into_owned()); let exp_len = U256::from_be_bytes(right_pad_with_offset::<32>(input, 32).into_owned()); let mod_len = U256::from_be_bytes(right_pad_with_offset::<32>(input, 64).into_owned()); - // cast base and modulus to usize, it does not make sense to handle larger values + // Cast base and modulus to usize, it does not make sense to handle larger values let Ok(base_len) = usize::try_from(base_len) else { return Err(PrecompileError::ModexpBaseOverflow.into()); }; @@ -87,7 +87,7 @@ where let input = input.get(HEADER_LENGTH..).unwrap_or_default(); let exp_highp = { - // get right padded bytes so if data.len is less then exp_len we will get right padded zeroes. + // Get right padded bytes so if data.len is less then exp_len we will get right padded zeroes. let right_padded_highp = right_pad_with_offset::<32>(input, base_len); // If exp_len is less then 32 bytes get only exp_len bytes and do left padding. let out = left_pad::<32>(&right_padded_highp[..exp_highp_len]); @@ -110,7 +110,7 @@ where // Call the modexp. let output = modexp(base, exponent, modulus); - // left pad the result to modulus length. bytes will always by less or equal to modulus length. + // Left pad the result to modulus length. bytes will always by less or equal to modulus length. 
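The MODEXP comments above describe a 96-byte header of three 32-byte big-endian lengths, with missing calldata treated as zero padding on the right. A dependency-free sketch of that header parse; the helper names and the u64 cutoff are illustrative, revm uses its own padding utilities and overflow errors:

```rust
// Illustrative MODEXP header parse: <base_len 32> <exp_len 32> <mod_len 32>.
const HEADER_LENGTH: usize = 96;

/// Reads a 32-byte big-endian length at `offset`, treating absent bytes as zero.
fn read_len(input: &[u8], offset: usize) -> Option<usize> {
    let mut word = [0u8; 32];
    if offset < input.len() {
        let available = core::cmp::min(32, input.len() - offset);
        word[..available].copy_from_slice(&input[offset..offset + available]);
    }
    // Lengths that do not fit in a u64 could never be paid for anyway.
    if word[..24].iter().any(|b| *b != 0) {
        return None;
    }
    usize::try_from(u64::from_be_bytes(word[24..].try_into().unwrap())).ok()
}

fn parse_header(input: &[u8]) -> Option<(usize, usize, usize)> {
    Some((read_len(input, 0)?, read_len(input, 32)?, read_len(input, 64)?))
}

fn main() {
    // base_len = 1, exp_len = 32, mod_len = 1.
    let mut input = vec![0u8; HEADER_LENGTH];
    input[31] = 1;
    input[63] = 32;
    input[95] = 1;
    assert_eq!(parse_header(&input), Some((1, 32, 1)));
}
```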
Ok(PrecompileOutput::new( gas_cost, left_pad_vec(&output, mod_len).into_owned().into(), @@ -118,14 +118,14 @@ where } pub fn byzantium_gas_calc(base_len: u64, exp_len: u64, mod_len: u64, exp_highp: &U256) -> u64 { - // output of this function is bounded by 2^128 + // Output of this function is bounded by 2^128 fn mul_complexity(x: u64) -> U256 { if x <= 64 { U256::from(x * x) } else if x <= 1_024 { U256::from(x * x / 4 + 96 * x - 3_072) } else { - // up-cast to avoid overflow + // Up-cast to avoid overflow let x = U256::from(x); let x_sq = x * x; // x < 2^64 => x*x < 2^128 < 2^256 (no overflow) x_sq / U256::from(16) + U256::from(480) * x - U256::from(199_680) diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index e300f87b4d..0cd459723e 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -16,26 +16,26 @@ mod secp256k1 { use primitives::{alloy_primitives::B512, keccak256, B256}; pub fn ecrecover(sig: &B512, mut recid: u8, msg: &B256) -> Result { - // parse signature + // Parse signature let mut sig = Signature::from_slice(sig.as_slice())?; - // normalize signature and flip recovery id if needed. + // Normalize signature and flip recovery id if needed. if let Some(sig_normalized) = sig.normalize_s() { sig = sig_normalized; recid ^= 1; } let recid = RecoveryId::from_byte(recid).expect("recovery ID is valid"); - // recover key + // Recover key let recovered_key = VerifyingKey::recover_from_prehash(&msg[..], &sig, recid)?; - // hash it + // Hash it let mut hash = keccak256( &recovered_key .to_encoded_point(/* compress = */ false) .as_bytes()[1..], ); - // truncate to 20 bytes + // Truncate to 20 bytes hash[..12].fill(0); Ok(hash) } diff --git a/crates/precompile/src/secp256r1.rs b/crates/precompile/src/secp256r1.rs index 6f6bb84f96..ed3a38e78b 100644 --- a/crates/precompile/src/secp256r1.rs +++ b/crates/precompile/src/secp256r1.rs @@ -59,7 +59,7 @@ pub fn verify_impl(input: &[u8]) -> Option<()> { // x, y: public key let pk = &input[96..160]; - // prepend 0x04 to the public key: uncompressed form + // Prepend 0x04 to the public key: uncompressed form let mut uncompressed_pk = [0u8; 65]; uncompressed_pk[0] = 0x04; uncompressed_pk[1..].copy_from_slice(pk); @@ -80,7 +80,7 @@ mod test { use rstest::rstest; #[rstest] - // test vectors from https://github.com/daimo-eth/p256-verifier/tree/master/test-vectors + // Test vectors from https://github.com/daimo-eth/p256-verifier/tree/master/test-vectors #[case::ok_1("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e", true)] #[case::ok_2("3fec5769b5cf4e310a7d150508e82fb8e3eda1c2c94c61492d3bd8aea99e06c9e22466e928fdccef0de49e3503d2657d00494a00e764fd437bdafa05f5922b1fbbb77c6817ccf50748419477e843d5bac67e6a70e97dde5a57e0c983b777e1ad31a80482dadf89de6302b1988c82c29544c9c07bb910596158f6062517eb089a2f54c9a0f348752950094d3228d3b940258c75fe2a413cb70baa21dc2e352fc5", true)] 
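The P-256 verification hunk above slices a fixed 160-byte input: a 32-byte message hash, 32-byte `r`, 32-byte `s`, then the 64-byte public key that receives the `0x04` SEC1 prefix. A small sketch of that split; only the public-key slice is visible in the hunk, so the layout of the first 96 bytes is assumed from the RIP-7212 specification:

```rust
// Illustrative split of the 160-byte P256VERIFY input.
fn split_p256_input(input: &[u8]) -> Option<([u8; 32], [u8; 64], [u8; 65])> {
    if input.len() != 160 {
        return None;
    }
    // Message hash to be verified.
    let msg: [u8; 32] = input[..32].try_into().ok()?;
    // r || s, passed on as the raw signature bytes.
    let sig: [u8; 64] = input[32..96].try_into().ok()?;
    // Uncompressed SEC1 key: 0x04 prefix followed by x || y.
    let mut uncompressed_pk = [0u8; 65];
    uncompressed_pk[0] = 0x04;
    uncompressed_pk[1..].copy_from_slice(&input[96..160]);
    Some((msg, sig, uncompressed_pk))
}

fn main() {
    let input = [0u8; 160];
    let (msg, sig, pk) = split_p256_input(&input).unwrap();
    assert_eq!((msg.len(), sig.len(), pk[0]), (32, 64, 0x04));
}
```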
#[case::ok_3("e775723953ead4a90411a02908fd1a629db584bc600664c609061f221ef6bf7c440066c8626b49daaa7bf2bcc0b74be4f7a1e3dcf0e869f1542fe821498cbf2de73ad398194129f635de4424a07ca715838aefe8fe69d1a391cfa70470795a80dd056866e6e1125aff94413921880c437c9e2570a28ced7267c8beef7e9b2d8d1547d76dfcf4bee592f5fefe10ddfb6aeb0991c5b9dbbee6ec80d11b17c0eb1a", true)] diff --git a/crates/precompile/src/utilities.rs b/crates/precompile/src/utilities.rs index c850bfe9bd..779022418b 100644 --- a/crates/precompile/src/utilities.rs +++ b/crates/precompile/src/utilities.rs @@ -73,7 +73,7 @@ pub fn left_pad_vec(data: &[u8], len: usize) -> Cow<'_, [u8]> { } } -/// Converts a boolean to a left-padded 32-byte `Bytes` value. +/// Converts a boolean to a left-padded 32-byte [`Bytes`] value. /// /// This is optimized to not allocate at runtime by using 2 static arrays. #[inline] @@ -81,7 +81,7 @@ pub const fn bool_to_bytes32(value: bool) -> Bytes { Bytes::from_static(&bool_to_b256(value).0) } -/// Converts a boolean to a left-padded `B256` value. +/// Converts a boolean to a left-padded [`B256`] value. /// /// This is optimized to not allocate at runtime by using 2 static arrays. #[inline] diff --git a/crates/primitives/src/constants.rs b/crates/primitives/src/constants.rs index 05170261aa..f98b0dbeca 100644 --- a/crates/primitives/src/constants.rs +++ b/crates/primitives/src/constants.rs @@ -1,6 +1,6 @@ use alloy_primitives::{address, Address}; -/// Number of block hashes that EVM can access in the past (pre-Prague). +/// Number of block hashes that EVM can access in the past (pre-Prague) pub const BLOCK_HASH_HISTORY: u64 = 256; /// EIP-2935: Serve historical block hashes from state @@ -8,7 +8,6 @@ pub const BLOCK_HASH_HISTORY: u64 = 256; /// Number of block hashes the EVM can access in the past (Prague). /// /// # Note -/// /// This is named `HISTORY_SERVE_WINDOW` in the EIP. pub const BLOCKHASH_SERVE_WINDOW: usize = 8192; @@ -17,10 +16,9 @@ pub const BLOCKHASH_SERVE_WINDOW: usize = 8192; /// The address where historical blockhashes are available. /// /// # Note -/// /// This is named `HISTORY_STORAGE_ADDRESS` in the EIP. pub const BLOCKHASH_STORAGE_ADDRESS: Address = address!("25a219378dad9b3503c8268c9ca836a52427a4fb"); -/// The address of precompile 3, which is handled specially in a few places. +/// The address of precompile 3, which is handled specially in a few places pub const PRECOMPILE3: Address = Address::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]); diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index af76b91060..b5139ff74b 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -36,7 +36,7 @@ handler.workspace = true handler-interface.workspace = true # Optional -# TODO check if needed. +# TODO : Check if needed. # serde = { version = "1.0", default-features = false, features = [ # "derive", # "rc", diff --git a/crates/revm/src/evm.rs b/crates/revm/src/evm.rs index ab51a204b6..423fa75c11 100644 --- a/crates/revm/src/evm.rs +++ b/crates/revm/src/evm.rs @@ -245,23 +245,23 @@ where let context = &mut self.context; let pre_exec = self.handler.pre_execution(); - // load access list and beneficiary if needed. + // Load access list and beneficiary if needed. pre_exec.load_accounts(context)?; - // deduce caller balance with its limit. + // Deduce caller balance with its limit. pre_exec.deduct_caller(context)?; let gas_limit = context.tx().common_fields().gas_limit() - initial_gas_spend; - // apply EIP-7702 auth list. + // Apply EIP-7702 auth list. 
let eip7702_gas_refund = pre_exec.apply_eip7702_auth_list(context)? as i64; - // start execution + // Start execution //let instructions = self.handler.take_instruction_table(); let exec = self.handler.execution(); - // create first frame action + // Create first frame action let first_frame = exec.init_first_frame(context, gas_limit)?; let frame_result = match first_frame { FrameOrResultGen::Frame(frame) => exec.run(context, frame)?, @@ -271,7 +271,7 @@ where let mut exec_result = exec.last_frame_result(context, frame_result)?; let post_exec = self.handler.post_execution(); - // calculate final refund and add EIP-7702 refund to gas. + // Calculate final refund and add EIP-7702 refund to gas. post_exec.refund(context, &mut exec_result, eip7702_gas_refund); // Reimburse the caller post_exec.reimburse_caller(context, &mut exec_result)?; @@ -386,7 +386,7 @@ mod tests { let mut tx2 = TxEnv::default(); tx2.tx_type = TransactionType::Legacy; - // nonce was bumped from 0 to 1 + // `nonce` was bumped from 0 to 1 tx2.nonce = 1; let mut evm = EvmBuilder::new_with( diff --git a/crates/specification/src/constants.rs b/crates/specification/src/constants.rs index 31cf337c40..ae5b9c9e4b 100644 --- a/crates/specification/src/constants.rs +++ b/crates/specification/src/constants.rs @@ -1,4 +1,4 @@ -/// EVM interpreter stack limit. +/// EVM interpreter stack limit pub const STACK_LIMIT: usize = 1024; /// EIP-170: Contract code size limit @@ -11,5 +11,5 @@ pub const MAX_CODE_SIZE: usize = 0x6000; /// Limit of maximum initcode size is `2 * MAX_CODE_SIZE`. pub const MAX_INITCODE_SIZE: usize = 2 * MAX_CODE_SIZE; -/// EVM call stack limit. +/// EVM call stack limit pub const CALL_STACK_LIMIT: u64 = 1024; diff --git a/crates/specification/src/eip170.rs b/crates/specification/src/eip170.rs index 46fa7f3b58..486150ec09 100644 --- a/crates/specification/src/eip170.rs +++ b/crates/specification/src/eip170.rs @@ -2,5 +2,5 @@ /// EIP-170: Contract code size limit /// -/// By default the limit is `0x6000` (~25kb) +/// By default the limit is `0x6000` (~25kb). pub const MAX_CODE_SIZE: usize = 0x6000; diff --git a/crates/specification/src/eip2.rs b/crates/specification/src/eip2.rs index 79f3b13c40..a3af56ad62 100644 --- a/crates/specification/src/eip2.rs +++ b/crates/specification/src/eip2.rs @@ -1,7 +1,8 @@ use primitives::U256; -/// The order of the secp256k1 curve, divided by two. Signatures that should be checked according -/// to EIP-2 should have an S value less than or equal to this. +/// The order of the secp256k1 curve, divided by two. +/// +/// Signatures that should be checked according to EIP-2 should have an S value less than or equal to this: /// /// `57896044618658097711785492504343953926418782139537452191302581570759080747168` pub const SECP256K1N_HALF: U256 = U256::from_be_bytes([ diff --git a/crates/specification/src/eip4844.rs b/crates/specification/src/eip4844.rs index 9bab7e21dc..6ad46e67f3 100644 --- a/crates/specification/src/eip4844.rs +++ b/crates/specification/src/eip4844.rs @@ -1,25 +1,25 @@ -// === EIP-4844 constants === +//! EIP-4844 constants -/// Gas consumption of a single data blob (== blob byte size). +/// Gas consumption of a single data blob (== blob byte size) pub const GAS_PER_BLOB: u64 = 1 << 17; -/// Target number of the blob per block. 
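The EIP-4844 constants being retouched here feed the blob base-fee rule, `fake_exponential(MIN_BLOB_GASPRICE, excess_blob_gas, BLOB_GASPRICE_UPDATE_FRACTION)` in the EIP's pseudocode. A sketch of that calculation; using `u128` is an assumption that is comfortable for realistic excess values, while consensus code tends to use wider or checked arithmetic:

```rust
// Blob base fee per EIP-4844's fake_exponential pseudocode.
const MIN_BLOB_GASPRICE: u128 = 1;
const BLOB_GASPRICE_UPDATE_FRACTION: u128 = 3_338_477;

/// Approximates `factor * e^(numerator / denominator)` with integer arithmetic.
fn fake_exponential(factor: u128, numerator: u128, denominator: u128) -> u128 {
    let (mut i, mut output) = (1u128, 0u128);
    let mut numerator_accum = factor * denominator;
    while numerator_accum > 0 {
        output += numerator_accum;
        numerator_accum = (numerator_accum * numerator) / (denominator * i);
        i += 1;
    }
    output / denominator
}

fn blob_gasprice(excess_blob_gas: u64) -> u128 {
    fake_exponential(
        MIN_BLOB_GASPRICE,
        excess_blob_gas as u128,
        BLOB_GASPRICE_UPDATE_FRACTION,
    )
}

fn main() {
    // With no excess blob gas the price sits at the one-wei minimum.
    assert_eq!(blob_gasprice(0), 1);
    println!("price at 10x the update fraction: {}", blob_gasprice(10 * 3_338_477));
}
```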
+/// Target number of the blob per block pub const TARGET_BLOB_NUMBER_PER_BLOCK: u64 = 3; /// Max number of blobs per block pub const MAX_BLOB_NUMBER_PER_BLOCK: u64 = 2 * TARGET_BLOB_NUMBER_PER_BLOCK; -/// Maximum consumable blob gas for data blobs per block. +/// Maximum consumable blob gas for data blobs per block pub const MAX_BLOB_GAS_PER_BLOCK: u64 = MAX_BLOB_NUMBER_PER_BLOCK * GAS_PER_BLOB; -/// Target consumable blob gas for data blobs per block (for 1559-like pricing). +/// Target consumable blob gas for data blobs per block (for 1559-like pricing) pub const TARGET_BLOB_GAS_PER_BLOCK: u64 = TARGET_BLOB_NUMBER_PER_BLOCK * GAS_PER_BLOB; -/// Minimum gas price for data blobs. +/// Minimum gas price for data blobs pub const MIN_BLOB_GASPRICE: u64 = 1; -/// Controls the maximum rate of change for blob gas price. +/// Controls the maximum rate of change for blob gas price pub const BLOB_GASPRICE_UPDATE_FRACTION: u64 = 3338477; -/// First version of the blob. +/// First version of the blob pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; diff --git a/crates/specification/src/eip7702/authorization_list.rs b/crates/specification/src/eip7702/authorization_list.rs index 0caa02ab24..668aa00f47 100644 --- a/crates/specification/src/eip7702/authorization_list.rs +++ b/crates/specification/src/eip7702/authorization_list.rs @@ -4,7 +4,7 @@ pub use alloy_primitives::{Parity, Signature}; use core::fmt; use std::{boxed::Box, vec::Vec}; -/// Authorization list for EIP-7702 transaction type. +/// Authorization list for EIP-7702 transaction type #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum AuthorizationList { @@ -39,7 +39,7 @@ impl AuthorizationList { } } - /// Return empty authorization list. + /// Returns empty authorization list. pub fn empty() -> Self { Self::Recovered(Vec::new()) } diff --git a/crates/specification/src/eip7702/constants.rs b/crates/specification/src/eip7702/constants.rs index 626b70dbbd..a129f76732 100644 --- a/crates/specification/src/eip7702/constants.rs +++ b/crates/specification/src/eip7702/constants.rs @@ -1,7 +1,7 @@ -//! EIP-7702 constants. +//! EIP-7702 constants -// Base cost of updating authorized account. +/// Base cost of updating authorized account pub const PER_AUTH_BASE_COST: u64 = 2500; -/// Cost of creating authorized account that was previously empty. +/// Cost of creating authorized account that was previously empty pub const PER_EMPTY_ACCOUNT_COST: u64 = 25000; diff --git a/crates/specification/src/eip7702/recovered_authorization.rs b/crates/specification/src/eip7702/recovered_authorization.rs index 41f3d401fc..e5b9a5aa55 100644 --- a/crates/specification/src/eip7702/recovered_authorization.rs +++ b/crates/specification/src/eip7702/recovered_authorization.rs @@ -2,7 +2,7 @@ use crate::eip7702::{Authorization, SignedAuthorization}; use core::ops::Deref; use primitives::Address; -/// A recovered authorization. +/// A recovered authorization #[derive(Debug, Clone, Hash, Eq, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct RecoveredAuthorization { @@ -12,7 +12,9 @@ pub struct RecoveredAuthorization { } impl RecoveredAuthorization { - /// Instantiate without performing recovery. This should be used carefully. + /// Instantiates without performing recovery. + /// + /// This should be used carefully. pub const fn new_unchecked(inner: SignedAuthorization, authority: Option
) -> Self { Self { inner, authority } } @@ -24,7 +26,7 @@ impl RecoveredAuthorization { /// Get the `authority` for the authorization. /// - /// If this is `None`, then the authority could not be recovered. + /// If this is [`None`], then the authority could not be recovered. pub const fn authority(&self) -> Option
{ self.authority } diff --git a/crates/specification/src/hardfork.rs b/crates/specification/src/hardfork.rs index f7d58a4c1b..42748c21ac 100644 --- a/crates/specification/src/hardfork.rs +++ b/crates/specification/src/hardfork.rs @@ -3,9 +3,9 @@ pub use std::string::{String, ToString}; pub use SpecId::*; -/// Specification IDs and their activation block. +/// Specification IDs and their activation block /// -/// Information was obtained from the [Ethereum Execution Specifications](https://github.com/ethereum/execution-specs) +/// Information was obtained from the [Ethereum Execution Specifications](https://github.com/ethereum/execution-specs). #[repr(u8)] #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, enumn::N)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -35,7 +35,7 @@ pub enum SpecId { } impl SpecId { - /// Returns the `SpecId` for the given `u8`. + /// Returns the [`SpecId`] for the given [`u8`]. #[inline] pub fn try_from_u8(spec_id: u8) -> Option { Self::n(spec_id) diff --git a/crates/state/src/account_info.rs b/crates/state/src/account_info.rs index 18c2824415..224c84e177 100644 --- a/crates/state/src/account_info.rs +++ b/crates/state/src/account_info.rs @@ -2,17 +2,19 @@ use bytecode::Bytecode; use core::hash::{Hash, Hasher}; use primitives::{B256, KECCAK_EMPTY, U256}; -/// AccountInfo account information. +/// AccountInfo account information #[derive(Clone, Debug, Eq, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct AccountInfo { - /// Account balance. + /// Account balance pub balance: U256, - /// Account nonce. + /// Account nonce pub nonce: u64, - /// code hash, + /// code hash pub code_hash: B256, - /// code: if None, `code_by_hash` will be used to fetch it if code needs to be loaded from + /// [`Bytecode`] data associated with this account + /// + /// If [None], `code_hash` will be used to fetch it if code needs to be loaded from /// inside `revm`. pub code: Option, } @@ -56,15 +58,17 @@ impl AccountInfo { } } - /// Returns a copy of this account with the [`Bytecode`] removed. This is - /// useful when creating journals or snapshots of the state, where it is + /// Returns a copy of this account with the [`Bytecode`] removed. + /// + /// This is useful when creating journals or snapshots of the state, where it is /// desirable to store the code blobs elsewhere. /// /// ## Note /// - /// This is distinct from [`AccountInfo::without_code`] in that it returns - /// a new `AccountInfo` instance with the code removed. - /// [`AccountInfo::without_code`] will modify and return the same instance. + /// This is distinct from [`without_code`][Self::without_code] in that it returns + /// a new [`AccountInfo`] instance with the code removed. + /// + /// [`without_code`][Self::without_code] will modify and return the same instance. #[inline] pub fn copy_without_code(&self) -> Self { Self { @@ -75,15 +79,18 @@ impl AccountInfo { } } - /// Strip the [`Bytecode`] from this account and drop it. This is - /// useful when creating journals or snapshots of the state, where it is + /// Strips the [`Bytecode`] from this account and drop it. + /// + /// This is useful when creating journals or snapshots of the state, where it is /// desirable to store the code blobs elsewhere. /// /// ## Note /// - /// This is distinct from [`AccountInfo::copy_without_code`] in that it - /// modifies the account in place. 
[`AccountInfo::copy_without_code`] - /// will copy the non-code fields and return a new `AccountInfo` instance. + /// This is distinct from [`copy_without_code`][Self::copy_without_code] in that it + /// modifies the account in place. + /// + /// [`copy_without_code`][Self::copy_without_code] + /// will copy the non-code fields and return a new [`AccountInfo`] instance. pub fn without_code(mut self) -> Self { self.take_bytecode(); self @@ -113,7 +120,8 @@ impl AccountInfo { self.is_empty_code_hash() && self.nonce == 0 } - /// Return bytecode hash associated with this account. + /// Returns bytecode hash associated with this account. + /// /// If account does not have code, it returns `KECCAK_EMPTY` hash. #[inline] pub fn code_hash(&self) -> B256 { @@ -126,13 +134,15 @@ impl AccountInfo { self.code_hash == KECCAK_EMPTY } - /// Take bytecode from account. Code will be set to None. + /// Takes bytecode from account. + /// + /// Code will be set to [None]. #[inline] pub fn take_bytecode(&mut self) -> Option { self.code.take() } - /// Initialize an [`AccountInfo`] with the given balance, setting all other fields to their + /// Initializes an [`AccountInfo`] with the given balance, setting all other fields to their /// default values. #[inline] pub fn from_balance(balance: U256) -> Self { @@ -142,7 +152,7 @@ impl AccountInfo { } } - /// Initialize an [`AccountInfo`] with the given bytecode, setting its balance to zero, its + /// Initializes an [`AccountInfo`] with the given bytecode, setting its balance to zero, its /// nonce to `1`, and calculating the code hash from the given bytecode. #[inline] pub fn from_bytecode(bytecode: Bytecode) -> Self { diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index e22d22fabd..6033d52c58 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -19,16 +19,16 @@ use specification::hardfork::SpecId; #[derive(Debug, Clone, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Account { - /// Balance, nonce, and code. + /// Balance, nonce, and code pub info: AccountInfo, /// Storage cache pub storage: EvmStorage, - /// Account status flags. + /// Account status flags pub status: AccountStatus, } impl Account { - /// Create new account and mark it as non existing. + /// Creates new account and mark it as non existing. pub fn new_not_existing() -> Self { Self { info: AccountInfo::default(), @@ -37,7 +37,7 @@ impl Account { } } - /// Check if account is empty and check if empty state before spurious dragon hardfork. + /// Checks if account is empty and check if empty state before spurious dragon hardfork. #[inline] pub fn state_clear_aware_is_empty(&self, spec: SpecId) -> bool { if SpecId::is_enabled_in(spec, SpecId::SPURIOUS_DRAGON) { @@ -49,12 +49,12 @@ impl Account { } } - /// Mark account as self destructed. + /// Marks the account as self destructed. pub fn mark_selfdestruct(&mut self) { self.status |= AccountStatus::SelfDestructed; } - /// Unmark account as self destructed. + /// Unmarks the account as self destructed. pub fn unmark_selfdestruct(&mut self) { self.status -= AccountStatus::SelfDestructed; } @@ -64,12 +64,12 @@ impl Account { self.status.contains(AccountStatus::SelfDestructed) } - /// Mark account as touched + /// Marks the account as touched pub fn mark_touch(&mut self) { self.status |= AccountStatus::Touched; } - /// Unmark the touch flag. + /// Unmarks the touch flag. 
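The `copy_without_code`/`without_code` docs above are easiest to see side by side: one clones and strips, the other consumes and strips, and both keep the code hash so the bytecode can be reloaded later. A hypothetical usage sketch, assuming the in-workspace crate names (`state`, `bytecode`); the paths differ when depending on the top-level `revm` crate:

```rust
// Hedged usage sketch of the two code-stripping helpers documented above.
use bytecode::Bytecode;
use state::AccountInfo;

fn main() {
    let info = AccountInfo::from_bytecode(Bytecode::new());

    // `copy_without_code` leaves `info` untouched and returns a stripped clone.
    let copy = info.copy_without_code();
    assert!(copy.code.is_none());
    assert!(info.code.is_some());

    // `without_code` consumes the account and strips the code in place.
    let stripped = info.without_code();
    assert!(stripped.code.is_none());

    // The code hash is preserved either way, so the bytecode can still be
    // looked up elsewhere, e.g. through a database's code-by-hash query.
    assert_eq!(copy.code_hash, stripped.code_hash);
}
```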
pub fn unmark_touch(&mut self) { self.status -= AccountStatus::Touched; } @@ -79,22 +79,22 @@ impl Account { self.status.contains(AccountStatus::Touched) } - /// Mark account as newly created. + /// Marks the account as newly created. pub fn mark_created(&mut self) { self.status |= AccountStatus::Created; } - /// Unmark created flag. + /// Unmarks the created flag. pub fn unmark_created(&mut self) { self.status -= AccountStatus::Created; } - /// Mark account as cold. + /// Marks the account as cold. pub fn mark_cold(&mut self) { self.status |= AccountStatus::Cold; } - /// Mark account as warm and return true if it was previously cold. + /// Marks the account as warm and return true if it was previously cold. pub fn mark_warm(&mut self) -> bool { if self.status.contains(AccountStatus::Cold) { self.status -= AccountStatus::Cold; @@ -104,7 +104,8 @@ impl Account { } } - /// Is account loaded as not existing from database + /// Is account loaded as not existing from database. + /// /// This is needed for pre spurious dragon hardforks where /// existing and empty were two separate states. pub fn is_loaded_as_not_existing(&self) -> bool { @@ -123,7 +124,7 @@ impl Account { /// Returns an iterator over the storage slots that have been changed. /// - /// See also [EvmStorageSlot::is_changed] + /// See also [EvmStorageSlot::is_changed]. pub fn changed_storage_slots(&self) -> impl Iterator { self.storage.iter().filter(|(_, slot)| slot.is_changed()) } @@ -173,11 +174,11 @@ impl Default for AccountStatus { #[derive(Debug, Clone, Default, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct EvmStorageSlot { - /// Original value of the storage slot. + /// Original value of the storage slot pub original_value: U256, - /// Present value of the storage slot. + /// Present value of the storage slot pub present_value: U256, - /// Represents if the storage slot is cold. + /// Represents if the storage slot is cold pub is_cold: bool, } @@ -199,7 +200,7 @@ impl EvmStorageSlot { is_cold: false, } } - /// Returns true if the present value differs from the original value + /// Returns true if the present value differs from the original value. pub fn is_changed(&self) -> bool { self.original_value != self.present_value } diff --git a/crates/state/src/types.rs b/crates/state/src/types.rs index 66a29b84e6..5eb0a67b59 100644 --- a/crates/state/src/types.rs +++ b/crates/state/src/types.rs @@ -4,7 +4,7 @@ use primitives::{Address, HashMap, U256}; /// EVM State is a mapping from addresses to accounts. pub type EvmState = HashMap; -/// Structure used for EIP-1153 transient storage. +/// Structure used for EIP-1153 transient storage pub type TransientStorage = HashMap<(Address, U256), U256>; /// An account's Storage is a mapping from 256-bit integer keys to [EvmStorageSlot]s. diff --git a/crates/statetest-types/src/account_info.rs b/crates/statetest-types/src/account_info.rs index 9ed9f78a0d..c102b03329 100644 --- a/crates/statetest-types/src/account_info.rs +++ b/crates/statetest-types/src/account_info.rs @@ -3,7 +3,7 @@ use serde::Deserialize; use crate::deserializer::deserialize_str_as_u64; -/// Account information. 
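`mark_warm` above doubles as access-list bookkeeping: it reports whether the account was cold at the moment of the call, which is exactly the signal warm/cold gas pricing needs. A short usage sketch, again assuming the in-workspace `state` crate name:

```rust
// Hedged usage sketch of the warm/cold status flags documented above.
use state::Account;

fn main() {
    let mut account = Account::new_not_existing();
    account.mark_cold();

    assert!(account.mark_warm());  // was cold, so the first call reports true
    assert!(!account.mark_warm()); // already warm, so subsequent calls report false
}
```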
+/// Account information #[derive(Clone, Debug, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct AccountInfo { diff --git a/crates/statetest-types/src/deserializer.rs b/crates/statetest-types/src/deserializer.rs index 622b3d6ce3..8ba97e928e 100644 --- a/crates/statetest-types/src/deserializer.rs +++ b/crates/statetest-types/src/deserializer.rs @@ -1,7 +1,7 @@ use revm::primitives::Address; use serde::{de, Deserialize}; -/// Deserialize a string as a u64. +/// Deserializes a [string][String] as a [u64]. pub fn deserialize_str_as_u64<'de, D>(deserializer: D) -> Result where D: de::Deserializer<'de>, @@ -16,7 +16,7 @@ where .map_err(serde::de::Error::custom) } -/// Deserialize a string as an optional Address. +/// Deserializes a [string][String] as an optional [Address]. pub fn deserialize_maybe_empty<'de, D>(deserializer: D) -> Result, D::Error> where D: de::Deserializer<'de>, diff --git a/crates/statetest-types/src/env.rs b/crates/statetest-types/src/env.rs index b510357f19..6e39f77c8a 100644 --- a/crates/statetest-types/src/env.rs +++ b/crates/statetest-types/src/env.rs @@ -1,7 +1,7 @@ use revm::primitives::{Address, B256, U256}; use serde::Deserialize; -/// Environment variables. +/// Environment variables #[derive(Debug, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct Env { diff --git a/crates/statetest-types/src/spec.rs b/crates/statetest-types/src/spec.rs index fd24c54167..fa905e5d2c 100644 --- a/crates/statetest-types/src/spec.rs +++ b/crates/statetest-types/src/spec.rs @@ -1,7 +1,7 @@ use revm::specification::hardfork::SpecId; use serde::Deserialize; -/// Ethereum specification names. +/// Ethereum specification names #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Hash)] pub enum SpecName { Frontier, @@ -32,7 +32,7 @@ pub enum SpecName { } impl SpecName { - /// Convert to a spec id. + /// Converts to a [SpecId]. pub fn to_spec_id(&self) -> SpecId { match self { Self::Frontier => SpecId::FRONTIER, diff --git a/crates/statetest-types/src/test_authorization.rs b/crates/statetest-types/src/test_authorization.rs index c003503ebb..ef9649c129 100644 --- a/crates/statetest-types/src/test_authorization.rs +++ b/crates/statetest-types/src/test_authorization.rs @@ -4,7 +4,7 @@ use revm::{ }; use serde::{Deserialize, Serialize}; -/// Test authorization. +/// Struct for test authorization #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, Copy)] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct TestAuthorization { @@ -18,14 +18,14 @@ pub struct TestAuthorization { } impl TestAuthorization { - /// Get the signature using the v, r, s values. + /// Gets the signature using the `v`, `r`, `s` values. pub fn signature(&self) -> Signature { let v = u64::try_from(self.v).unwrap_or(u64::MAX); let parity = Parity::try_from(v).unwrap_or(Parity::Eip155(36)); Signature::from_rs_and_parity(self.r, self.s, parity).unwrap() } - /// Convert to a recovered authorization. + /// Converts to a recovered authorization. pub fn into_recovered(self) -> RecoveredAuthorization { let authorization = Authorization { chain_id: self.chain_id, diff --git a/crates/statetest-types/src/test_suite.rs b/crates/statetest-types/src/test_suite.rs index 79c3d72da1..1065d2e526 100644 --- a/crates/statetest-types/src/test_suite.rs +++ b/crates/statetest-types/src/test_suite.rs @@ -3,6 +3,6 @@ use std::collections::BTreeMap; use crate::TestUnit; -/// The top level test suite. 
+/// The top level test suite struct #[derive(Debug, PartialEq, Eq, Deserialize)] pub struct TestSuite(pub BTreeMap); diff --git a/crates/statetest-types/src/test_unit.rs b/crates/statetest-types/src/test_unit.rs index 76309258c4..6bb7b2e0c8 100644 --- a/crates/statetest-types/src/test_unit.rs +++ b/crates/statetest-types/src/test_unit.rs @@ -4,11 +4,11 @@ use std::collections::{BTreeMap, HashMap}; use crate::{AccountInfo, Env, SpecName, Test, TransactionParts}; use revm::primitives::{Address, Bytes}; -/// A single test unit. +/// Single test unit struct #[derive(Debug, PartialEq, Eq, Deserialize)] #[serde(deny_unknown_fields)] pub struct TestUnit { - /// Test info is optional + /// Test info is optional. #[serde(default, rename = "_info")] pub info: Option, diff --git a/crates/statetest-types/src/transaction.rs b/crates/statetest-types/src/transaction.rs index 241f12a26b..da5006a797 100644 --- a/crates/statetest-types/src/transaction.rs +++ b/crates/statetest-types/src/transaction.rs @@ -44,7 +44,7 @@ impl TransactionParts { pub fn tx_type(&self, access_list_index: usize) -> Option { let mut tx_type = TransactionType::Legacy; - // if it has access list it is EIP-2930 tx + // If it has access list it is EIP-2930 tx if let Some(access_list) = self.access_lists.get(access_list_index) { if access_list.is_some() { tx_type = TransactionType::Eip2930; @@ -56,16 +56,16 @@ impl TransactionParts { tx_type = TransactionType::Eip1559; } - // if it has max_fee_per_blob_gas it is EIP-4844 tx + // If it has max_fee_per_blob_gas it is EIP-4844 tx if self.max_fee_per_blob_gas.is_some() { // target need to be present for EIP-4844 tx self.to?; tx_type = TransactionType::Eip4844; } - // and if it has authorization list it is EIP-7702 tx + // And if it has authorization list it is EIP-7702 tx if self.authorization_list.is_some() { - // target need to be present for EIP-7702 tx + // Target need to be present for EIP-7702 tx self.to?; tx_type = TransactionType::Eip7702; } diff --git a/documentation/src/crates/primitives/bits.md b/documentation/src/crates/primitives/bits.md index 6378c0730a..1bf623d867 100644 --- a/documentation/src/crates/primitives/bits.md +++ b/documentation/src/crates/primitives/bits.md @@ -1,12 +1,12 @@ # Bits -> NOTE: This module's types have been replaced by [`alloy_primitives`](https://github.com/alloy-rs/core)'s `Address` and `FixedBytes`. +> Note: This module's types have been replaced by [`alloy_primitives`](https://github.com/alloy-rs/core)'s `Address` and `FixedBytes`. This module houses the definitions for fixed-size bit arrays, `Address` and `B256`, showcasing its role in managing bits-related operations, to represent 256-bit and 160-bit fixed-size hashes respectively. These are defined using the `construct_fixed_hash!` macro from the `fixed_hash` crate. The `AsRef` and `Deref` traits from `derive_more` crate are derived for both of these structures, providing convenient methods for converting these types to and from references of their underlying data. -The `Arbitrary` trait from the `arbitrary` crate and the `PropTestArbitrary` trait from `proptest_derive` crate are derived conditionally when either testing or the "arbitrary" feature is enabled. +The `Arbitrary` trait from the `arbitrary` crate and the `PropTestArbitrary` trait from `proptest_derive` crate are derived conditionally when either testing or the "arbitrary" feature is enabled. 
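The `tx_type` hunks above apply a precedence chain: start from legacy, then upgrade on an access list, dynamic fees, blob fees, and finally an authorization list, with the last two requiring a `to` address. A standalone sketch of that precedence with illustrative field names (not revm's types):

```rust
// Illustrative transaction-type detection mirroring the precedence described above.
#[derive(Debug, PartialEq)]
enum TxType { Legacy, Eip2930, Eip1559, Eip4844, Eip7702 }

struct TxFields {
    has_access_list: bool,
    has_max_fee_per_gas: bool,
    has_max_fee_per_blob_gas: bool,
    has_authorization_list: bool,
    has_to: bool,
}

fn tx_type(tx: &TxFields) -> Option<TxType> {
    let mut ty = TxType::Legacy;
    if tx.has_access_list {
        ty = TxType::Eip2930;
    }
    if tx.has_max_fee_per_gas {
        ty = TxType::Eip1559;
    }
    if tx.has_max_fee_per_blob_gas {
        // Blob transactions must have a target address.
        if !tx.has_to {
            return None;
        }
        ty = TxType::Eip4844;
    }
    if tx.has_authorization_list {
        // EIP-7702 transactions must have a target address as well.
        if !tx.has_to {
            return None;
        }
        ty = TxType::Eip7702;
    }
    Some(ty)
}

fn main() {
    let tx = TxFields {
        has_access_list: true,
        has_max_fee_per_gas: true,
        has_max_fee_per_blob_gas: false,
        has_authorization_list: false,
        has_to: true,
    };
    assert_eq!(tx_type(&tx), Some(TxType::Eip1559));
}
```

In the real implementation the same early exit is expressed with `self.to?;`, which returns `None` when the target address is missing.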
The code also provides conversions between `B256`, `Address` and various other types such as `u64`, `primitive_types::H256`, `primitive_types::H160`, `primitive_types::U256`, and `ruint::aliases::U256`. The `impl` From blocks specify how to convert from one type to another. diff --git a/documentation/src/crates/primitives/log.md b/documentation/src/crates/primitives/log.md index accd1be6cc..4bfad958df 100644 --- a/documentation/src/crates/primitives/log.md +++ b/documentation/src/crates/primitives/log.md @@ -1,6 +1,6 @@ # Log -> NOTE: This module's types have been replaced by [`alloy_primitives`](https://github.com/alloy-rs/core)'s `Log` and `LogData`. +> Note: This module's types have been replaced by [`alloy_primitives`](https://github.com/alloy-rs/core)'s `Log` and `LogData`. This piece of Rust code defines a structure called Log which represents an Ethereum log entry. These logs are integral parts of the Ethereum network and are typically produced by smart contracts during execution. Each Log has three components: diff --git a/documentation/src/crates/revm/builder.md b/documentation/src/crates/revm/builder.md index cd9888bd40..da0be950dd 100644 --- a/documentation/src/crates/revm/builder.md +++ b/documentation/src/crates/revm/builder.md @@ -1,4 +1,3 @@ - # Evm Builder The builder creates or modifies the EVM and applies different handlers. @@ -17,7 +16,7 @@ Simple example of using `EvmBuilder`: ```rust,ignore use crate::evm::Evm; - // build Evm with default values. + // Build Evm with default values. let mut evm = Evm::builder().build(); let output = evm.transact(); ``` @@ -25,28 +24,33 @@ Simple example of using `EvmBuilder`: ## Builder Stages There are two builder stages that are used to mitigate potential misuse of the builder: - * `SetGenericStage`: Initial stage that allows setting the database and external context. - * `HandlerStage`: Allows setting the handler registers but is explicit about setting new generic type as it will void the handler registers. + +- `SetGenericStage`: Initial stage that allows setting the database and external context. +- `HandlerStage`: Allows setting the handler registers but is explicit about setting new generic type as it will void the handler registers. Functions from one stage are just renamed functions from other stage, it is made so that user is more aware of what underlying function does. For example, in `SettingDbStage` we have `with_db` function while in `HandlerStage` we have `reset_handler_with_db`, both of them set the database but the latter also resets the handler. There are multiple functions that are common to both stages such as `build`. ### Builder naming conventions + In both stages we have: - * `build` creates the Evm. - * `spec_id` creates new mainnet handler and reapplies all the handler registers. - * `modify_*` functions are used to modify the database, external context or Env. - * `clear_*` functions allows setting default values for Environment. - * `append_handler_register_*` functions are used to push handler registers. - This will transition the builder to the `HandlerStage`. + +- `build` creates the Evm. +- `spec_id` creates new mainnet handler and reapplies all the handler registers. +- `modify_*` functions are used to modify the database, external context or Env. +- `clear_*` functions allows setting default values for Environment. +- `append_handler_register_*` functions are used to push handler registers. + This will transition the builder to the `HandlerStage`. 
In `SetGenericStage` we have: - * `with_*` are found in `SetGenericStage` and are used to set the generics. + +- `with_*` are found in `SetGenericStage` and are used to set the generics. In `HandlerStage` we have: - * `reset_handler_with_*` is used if we want to change some of the generic types this will reset the handler registers. - This will transition the builder to the `SetGenericStage`. + +- `reset_handler_with_*` is used if we want to change some of the generic types this will reset the handler registers. + This will transition the builder to the `SetGenericStage`. # Creating and modification of Evm @@ -57,7 +61,9 @@ Additionally, a function that is very important is `evm.modify()` that allows mo It returns a builder, allowing users to modify the Evm. # Examples + The following example uses the builder to create an `Evm` with inspector: + ```rust,ignore use crate::{ db::EmptyDB, Context, EvmContext, inspector::inspector_handle_register, inspectors::NoOpInspector, Evm, @@ -72,10 +78,10 @@ The following example uses the builder to create an `Evm` with inspector: // .with_db(..) does not compile as we already locked the builder generics, // alternative fn is reset_handler_with_db(..) .build(); - + // Execute the evm. let output = evm.transact(); - + // Extract evm context. let Context { external, @@ -84,6 +90,7 @@ The following example uses the builder to create an `Evm` with inspector: ``` The next example changes the spec id and environment of an already built evm. + ```rust,ignore use crate::{Evm,SpecId::BERLIN}; diff --git a/examples/block_traces/src/main.rs b/examples/block_traces/src/main.rs index 511f510bb4..1be8697258 100644 --- a/examples/block_traces/src/main.rs +++ b/examples/block_traces/src/main.rs @@ -51,7 +51,7 @@ async fn main() -> anyhow::Result<()> { // Set up the HTTP transport which is consumed by the RPC client. let rpc_url = "https://mainnet.infura.io/v3/c60b0bb42f8a4c6481ecd229eddaca27".parse()?; - // create ethers client and wrap it in Arc + // Create ethers client and wrap it in Arc let client = ProviderBuilder::new().on_http(rpc_url); // Params diff --git a/examples/custom_opcodes/src/lib.rs b/examples/custom_opcodes/src/lib.rs index 3ec91d1fa2..f50828688a 100644 --- a/examples/custom_opcodes/src/lib.rs +++ b/examples/custom_opcodes/src/lib.rs @@ -237,7 +237,7 @@ pub fn make_custom_instruction_table< H: Host + ?Sized, SPEC: CustomOpcodeSpec, >() -> InstructionTable { - // custom opcode chain can reuse mainnet instructions + // Custom opcode chain can reuse mainnet instructions let mut table = make_instruction_table::(); table[0x0c] = custom_opcode_handler::; @@ -249,14 +249,14 @@ fn custom_opcode_handler, _host: &mut H, ) { - // opcode has access to the chain-specific spec + // Opcode has access to the chain-specific spec if SPEC::optimism_enabled(CustomOpcodeSpecId::INTRODUCES_OPCODE) { gas!(interpreter, gas::MID); } else { gas!(interpreter, gas::HIGH); } - // logic + // Logic } pub fn main() { diff --git a/examples/database_components/src/block_hash.rs b/examples/database_components/src/block_hash.rs index 9741706d87..dd95c3d172 100644 --- a/examples/database_components/src/block_hash.rs +++ b/examples/database_components/src/block_hash.rs @@ -9,7 +9,7 @@ use std::sync::Arc; pub trait BlockHash { type Error; - /// Get block hash by block number + /// Gets block hash by block number. 
fn block_hash(&mut self, number: u64) -> Result; } @@ -17,7 +17,7 @@ pub trait BlockHash { pub trait BlockHashRef { type Error; - /// Get block hash by block number + /// Gets block hash by block number. fn block_hash(&self, number: u64) -> Result; } diff --git a/examples/database_components/src/state.rs b/examples/database_components/src/state.rs index e295fe9601..0499cce405 100644 --- a/examples/database_components/src/state.rs +++ b/examples/database_components/src/state.rs @@ -12,13 +12,13 @@ use std::sync::Arc; pub trait State { type Error; - /// Get basic account information. + /// Gets basic account information. fn basic(&mut self, address: Address) -> Result, Self::Error>; - /// Get account code by its hash + /// Gets account code by its hash. fn code_by_hash(&mut self, code_hash: B256) -> Result; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage(&mut self, address: Address, index: U256) -> Result; } @@ -26,13 +26,13 @@ pub trait State { pub trait StateRef { type Error; - /// Get basic account information. + /// Gets basic account information. fn basic(&self, address: Address) -> Result, Self::Error>; - /// Get account code by its hash + /// Gets account code by its hash. fn code_by_hash(&self, code_hash: B256) -> Result; - /// Get storage value of address at index. + /// Gets storage value of address at index. fn storage(&self, address: Address, index: U256) -> Result; } diff --git a/examples/uniswap_get_reserves/src/main.rs b/examples/uniswap_get_reserves/src/main.rs index 733c8ec3e0..50344476cd 100644 --- a/examples/uniswap_get_reserves/src/main.rs +++ b/examples/uniswap_get_reserves/src/main.rs @@ -23,7 +23,7 @@ async fn main() -> anyhow::Result<()> { // Set up the HTTP transport which is consumed by the RPC client. let rpc_url = "https://mainnet.infura.io/v3/c60b0bb42f8a4c6481ecd229eddaca27".parse()?; - // create ethers client and wrap it in Arc + // Create ethers client and wrap it in Arc let client = ProviderBuilder::new().on_http(rpc_url); let client = WrapDatabaseAsync::new(AlloyDB::new(client, BlockId::latest())).unwrap(); @@ -39,38 +39,38 @@ async fn main() -> anyhow::Result<()> { // storage[11] = kLast: uint256 // // =========================================================== // - // choose slot of storage that you would like to transact with + // Choose slot of storage that you would like to transact with let slot = U256::from(8); // ETH/USDT pair on Uniswap V2 let pool_address = address!("0d4a11d5EEaaC28EC3F61d100daF4d40471f1852"); - // generate abi for the calldata from the human readable interface + // Generate abi for the calldata from the human readable interface sol! 
{ function getReserves() external view returns (uint112 reserve0, uint112 reserve1, uint32 blockTimestampLast); } - // encode abi into Bytes + // Encode abi into Bytes let encoded = getReservesCall::new(()).abi_encode(); - // query basic properties of an account incl bytecode + // Query basic properties of an account incl bytecode let acc_info = client.basic_ref(pool_address).unwrap().unwrap(); - // query value of storage slot at account address + // Query value of storage slot at account address let value = client.storage_ref(pool_address, slot).unwrap(); - // initialise empty in-memory-db + // Initialise empty in-memory-db let mut cache_db = CacheDB::new(EmptyDB::default()); - // insert basic account info which was generated via Web3DB with the corresponding address + // Insert basic account info which was generated via Web3DB with the corresponding address cache_db.insert_account_info(pool_address, acc_info); - // insert our pre-loaded storage slot to the corresponding contract key (address) in the DB + // Insert our pre-loaded storage slot to the corresponding contract key (address) in the DB cache_db .insert_account_storage(pool_address, slot, value) .unwrap(); - // initialise an empty (default) EVM + // Initialise an empty (default) EVM let mut evm = MainEvm::new( Context::builder() .with_db(cache_db) @@ -88,12 +88,12 @@ async fn main() -> anyhow::Result<()> { EthHandler::default(), ); - // execute transaction without writing to the DB + // Execute transaction without writing to the DB let ref_tx = evm.exec().unwrap(); - // select ExecutionResult struct + // Select ExecutionResult struct let result = ref_tx.result; - // unpack output call enum into raw bytes + // Unpack output call enum into raw bytes let value = match result { ExecutionResult::Success { output: Output::Call(value), @@ -102,7 +102,7 @@ async fn main() -> anyhow::Result<()> { _ => panic!("Execution failed: {result:?}"), }; - // decode bytes to reserves + ts via alloy's abi decode + // Decode bytes to reserves + ts via alloy's abi decode let return_vals = getReservesCall::abi_decode_returns(&value, true)?; // Print emulated getReserves() call output diff --git a/examples/uniswap_v2_usdc_swap/src/main.rs b/examples/uniswap_v2_usdc_swap/src/main.rs index 882fbe9b52..afdcfe0857 100644 --- a/examples/uniswap_v2_usdc_swap/src/main.rs +++ b/examples/uniswap_v2_usdc_swap/src/main.rs @@ -26,7 +26,7 @@ async fn main() -> Result<()> { // Set up the HTTP transport which is consumed by the RPC client. 
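The getReserves example above reads slot 8 because UniswapV2 packs `reserve0` (uint112), `reserve1` (uint112) and `blockTimestampLast` (uint32) into that single word, lowest bits first. A dependency-free sketch of unpacking such a word; the byte offsets follow from that packing and are not taken from revm:

```rust
// Unpacks a 32-byte big-endian storage word laid out as
// [ blockTimestampLast: 4 bytes | reserve1: 14 bytes | reserve0: 14 bytes ].
fn unpack_reserves(word: [u8; 32]) -> (u128, u128, u32) {
    let mut ts = [0u8; 4];
    ts.copy_from_slice(&word[0..4]);

    let mut r1 = [0u8; 16];
    r1[2..].copy_from_slice(&word[4..18]);

    let mut r0 = [0u8; 16];
    r0[2..].copy_from_slice(&word[18..32]);

    (
        u128::from_be_bytes(r0),
        u128::from_be_bytes(r1),
        u32::from_be_bytes(ts),
    )
}

fn main() {
    let mut word = [0u8; 32];
    word[31] = 42; // reserve0 = 42
    word[17] = 7;  // reserve1 = 7
    word[3] = 1;   // blockTimestampLast = 1
    assert_eq!(unpack_reserves(word), (42, 7, 1));
}
```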
let rpc_url = "https://mainnet.infura.io/v3/c60b0bb42f8a4c6481ecd229eddaca27".parse()?; - // create ethers client and wrap it in Arc + // Create ethers client and wrap it in Arc let client = ProviderBuilder::new().on_http(rpc_url); let alloy = WrapDatabaseAsync::new(AlloyDB::new(client, BlockId::latest())).unwrap(); @@ -41,7 +41,7 @@ async fn main() -> Result<()> { let weth_balance_slot = U256::from(3); - // give our test account some fake WETH and ETH + // Give our test account some fake WETH and ETH let one_ether = U256::from(1_000_000_000_000_000_000u128); let hashed_acc_balance_slot = keccak256((account, weth_balance_slot).abi_encode()); cache_db @@ -65,13 +65,13 @@ async fn main() -> Result<()> { let amount_in = one_ether.div(U256::from(10)); - // calculate USDC amount out + // Calculate USDC amount out let amount_out = get_amount_out(amount_in, reserve1, reserve0, &mut cache_db).await?; - // transfer WETH to USDC-WETH pair + // Transfer WETH to USDC-WETH pair transfer(account, usdc_weth_pair, amount_in, weth, &mut cache_db)?; - // execute low-level swap without using UniswapV2 router + // Execute low-level swap without using UniswapV2 router swap( account, usdc_weth_pair,