diff --git a/Cargo.lock b/Cargo.lock index 53489b81a20ba..678f3e4154353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1570,6 +1570,11 @@ dependencies = [ "make-cmd 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "linked-hash-map" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "linked-hash-map" version = "0.5.1" @@ -1600,6 +1605,14 @@ dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "lru-cache" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "make-cmd" version = "0.1.0" @@ -3417,6 +3430,7 @@ dependencies = [ "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)", "kvdb-rocksdb 0.1.4 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4846,10 +4860,12 @@ dependencies = [ "checksum libp2p-websocket 0.1.0 (git+https://github.com/libp2p/rust-libp2p?rev=d961e656a74d1bab5366d371a06f9e10d5f4a6c5)" = "" "checksum libp2p-yamux 0.1.0 (git+https://github.com/libp2p/rust-libp2p?rev=d961e656a74d1bab5366d371a06f9e10d5f4a6c5)" = "" "checksum librocksdb-sys 5.14.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "474d805d72e23a06310fa5201dfe182dc4b80ab1f18bb2823c1ac17ff9dcbaa2" +"checksum linked-hash-map 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7860ec297f7008ff7a1e3382d7f7e1dcd69efc94751a2284bafc3d013c2aa939" "checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e" "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" +"checksum lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4d06ff7ff06f729ce5f4e227876cb88d10bc59cd4ae1e09fbb2bde15c850dc21" "checksum make-cmd 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8ca8afbe8af1785e09636acb5a41e08a765f5f0340568716c18a8700ba3c0d3" "checksum mashup 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f2d82b34c7fb11bb41719465c060589e291d505ca4735ea30016a91f6fc79c3b" "checksum mashup-impl 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "aa607bfb674b4efb310512527d64266b065de3f894fc52f84efcbf7eaa5965fb" diff --git a/core/client/db/Cargo.toml b/core/client/db/Cargo.toml index a228a8e9cab05..77dd4d9bf8d94 100644 --- a/core/client/db/Cargo.toml +++ b/core/client/db/Cargo.toml @@ -8,6 +8,7 @@ parking_lot = "0.7.1" log = "0.4" kvdb = { git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" } kvdb-rocksdb = { 
git = "https://github.com/paritytech/parity-common", rev="616b40150ded71f57f650067fcbc5c99d7c343e6" } +lru-cache = "0.1" hash-db = { git = "https://github.com/paritytech/trie" } substrate-primitives = { path = "../../primitives" } sr-primitives = { path = "../../sr-primitives" } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 371f0a4013a3e..f663cc0834d33 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -29,6 +29,7 @@ extern crate kvdb_rocksdb; extern crate kvdb; extern crate hash_db; extern crate parking_lot; +extern crate lru_cache; extern crate substrate_state_machine as state_machine; extern crate substrate_primitives as primitives; extern crate sr_primitives as runtime_primitives; @@ -52,6 +53,7 @@ extern crate kvdb_memorydb; pub mod light; mod cache; +mod storage_cache; mod utils; use std::sync::Arc; @@ -75,10 +77,12 @@ use state_machine::{CodeExecutor, DBValue, ExecutionStrategy}; use utils::{Meta, db_err, meta_keys, open_database, read_db, block_id_to_lookup_key, read_meta}; use client::LeafSet; use state_db::StateDb; +use storage_cache::{CachingState, SharedCache, new_shared_cache}; pub use state_db::PruningMode; const CANONICALIZATION_DELAY: u64 = 256; const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u64 = 32768; +const STATE_CACHE_SIZE_BYTES: usize = 16 * 1024 * 1024; /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. 
pub type DbState = state_machine::TrieBackend>, Blake2Hasher>; @@ -270,8 +274,9 @@ impl client::blockchain::Backend for BlockchainDb { /// Database transaction pub struct BlockImportOperation { - old_state: DbState, - updates: MemoryDB, + old_state: CachingState, + db_updates: MemoryDB, + storage_updates: Vec<(Vec, Option>)>, changes_trie_updates: MemoryDB, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, @@ -292,7 +297,7 @@ impl client::backend::BlockImportOperation for BlockImportOperation where Block: BlockT, { - type State = DbState; + type State = CachingState; fn state(&self) -> Result, client::error::Error> { Ok(Some(&self.old_state)) @@ -319,8 +324,8 @@ where Block: BlockT, // currently authorities are not cached on full nodes } - fn update_storage(&mut self, update: MemoryDB) -> Result<(), client::error::Error> { - self.updates = update; + fn update_db_storage(&mut self, update: MemoryDB) -> Result<(), client::error::Error> { + self.db_updates = update; Ok(()) } @@ -349,7 +354,7 @@ where Block: BlockT, let (root, update) = self.old_state.storage_root(top.into_iter().map(|(k, v)| (k, Some(v)))); transaction.consolidate(update); - self.updates = transaction; + self.db_updates = transaction; Ok(root) } @@ -364,6 +369,11 @@ where Block: BlockT, self.aux_ops = ops.into_iter().collect(); Ok(()) } + + fn update_storage(&mut self, update: Vec<(Vec, Option>)>) -> Result<(), client::error::Error> { + self.storage_updates = update; + Ok(()) + } } struct StorageDb { @@ -503,6 +513,7 @@ pub struct Backend { changes_tries_storage: DbChangesTrieStorage, blockchain: BlockchainDb, canonicalization_delay: u64, + shared_cache: SharedCache, } impl Backend { @@ -550,6 +561,7 @@ impl Backend { changes_tries_storage, blockchain, canonicalization_delay, + shared_cache: new_shared_cache(STATE_CACHE_SIZE_BYTES), }) } @@ -669,7 +681,7 @@ impl client::backend::AuxStore for Backend where Block: BlockT client::backend::Backend for Backend where Block: BlockT { type 
BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; - type State = DbState; + type State = CachingState; type ChangesTrieStorage = DbChangesTrieStorage; fn begin_operation(&self, block: BlockId) -> Result { @@ -677,7 +689,8 @@ impl client::backend::Backend for Backend whe Ok(BlockImportOperation { pending_block: None, old_state: state, - updates: MemoryDB::default(), + db_updates: MemoryDB::default(), + storage_updates: Default::default(), changes_trie_updates: MemoryDB::default(), aux_ops: Vec::new(), }) @@ -697,6 +710,9 @@ impl client::backend::Backend for Backend whe // blocks are keyed by number + hash. let lookup_key = ::utils::number_and_hash_to_lookup_key(number, hash); + let mut enacted = Vec::default(); + let mut retracted = Vec::default(); + if pending_block.leaf_state.is_best() { let meta = self.blockchain.meta.read(); @@ -710,10 +726,11 @@ impl client::backend::Backend for Backend whe // uncanonicalize: check safety violations and ensure the numbers no longer // point to these block hashes in the key mapping. - for retracted in tree_route.retracted() { - if retracted.hash == meta.finalized_hash { + for r in tree_route.retracted() { + retracted.push(r.hash.clone()); + if r.hash == meta.finalized_hash { warn!("Potential safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash)); + (&r.number, &r.hash)); return Err(::client::error::ErrorKind::NotInFinalizedChain.into()); } @@ -721,17 +738,18 @@ impl client::backend::Backend for Backend whe ::utils::remove_number_to_key_mapping( &mut transaction, columns::KEY_LOOKUP, - retracted.number + r.number ); } // canonicalize: set the number lookup to map to this block's hash. 
- for enacted in tree_route.enacted() { + for e in tree_route.enacted() { + enacted.push(e.hash.clone()); ::utils::insert_number_to_key_mapping( &mut transaction, columns::KEY_LOOKUP, - enacted.number, - enacted.hash + e.number, + e.hash ); } } @@ -766,7 +784,7 @@ impl client::backend::Backend for Backend whe } let mut changeset: state_db::ChangeSet = state_db::ChangeSet::default(); - for (key, (val, rc)) in operation.updates.drain() { + for (key, (val, rc)) in operation.db_updates.drain() { if rc > 0 { changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { @@ -792,8 +810,8 @@ impl client::backend::Backend for Backend whe self.force_delayed_canonicalize(&mut transaction, hash, *pending_block.header.number())? } - debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, - pending_block.leaf_state.is_best()); + let is_best = pending_block.leaf_state.is_best(); + debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); { let mut leaves = self.blockchain.leaves.write(); @@ -817,6 +835,16 @@ impl client::backend::Backend for Backend whe pending_block.leaf_state.is_best(), finalized, ); + + // sync canonical state cache + operation.old_state.sync_cache( + &enacted, + &retracted, + operation.storage_updates, + Some(hash), + Some(number), + || is_best + ); } Ok(()) } @@ -898,7 +926,8 @@ impl client::backend::Backend for Backend whe BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::new(); let root = genesis_storage.0.clone(); - return Ok(DbState::new(Arc::new(genesis_storage), root)); + let state = DbState::new(Arc::new(genesis_storage), root); + return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, _ => {} } @@ -906,12 +935,21 @@ impl client::backend::Backend for Backend whe match self.blockchain.header(block) { Ok(Some(ref hdr)) if !self.storage.state_db.is_pruned(hdr.number().as_()) => { let root = H256::from_slice(hdr.state_root().as_ref()); - 
Ok(DbState::new(self.storage.clone(), root)) + let state = DbState::new(self.storage.clone(), root); + Ok(CachingState::new(state, self.shared_cache.clone(), Some(hdr.hash()))) }, Err(e) => Err(e), _ => Err(client::error::ErrorKind::UnknownBlock(format!("{:?}", block)).into()), } } + + fn destroy_state(&self, mut state: Self::State) -> Result<(), client::error::Error> { + if let Some(hash) = state.parent_hash.clone() { + let is_best = || self.blockchain.meta.read().best_hash == hash; + state.sync_cache(&[], &[], vec![], None, None, is_best); + } + Ok(()) + } } impl client::backend::LocalBackend for Backend @@ -1092,7 +1130,7 @@ mod tests { ]; let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); - op.update_storage(overlay).unwrap(); + op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); op.set_block_data( @@ -1138,7 +1176,7 @@ mod tests { op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap(); - key = op.updates.insert(b"hello"); + key = op.db_updates.insert(b"hello"); op.set_block_data( header, Some(vec![]), @@ -1171,8 +1209,8 @@ mod tests { ).0.into(); let hash = header.hash(); - op.updates.insert(b"hello"); - op.updates.remove(&key); + op.db_updates.insert(b"hello"); + op.db_updates.remove(&key); op.set_block_data( header, Some(vec![]), @@ -1204,7 +1242,7 @@ mod tests { .map(|(x, y)| (x, Some(y))) ).0.into(); - op.updates.remove(&key); + op.db_updates.remove(&key); op.set_block_data( header, Some(vec![]), diff --git a/core/client/db/src/storage_cache.rs b/core/client/db/src/storage_cache.rs new file mode 100644 index 0000000000000..27d5cec088769 --- /dev/null +++ b/core/client/db/src/storage_cache.rs @@ -0,0 +1,416 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Global cache state. + +use std::collections::{VecDeque, HashSet, HashMap}; +use std::sync::Arc; +use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; +use lru_cache::LruCache; +use hash_db::Hasher; +use runtime_primitives::traits::{Block, Header}; +use state_machine::{backend::Backend as StateBackend, TrieBackend}; + +const STATE_CACHE_BLOCKS: usize = 12; + +type StorageKey = Vec; +type StorageValue = Vec; + +/// Shared canonical state cache. +pub struct Cache { + /// Storage cache. `None` indicates that key is known to be missing. + storage: LruCache>, + /// Storage hashes cache. `None` indicates that key is known to be missing. + hashes: LruCache>, + /// Information on the modifications in recently committed blocks; specifically which keys + /// changed in which block. Ordered by block number. + modifications: VecDeque>, +} + +pub type SharedCache = Arc>>; + +/// Create new shared cache instance with given max memory usage. +pub fn new_shared_cache(shared_cache_size: usize) -> SharedCache { + let cache_items = shared_cache_size / 100; // Estimated average item size. TODO: more accurate tracking + Arc::new(Mutex::new(Cache { + storage: LruCache::new(cache_items), + hashes: LruCache::new(cache_items), + modifications: VecDeque::new(), + })) +} + +#[derive(Debug)] +/// Accumulates a list of storage changed in a block. 
+struct BlockChanges { + /// Block number. + number: B::Number, + /// Block hash. + hash: B::Hash, + /// Parent block hash. + parent: B::Hash, + /// A set of modified storage keys. + storage: HashSet, + /// Block is part of the canonical chain. + is_canon: bool, +} + +/// Cached values specific to a state. +struct LocalCache { + /// Storage cache. `None` indicates that key is known to be missing. + storage: HashMap>, + /// Storage hashes cache. `None` indicates that key is known to be missing. + hashes: HashMap>, +} + +/// State abstraction. +/// Manages shared global state cache which reflects the canonical +/// state as it is on the disk. +/// A instance of `CachingState` may be created as canonical or not. +/// For canonical instances local cache is accumulated and applied +/// in `sync_cache` along with the change overlay. +/// For non-canonical clones local cache and changes are dropped. +pub struct CachingState, B: Block> { + /// Backing state. + state: S, + /// Shared canonical state cache. + shared_cache: SharedCache, + /// Local cache of values for this state. + local_cache: RwLock>, + /// Hash of the block on top of which this instance was created or + /// `None` if cache is disabled + pub parent_hash: Option, +} + +impl, B: Block> CachingState { + /// Create a new instance wrapping generic State and shared cache. + pub fn new(state: S, shared_cache: SharedCache, parent_hash: Option) -> CachingState { + CachingState { + state, + shared_cache, + local_cache: RwLock::new(LocalCache { + storage: Default::default(), + hashes: Default::default(), + }), + parent_hash: parent_hash, + } + } + + /// Propagate local cache into the shared cache and synchonize + /// the shared cache with the best block state. + /// This function updates the shared cache by removing entries + /// that are invalidated by chain reorganization. `sync_cache` + /// should be called after the block has been committed and the + /// blockchain route has been calculated. 
+ pub fn sync_cache bool> ( + &mut self, + enacted: &[B::Hash], + retracted: &[B::Hash], + changes: Vec<(StorageKey, Option)>, + commit_hash: Option, + commit_number: Option<::Number>, + is_best: F, + ) { + let mut cache = self.shared_cache.lock(); + let is_best = is_best(); + trace!("Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", commit_number, commit_hash, self.parent_hash, is_best); + let cache = &mut *cache; + + // Purge changes from re-enacted and retracted blocks. + // Filter out commiting block if any. + let mut clear = false; + for block in enacted.iter().filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Reverting enacted block {:?}", block); + m.is_canon = true; + for a in &m.storage { + trace!("Reverting enacted key {:?}", a); + cache.storage.remove(a); + } + false + } else { + true + } + }; + } + + for block in retracted { + clear = clear || { + if let Some(ref mut m) = cache.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Retracting block {:?}", block); + m.is_canon = false; + for a in &m.storage { + trace!("Retracted key {:?}", a); + cache.storage.remove(a); + } + false + } else { + true + } + }; + } + if clear { + // We don't know anything about the block; clear everything + trace!("Wiping cache"); + cache.storage.clear(); + cache.modifications.clear(); + } + + // Propagate cache only if committing on top of the latest canonical state + // blocks are ordered by number and only one block with a given number is marked as canonical + // (contributed to canonical state cache) + if let Some(_) = self.parent_hash { + let mut local_cache = self.local_cache.write(); + if is_best { + trace!("Committing {} local, {} hashes, {} modified entries", local_cache.storage.len(), local_cache.hashes.len(), changes.len()); + for (k, v) in local_cache.storage.drain() { + cache.storage.insert(k, v); + } + for (k, v) in 
local_cache.hashes.drain() { + cache.hashes.insert(k, v); + } + } + } + + if let ( + Some(ref number), Some(ref hash), Some(ref parent)) + = (commit_number, commit_hash, self.parent_hash) + { + if cache.modifications.len() == STATE_CACHE_BLOCKS { + cache.modifications.pop_back(); + } + let mut modifications = HashSet::new(); + for (k, v) in changes.into_iter() { + modifications.insert(k.clone()); + if is_best { + cache.hashes.remove(&k); + cache.storage.insert(k, v); + } + } + // Save modified storage. These are ordered by the block number. + let block_changes = BlockChanges { + storage: modifications, + number: *number, + hash: hash.clone(), + is_canon: is_best, + parent: parent.clone(), + }; + let insert_at = cache.modifications.iter() + .enumerate() + .find(|&(_, m)| m.number < *number) + .map(|(i, _)| i); + trace!("Inserting modifications at {:?}", insert_at); + if let Some(insert_at) = insert_at { + cache.modifications.insert(insert_at, block_changes); + } else { + cache.modifications.push_back(block_changes); + } + } + } + + /// Check if the key can be returned from cache by matching current block parent hash against canonical + /// state and filtering out entries modified in later blocks. + fn is_allowed( + key: &[u8], + parent_hash: &Option, + modifications: + &VecDeque> + ) -> bool + { + let mut parent = match *parent_hash { + None => { + trace!("Cache lookup skipped for {:?}: no parent hash", key); + return false; + } + Some(ref parent) => parent, + }; + if modifications.is_empty() { + trace!("Cache lookup allowed for {:?}", key); + return true; + } + // Ignore all storage modified in later blocks + // Modifications contains block ordered by the number + // We search for our parent in that list first and then for + // all its parent until we hit the canonical block, + // checking against all the intermediate modifications. 
+ for m in modifications { + if &m.hash == parent { + if m.is_canon { + return true; + } + parent = &m.parent; + } + if m.storage.contains(key) { + trace!("Cache lookup skipped for {:?}: modified in a later block", key); + return false; + } + } + trace!("Cache lookup skipped for {:?}: parent hash is unknown", key); + false + } +} + +impl, B:Block> StateBackend for CachingState { + type Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + let local_cache = self.local_cache.upgradable_read(); + if let Some(entry) = local_cache.storage.get(key).cloned() { + trace!("Found in local cache: {:?}", key); + return Ok(entry) + } + let mut cache = self.shared_cache.lock(); + if Self::is_allowed(key, &self.parent_hash, &cache.modifications) { + if let Some(entry) = cache.storage.get_mut(key).map(|a| a.clone()) { + trace!("Found in shared cache: {:?}", key); + return Ok(entry) + } + } + trace!("Cache miss: {:?}", key); + let value = self.state.storage(key)?; + RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone()); + Ok(value) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + let local_cache = self.local_cache.upgradable_read(); + if let Some(entry) = local_cache.hashes.get(key).cloned() { + trace!("Found hash in local cache: {:?}", key); + return Ok(entry) + } + let mut cache = self.shared_cache.lock(); + if Self::is_allowed(key, &self.parent_hash, &cache.modifications) { + if let Some(entry) = cache.hashes.get_mut(key).map(|a| a.clone()) { + trace!("Found hash in shared cache: {:?}", key); + return Ok(entry) + } + } + trace!("Cache hash miss: {:?}", key); + let hash = self.state.storage_hash(key)?; + RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash.clone()); + Ok(hash) + } + + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + 
self.state.child_storage(storage_key, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self.storage(key)?.is_some()) + } + + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { + self.state.exists_child_storage(storage_key, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_keys_with_prefix(prefix, f) + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + self.state.for_keys_in_child_storage(storage_key, f) + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord + { + self.state.storage_root(delta) + } + + fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord + { + self.state.child_storage_root(storage_key, delta) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state.pairs() + } + + fn try_into_trie_backend(self) -> Option> { + self.state.try_into_trie_backend() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use runtime_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + use state_machine::backend::InMemory; + use primitives::Blake2Hasher; + + type Block = RawBlock>; + #[test] + fn smoke() { + //init_log(); + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let shared = new_shared_cache::(256*1024); + + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // state [ 5 5 4 3 2 2 ] + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(root_parent.clone())); + s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![2]))], Some(h0.clone()), Some(0), || true); + + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h0.clone())); + 
s.sync_cache(&[], &[], vec![], Some(h1a.clone()), Some(1), || true); + + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h0.clone())); + s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![3]))], Some(h1b.clone()), Some(1), || false); + + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1b.clone())); + s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![4]))], Some(h2b.clone()), Some(2), || false); + + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1a.clone())); + s.sync_cache(&[], &[], vec![(key.clone(), Some(vec![5]))], Some(h2a.clone()), Some(2), || true); + + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2a.clone())); + s.sync_cache(&[], &[], vec![], Some(h3a.clone()), Some(3), || true); + + let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h3a.clone())); + assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); + + let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1a.clone())); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2b.clone())); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h1b.clone())); + assert!(s.storage(&key).unwrap().is_none()); + + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = CachingState::new(InMemory::::default(), shared.clone(), Some(h2b.clone())); + s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], vec![], Some(h3b.clone()), Some(3), || true); + let s = CachingState::new(InMemory::::default(), shared.clone(), Some(h3a.clone())); + assert!(s.storage(&key).unwrap().is_none()); + } +} diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 1f370f0e1b627..88e97c7ec400c 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -68,9 +68,11 @@ 
pub trait BlockImportOperation where /// has been used to check justification of this block). fn update_authorities(&mut self, authorities: Vec>); /// Inject storage data into the database. - fn update_storage(&mut self, update: >::Transaction) -> error::Result<()>; + fn update_db_storage(&mut self, update: >::Transaction) -> error::Result<()>; /// Inject storage data into the database replacing any existing data. fn reset_storage(&mut self, top: StorageMap, children: ChildrenStorageMap) -> error::Result; + /// Set top level storage changes. + fn update_storage(&mut self, update: Vec<(Vec, Option>)>) -> error::Result<()>; /// Inject changes trie data into the database. fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()>; /// Update auxiliary keys. Values are `None` if should be deleted. @@ -127,6 +129,10 @@ pub trait Backend: AuxStore + Send + Sync where fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage>; /// Returns state backend with post-state of given block. fn state_at(&self, block: BlockId) -> error::Result; + /// Destroy state and save any useful data, such as cache. + fn destroy_state(&self, _state: Self::State) -> error::Result<()> { + Ok(()) + } /// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were /// successfully reverted. fn revert(&self, n: NumberFor) -> error::Result>; diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index 8cbb40dfa2a3b..62104d4aa0e5a 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -24,22 +24,11 @@ use state_machine::{self, OverlayedChanges, Ext, use executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use hash_db::Hasher; use trie::MemoryDB; -use codec::Decode; use primitives::{H256, Blake2Hasher}; -use primitives::storage::well_known_keys; use backend; use error; -/// Information regarding the result of a call. 
-#[derive(Debug, Clone)] -pub struct CallResult { - /// The data that was returned from the call. - pub return_data: Vec, - /// The changes made to the state by the call. - pub changes: OverlayedChanges, -} - /// Method call executor. pub trait CallExecutor where @@ -58,7 +47,7 @@ where id: &BlockId, method: &str, call_data: &[u8], - ) -> Result; + ) -> Result, error::Error>; /// Execute a contextual call on top of state in a block of a given hash. /// @@ -163,16 +152,22 @@ where id: &BlockId, method: &str, call_data: &[u8], - ) -> error::Result { + ) -> error::Result> { let mut changes = OverlayedChanges::default(); - let (return_data, _, _) = self.call_at_state( - &self.backend.state_at(*id)?, + let state = self.backend.state_at(*id)?; + let return_data = state_machine::execute_using_consensus_failure_handler( + &state, + self.backend.changes_trie_storage(), &mut changes, + &self.executor, method, call_data, native_when_possible(), - )?; - Ok(CallResult { return_data, changes }) + false, + ) + .map(|(result, _, _)| result)?; + self.backend.destroy_state(state)?; + Ok(return_data) } fn contextual_call< @@ -192,28 +187,40 @@ where //TODO: Find a better way to prevent double block initialization if method != "Core_initialise_block" && initialised_block.map(|id| id != *at).unwrap_or(true) { let header = prepare_environment_block()?; - self.call_at_state(&state, changes, "Core_initialise_block", &header.encode(), manager.clone())?; + state_machine::execute_using_consensus_failure_handler( + &state, + self.backend.changes_trie_storage(), + changes, + &self.executor, + "Core_initialise_block", + &header.encode(), + manager.clone(), + false, + )?; *initialised_block = Some(*at); } - self.call_at_state(&state, changes, method, call_data, manager).map(|cr| cr.0) + let result = state_machine::execute_using_consensus_failure_handler( + &state, + self.backend.changes_trie_storage(), + changes, + &self.executor, + method, + call_data, + manager, + false, + ) + .map(|(result, 
_, _)| result)?; + + self.backend.destroy_state(state)?; + Ok(result) } fn runtime_version(&self, id: &BlockId) -> error::Result { let mut overlay = OverlayedChanges::default(); let state = self.backend.state_at(*id)?; - use state_machine::Backend; - let code = state.storage(well_known_keys::CODE) - .map_err(|e| error::ErrorKind::Execution(Box::new(e)))? - .ok_or(error::ErrorKind::VersionInvalid)? - .to_vec(); - let heap_pages = state.storage(well_known_keys::HEAP_PAGES) - .map_err(|e| error::ErrorKind::Execution(Box::new(e)))? - .and_then(|v| u64::decode(&mut &v[..])) - .unwrap_or(1024) as usize; - let mut ext = Ext::new(&mut overlay, &state, self.backend.changes_trie_storage()); - self.executor.runtime_version(&mut ext, heap_pages, &code) + self.executor.runtime_version(&mut ext) .ok_or(error::ErrorKind::VersionInvalid.into()) } diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 79a112e616740..f02e25f78c07e 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -230,7 +230,6 @@ impl Client where let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?; let mut op = backend.begin_operation(BlockId::Hash(Default::default()))?; let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?; - let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash()); op.set_block_data( @@ -284,8 +283,8 @@ impl Client where match self.backend.blockchain().cache().and_then(|cache| cache.authorities_at(*id)) { Some(cached_value) => Ok(cached_value), None => self.executor.call(id, "Core_authorities", &[]) - .and_then(|r| Vec::>::decode(&mut &r.return_data[..]) - .ok_or(error::ErrorKind::InvalidAuthoritiesSet.into())) + .and_then(|r| Vec::>::decode(&mut &r[..]) + .ok_or_else(|| error::ErrorKind::InvalidAuthoritiesSet.into())) } } @@ -602,7 +601,7 
@@ impl Client where ); let (_, storage_update, changes_update) = r?; overlay.commit_prospective(); - (Some(storage_update), Some(changes_update), Some(overlay.into_committed())) + (Some(storage_update), Some(changes_update), Some(overlay.into_committed().collect())) }, None => (None, None, None) }; @@ -633,7 +632,10 @@ impl Client where transaction.update_authorities(authorities); } if let Some(storage_update) = storage_update { - transaction.update_storage(storage_update)?; + transaction.update_db_storage(storage_update)?; + } + if let Some(storage_changes) = storage_changes.clone() { + transaction.update_storage(storage_changes)?; } if let Some(Some(changes_update)) = changes_update { transaction.update_changes_trie(changes_update)?; @@ -646,7 +648,7 @@ impl Client where if let Some(storage_changes) = storage_changes { // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? self.storage_notifications.lock() - .trigger(&hash, storage_changes); + .trigger(&hash, storage_changes.into_iter()); } if finalized { diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 896f9821deffc..606f03d870d31 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -448,7 +448,7 @@ where self.pending_authorities = Some(authorities); } - fn update_storage(&mut self, update: as StateBackend>::Transaction) -> error::Result<()> { + fn update_db_storage(&mut self, update: as StateBackend>::Transaction) -> error::Result<()> { self.new_state = Some(self.old_state.update(update)); Ok(()) } @@ -491,6 +491,10 @@ where self.aux = Some(ops.into_iter().collect()); Ok(()) } + + fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> error::Result<()> { + Ok(()) + } } /// In-memory backend. Keeps all states and blocks in memory. Useful for testing. 
diff --git a/core/client/src/lib.rs b/core/client/src/lib.rs index fb07c130b25ff..3bb65df9d184e 100644 --- a/core/client/src/lib.rs +++ b/core/client/src/lib.rs @@ -102,7 +102,7 @@ mod notifications; #[cfg(feature = "std")] pub use blockchain::Info as ChainInfo; #[cfg(feature = "std")] -pub use call_executor::{CallResult, CallExecutor, LocalCallExecutor}; +pub use call_executor::{CallExecutor, LocalCallExecutor}; #[cfg(feature = "std")] pub use client::{ new_with_backend, diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index 79a636bfbc183..6beaf1271bf4b 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -188,7 +188,7 @@ where self.authorities = Some(authorities); } - fn update_storage(&mut self, _update: >::Transaction) -> ClientResult<()> { + fn update_db_storage(&mut self, _update: >::Transaction) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) } @@ -210,6 +210,11 @@ where self.aux_ops = ops.into_iter().collect(); Ok(()) } + + fn update_storage(&mut self, _update: Vec<(Vec, Option>)>) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } } impl StateBackend for OnDemandState diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index d30e45921538d..84d7545d6f0f4 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -31,7 +31,7 @@ use state_machine::{self, Backend as StateBackend, CodeExecutor, OverlayedChange use hash_db::Hasher; use blockchain::Backend as ChainBackend; -use call_executor::{CallExecutor, CallResult}; +use call_executor::CallExecutor; use error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; use light::fetcher::{Fetcher, RemoteCallRequest}; use executor::{RuntimeVersion, NativeVersion}; @@ -74,7 +74,7 @@ where { type Error = ClientError; - fn call(&self, id: &BlockId, method: &str, 
call_data: &[u8]) -> ClientResult { + fn call(&self, id: &BlockId, method: &str, call_data: &[u8]) -> ClientResult> { let block_hash = self.blockchain.expect_block_hash_from_id(id)?; let block_header = self.blockchain.expect_header(id.clone())?; @@ -105,12 +105,12 @@ where return Err(ClientErrorKind::NotAvailableOnLightClient.into()); } - self.call(at, method, call_data).map(|cr| cr.return_data) + self.call(at, method, call_data) } fn runtime_version(&self, id: &BlockId) -> ClientResult { let call_result = self.call(id, "version", &[])?; - RuntimeVersion::decode(&mut call_result.return_data.as_slice()) + RuntimeVersion::decode(&mut call_result.as_slice()) .ok_or_else(|| ClientErrorKind::VersionInvalid.into()) } @@ -189,7 +189,7 @@ pub fn check_execution_proof( executor: &E, request: &RemoteCallRequest
, remote_proof: Vec> -) -> ClientResult +) -> ClientResult> where Header: HeaderT, E: CodeExecutor, @@ -226,7 +226,7 @@ pub fn check_execution_proof( &request.call_data, )?; - Ok(CallResult { return_data: local_result, changes }) + Ok(local_result) } #[cfg(test)] @@ -273,7 +273,7 @@ mod tests { retry_count: None, }, remote_execution_proof).unwrap(); - (remote_result, local_result.return_data) + (remote_result, local_result) } // prepare remote client diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 1171e15f49d6a..5f9aed2fe1632 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -28,7 +28,6 @@ use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberF use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, TrieBackend, read_proof_check, key_changes_proof_check, create_proof_check_backend_storage}; -use call_executor::CallResult; use cht; use error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; use light::blockchain::{Blockchain, Storage as BlockchainStorage}; @@ -118,7 +117,7 @@ pub trait Fetcher: Send + Sync { /// Remote storage read future. type RemoteReadResult: IntoFuture>, Error=ClientError>; /// Remote call result future. - type RemoteCallResult: IntoFuture; + type RemoteCallResult: IntoFuture, Error=ClientError>; /// Remote changes result future. type RemoteChangesResult: IntoFuture, u32)>, Error=ClientError>; @@ -156,7 +155,7 @@ pub trait FetchChecker: Send + Sync { &self, request: &RemoteCallRequest, remote_proof: Vec> - ) -> ClientResult; + ) -> ClientResult>; /// Check remote changes query proof. 
fn check_changes_proof( &self, @@ -344,7 +343,7 @@ impl FetchChecker for LightDataChecker, remote_proof: Vec> - ) -> ClientResult { + ) -> ClientResult> { check_execution_proof::<_, _, H>(&self.executor, request, remote_proof) } @@ -392,7 +391,6 @@ pub mod tests { use futures::future::{ok, err, FutureResult}; use parking_lot::Mutex; use keyring::Keyring; - use call_executor::CallResult; use client::tests::prepare_client_with_key_changes; use executor::{self, NativeExecutionDispatch}; use error::Error as ClientError; @@ -410,12 +408,12 @@ pub mod tests { use state_machine::Backend; use super::*; - pub type OkCallFetcher = Mutex; + pub type OkCallFetcher = Mutex>; impl Fetcher for OkCallFetcher { type RemoteHeaderResult = FutureResult; type RemoteReadResult = FutureResult>, ClientError>; - type RemoteCallResult = FutureResult; + type RemoteCallResult = FutureResult, ClientError>; type RemoteChangesResult = FutureResult, u32)>, ClientError>; fn remote_header(&self, _request: RemoteHeaderRequest
) -> Self::RemoteHeaderResult { diff --git a/core/executor/src/lib.rs b/core/executor/src/lib.rs index 5f05b33f53852..ae2b383ae7ecd 100644 --- a/core/executor/src/lib.rs +++ b/core/executor/src/lib.rs @@ -44,9 +44,6 @@ extern crate parking_lot; #[macro_use] extern crate log; -#[macro_use] -extern crate lazy_static; - #[macro_use] extern crate error_chain; @@ -84,7 +81,5 @@ pub trait RuntimeInfo { fn runtime_version> ( &self, ext: &mut E, - heap_pages: usize, - code: &[u8] ) -> Option; } diff --git a/core/executor/src/native_executor.rs b/core/executor/src/native_executor.rs index 15cc8abac3edc..09606bc904f7b 100644 --- a/core/executor/src/native_executor.rs +++ b/core/executor/src/native_executor.rs @@ -14,38 +14,34 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use std::borrow::BorrowMut; +use std::cell::{RefMut, RefCell}; use error::{Error, ErrorKind, Result}; use state_machine::{CodeExecutor, Externalities}; use wasm_executor::WasmExecutor; -use wasmi::Module as WasmModule; +use wasmi::{Module as WasmModule, ModuleRef as WasmModuleInstanceRef}; use runtime_version::{NativeVersion, RuntimeVersion}; use std::collections::HashMap; use codec::Decode; -use primitives::hashing::blake2_256; -use parking_lot::{Mutex, MutexGuard}; use RuntimeInfo; use primitives::Blake2Hasher; +use primitives::storage::well_known_keys; + +/// Default num of pages for the heap +const DEFAULT_HEAP_PAGES :u64 = 1024; // For the internal Runtime Cache: // Is it compatible enough to run this natively or do we need to fall back on the WasmModule enum RuntimePreproc { InvalidCode, - ValidCode(WasmModule, Option), + ValidCode(WasmModuleInstanceRef, Option), } type CacheType = HashMap<[u8; 32], RuntimePreproc>; -lazy_static! 
{ - static ref RUNTIMES_CACHE: Mutex = Mutex::new(HashMap::new()); -} - -// helper function to generate low-over-head caching_keys -// it is asserted that part of the audit process that any potential on-chain code change -// will have done is to ensure that the two-x hash is different to that of any other -// :code value from the same chain -fn gen_cache_key(code: &[u8]) -> [u8; 32] { - blake2_256(code) +thread_local! { + static RUNTIMES_CACHE: RefCell = RefCell::new(HashMap::new()); } /// fetch a runtime version from the cache or if there is no cached version yet, create @@ -53,27 +49,48 @@ fn gen_cache_key(code: &[u8]) -> [u8; 32] { /// can be used by comparing returned RuntimeVersion to `ref_version` fn fetch_cached_runtime_version<'a, E: Externalities>( wasm_executor: &WasmExecutor, - cache: &'a mut MutexGuard, + cache: &'a mut RefMut, ext: &mut E, - heap_pages: usize, - code: &[u8] -) -> Result<(&'a WasmModule, &'a Option)> { - let maybe_runtime_preproc = cache.entry(gen_cache_key(code)) - .or_insert_with(|| match WasmModule::from_buffer(code) { - Ok(module) => { - let version = wasm_executor.call_in_wasm_module(ext, heap_pages, &module, "Core_version", &[]) - .ok() - .and_then(|v| RuntimeVersion::decode(&mut v.as_slice())); - RuntimePreproc::ValidCode(module, version) - } - Err(e) => { - trace!(target: "executor", "Invalid code presented to executor ({:?})", e); - RuntimePreproc::InvalidCode +) -> Result<(&'a WasmModuleInstanceRef, &'a Option)> { + + let code_hash = match ext.storage_hash(well_known_keys::CODE) { + Some(code_hash) => code_hash, + None => return Err(ErrorKind::InvalidCode(vec![]).into()), + }; + let maybe_runtime_preproc = cache.borrow_mut().entry(code_hash.into()) + .or_insert_with(|| { + let code = match ext.storage(well_known_keys::CODE) { + Some(code) => code, + None => return RuntimePreproc::InvalidCode, + }; + let heap_pages = match ext.storage(well_known_keys::HEAP_PAGES) { + Some(pages) => u64::decode(&mut 
&pages[..]).unwrap_or(DEFAULT_HEAP_PAGES), + None => DEFAULT_HEAP_PAGES, + }; + match WasmModule::from_buffer(code) + .map_err(|_| ErrorKind::InvalidCode(vec![]).into()) + .and_then(|module| wasm_executor.prepare_module(ext, heap_pages as usize, &module)) + { + Ok(module) => { + let version = wasm_executor.call_in_wasm_module(ext, &module, "Core_version", &[]) + .ok() + .and_then(|v| RuntimeVersion::decode(&mut v.as_slice())); + RuntimePreproc::ValidCode(module, version) + } + Err(e) => { + trace!(target: "executor", "Invalid code presented to executor ({:?})", e); + RuntimePreproc::InvalidCode + } } }); match maybe_runtime_preproc { - RuntimePreproc::InvalidCode => Err(ErrorKind::InvalidCode(code.into()).into()), - RuntimePreproc::ValidCode(m, v) => Ok((m, v)), + RuntimePreproc::InvalidCode => { + let code = ext.storage(well_known_keys::CODE).unwrap_or(vec![]); + Err(ErrorKind::InvalidCode(code).into()) + }, + RuntimePreproc::ValidCode(m, v) => { + Ok((m, v)) + } } } @@ -154,10 +171,10 @@ impl RuntimeInfo for NativeExecutor { fn runtime_version>( &self, ext: &mut E, - heap_pages: usize, - code: &[u8], ) -> Option { - fetch_cached_runtime_version(&self.fallback, &mut RUNTIMES_CACHE.lock(), ext, heap_pages, code).ok()?.1.clone() + RUNTIMES_CACHE.with(|c| + fetch_cached_runtime_version(&self.fallback, &mut c.borrow_mut(), ext).ok()?.1.clone() + ) } } @@ -167,30 +184,30 @@ impl CodeExecutor for NativeExecutor>( &self, ext: &mut E, - heap_pages: usize, - code: &[u8], method: &str, data: &[u8], use_native: bool, ) -> (Result>, bool) { - let mut c = RUNTIMES_CACHE.lock(); - let (module, onchain_version) = match fetch_cached_runtime_version(&self.fallback, &mut c, ext, heap_pages, code) { - Ok((module, onchain_version)) => (module, onchain_version), - Err(_) => return (Err(ErrorKind::InvalidCode(code.into()).into()), false), - }; - match (use_native, onchain_version.as_ref().map_or(false, |v| v.can_call_with(&self.native_version.runtime_version))) { - (_, false) => { - 
trace!(target: "executor", "Request for native execution failed (native: {}, chain: {})", self.native_version.runtime_version, onchain_version.as_ref().map_or_else(||"".into(), |v| format!("{}", v))); - (self.fallback.call_in_wasm_module(ext, heap_pages, module, method, data), false) - } - (false, _) => { - (self.fallback.call_in_wasm_module(ext, heap_pages, module, method, data), false) + RUNTIMES_CACHE.with(|c| { + let mut c = c.borrow_mut(); + let (module, onchain_version) = match fetch_cached_runtime_version(&self.fallback, &mut c, ext) { + Ok((module, onchain_version)) => (module, onchain_version), + Err(e) => return (Err(e), false), + }; + match (use_native, onchain_version.as_ref().map_or(false, |v| v.can_call_with(&self.native_version.runtime_version))) { + (_, false) => { + trace!(target: "executor", "Request for native execution failed (native: {}, chain: {})", self.native_version.runtime_version, onchain_version.as_ref().map_or_else(||"".into(), |v| format!("{}", v))); + (self.fallback.call_in_wasm_module(ext, module, method, data), false) + } + (false, _) => { + (self.fallback.call_in_wasm_module(ext, module, method, data), false) + } + _ => { + trace!(target: "executor", "Request for native execution succeeded (native: {}, chain: {})", self.native_version.runtime_version, onchain_version.as_ref().map_or_else(||"".into(), |v| format!("{}", v))); + (D::dispatch(ext, method, data), true) + } } - _ => { - trace!(target: "executor", "Request for native execution succeeded (native: {}, chain: {})", self.native_version.runtime_version, onchain_version.as_ref().map_or_else(||"".into(), |v| format!("{}", v))); - (D::dispatch(ext, method, data), true) - } - } + }) } } diff --git a/core/executor/src/wasm_executor.rs b/core/executor/src/wasm_executor.rs index 40357946f023d..60e2c84b1eb5a 100644 --- a/core/executor/src/wasm_executor.rs +++ b/core/executor/src/wasm_executor.rs @@ -19,10 +19,10 @@ use std::collections::HashMap; use wasmi::{ - Module, ModuleInstance, 
MemoryInstance, MemoryRef, TableRef, ImportsBuilder + Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, }; use wasmi::RuntimeValue::{I32, I64}; -use wasmi::memory_units::{Pages, Bytes}; +use wasmi::memory_units::Pages; use state_machine::Externalities; use error::{Error, ErrorKind, Result}; use wasm_utils::UserError; @@ -39,19 +39,17 @@ struct Heap { } impl Heap { - /// Construct new `Heap` struct with a given number of pages. + /// Construct new `Heap` struct. /// /// Returns `Err` if the heap couldn't allocate required /// number of pages. /// /// This could mean that wasm binary specifies memory /// limit and we are trying to allocate beyond that limit. - fn new(memory: &MemoryRef, pages: usize) -> Result { - let prev_page_count = memory.initial(); - memory.grow(Pages(pages)).map_err(|_| Error::from(ErrorKind::Runtime))?; - Ok(Heap { - end: Bytes::from(prev_page_count).0 as u32, - }) + fn new(memory: &MemoryRef) -> Self { + Heap { + end: memory.used_size().0 as u32, + } } fn allocate(&mut self, size: u32) -> u32 { @@ -83,10 +81,10 @@ struct FunctionExecutor<'e, E: Externalities + 'e> { } impl<'e, E: Externalities> FunctionExecutor<'e, E> { - fn new(m: MemoryRef, heap_pages: usize, t: Option, e: &'e mut E) -> Result { + fn new(m: MemoryRef, t: Option, e: &'e mut E) -> Result { Ok(FunctionExecutor { sandbox_store: sandbox::Store::new(), - heap: Heap::new(&m, heap_pages)?, + heap: Heap::new(&m), memory: m, table: t, ext: e, @@ -638,51 +636,43 @@ impl WasmExecutor { method: &str, data: &[u8], ) -> Result> { - let module = ::wasmi::Module::from_buffer(code).expect("all modules compiled with rustc are valid wasm code; qed"); - self.call_in_wasm_module(ext, heap_pages, &module, method, data) + let module = ::wasmi::Module::from_buffer(code)?; + let module = self.prepare_module(ext, heap_pages, &module)?; + self.call_in_wasm_module(ext, &module, method, data) + } + + fn get_mem_instance(module: &ModuleRef) -> Result { + Ok(module + 
.export_by_name("memory") + .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))? + .as_memory() + .ok_or_else(|| Error::from(ErrorKind::InvalidMemoryReference))? + .clone()) } /// Call a given method in the given wasm-module runtime. pub fn call_in_wasm_module>( &self, ext: &mut E, - heap_pages: usize, - module: &Module, + module_instance: &ModuleRef, method: &str, data: &[u8], ) -> Result> { - // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = ModuleInstance::new( - module, - &ImportsBuilder::new() - .with_resolver("env", FunctionExecutor::::resolver()) - )?; - // extract a reference to a linear memory, optional reference to a table // and then initialize FunctionExecutor. - let memory = intermediate_instance - .not_started_instance() - .export_by_name("memory") - // TODO: with code coming from the blockchain it isn't strictly been compiled with rustc anymore. - // these assumptions are probably not true anymore - .expect("all modules compiled with rustc should have an export named 'memory'; qed") - .as_memory() - .expect("in module generated by rustc export named 'memory' should be a memory; qed") - .clone(); - let table: Option = intermediate_instance - .not_started_instance() + let memory = Self::get_mem_instance(module_instance)?; + let table: Option = module_instance .export_by_name("__indirect_function_table") .and_then(|e| e.as_table().cloned()); - let mut fec = FunctionExecutor::new(memory.clone(), heap_pages, table, ext)?; - - // finish instantiation by running 'start' function (if any). 
- let instance = intermediate_instance.run_start(&mut fec)?; + let low = memory.lowest_used(); + let used_mem = memory.used_size(); + let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; let size = data.len() as u32; let offset = fec.heap.allocate(size); memory.set(offset, &data)?; - let result = instance.invoke_export( + let result = module_instance.invoke_export( method, &[ I32(offset as i32), @@ -690,22 +680,57 @@ impl WasmExecutor { ], &mut fec ); - let returned = match result { - Ok(x) => x, + let result = match result { + Ok(Some(I64(r))) => { + let offset = r as u32; + let length = (r >> 32) as u32 as usize; + memory.get(offset, length) + .map_err(|_| ErrorKind::Runtime.into()) + }, + Ok(_) => Err(ErrorKind::InvalidReturn.into()), Err(e) => { - trace!(target: "wasm-executor", "Failed to execute code with {} pages", heap_pages); - return Err(e.into()) + trace!(target: "wasm-executor", "Failed to execute code with {} pages", memory.current_size().0); + Err(e.into()) }, }; - if let Some(I64(r)) = returned { - let offset = r as u32; - let length = (r >> 32) as u32 as usize; - memory.get(offset, length) - .map_err(|_| ErrorKind::Runtime.into()) - } else { - Err(ErrorKind::InvalidReturn.into()) + // cleanup module instance for next use + let new_low = memory.lowest_used(); + if new_low < low { + memory.zero(new_low as usize, (low - new_low) as usize)?; + memory.reset_lowest_used(low); } + memory.with_direct_access_mut(|buf| buf.resize(used_mem.0, 0)); + result + } + + /// Prepare module instance + pub fn prepare_module>( + &self, + ext: &mut E, + heap_pages: usize, + module: &Module, + ) -> Result + { + // start module instantiation. Don't run 'start' function yet. + let intermediate_instance = ModuleInstance::new( + module, + &ImportsBuilder::new() + .with_resolver("env", FunctionExecutor::::resolver()) + )?; + + // extract a reference to a linear memory, optional reference to a table + // and then initialize FunctionExecutor. 
+ let memory = Self::get_mem_instance(intermediate_instance.not_started_instance())?; + memory.grow(Pages(heap_pages)).map_err(|_| Error::from(ErrorKind::Runtime))?; + let table: Option = intermediate_instance + .not_started_instance() + .export_by_name("__indirect_function_table") + .and_then(|e| e.as_table().cloned()); + let mut fec = FunctionExecutor::new(memory.clone(), table, ext)?; + + // finish instantiation by running 'start' function (if any). + Ok(intermediate_instance.run_start(&mut fec)?) } } diff --git a/core/network/src/on_demand.rs b/core/network/src/on_demand.rs index 0c5140f2fe06d..db56eaf592779 100644 --- a/core/network/src/on_demand.rs +++ b/core/network/src/on_demand.rs @@ -24,7 +24,7 @@ use futures::sync::oneshot::{channel, Receiver, Sender}; use linked_hash_map::LinkedHashMap; use linked_hash_map::Entry; use parking_lot::Mutex; -use client::{self, error::{Error as ClientError, ErrorKind as ClientErrorKind}}; +use client::{error::{Error as ClientError, ErrorKind as ClientErrorKind}}; use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof}; use io::SyncIo; @@ -107,7 +107,7 @@ struct Request { enum RequestData { RemoteHeader(RemoteHeaderRequest, Sender>), RemoteRead(RemoteReadRequest, Sender>, ClientError>>), - RemoteCall(RemoteCallRequest, Sender>), + RemoteCall(RemoteCallRequest, Sender, ClientError>>), RemoteChanges(RemoteChangesRequest, Sender, u32)>, ClientError>>), } @@ -312,7 +312,7 @@ impl Fetcher for OnDemand where { type RemoteHeaderResult = RemoteResponse; type RemoteReadResult = RemoteResponse>>; - type RemoteCallResult = RemoteResponse; + type RemoteCallResult = RemoteResponse>; type RemoteChangesResult = RemoteResponse, u32)>>; fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { @@ -529,7 +529,7 @@ pub mod tests { use futures::Future; use parking_lot::RwLock; use runtime_primitives::traits::NumberFor; - use 
client::{self, error::{ErrorKind as ClientErrorKind, Result as ClientResult}}; + use client::{error::{ErrorKind as ClientErrorKind, Result as ClientResult}}; use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof}; use config::Roles; @@ -567,12 +567,9 @@ pub mod tests { } } - fn check_execution_proof(&self, _: &RemoteCallRequest
, _: Vec>) -> ClientResult { + fn check_execution_proof(&self, _: &RemoteCallRequest
, _: Vec>) -> ClientResult> { match self.ok { - true => Ok(client::CallResult { - return_data: vec![42], - changes: Default::default(), - }), + true => Ok(vec![42]), false => Err(ClientErrorKind::Backend("Test error".into()).into()), } } @@ -796,7 +793,7 @@ pub mod tests { }); let thread = ::std::thread::spawn(move || { let result = response.wait().unwrap(); - assert_eq!(result.return_data, vec![42]); + assert_eq!(result, vec![42]); }); receive_call_response(&*on_demand, &mut network, 0, 0); diff --git a/core/rpc/src/lib.rs b/core/rpc/src/lib.rs index e96078ff6e33c..2e7d4b1fa8a88 100644 --- a/core/rpc/src/lib.rs +++ b/core/rpc/src/lib.rs @@ -37,7 +37,6 @@ extern crate error_chain; extern crate jsonrpc_macros; #[macro_use] extern crate log; -#[macro_use] extern crate serde_derive; #[cfg(test)] diff --git a/core/rpc/src/state/mod.rs b/core/rpc/src/state/mod.rs index d044962f0a09e..19f4046aa813c 100644 --- a/core/rpc/src/state/mod.rs +++ b/core/rpc/src/state/mod.rs @@ -144,8 +144,7 @@ impl StateApi for State where .call( &BlockId::Hash(block), &method, &data.0 - )? - .return_data; + )?; Ok(Bytes(return_data)) } diff --git a/core/state-machine/src/backend.rs b/core/state-machine/src/backend.rs index f9949deae9db1..ea1d19859ea42 100644 --- a/core/state-machine/src/backend.rs +++ b/core/state-machine/src/backend.rs @@ -40,10 +40,15 @@ pub trait Backend { /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage; - /// Get keyed storage associated with specific address, or None if there is nothing associated. + /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result>, Self::Error>; - /// Get keyed child storage associated with specific address, or None if there is nothing associated. + /// Get keyed storage value hash or None if there is nothing associated. 
+ fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.storage(key).map(|v| v.map(|v| H::hash(&v))) + } + + /// Get keyed child storage or None if there is nothing associated. fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error>; /// true if a key exists in storage. diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index 1bcb3fb732b1f..6274eef720593 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -184,6 +184,11 @@ where self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) } + fn storage_hash(&self, key: &[u8]) -> Option { + self.overlay.storage(key).map(|x| x.map(|x| H::hash(x))).unwrap_or_else(|| + self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) + } + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { self.overlay.child_storage(storage_key, key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| self.backend.child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL)) diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 5f9757bd55cd6..bc0189d398716 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -65,9 +65,6 @@ pub use proving_backend::{create_proof_check_backend, create_proof_check_backend pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; -/// Default num of pages for the heap -const DEFAULT_HEAP_PAGES :u64 = 1024; - /// State Machine Error bound. /// /// This should reflect WASM error type bound for future compatibility. @@ -98,10 +95,15 @@ impl fmt::Display for ExecutionError { /// Externalities: pinned to specific active address. pub trait Externalities { - /// Read storage of current contract being called. + /// Read runtime storage. fn storage(&self, key: &[u8]) -> Option>; - /// Read child storage of current contract being called. + /// Get storage value hash. This may be optimized for large values. 
+ fn storage_hash(&self, key: &[u8]) -> Option { + self.storage(key).map(|v| H::hash(&v)) + } + + /// Read child runtime storage. fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). @@ -171,8 +173,6 @@ pub trait CodeExecutor: Sized + Send + Sync { fn call>( &self, ext: &mut E, - heap_pages: usize, - code: &[u8], method: &str, data: &[u8], use_native: bool @@ -297,14 +297,6 @@ where { let strategy: ExecutionStrategy = (&manager).into(); - // make a copy. - let code = try_read_overlay_value(overlay, backend, well_known_keys::CODE)? - .ok_or_else(|| Box::new(ExecutionError::CodeEntryDoesNotExist) as Box)? - .to_vec(); - - let heap_pages = try_read_overlay_value(overlay, backend, well_known_keys::HEAP_PAGES)? - .and_then(|v| u64::decode(&mut &v[..])).unwrap_or(DEFAULT_HEAP_PAGES) as usize; - // read changes trie configuration. The reason why we're doing it here instead of the // `OverlayedChanges` constructor is that we need proofs for this read as a part of // proof-of-execution on light clients. 
And the proof is recorded by the backend which @@ -328,8 +320,6 @@ where let mut externalities = ext::Ext::new(overlay, backend, changes_trie_storage); let retval = exec.call( &mut externalities, - heap_pages, - &code, method, call_data, // attempt to run native first, if we're not directed to run wasm only @@ -357,8 +347,6 @@ where let mut externalities = ext::Ext::new(overlay, backend, changes_trie_storage); let retval = exec.call( &mut externalities, - heap_pages, - &code, method, call_data, false, @@ -614,8 +602,6 @@ mod tests { fn call>( &self, ext: &mut E, - _heap_pages: usize, - _code: &[u8], _method: &str, _data: &[u8], use_native: bool diff --git a/core/state-machine/src/overlayed_changes.rs b/core/state-machine/src/overlayed_changes.rs index 29ff7262da2e8..3a208ad3e2c01 100644 --- a/core/state-machine/src/overlayed_changes.rs +++ b/core/state-machine/src/overlayed_changes.rs @@ -44,7 +44,7 @@ pub struct OverlayedValue { /// Current value. None if value has been deleted. pub value: Option>, /// The set of extinsic indices where the values has been changed. - /// Is filled only if runtime ahs announced changes trie support. + /// Is filled only if runtime has announced changes trie support. pub extrinsics: Option>, } diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index 876a190a966a9..b9f14f18e5a0f 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -23,7 +23,8 @@ use heapsize::HeapSizeOf; use trie::trie_root; use backend::InMemory; use changes_trie::{compute_changes_trie_root, InMemoryStorage as ChangesTrieInMemoryStorage, AnchorBlockId}; -use primitives::storage::well_known_keys::CHANGES_TRIE_CONFIG; +use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES}; +use codec::Encode; use super::{Externalities, OverlayedChanges}; /// Simple HashMap-based Externalities impl. 
@@ -31,11 +32,17 @@ pub struct TestExternalities where H::Out: HeapSizeOf { inner: HashMap, Vec>, changes_trie_storage: ChangesTrieInMemoryStorage, changes: OverlayedChanges, + code: Vec, } impl TestExternalities where H::Out: HeapSizeOf { /// Create a new instance of `TestExternalities` pub fn new(inner: HashMap, Vec>) -> Self { + Self::new_with_code(&[], inner) + } + + /// Create a new instance of `TestExternalities` + pub fn new_with_code(code: &[u8], inner: HashMap, Vec>) -> Self { let mut overlay = OverlayedChanges::default(); super::set_changes_trie_config( &mut overlay, @@ -47,6 +54,7 @@ impl TestExternalities where H::Out: HeapSizeOf { inner, changes_trie_storage: ChangesTrieInMemoryStorage::new(), changes: overlay, + code: code.to_vec(), } } @@ -94,13 +102,18 @@ impl From< HashMap, Vec> > for TestExternalities where inner: hashmap, changes_trie_storage: ChangesTrieInMemoryStorage::new(), changes: Default::default(), + code: Default::default(), } } } impl Externalities for TestExternalities where H::Out: Ord + HeapSizeOf { fn storage(&self, key: &[u8]) -> Option> { - self.inner.get(key).map(|x| x.to_vec()) + match key { + CODE => Some(self.code.clone()), + HEAP_PAGES => Some(8u64.encode()), + _ => self.inner.get(key).map(|x| x.to_vec()), + } } fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> Option> { diff --git a/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm b/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm index 81ebc41cba2fd..56ba7e35f138a 100644 Binary files a/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm and b/core/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm differ diff --git a/node/executor/src/lib.rs b/node/executor/src/lib.rs index 8ab8a6ceab5ed..626deb0a7e3ee 100644 --- a/node/executor/src/lib.rs +++ b/node/executor/src/lib.rs @@ -122,7 
+122,7 @@ mod tests { #[test] fn panic_execution_with_foreign_code_gives_error() { - let mut t = TestExternalities::::new(map![ + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, map![ twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![70u8; 16], @@ -134,16 +134,16 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let r = executor().call(&mut t, 8, BLOATY_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; + let r = executor().call(&mut t, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; assert!(r.is_ok()); - let v = executor().call(&mut t, 8, BLOATY_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0.unwrap(); + let v = executor().call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0.unwrap(); let r = ApplyResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } #[test] fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = TestExternalities::::new(map![ + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, map![ twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![70u8; 16], @@ -155,16 +155,16 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let r = executor().call(&mut t, 8, COMPACT_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; + let r = executor().call(&mut t, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; assert!(r.is_ok()); - let v = executor().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0.unwrap(); + let v = executor().call(&mut 
t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0.unwrap(); let r = ApplyResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } #[test] fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = TestExternalities::::new(map![ + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, map![ twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![0u8; 16], @@ -176,9 +176,9 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let r = executor().call(&mut t, 8, COMPACT_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; + let r = executor().call(&mut t, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; assert!(r.is_ok()); - let r = executor().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor().call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; assert!(r.is_ok()); runtime_io::with_externalities(&mut t, || { @@ -189,7 +189,7 @@ mod tests { #[test] fn successful_execution_with_foreign_code_gives_ok() { - let mut t = TestExternalities::::new(map![ + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, map![ twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![0u8; 16], @@ -201,9 +201,9 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let r = executor().call(&mut t, 8, BLOATY_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; + let r = executor().call(&mut t, "Core_initialise_block", &vec![].and(&from_block_number(1u64)), true).0; assert!(r.is_ok()); - let r = 
executor().call(&mut t, 8, BLOATY_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor().call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; assert!(r.is_ok()); runtime_io::with_externalities(&mut t, || { @@ -212,10 +212,10 @@ mod tests { }); } - fn new_test_ext(support_changes_trie: bool) -> TestExternalities { + fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { use keyring::Keyring::*; let three = [3u8; 32].into(); - TestExternalities::new(GenesisConfig { + TestExternalities::new_with_code(code, GenesisConfig { consensus: Some(Default::default()), system: Some(SystemConfig { changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { @@ -387,9 +387,9 @@ mod tests { #[test] fn full_native_block_import_works() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); - executor().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1(false).0, true).0.unwrap(); + executor().call(&mut t, "Core_execute_block", &block1(false).0, true).0.unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Balances::total_balance(&alice()), 41); @@ -431,7 +431,7 @@ mod tests { ]); }); - executor().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block2().0, true).0.unwrap(); + executor().call(&mut t, "Core_execute_block", &block2().0, true).0.unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Balances::total_balance(&alice()), 30); @@ -505,7 +505,7 @@ mod tests { #[test] fn full_wasm_block_import_works() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1(false).0).unwrap(); @@ -646,7 +646,7 @@ mod tests { #[test] fn deploying_wasm_contract_should_work() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); let code_transfer = wabt::wat2wasm(CODE_TRANSFER).unwrap(); let 
code_ctor_transfer = wabt::wat2wasm(&code_ctor(&code_transfer)).unwrap(); @@ -682,7 +682,7 @@ mod tests { ] ); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &b.0).unwrap(); + WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &b.0).unwrap(); runtime_io::with_externalities(&mut t, || { // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. @@ -692,7 +692,7 @@ mod tests { #[test] fn wasm_big_block_import_fails() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); assert!( WasmExecutor::new().call( @@ -707,12 +707,10 @@ mod tests { #[test] fn native_big_block_import_succeeds() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); Executor::new().call( &mut t, - 8, - COMPACT_CODE, "Core_execute_block", &block1big().0, true @@ -721,13 +719,11 @@ mod tests { #[test] fn native_big_block_import_fails_on_fallback() { - let mut t = new_test_ext(false); + let mut t = new_test_ext(COMPACT_CODE, false); assert!( Executor::new().call( &mut t, - 8, - COMPACT_CODE, "Core_execute_block", &block1big().0, false @@ -737,7 +733,8 @@ mod tests { #[test] fn panic_execution_gives_error() { - let mut t = TestExternalities::::new(map![ + let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm"); + let mut t = TestExternalities::::new_with_code(foreign_code, map![ twox_128(&>::key_for(alice())).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![69u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![70u8; 16], @@ -749,17 +746,17 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.wasm"); - let r = WasmExecutor::new().call(&mut t, 8, &foreign_code[..], 
"Core_initialise_block", &vec![].and(&from_block_number(1u64))); + let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64))); assert!(r.is_ok()); - let r = WasmExecutor::new().call(&mut t, 8, &foreign_code[..], "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); let r = ApplyResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(ApplyError::CantPay)); } #[test] fn successful_execution_gives_ok() { - let mut t = TestExternalities::::new(map![ + let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm"); + let mut t = TestExternalities::::new_with_code(foreign_code, map![ twox_128(&>::key_for(alice())).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![111u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], twox_128(>::key()).to_vec() => vec![0u8; 16], @@ -771,10 +768,9 @@ mod tests { twox_128(&>::key_for(0)).to_vec() => vec![0u8; 32] ]); - let foreign_code = include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm"); - let r = WasmExecutor::new().call(&mut t, 8, &foreign_code[..], "Core_initialise_block", &vec![].and(&from_block_number(1u64))); + let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_initialise_block", &vec![].and(&from_block_number(1u64))); assert!(r.is_ok()); - let r = WasmExecutor::new().call(&mut t, 8, &foreign_code[..], "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); let r = ApplyResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Ok(ApplyOutcome::Success)); @@ -786,15 +782,15 @@ mod tests { #[test] fn full_native_block_import_works_with_changes_trie() { - 
let mut t = new_test_ext(true); - Executor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1(true).0, true).0.unwrap(); + let mut t = new_test_ext(COMPACT_CODE, true); + Executor::new().call(&mut t, "Core_execute_block", &block1(true).0, true).0.unwrap(); assert!(t.storage_changes_root(Default::default(), 0).is_some()); } #[test] fn full_wasm_block_import_works_with_changes_trie() { - let mut t = new_test_ext(true); + let mut t = new_test_ext(COMPACT_CODE, true); WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1(true).0).unwrap(); assert!(t.storage_changes_root(Default::default(), 0).is_some()); @@ -808,9 +804,9 @@ mod tests { #[bench] fn wasm_execute_block(b: &mut Bencher) { b.iter(|| { - let mut t = new_test_ext(false); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1(false).0).unwrap(); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block2().0).unwrap(); + let mut t = new_test_ext(COMPACT_CODE, false); + WasmExecutor::new().call(&mut t, "Core_execute_block", &block1(false).0).unwrap(); + WasmExecutor::new().call(&mut t, "Core_execute_block", &block2().0).unwrap(); }); } } diff --git a/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm b/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm index b73bd2e691a27..657189d728982 100644 Binary files a/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm and b/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm differ