diff --git a/Cargo.lock b/Cargo.lock index cc1037840163f..5e07fb90d90ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2258,12 +2258,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" -[[package]] -name = "interleaved-ordered" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "141340095b15ed7491bd3d4ced9d20cebfb826174b6bb03386381f62b01e3d77" - [[package]] name = "intervalier" version = "0.4.0" @@ -2475,8 +2469,7 @@ dependencies = [ [[package]] name = "kvdb" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad096c6849b2ef027fabe35c4aed356d0e3d3f586d0a8361e5e17f1e50a7ce5" +source = "git+https://github.com/cheme/parity-common?branch=upgraded_del_range_pr#cf619698bb98b4868538f74eb05da10aebb78b26" dependencies = [ "parity-util-mem", "smallvec 1.3.0", @@ -2485,8 +2478,7 @@ dependencies = [ [[package]] name = "kvdb-memorydb" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aa954d12cfac958822dfd77aab34f3eec71f103b918c4ab79ab59a36ee594ea" +source = "git+https://github.com/cheme/parity-common?branch=upgraded_del_range_pr#cf619698bb98b4868538f74eb05da10aebb78b26" dependencies = [ "kvdb", "parity-util-mem", @@ -2496,11 +2488,9 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3f14c3a10c8894d26175e57e9e26032e6d6c49c30cbe2468c5bf5f6b64bb0be" +source = "git+https://github.com/cheme/parity-common?branch=upgraded_del_range_pr#cf619698bb98b4868538f74eb05da10aebb78b26" dependencies = [ "fs-swap", - "interleaved-ordered", "kvdb", "log", "num_cpus", @@ -2515,8 +2505,7 @@ dependencies = [ [[package]] name = "kvdb-web" 
version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f96eec962af83cdf7c83036b3dbb0ae6a1249ddab746820618e2567ca8ebcd" +source = "git+https://github.com/cheme/parity-common?branch=upgraded_del_range_pr#cf619698bb98b4868538f74eb05da10aebb78b26" dependencies = [ "futures 0.3.4", "js-sys", @@ -5544,18 +5533,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +checksum = "0a214c7875e1b63fc1618db7c80efc0954f6156c9ff07699fd9039e255accdd1" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" +checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" dependencies = [ "proc-macro2", "quote 1.0.3", @@ -7743,6 +7732,7 @@ name = "sp-storage" version = "2.0.0-dev" dependencies = [ "impl-serde 0.2.3", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", diff --git a/Cargo.toml b/Cargo.toml index abb0cca39c49c..00a05cdba4dfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,3 +181,11 @@ members = [ # Substrate runtime requires unwinding. 
panic = "unwind" +[patch.crates-io] +kvdb = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +kvdb-rocksdb = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +kvdb-web = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +kvdb-memorydb = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +#parity-util-mem = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +#fixed-hash = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } +#primitive-types = { git = "https://github.com/cheme/parity-common", branch="upgraded_del_range_pr" } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 6f75741fa75c6..477135eac8ba1 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -22,6 +22,7 @@ use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; use sp_state_machine::Backend as _; +use sp_core::storage::ChildInfo; use node_primitives::Hash; @@ -161,7 +162,7 @@ impl core::BenchmarkDescription for TrieBenchmarkDescription { struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { + fn get(&self, _trie: &ChildInfo, key: &Hash, prefix: Prefix) -> Result>, String> { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) } @@ -171,7 +172,7 @@ impl core::Benchmark for TrieBenchmark { fn run(&mut self, mode: Mode) -> std::time::Duration { let mut db = self.database.clone(); let storage: Arc> = - Arc::new(Storage(db.open())); + Arc::new(Storage(db.open())); let trie_backend = sp_state_machine::TrieBackend::new( storage, @@ -235,4 +236,4 @@ impl SizePool { rng.fill_bytes(&mut key[..]); key.to_vec() } -} \ No 
newline at end of file +} diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 33a370c7cb2c5..33e0dd1ac67ea 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -24,7 +24,7 @@ use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, + ChildStorageCollection, StorageCollection, }; use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index f154eade44d5e..106a2c9059de8 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -22,7 +22,7 @@ use std::{ }; use fnv::{FnvHashSet, FnvHashMap}; -use sp_core::storage::{StorageKey, StorageData}; +use sp_core::storage::{StorageKey, StorageData, ChildChange}; use sp_runtime::traits::Block as BlockT; use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; @@ -30,7 +30,7 @@ use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_u #[derive(Debug)] pub struct StorageChangeSet { changes: Arc)>>, - child_changes: Arc)>)>>, + child_changes: Arc)>)>>, filter: Option>, child_filters: Option>>>, } @@ -48,16 +48,25 @@ impl StorageChangeSet { .map(move |(k,v)| (None, k, v.as_ref())); let children = self.child_changes .iter() - .filter_map(move |(sk, changes)| { + .filter_map(move |(sk, change, changes)| { if let Some(cf) = self.child_filters.as_ref() { if let Some(filter) = cf.get(sk) { - Some(changes - .iter() - .filter(move |&(key, _)| match filter { - Some(ref filter) => filter.contains(key), - None => true, - }) - .map(move |(k,v)| (Some(sk), k, v.as_ref()))) + let bulk_delete = matches!(change, ChildChange::BulkDelete(..)); + Some(changes + .iter() + .filter(move |&(key, _)| if 
bulk_delete { + true + } else { + match filter { + Some(ref filter) => filter.contains(key), + None => true, + } + }) + .map(move |(k,v)| (Some(sk), k, if bulk_delete { + None + } else { + v.as_ref() + }))) } else { None } } else { None } }) @@ -110,7 +119,7 @@ impl StorageNotifications { hash: &Block::Hash, changeset: impl Iterator, Option>)>, child_changeset: impl Iterator< - Item=(Vec, impl Iterator, Option>)>) + Item=(Vec, ChildChange, impl Iterator, Option>)>) >, ) { let has_wildcard = !self.wildcard_listeners.is_empty(); @@ -137,7 +146,7 @@ impl StorageNotifications { changes.push((k, v.map(StorageData))); } } - for (sk, changeset) in child_changeset { + for (sk, change, changeset) in child_changeset { let sk = StorageKey(sk); if let Some((cl, cw)) = self.child_listeners.get(&sk) { let mut changes = Vec::new(); @@ -156,7 +165,7 @@ impl StorageNotifications { } } if !changes.is_empty() { - child_changes.push((sk, changes)); + child_changes.push((sk, change, changes)); } } } @@ -313,7 +322,7 @@ mod tests { type TestChangeSet = ( Vec<(StorageKey, Option)>, - Vec<(StorageKey, Vec<(StorageKey, Option)>)>, + Vec<(StorageKey, ChildChange, Vec<(StorageKey, Option)>)>, ); #[cfg(test)] @@ -360,18 +369,18 @@ mod tests { (vec![5], Some(vec![4])), (vec![6], None), ]; - let c_changeset = vec![(vec![4], c_changeset_1)]; + let c_changeset = vec![(vec![4], ChildChange::Update, c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b, c)| (a, b, c.into_iter())), ); // then assert_eq!(recv.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ (StorageKey(vec![2]), Some(StorageData(vec![3]))), (StorageKey(vec![3]), None), - ], vec![(StorageKey(vec![4]), vec![ + ], vec![(StorageKey(vec![4]), ChildChange::Update, vec![ (StorageKey(vec![5]), Some(StorageData(vec![4]))), (StorageKey(vec![6]), None), ])]).into())); @@ -402,11 +411,11 @@ mod tests { (vec![6], 
None), ]; - let c_changeset = vec![(vec![4], c_changeset_1)]; + let c_changeset = vec![(vec![4], ChildChange::Update, c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b, c)| (a, b, c.into_iter())), ); // then @@ -417,9 +426,11 @@ mod tests { (StorageKey(vec![2]), Some(StorageData(vec![3]))), ], vec![]).into())); assert_eq!(recv3.next().unwrap(), (Hash::from_low_u64_be(1), (vec![], - vec![ - (StorageKey(vec![4]), vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))]), - ]).into())); + vec![( + StorageKey(vec![4]), + ChildChange::Update, + vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))], + )]).into())); } @@ -451,7 +462,7 @@ mod tests { (vec![2], Some(vec![3])), (vec![1], None), ]; - let c_changeset = empty::<(_, Empty<_>)>(); + let c_changeset = empty::<(_, _, Empty<_>)>(); notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); // then @@ -469,7 +480,7 @@ mod tests { // when let changeset = vec![]; - let c_changeset = empty::<(_, Empty<_>)>(); + let c_changeset = empty::<(_, _, Empty<_>)>(); notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); recv }; diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fbe7b7e7a8eff..2e9335da30665 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -83,6 +83,7 @@ impl BuildStorage for ChainSpec { StorageChild { data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, + child_change: Default::default(), }, ) }).collect(), diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 10ad5e30f15b6..cbbcdf413d0b9 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -35,6 +35,7 @@ sp-consensus = { version = "0.8.0-dev", path = "../../primitives/consensus/commo sp-blockchain = { version = "2.0.0-dev", path = 
"../../primitives/blockchain" } sp-database = { version = "2.0.0-dev", path = "../../primitives/database" } parity-db = { version = "0.1", optional = true } +tempfile = { version = "3", optional = true } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-dev", path = "../../utils/prometheus" } [dev-dependencies] @@ -46,5 +47,5 @@ kvdb-rocksdb = "0.7" tempfile = "3" [features] -default = [] -test-helpers = [] +default = ["parity-db"] +test-helpers = ["tempfile"] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9d6f595498bd0..75d5310ff264f 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -21,8 +21,8 @@ use std::cell::{Cell, RefCell}; use std::collections::HashMap; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::storage::ChildInfo; +use sp_trie::MemoryDB; +use sp_core::storage::{ChildInfo, ChildType, ChildChange}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; @@ -41,8 +41,13 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + fn get( + &self, + child_info: &ChildInfo, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { + let key = crate::keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); self.db.get(0, &key) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -79,13 +84,27 @@ impl BenchmarkingState { state.reopen()?; let child_delta = genesis.children_default.into_iter().map(|(_storage_key, child_content)| ( child_content.child_info, + child_content.child_change, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction, _): (B::Hash, _, _) = 
state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, + false, ); - state.genesis = transaction.clone().drain(); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, mut updates) in transaction.clone().into_iter() { + keyspace.change_keyspace(info.keyspace()); + for (key, rc_val) in updates.1.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + state.genesis.insert(key, rc_val); + } + } state.genesis_root = root.clone(); state.commit(root, transaction)?; state.record.take(); @@ -200,11 +219,13 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + self.state.borrow().as_ref() + .map_or(Default::default(), |s| s.child_storage_root(child_info, child_change, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -229,20 +250,41 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) + fn commit(&self, storage_root: as Hasher>::Out, transaction: Self::Transaction) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); - let changes = transaction.drain(); - let mut keys = Vec::with_capacity(changes.len()); - for (key, (val, rc)) in changes { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - db_transaction.delete(0, &key); + let mut keys = Vec::new(); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, (change, mut updates)) in transaction.into_iter() { + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::ParentKeyId { + // Unhandled 
child kind + unimplemented!( + "Data for {:?} without a backend implementation", + info.child_type(), + ); + } + if let ChildChange::BulkDelete(..) = change { + db_transaction.delete_prefix(0, info.keyspace()); + } else { + keyspace.change_keyspace(info.keyspace()); + for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + if rc > 0 { + db_transaction.put(0, &key, &val); + } else if rc < 0 { + db_transaction.delete(0, &key); + } + keys.push(key); + } } - keys.push(key); } self.record.set(keys); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 985251f403d62..b2ea6dc80fc21 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -488,7 +488,11 @@ where self.build_cache.read().with_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { + fn get( + &self, + key: &Block::Hash, + _prefix: Prefix, + ) -> Result>, String> { Ok(self.db.get(self.changes_tries_column, key.as_ref())) } } @@ -701,7 +705,10 @@ mod tests { .log(DigestItem::as_changes_trie_root) .cloned(); match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + Some(trie_root) => backend.changes_tries_storage.get( + &trie_root, + EMPTY_PREFIX, + ).unwrap().is_none(), None => true, } }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 579ea2db4ad8d..ddc2c7a7de898 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -61,11 +61,11 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use sp_trie::{MemoryDB, PrefixedMemoryDB}; use sp_database::Transaction; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, 
traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildChange, ChildrenMap, ChildType}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -224,12 +224,13 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, child_change, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -555,7 +556,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, - db_updates: PrefixedMemoryDB>, + db_updates: ChildrenMap<(ChildChange, PrefixedMemoryDB>)>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -610,7 +611,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + fn update_db_storage( + &mut self, + update: ChildrenMap<(ChildChange, PrefixedMemoryDB>)>, + ) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -626,11 +630,12 @@ impl sc_client_api::backend::BlockImportOperation for Bloc let child_delta = storage.children_default.into_iter().map(|(_storage_key, child_content)|( child_content.child_info, + child_content.child_change, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| { if k == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( @@ -640,7 +645,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } (k, Some(v)) }), - child_delta + child_delta, + false, ); self.db_updates = transaction; @@ -697,13 +703,69 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + fn get( + &self, + child_info: &ChildInfo, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { + // Default child trie (those with strong unique id) are put + // directly into the same address space at state_db level. + let key = keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); self.state_db.get(&key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } } +/// Cursor over all child trie encoded nodes in db. 
+pub fn child_trie_cursor ( + db: &DB, + col: sp_database::ColumnId, + batch: sp_database::ChildBatchRemove, + state: &mut S, + action: fn(db: &DB, col: sp_database::ColumnId, key: &[u8], state: &mut S), +) where + DB: sp_database::DatabaseRef, + H: hash_db::Hasher, + H::Out: Decode, +{ + let mut prefixed_key_buf = Vec::with_capacity(batch.keyspace.len() + 40); + + // we ignore all error here, TODO EMCH we could log it in some db dedicated key (like in meta) + if let Ok(root) = H::Out::decode(&mut batch.encoded_root.as_slice()) { + let ks_db = KeyspacedDB(db, &batch.keyspace, col); + if let Ok(trie) = sp_trie::TrieDB::>::new(&ks_db, &root) { + if let Ok(iter) = sp_trie::TrieDBNodeIterator::new(&trie) { + for x in iter { + if let Ok((prefix, Some(key_hash), _)) = x { + prefixed_key_buf.clear(); + prefixed_key_buf.extend_from_slice(&batch.keyspace[..]); + let trie_key = sp_trie::prefixed_key::(&key_hash, prefix.as_prefix()); + prefixed_key_buf.extend_from_slice(trie_key.as_slice()); + action(db, col, prefixed_key_buf.as_slice(), state); + } + } + } + } + } +} + +struct KeyspacedDB<'a, DB: sp_database::DatabaseRef>(&'a DB, &'a[u8], sp_database::ColumnId); + +impl<'a, H: hash_db::Hasher, DB: sp_database::DatabaseRef> hash_db::HashDBRef> for KeyspacedDB<'a, DB> { + fn get(&self, key: &H::Out, prefix: hash_db::Prefix) -> Option> { + let mut prefixed_key = Vec::with_capacity(self.1.len() + 40); + prefixed_key.extend_from_slice(&self.1[..]); + let trie_key = sp_trie::prefixed_key::(key, prefix); + prefixed_key.extend_from_slice(trie_key.as_slice()); + ::get(self.0, self.2, &prefixed_key) + } + + fn contains(&self, key: &H::Out, prefix: hash_db::Prefix) -> bool { + >>::get(self, key, prefix).is_some() + } +} + impl sc_state_db::NodeDb for StorageDb { type Error = io::Error; type Key = [u8]; @@ -725,7 +787,12 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get( 
+ &self, + _trie: &ChildInfo, + _key: &Block::Hash, + _prefix: Prefix, + ) -> Result, String> { Ok(None) } } @@ -799,6 +866,16 @@ impl Backend { #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + Self::new_test_memorydb(db, keep_blocks, canonicalization_delay) + } + + /// Create new memory-backed client backend for tests. + #[cfg(any(test, feature = "test-helpers"))] + fn new_test_memorydb( + db: kvdb_memorydb::InMemory, + keep_blocks: u32, + canonicalization_delay: u64, + ) -> Self { let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, @@ -810,6 +887,24 @@ impl Backend { Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") } + + /// Create new memory-backed client backend for tests, using + /// parity-db + #[cfg(all(feature = "parity-db", any(test, feature = "test-helpers")))] + pub fn new_test_parity_db(keep_blocks: u32, canonicalization_delay: u64) -> Self { + use tempfile::tempdir; + let base_path = tempdir().expect("could not create a temp dir"); + let db_setting = DatabaseSettings { + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + pruning: PruningMode::keep_blocks(keep_blocks), + source: DatabaseSettingsSrc::ParityDb { + path: base_path.path().into(), + }, + }; + Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") + } + fn from_database( db: Arc>, canonicalization_delay: u64, @@ -1084,22 +1179,43 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut state_db_changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; - let mut bytes: u64 = 0; + let mut bytes = 0; let mut removal: u64 = 0; let mut bytes_removal: u64 = 0; - for (key, (val, rc)) in 
operation.db_updates.drain() { - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - - changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - removal += 1; - bytes_removal += key.len() as u64; - - changeset.deleted.push(key); + let mut keyspace = Keyspaced::new(&[]); + for (info, (change, mut updates)) in operation.db_updates.into_iter() { + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::ParentKeyId { + // Unhandled child kind + return Err(ClientError::Backend(format!( + "Data for {:?} without a backend implementation", + info.child_type(), + ))); + } + if let ChildChange::BulkDelete(encoded_root) = change { + state_db_changeset.deleted_child.push((info.keyspace().to_vec(), encoded_root)); + } else { + keyspace.change_keyspace(info.keyspace()); + for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + + state_db_changeset.inserted.push((key, val.to_vec())); + } else if rc < 0 { + removal += 1; + bytes_removal += key.len() as u64; + + state_db_changeset.deleted.push(key); + } + } } } self.state_usage.tally_writes_nodes(ops, bytes); @@ -1108,7 +1224,7 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { + .chain(operation.child_storage_updates.iter().flat_map(|(_, _, s)| s.iter())) { ops += 1; bytes += key.len() as u64; if let Some(v) = value.as_ref() { @@ -1121,7 +1237,7 @@ impl Backend { &hash, number_u64, &pending_block.header.parent_hash(), - changeset, + state_db_changeset, ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)) )?; @@ -1312,6 +1428,13 @@ fn apply_state_commit(transaction: &mut 
Transaction, commit: sc_state_db for key in commit.data.deleted.into_iter() { transaction.remove(columns::STATE, &key[..]); } + for (keyspace, encoded_root) in commit.data.deleted_child.into_iter() { + let child_remove = sp_database::ChildBatchRemove { + encoded_root, + keyspace, + }; + transaction.delete_child(columns::STATE, child_remove); + } for (key, val) in commit.meta.inserted.into_iter() { transaction.set_from_vec(columns::STATE_META, &key[..], val); } @@ -1357,7 +1480,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(BlockImportOperation { pending_block: None, old_state, - db_updates: PrefixedMemoryDB::default(), + db_updates: Default::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_config_update: None, @@ -1624,6 +1747,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), + &ChildInfo::top_trie(), &header.state_root(), (&[], None), ).unwrap_or(None).is_some() @@ -1642,6 +1766,47 @@ impl sc_client_api::backend::Backend for Backend { impl sc_client_api::backend::LocalBackend for Backend {} +/// Rules for storing a default child trie with unique id. +struct Keyspaced { + keyspace_len: usize, + buffer: Vec, +} + +impl Keyspaced { + fn new(keyspace: &[u8]) -> Self { + Keyspaced { + keyspace_len: keyspace.len(), + buffer: keyspace.to_vec(), + } + } + + fn change_keyspace(&mut self, new_keyspace: &[u8]) { + self.keyspace_len = new_keyspace.len(); + self.buffer.resize(new_keyspace.len(), 0); + self.buffer[..new_keyspace.len()].copy_from_slice(new_keyspace); + } + + fn prefix_key(&mut self, key: &[u8]) -> &[u8] { + self.buffer.resize(self.keyspace_len + key.len(), 0); + self.buffer[self.keyspace_len..].copy_from_slice(key); + self.buffer.as_slice() + } +} + +// Prefix key and add keyspace with a single vec alloc +// Warning if memory_db `sp_trie::prefixed_key` implementation change, this function +// will need change too. 
+fn keyspace_and_prefixed_key(key: &[u8], keyspace: &[u8], prefix: Prefix) -> Vec { + let mut prefixed_key = Vec::with_capacity(key.len() + keyspace.len() + prefix.0.len() + 1); + prefixed_key.extend_from_slice(keyspace); + prefixed_key.extend_from_slice(prefix.0); + if let Some(last) = prefix.1 { + prefixed_key.push(last); + } + prefixed_key.extend_from_slice(key); + prefixed_key +} + #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; @@ -1767,9 +1932,436 @@ pub(crate) mod tests { } } + #[cfg(feature = "parity-db")] + #[test] + fn bulk_delete_child_trie_non_iterable_db() { + use kvdb::KeyValueDB; + let db = Backend::::new_test_parity_db(3, 0); + + let mut count = 0; + let in_mem_ref_db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + // implementing KeyValueDB for Arc could avoid using + // this unsafe pointer. + let in_mem_ref: *const kvdb_memorydb::InMemory = &in_mem_ref_db; + let db_ref = Backend::::new_test_memorydb(in_mem_ref_db, 3, 0); + + let child_info = sp_core::storage::ChildInfo::new_default(b"key1"); + + let (hash, child_root) = { + let mut op = db.begin_operation().unwrap(); + let mut op_ref = db_ref.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + db_ref.begin_state_operation(&mut op_ref, BlockId::Hash(Default::default())).unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![]; + + let child_storage = vec![ + (vec![2, 3, 5], Some(vec![4, 4, 6])), + (vec![2, 2, 3], Some(vec![7, 9, 9])), + (vec![1, 2, 3], Some(vec![7, 9, 8])), + ]; + + header.state_root = op.old_state.full_storage_root(storage + .iter() + .cloned() + .map(|(x, y)| (x, Some(y))), + vec![(child_info.clone(), ChildChange::Update, child_storage.clone())], + false, + ).0.into(); + let hash = header.hash(); + + let mut children_default = 
HashMap::default(); + children_default.insert(child_info.storage_key().to_vec(), sp_core::storage::StorageChild { + child_info: child_info.clone(), + child_change: Default::default(), + data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), + }); + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children_default: children_default.clone(), + }).unwrap(); + op.set_block_data( + header.clone(), + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + db.commit_operation(op).unwrap(); + + op_ref.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children_default, + }).unwrap(); + op_ref.set_block_data( + header.clone(), + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + db_ref.commit_operation(op_ref).unwrap(); + + let state = db.state_at(BlockId::Number(0)).unwrap(); + + let child_root = state.storage(&child_info.prefixed_storage_key()[..]) + .unwrap().unwrap(); + assert_eq!( + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); + assert_eq!( + state.child_storage(&child_info, &[2, 2, 3]).unwrap(), + Some(vec![7, 9, 9]), + ); + assert_eq!( + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); + + for key in unsafe { in_mem_ref.as_ref() }.unwrap().iter(columns::STATE) { + assert!(db.storage.db.get( + columns::STATE, + &key.0 + ).is_some()); + count += 1; + } + assert!(count > 0); + (hash, child_root) + }; + let assert_count = |count_ref: usize, count: &mut usize| { + let mut new_count = 0; + for key in unsafe { in_mem_ref.as_ref() }.unwrap().iter(columns::STATE) { + assert!(db.storage.db.get( + columns::STATE, + &key.0 + ).is_some()); + new_count += 1; + } + assert_eq!(new_count, count_ref); + *count = new_count; + }; + let hash = { + let mut op = db.begin_operation().unwrap(); + let mut op_ref = db_ref.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + 
db_ref.begin_state_operation(&mut op_ref, BlockId::Number(0)).unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + let storage = vec![]; + + let child_storage = vec![( + child_info.clone(), + ChildChange::BulkDelete(child_root), + vec![], + )]; + let (root, overlay, _) = op.old_state.full_storage_root(storage + .iter() + .cloned(), + child_storage.clone(), + false, + ); + op.update_db_storage(overlay.clone()).unwrap(); + op_ref.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + op.update_storage(storage.clone(), child_storage.clone()).unwrap(); + op_ref.update_storage(storage, child_storage).unwrap(); + + let hash = header.hash(); + + op.set_block_data( + header.clone(), + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + db.commit_operation(op).unwrap(); + + op_ref.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + db_ref.commit_operation(op_ref).unwrap(); + + let state = db.state_at(BlockId::Number(1)).unwrap(); + + assert_eq!( + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), + None, + ); + assert_eq!( + state.child_storage(&child_info, &[2, 2, 3]).unwrap(), + None, + ); + + let mut new_count = 0; + for key in unsafe { in_mem_ref.as_ref() }.unwrap().iter(columns::STATE) { + assert!(db.storage.db.get( + columns::STATE, + &key.0 + ).is_some()); + new_count += 1; + } + // new state is empty root so it is not stored and keep count constant. 
+ assert_count(count, &mut count); + + hash + }; + + let next_block = |number, parent_hash| { + let mut op = db.begin_operation().unwrap(); + let mut op_ref = db_ref.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Number(number)).unwrap(); + db_ref.begin_state_operation(&mut op_ref, BlockId::Number(number)).unwrap(); + let header = Header { + number: number + 1, + parent_hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let hash = header.hash(); + op.set_block_data( + header.clone(), + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + db.commit_operation(op).unwrap(); + + op_ref.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + db_ref.commit_operation(op_ref).unwrap(); + + hash + }; + let hash = next_block(1, hash); + assert_count(count, &mut count); + let hash = next_block(2, hash); + assert_count(count, &mut count); + next_block(3, hash); + assert_count(0, &mut count); + } + + + #[test] + fn bulk_delete_child_trie_iterable_db() { + let backend = Backend::::new_test(2, 0); + let _ = ::env_logger::try_init(); + let mut key = Vec::new(); + // This is not collision resistant but enough for the test. 
+ let storage_key = b"unique_storage_key"; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op.old_state.storage_root(storage + .iter() + .cloned() + .map(|(x, y)| (x, Some(y))) + ).0.into(); + let hash = header.hash(); + + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children_default: Default::default(), + }).unwrap(); + + let top = op.db_updates.entry(ChildInfo::top_trie()) + .or_insert_with(Default::default); + key.push(top.1.insert(EMPTY_PREFIX, b"hello1")); + key.push(top.1.insert(EMPTY_PREFIX, b"hello2")); + key.push(top.1.insert(EMPTY_PREFIX, b"hello3")); + let child = op.db_updates.entry(ChildInfo::new_default(storage_key)) + .or_insert_with(Default::default); + key.push(child.1.insert(EMPTY_PREFIX, b"hello4")); + key.push(child.1.insert(EMPTY_PREFIX, b"hello5")); + key.push(child.1.insert(EMPTY_PREFIX, b"hello6")); + op.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + backend.commit_operation(op).unwrap(); + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[0], EMPTY_PREFIX) + ).unwrap(), &b"hello1"[..]); + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[3], (storage_key, None)) + ).unwrap(), &b"hello4"[..]); + hash + }; + + let key = key; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = 
vec![]; + + header.state_root = op.old_state.storage_root(storage + .iter() + .cloned() + .map(|(x, y)| (x, Some(y))) + ).0.into(); + let hash = header.hash(); + + let child_info = ChildInfo::new_default(storage_key); + let child = op.db_updates.entry(child_info) + .or_insert_with(Default::default); + // this test bulk deletion on by keyspace, the root in parameter + // is unused. + child.0 = ChildChange::BulkDelete(Default::default()); + op.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + backend.commit_operation(op).unwrap(); + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[0], EMPTY_PREFIX) + ).unwrap(), &b"hello1"[..]); + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[3], (storage_key, None)) + ).unwrap(), &b"hello4"[..]); + hash + }; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + let mut header = Header { + number: 2, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op.old_state.storage_root(storage + .iter() + .cloned() + .map(|(x, y)| (x, Some(y))) + ).0.into(); + let hash = header.hash(); + op.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + + backend.commit_operation(op).unwrap(); + + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[0], EMPTY_PREFIX) + ).unwrap(), &b"hello1"[..]); + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[3], (storage_key, None)) + ).unwrap(), &b"hello4"[..]); + hash + }; + + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); + let mut header = Header { + number: 3, + parent_hash: hash, + 
state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op.old_state.storage_root(storage + .iter() + .cloned() + .map(|(x, y)| (x, Some(y))) + ).0.into(); + + op.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + backend.commit_operation(op).unwrap(); + } + + assert_eq!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[0], EMPTY_PREFIX) + ).unwrap(), &b"hello1"[..]); + assert!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[3], (storage_key, None)) + ).is_none()); + assert!(backend.storage.db.get( + columns::STATE, + &sp_trie::prefixed_key::(&key[4], (storage_key, None)) + ).is_none()); + } + #[test] fn set_state_data() { let db = Backend::::new_test(2, 0); + + let child_info = sp_core::storage::ChildInfo::new_default(b"key1"); + let hash = { let mut op = db.begin_operation().unwrap(); db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); @@ -1786,16 +2378,29 @@ pub(crate) mod tests { (vec![1, 2, 3], vec![9, 9, 9]), ]; - header.state_root = op.old_state.storage_root(storage + let child_storage = vec![ + (vec![2, 3, 5], Some(vec![4, 4, 6])), + (vec![2, 2, 3], Some(vec![7, 9, 9])), + ]; + + header.state_root = op.old_state.full_storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + vec![(child_info.clone(), ChildChange::Update, child_storage.clone())], + false, ).0.into(); let hash = header.hash(); + let mut children_default = HashMap::default(); + children_default.insert(child_info.storage_key().to_vec(), sp_core::storage::StorageChild { + child_info: child_info.clone(), + child_change: Default::default(), + data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), + }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children_default: 
Default::default(), + children_default, }).unwrap(); op.set_block_data( header.clone(), @@ -1811,6 +2416,10 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); + assert_eq!( + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); hash }; @@ -1850,6 +2459,12 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + assert_eq!( + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); + + } } @@ -1884,7 +2499,9 @@ pub(crate) mod tests { children_default: Default::default(), }).unwrap(); - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + key = op.db_updates.entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .1.insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1920,8 +2537,14 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .1.insert(EMPTY_PREFIX, b"hello"); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .1.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1957,7 +2580,10 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .1.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index a4e64d310b877..6a912749f06be 100644 --- a/client/db/src/parity_db.rs +++ 
b/client/db/src/parity_db.rs @@ -16,9 +16,13 @@ /// A `Database` adapter for parity-db. -use sp_database::{Database, Change, Transaction, ColumnId}; +use sp_database::{Database, DatabaseRef, Change, Transaction, ColumnId, StateCursor}; -struct DbAdapter(parity_db::Db); +/// TODO EMCH make it private again (leaked from open input type) +pub struct DbAdapter( + parity_db::Db, + StateCursor, +); fn handle_err(result: parity_db::Result) -> T { match result { @@ -29,18 +33,62 @@ fn handle_err(result: parity_db::Result) -> T { } } -/// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn open(path: &std::path::Path, num_columns: u32) -> parity_db::Result>> { +/// Wrap ParityDb database into a trait object that implements `sp_database::Database` +pub fn open( + path: &std::path::Path, + num_columns: u32, + cursor: StateCursor, +) -> parity_db::Result>> { let db = parity_db::Db::with_columns(path, num_columns as u8)?; - Ok(std::sync::Arc::new(DbAdapter(db))) + Ok(std::sync::Arc::new(DbAdapter(db, cursor))) +} + +const BATCH_CHILD_DELETE_SIZE: usize = 256; + +/// TODO EMCH make it private (leaked from open input type) +pub struct BatchChildDelete { + ix: usize, + batch: Vec<(u8, Vec)>, } impl Database for DbAdapter { fn commit(&self, transaction: Transaction) { - handle_err(self.0.commit(transaction.0.into_iter().map(|change| + transaction.0.iter().for_each(|change| + match change { + Change::DeleteChild(col, child) => { + let mut batch = BatchChildDelete { + ix: 0, + batch: vec![(0, Vec::new()); 256], + }; + + fn extract_input(i: &mut (u8, Vec)) -> (u8, Vec, Option>) { + let key = std::mem::replace(&mut i.1, Vec::new()); + (i.0, key, None) + }; + + self.1(&self, *col, child.clone(), &mut batch, |db, col, key, batch| { + batch.batch[batch.ix] = (col as u8, key.to_vec()); + batch.ix += 1; + if batch.ix == BATCH_CHILD_DELETE_SIZE { + handle_err(db.0.commit(batch.batch[..].iter_mut().map(extract_input))); + batch.ix = 0; + } + }); + + if 
batch.ix > 0 { + handle_err(self.0.commit(batch.batch[..batch.ix].iter_mut().map(extract_input))); + } + + }, + _ => (), + } + ); + + handle_err(self.0.commit(transaction.0.into_iter().filter_map(|change| match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), + Change::Set(col, key, value) => Some((col as u8, key, Some(value))), + Change::Remove(col, key) => Some((col as u8, key, None)), + Change::DeleteChild(..) => None, _ => unimplemented!(), })) ); @@ -54,3 +102,9 @@ impl Database for DbAdapter { unimplemented!(); } } + +impl DatabaseRef for DbAdapter { + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + handle_err(self.0.get(col as u8, key)) + } +} diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 66ac74afa4f2a..dd79029f9d127 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -24,7 +24,7 @@ use linked_hash_map::{LinkedHashMap, Entry}; use hash_db::Hasher; use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildChange}; use sp_state_machine::{ backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, @@ -148,6 +148,20 @@ impl LRUMap { } +impl LRUMap<(Vec, Vec), V> { + fn remove_by_storage_key(&mut self, storage_key: &[u8]) { + let map = &mut self.0; + let storage_used_size = &mut self.1; + map.entries().for_each(|entry| { + if entry.key().0.starts_with(storage_key) { + *storage_used_size -= entry.key().estimate_size(); + *storage_used_size -= entry.get().estimate_size(); + let _ = entry.remove(); + } + }); + } +} + impl Cache { /// Returns the used memory size of the storage cache in bytes. 
pub fn used_storage_cache_size(&self) -> usize { @@ -179,6 +193,10 @@ impl Cache { trace!("Reverting enacted child key {:?}", a); self.lru_child_storage.remove(a); } + for a in &m.deleted_child { + trace!("Retracted child {:?}", a); + self.lru_child_storage.remove_by_storage_key(a.as_slice()); + } false } else { true @@ -199,6 +217,10 @@ impl Cache { trace!("Retracted child key {:?}", a); self.lru_child_storage.remove(a); } + for a in &m.deleted_child { + trace!("Retracted child {:?}", a); + self.lru_child_storage.remove_by_storage_key(a.as_slice()); + } false } else { true @@ -256,6 +278,8 @@ struct BlockChanges { storage: HashSet, /// A set of modified child storage keys. child_storage: HashSet, + /// A collection of removed child. + deleted_child: HashSet>, /// Block is part of the canonical chain. is_canon: bool, } @@ -274,6 +298,8 @@ struct LocalCache { /// /// `None` indicates that key is known to be missing. child_storage: HashMap>, + /// A collection of removed child. + deleted_child: HashSet>, } /// Cache changes. @@ -358,7 +384,7 @@ impl CacheChanges { local_cache.storage.len(), local_cache.hashes.len(), changes.len(), - child_changes.iter().map(|v|v.1.len()).sum::(), + child_changes.iter().map(|v|v.2.len()).sum::(), ); for (k, v) in local_cache.storage.drain() { cache.lru_storage.add(k, v); @@ -369,6 +395,11 @@ impl CacheChanges { for (k, v) in local_cache.hashes.drain() { cache.lru_hashes.add(k, OptionHOut(v)); } + for storage_key in local_cache.deleted_child.drain() { + // currently only contain bulk deletion where storage key + // cannot be use once again. 
+ cache.lru_child_storage.remove_by_storage_key(&storage_key); + } } } @@ -381,13 +412,21 @@ impl CacheChanges { } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| - for (k, v) in changes.into_iter() { - let k = (sk.clone(), k); - if is_best { - cache.lru_child_storage.add(k.clone(), v); - } - child_modifications.insert(k); + let mut deleted_child = HashSet::new(); + child_changes.into_iter().for_each(|(child_info, child_change, child_values)| + match child_change { + ChildChange::Update => for (k, v) in child_values.into_iter() { + let k = (child_info.storage_key().to_vec(), k); + if is_best { + cache.lru_child_storage.add(k.clone(), v); + } + child_modifications.insert(k); + }, + ChildChange::BulkDelete(..) => { + // Note that this is a rather costy operation. + cache.lru_child_storage.remove_by_storage_key(child_info.storage_key()); + deleted_child.insert(child_info.storage_key().to_vec()); + }, } ); for (k, v) in changes.into_iter() { @@ -397,11 +436,11 @@ impl CacheChanges { } modifications.insert(k); } - // Save modified storage. These are ordered by the block number in reverse. 
let block_changes = BlockChanges { storage: modifications, child_storage: child_modifications, + deleted_child, number: *number, hash: hash.clone(), is_canon: is_best, @@ -438,6 +477,7 @@ impl>, B: BlockT> CachingState { storage: Default::default(), hashes: Default::default(), child_storage: Default::default(), + deleted_child: Default::default(), }), parent_hash, }, @@ -631,12 +671,13 @@ impl>, B: BlockT> StateBackend> for Cachin fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, child_change, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -816,12 +857,13 @@ impl>, B: BlockT> StateBackend> for Syncin fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.caching_state().child_storage_root(child_info, delta) + self.caching_state().child_storage_root(child_info, child_change, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -1174,6 +1216,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { + let child_info1 = ChildInfo::new_default(b"unique_id_1"); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); @@ -1183,7 +1226,6 @@ mod tests { ); let key = H256::random()[..].to_vec(); - let s_key = H256::random()[..].to_vec(); s.cache.sync_cache( &[], &[], @@ -1201,13 +1243,13 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], + vec![(child_info1, Default::default(), vec![(key.clone(), Some(vec![1, 2]))])], Some(h0), Some(0), true, ); - // 35 + (2 * 32) key, 2 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 101 /* bytes */); + // 35 + 32 key + prefixed_key, 2 byte size + 
assert_eq!(shared.lock().used_storage_cache_size(), 80 /* bytes */); } #[test] diff --git a/client/db/src/subdb.rs b/client/db/src/subdb.rs index 2e436aa2c92c8..d58216bea8da3 100644 --- a/client/db/src/subdb.rs +++ b/client/db/src/subdb.rs @@ -17,7 +17,9 @@ /// A `Database` adapter for subdb. use sp_database::{self, ColumnId}; +use sp_database::{DatabaseRef, StateCursor}; use parking_lot::RwLock; +use parking_lot::Mutex; use blake2_rfc::blake2b::blake2b; use codec::Encode; use subdb::{Database, KeyType}; @@ -25,15 +27,20 @@ use subdb::{Database, KeyType}; /// A database hidden behind an RwLock, so that it implements Send + Sync. /// /// Construct by creating a `Database` and then using `.into()`. -pub struct DbAdapter(RwLock>); +pub struct DbAdapter( + RwLock>, + StateCursor, BatchChildDelete>, +); -/// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn open( +/// Wrap SubDb database into a trait object that implements `sp_database::Database` +pub fn open>( path: &std::path::Path, _num_columns: u32, + cursor: C, ) -> Result>, subdb::Error> { let db = subdb::Options::from_path(path.into()).open()?; - Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db)))) + let cursor = Mutex::new(Box::new(cursor)); + Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db)), cursor)) } impl sp_database::Database for DbAdapter { @@ -84,4 +91,16 @@ impl sp_database::Database for DbAdapter { fn release(&self, hash: &H) { let _ = self.0.write().remove(hash); } + + fn delete_child(&self, col: ColumnId, child: ChildBatchRemove) { + self.1(&self, col, child, &mut (), |db, col, key, _state| { + db.remove(col, key.as_slice()) + }); + } +} + +impl sp_database::DatabaseRef for DbAdapter { + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + >::get(col, key) + } } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 95592d071f777..9dfc88b9c7d93 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -44,7 +44,6 @@ pub fn 
upgrade_db(db_path: &Path, _db_type: DatabaseType) -> sp_b update_version(db_path) } - /// Reads current database version from the file at given path. /// If the file does not exist returns 0. fn current_version(path: &Path) -> sp_blockchain::Result { diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 9506dc4e7fab0..c2bac330cad88 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -249,12 +249,13 @@ pub fn open_database( }, #[cfg(feature = "subdb")] DatabaseSettingsSrc::SubDb { path } => { - crate::subdb::open(&path, NUM_COLUMNS) + crate::subdb::open(&path, NUM_COLUMNS, crate::child_trie_cursor) .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? }, #[cfg(feature = "parity-db")] DatabaseSettingsSrc::ParityDb { path } => { - crate::parity_db::open(&path, NUM_COLUMNS) + let cursor = crate::child_trie_cursor::<_, sp_runtime::traits::HashFor, _>; + crate::parity_db::open(&path, NUM_COLUMNS, cursor) .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? }, DatabaseSettingsSrc::Custom(db) => db.clone(), diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d7d1d1e48d3a0..e767a1bc3a006 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -231,7 +231,7 @@ impl AuthoritySetForFinalityProver for TestApi { fn prove_authorities(&self, block: &BlockId) -> Result { let authorities = self.authorities(block)?; let backend = >>::from(vec![ - (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) + (None, Default::default(), vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); let proof = prove_read(backend, vec![b"authorities"]) .expect("failure proving read from in-memory storage backend"); diff --git a/client/src/cht.rs b/client/src/cht.rs index de67280632302..9f2a32a6ff29f 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -23,11 +23,10 @@ //! root has. 
A correct proof implies that the claimed block is identical to the one //! we discarded. -use hash_db; use codec::Encode; use sp_trie; -use sp_core::{H256, convert_hash}; +use sp_core::{H256, convert_hash, storage::ChildChange}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, @@ -86,7 +85,7 @@ pub fn compute_root( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, I: IntoIterator>>, { @@ -105,7 +104,7 @@ pub fn build_proof( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, BlocksI: IntoIterator, HashesI: IntoIterator>>, @@ -114,7 +113,8 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let mut storage = InMemoryBackend::::default() + .update(vec![(None, ChildChange::Update, transaction)]); let trie_storage = storage.as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( @@ -132,7 +132,7 @@ pub fn check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -161,7 +161,7 @@ pub fn check_proof_on_proving_backend( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -185,7 +185,7 @@ fn do_check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { diff --git a/client/src/client.rs b/client/src/client.rs index 2a8040febf3ef..adb15ef7752f9 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -398,7 
+398,11 @@ impl Client where self.storage.with_cached_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { self.storage.get(key, prefix) } } @@ -962,7 +966,9 @@ impl Client where .trigger( ¬ify_import.hash, storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + storage_changes.1.into_iter().map(|(child_info, child_change, child_values)| ( + child_info.storage_key().to_vec(), child_change, child_values.into_iter(), + )), ); } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 20b227e790f6e..e48d30896561c 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -517,12 +517,16 @@ impl backend::BlockImportOperation for BlockImportOperatio check_genesis_storage(&storage)?; let child_delta = storage.children_default.into_iter() - .map(|(_storage_key, child_content)| - (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); + .map(|(_storage_key, child_content)|( + child_content.child_info, + child_content.child_change, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + )); - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta + child_delta, + false, ); self.new_state = Some(InMemoryBackend::from(transaction)); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 01e9854864062..bf27d44e32523 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildChange}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend 
as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -46,7 +46,7 @@ use sc_client_api::{ UsageInfo, }; use crate::light::blockchain::Blockchain; -use hash_db::Hasher; +use sp_core::Hasher; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; @@ -313,20 +313,23 @@ impl BlockImportOperation for ImportOperation // this is only called when genesis block is imported => shouldn't be performance bottleneck let mut storage: HashMap, _> = HashMap::new(); - storage.insert(None, input.top); + storage.insert(None, (ChildChange::Update, input.top)); // create a list of children keys to re-compute roots for let child_delta = input.children_default.iter() - .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) - .collect::>(); + .map(|(_storage_key, storage_child)| ( + storage_child.child_info.clone(), + storage_child.child_change.clone(), + None, + )).collect::>(); // make sure to persist the child storage for (_child_key, storage_child) in input.children_default { - storage.insert(Some(storage_child.child_info), storage_child.data); + storage.insert(Some(storage_child.child_info), (storage_child.child_change, storage_child.data)); } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let (storage_root, _, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, false); self.storage_update = Some(storage_update); Ok(storage_root) @@ -471,6 +474,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -478,7 +482,7 @@ impl StateBackend for GenesisOrUnavailableState { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta); + let (root, is_equal, _) = 
state.child_storage_root(child_info, child_change, delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index b439a268d2fe1..7222bd44555da 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -30,7 +30,7 @@ use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, }; -use hash_db::Hasher; +use sp_core::Hasher; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index ef6a062cf3c07..702dd33c6c399 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use hash_db::{HashDB, EMPTY_PREFIX}; +use sp_core::Hasher; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_core::storage::{ChildInfo, ChildType}; diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 49b1a59285e11..f7809be4e1e55 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -114,6 +114,15 @@ impl fmt::Debug for Error { } } +/// A set of key value data changes. +#[derive(Default, Debug, Clone)] +pub struct MetaChangeSet { + /// Inserted values. + pub inserted: Vec<(Vec, DBValue)>, + /// Deleted values. + pub deleted: Vec>, +} + /// A set of state node changes. #[derive(Default, Debug, Clone)] pub struct ChangeSet { @@ -121,6 +130,9 @@ pub struct ChangeSet { pub inserted: Vec<(H, DBValue)>, /// Deleted nodes. pub deleted: Vec, + /// Bulk deletion of child trie, contains + /// its unique identifier and its encoded root. 
+ pub deleted_child: Vec<(Vec, Vec)>, } /// A set of changes to the backing database. @@ -129,7 +141,7 @@ pub struct CommitSet { /// State node changes. pub data: ChangeSet, /// Metadata changes. - pub meta: ChangeSet>, + pub meta: MetaChangeSet, } /// Pruning constraints. If none are specified pruning is @@ -246,7 +258,7 @@ impl StateDbSync, ) -> Result, Error> { - let mut meta = ChangeSet::default(); + let mut meta = MetaChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. meta.inserted.push((to_meta_key(PRUNING_MODE, &()), self.mode.id().into())); @@ -283,6 +295,7 @@ impl StateDbSync { if self.mode == PruningMode::ArchiveCanonical { commit.data.deleted.clear(); + commit.data.deleted_child.clear(); } } Err(e) => return Err(e), diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6a34523b66fff..60eb80c5783cb 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -27,6 +27,8 @@ use codec::{Encode, Decode}; use log::trace; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; +// version at start to avoid collision on v10 +const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; /// See module documentation. 
@@ -44,17 +46,42 @@ pub struct NonCanonicalOverlay { } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, parent_hash: BlockHash, inserted: Vec<(Key, DBValue)>, deleted: Vec, } -fn to_journal_key(block: u64, index: u64) -> Vec { +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + parent_hash: BlockHash, + inserted: Vec<(Key, DBValue)>, + deleted: Vec, + deleted_child: Vec<(Vec, Vec)>, +} + +impl From> for JournalRecordV1 { + fn from(old: JournalRecordCompat) -> Self { + JournalRecordV1 { + hash: old.hash, + parent_hash: old.parent_hash, + inserted: old.inserted, + deleted: old.deleted, + deleted_child: Vec::new(), + } + } +} + +fn to_old_journal_key(block: u64, index: u64) -> Vec { to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } +fn to_journal_key_v1(block: u64, index: u64) -> Vec { + to_meta_key(NON_CANONICAL_JOURNAL_V1, &(block, index)) +} + #[cfg_attr(test, derive(PartialEq, Debug))] #[derive(parity_util_mem_derive::MallocSizeOf)] struct BlockOverlay { @@ -62,6 +89,7 @@ struct BlockOverlay { journal_key: Vec, inserted: Vec, deleted: Vec, + deleted_child: Vec<(Vec, Vec)>, } fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { @@ -146,26 +174,35 @@ impl NonCanonicalOverlay { let mut index: u64 = 0; let mut level = Vec::new(); loop { - let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; + + let journal_key = to_journal_key_v1(block, index); + let record: JournalRecordV1 = match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => { + let record: JournalRecordCompat = Decode::decode(&mut record.as_slice())?; + record.into() + }, + None => break, + } }, - None => break, - } + }; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + deleted_child: record.deleted_child, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; } if level.is_empty() { break; @@ -223,7 +260,7 @@ impl NonCanonicalOverlay { }; let index = level.len() as u64; - let journal_key = to_journal_key(number, index); + let journal_key = to_journal_key_v1(number, index); let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { @@ -231,14 +268,16 @@ 
impl NonCanonicalOverlay { journal_key: journal_key.clone(), inserted: inserted, deleted: changeset.deleted.clone(), + deleted_child: changeset.deleted_child.clone(), }; level.push(overlay); self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { + let journal_record = JournalRecordV1 { hash: hash.clone(), parent_hash: parent_hash.clone(), inserted: changeset.inserted, deleted: changeset.deleted, + deleted_child: changeset.deleted_child, }; commit.meta.inserted.push((journal_key, journal_record.encode())); trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); @@ -324,6 +363,7 @@ impl NonCanonicalOverlay { commit.data.inserted.extend(overlay.inserted.iter() .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); commit.data.deleted.extend(overlay.deleted.clone()); + commit.data.deleted_child.extend(overlay.deleted_child.clone()); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -475,7 +515,7 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; - use super::{NonCanonicalOverlay, to_journal_key}; + use super::{NonCanonicalOverlay, to_journal_key_v1}; use crate::{ChangeSet, CommitSet}; use crate::test::{make_db, make_changeset}; @@ -754,11 +794,11 @@ mod tests { assert!(contains(&overlay, 111)); assert!(!contains(&overlay, 211)); // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); + 
assert!(db.get_meta(&to_journal_key_v1(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key_v1(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 3)).unwrap().is_none()); // canonicalize 1_2. 1_1 and all its children should be discarded let mut commit = CommitSet::default(); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 6cf5f260060f5..fa3b0bdabd208 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -28,7 +28,8 @@ use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; const LAST_PRUNED: &[u8] = b"last_pruned"; -const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] @@ -52,17 +53,41 @@ struct DeathRow { hash: BlockHash, journal_key: Vec, deleted: HashSet, + deleted_child: HashSet<(Vec, Vec)>, } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, inserted: Vec, deleted: Vec, } -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + inserted: Vec, + deleted: Vec, + deleted_child: Vec<(Vec, Vec)>, +} + +fn to_old_journal_key(block: u64) -> Vec { + to_meta_key(OLD_PRUNING_JOURNAL, &block) +} + +fn to_journal_key_v1(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL_V1, &block) +} + +impl From> for JournalRecordV1 { + fn from(old: JournalRecordCompat) -> Self { + JournalRecordV1 { + hash: old.hash, + inserted: old.inserted, + deleted: old.deleted, + deleted_child: Vec::new(), + } + } } impl RefWindow { @@ -84,21 +109,33 @@ impl RefWindow { // read the journal trace!(target: 
"state-db", "Reading pruning journal. Pending #{}", pending_number); loop { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + let journal_key = to_journal_key_v1(block); + let record: JournalRecordV1 = match db.get_meta(&journal_key) + .map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => JournalRecordCompat::decode(&mut record.as_slice())?.into(), + None => break, + } }, - None => break, - } + }; + trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted, record.deleted_child); block += 1; } Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + deleted_child: Vec<(Vec, Vec)>, + ) { // remove all re-inserted keys from death rows for k in inserted { if let Some(block) = self.death_index.remove(&k) { @@ -115,6 +152,7 @@ impl RefWindow { DeathRow { hash: hash.clone(), deleted: deleted.into_iter().collect(), + deleted_child: deleted_child.into_iter().collect(), journal_key: journal_key, } ); @@ -146,6 +184,7 @@ impl RefWindow { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; 
commit.data.deleted.extend(pruned.deleted.iter().cloned()); + commit.data.deleted_child.extend(pruned.deleted_child.iter().cloned()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); self.pending_prunings += 1; @@ -159,15 +198,17 @@ impl RefWindow { trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); - let journal_record = JournalRecord { + let deleted_child = ::std::mem::replace(&mut commit.data.deleted_child, Vec::new()); + let journal_record = JournalRecordV1 { hash: hash.clone(), inserted, deleted, + deleted_child, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); + let journal_key = to_journal_key_v1(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); + self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted, journal_record.deleted_child); self.pending_canonicalizations += 1; } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index accafa9bf831f..3d92ccc252e7d 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,7 +18,7 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use crate::{DBValue, ChangeSet, MetaChangeSet, CommitSet, MetaDb, NodeDb}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { @@ -70,13 +70,14 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { }) .collect(), deleted: deleted.iter().map(|v| 
H256::from_low_u64_be(*v)).collect(), + deleted_child: Default::default(), } } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { data: make_changeset(inserted, deleted), - meta: ChangeSet::default(), + meta: MetaChangeSet::default(), } } diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index aae853d2ff996..14c9ead7e6c22 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -17,7 +17,7 @@ //! Auxiliaries to help with managing partial changes to accounts state. use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieIdGenerator, }; use crate::exec::StorageKey; @@ -27,7 +27,7 @@ use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance}; -use frame_support::{storage::child, StorageMap}; +use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; use frame_system; // Note: we don't provide Option because we can't create @@ -108,7 +108,12 @@ pub trait AccountDb { /// /// Trie id is None iff account doesn't have an associated trie id in >. /// Because DirectAccountDb bypass the lookup for this association. - fn get_storage(&self, account: &T::AccountId, trie_id: Option<&TrieId>, location: &StorageKey) -> Option>; + fn get_storage( + &self, + account: &T::AccountId, + trie_id: Option<&ChildInfo>, + location: &StorageKey + ) -> Option>; /// If account has an alive contract then return the code hash associated. fn get_code_hash(&self, account: &T::AccountId) -> Option>; /// If account has an alive contract then return the rent allowance associated. 
@@ -125,10 +130,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<&TrieId>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) + trie_id.and_then(|child_info| child::get_raw(child_info, &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -210,18 +215,19 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } + let child_info = &new_info.child_trie_info(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.child_trie_info(), + child_info, &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]); + child::put_raw(child_info, &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.child_trie_info(), &blake2_256(&k)); + child::kill(child_info, &blake2_256(&k)); } } @@ -326,7 +332,7 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&TrieId>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 402622331d0ec..d8b42b2f9ecae 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -26,6 +26,7 @@ use frame_support::{ storage::unhashed, dispatch::DispatchError, traits::{WithdrawReason, Currency, Time, Randomness}, }; +use sp_core::storage::ChildInfo; pub type AccountIdOf = ::AccountId; pub type CallOf = ::Call; @@ -291,7 +292,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub 
self_account: T::AccountId, - pub self_trie_id: Option, + pub self_trie_info: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -315,7 +316,7 @@ where pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { caller: None, - self_trie_id: None, + self_trie_info: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -328,12 +329,12 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { caller: Some(self), - self_trie_id: trie_id, + self_trie_info: trie_info, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -575,7 +576,9 @@ where where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id); + let mut nested = self.nested(dest, trie_id.map(|trie_id| { + crate::child_trie_info(&trie_id) + })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -735,7 +738,12 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) + let trie_id = self.ctx.self_trie_info.as_ref(); + self.ctx.overlay.get_storage( + &self.ctx.self_account, + trie_id, + key, + ) } fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 2513f2fb618e2..99f42f5752238 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -693,10 +693,11 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; + let child_info = child_trie_info(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, 
&address, - Some(&contract_info.trie_id), + Some(&child_info), &key, ); Ok(maybe_value) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 452a4517dbe68..bd1c23b7ff44f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -312,6 +312,10 @@ fn account_removal_does_not_remove_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let child_info1 = crate::child_trie_info(trie_id1.as_ref()); + let child_info2 = crate::child_trie_info(trie_id2.as_ref()); + let child_info1 = Some(&child_info1); + let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -360,20 +364,20 @@ fn account_removal_does_not_remove_storage() { // Verify that no entries are removed. { assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), + >::get_storage(&DirectAccountDb, &1, child_info1, key1), Some(b"1".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), + >::get_storage(&DirectAccountDb, &1, child_info1, key2), Some(b"2".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), + >::get_storage(&DirectAccountDb, &2, child_info2, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), + >::get_storage(&DirectAccountDb, &2, child_info2, key2), Some(b"4".to_vec()) ); } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index a3fc15ba7e249..271899004d8bc 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -43,7 +43,7 @@ pub use sp_state_machine::{ pub use sp_core::NativeOrEncoded; #[doc(hidden)] #[cfg(feature = "std")] -pub use hash_db::Hasher; +pub use sp_state_machine::{Hasher, InnerHasher}; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; diff --git a/primitives/core/src/lib.rs 
b/primitives/core/src/lib.rs index 8d5ad7daaec83..18d25f1333e48 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -79,7 +79,8 @@ pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; +pub use hash_db::Hasher as InnerHasher; +pub use hash_db::{Prefix, EMPTY_PREFIX}; #[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; @@ -357,3 +358,33 @@ macro_rules! impl_maybe_marker { )+ } } + +/// Technical trait to avoid calculating empty root. +/// This assumes (same wrong asumption as for hashdb trait), +/// an empty node is `[0u8]`. +pub trait Hasher: InnerHasher { + /// Value for an empty root node, this + /// is the hash of `[0u8]` value. + const EMPTY_ROOT: &'static [u8]; +} + +#[cfg(feature = "std")] +impl Hasher for Blake2Hasher { + const EMPTY_ROOT: &'static [u8] = &[ + 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, + 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, + 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, + 19, 20, + ]; +} + +#[cfg(test)] +mod test { + use super::{Blake2Hasher, Hasher, InnerHasher}; + + #[test] + fn empty_root_const() { + let empty = Blake2Hasher::hash(&[0u8]); + assert_eq!(Blake2Hasher::EMPTY_ROOT, empty.as_ref()); + } +} diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index 85a324b5c105f..a5a24821e83d1 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -43,6 +43,7 @@ impl Database for DbAdapter { match change { Change::Set(col, key, value) => tx.put_vec(col, &key, value), Change::Remove(col, key) => tx.delete(col, &key), + Change::DeleteChild(col, child) => tx.delete_prefix(col, child.keyspace.as_slice()), _ => unimplemented!(), } } diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index bd9bd2eb54c28..b2192479064cc 100644 --- a/primitives/database/src/lib.rs +++ 
b/primitives/database/src/lib.rs @@ -32,6 +32,7 @@ pub enum Change { Remove(ColumnId, Vec), Store(H, Vec), Release(H), + DeleteChild(ColumnId, ChildBatchRemove), } /// An alteration to the database that references the data. @@ -40,6 +41,7 @@ pub enum ChangeRef<'a, H> { Remove(ColumnId, &'a [u8]), Store(H, &'a [u8]), Release(H), + DeleteChild(ColumnId, ChildBatchRemove), } /// A series of changes to the database that can be committed atomically. They do not take effect @@ -47,6 +49,20 @@ pub enum ChangeRef<'a, H> { #[derive(Default, Clone)] pub struct Transaction(pub Vec>); +/// Removing child trie got different +/// implementation depending on database +/// capability. +#[derive(Clone)] +pub struct ChildBatchRemove { + /// For database without key iteration + /// we delete by parsing the whole trie. + pub encoded_root: Vec, + + /// Database that allows iteration can only + /// delete all key using this keyspace. + pub keyspace: Vec, +} + impl Transaction { /// Create a new transaction to be prepared and committed atomically. pub fn new() -> Self { @@ -76,11 +92,17 @@ impl Transaction { pub fn release(&mut self, hash: H) { self.0.push(Change::Release(hash)) } + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. + pub fn delete_child(&mut self, col: ColumnId, child: ChildBatchRemove) { + self.0.push(Change::DeleteChild(col, child)) + } } pub trait Database: Send + Sync { + /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` /// will reflect the new state. + /// A call back for manual child deletion is provided. 
fn commit(&self, transaction: Transaction) { for change in transaction.0.into_iter() { match change { @@ -88,6 +110,7 @@ pub trait Database: Send + Sync { Change::Remove(col, key) => self.remove(col, &key), Change::Store(hash, preimage) => self.store(&hash, &preimage), Change::Release(hash) => self.release(&hash), + Change::DeleteChild(col, child) => self.delete_child(col, child.clone()), } } } @@ -102,6 +125,7 @@ pub trait Database: Send + Sync { ChangeRef::Remove(col, key) => tx.remove(col, key), ChangeRef::Store(hash, preimage) => tx.store(hash, preimage), ChangeRef::Release(hash) => tx.release(hash), + ChangeRef::DeleteChild(col, child) => tx.delete_child(col, child.clone()), } } self.commit(tx); @@ -162,6 +186,13 @@ pub trait Database: Send + Sync { t.release(hash.clone()); self.commit(t); } + + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. + fn delete_child(&self, col: ColumnId, child: ChildBatchRemove) { + let mut t = Transaction::new(); + t.delete_child(col, child); + self.commit(t); + } } /// Call `f` with the value previously stored against `key` and return the result, or `None` if @@ -185,3 +216,28 @@ pub fn with_lookup(db: &dyn Database, hash: &H, mut f: impl FnMu db.with_lookup(hash, &mut adapter); result } + +/// To use with state cursor for querying only. +pub trait DatabaseRef: Send { + /// Retrieve the value previously stored against `key` or `None` if + /// `key` is not currently in the database. + fn get(&self, col: ColumnId, key: &[u8]) -> Option>; +} + +/// Iterate on key of a state. +pub type StateCursor = fn( + db: &DB, + col: ColumnId, + batch: ChildBatchRemove, + state: &mut S, + action: fn(db: &DB, col: ColumnId, key: &[u8], state: &mut S), +); + +/// Define context and clear previous usage state. 
+pub fn dummy_state_cursor( + _db: &DB, + _col: ColumnId, + _batch: ChildBatchRemove, + _state: &mut S, + _action: fn(db: &DB, col: ColumnId, key: &[u8], state: &mut S), +) { } diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index 09d6149bed174..c4a3198ee7e9d 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -17,32 +17,53 @@ //! In-memory implementation of `Database` use std::collections::HashMap; -use crate::{Database, Transaction, ColumnId, Change}; +use crate::{Database, DatabaseRef, Transaction, ColumnId, Change}; use parking_lot::RwLock; -#[derive(Default)] /// This implements `Database` as an in-memory hash map. `commit` is not atomic. -pub struct MemDb - (RwLock<(HashMap, Vec>>, HashMap>)>); +pub struct MemDb ( + InnerMemDb, + crate::StateCursor, ()>, +); + +type InnerMemDb = RwLock<(HashMap, Vec>>, HashMap>)>; + +impl Default for MemDb + where + H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash, +{ + // Memdb is very unlikely to use its state cursor + // so its default implementation is using the dummy cursor. 
+ fn default() -> Self { + MemDb( + InnerMemDb::default(), + crate::dummy_state_cursor, + ) + } +} impl Database for MemDb where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash { fn commit(&self, transaction: Transaction) { - let mut s = self.0.write(); + let s = || self.0.write(); for change in transaction.0.into_iter() { match change { - Change::Set(col, key, value) => { s.0.entry(col).or_default().insert(key, value); }, - Change::Remove(col, key) => { s.0.entry(col).or_default().remove(&key); }, - Change::Store(hash, preimage) => { s.1.insert(hash, preimage); }, - Change::Release(hash) => { s.1.remove(&hash); }, + Change::Set(col, key, value) => { s().0.entry(col).or_default().insert(key, value); }, + Change::Remove(col, key) => { s().0.entry(col).or_default().remove(&key); }, + Change::Store(hash, preimage) => { s().1.insert(hash, preimage); }, + Change::Release(hash) => { s().1.remove(&hash); }, + Change::DeleteChild(col, child) => { + self.1(&self, col, child, &mut (), |db, col, key, _state| { + db.0.write().0.entry(col).or_default().remove(key); + }); + }, } } } fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - let s = self.0.read(); - s.0.get(&col).and_then(|c| c.get(key).cloned()) + ::get(self, col, key) } fn lookup(&self, hash: &H) -> Option> { @@ -55,8 +76,8 @@ impl MemDb where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash { /// Create a new instance - pub fn new() -> Self { - MemDb::default() + pub fn new(c: crate::StateCursor, ()>) -> Self { + MemDb(Default::default(), c) } /// Count number of values in a column @@ -66,3 +87,11 @@ impl MemDb } } +impl DatabaseRef for MemDb + where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash +{ + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + let s = self.0.read(); + s.0.get(&col).and_then(|c| c.get(key).cloned()) + } +} diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index a5b3e71edcde3..2fc4eee5e0f28 100644 --- 
a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -38,7 +38,7 @@ pub use paste; pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] -pub use sp_core::storage::{Storage, StorageChild}; +pub use sp_core::storage::{Storage, StorageChild, ChildUpdate, ChildChange}; use sp_std::prelude::*; use sp_std::convert::TryFrom; @@ -139,9 +139,20 @@ impl BuildStorage for sp_core::storage::Storage { for (k, other_map) in self.children_default.iter() { let k = k.clone(); if let Some(map) = storage.children_default.get_mut(&k) { - map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(&other_map.child_info) { - return Err("Incompatible child info update".to_string()); + match map.child_info.try_update(&map.child_change, &other_map.child_info) { + ChildUpdate::Merge => { + match &other_map.child_change { + ChildChange::Update => map.data.extend( + other_map.data.iter().map(|(k, v)| (k.clone(), v.clone())) + ), + ChildChange::BulkDelete(encoded_root) => { + map.child_change = ChildChange::BulkDelete(encoded_root.clone()); + map.data.clear(); + }, + } + }, + ChildUpdate::Ignore => (), + ChildUpdate::Incompatible => return Err("Incompatible child info update".to_string()), } } else { storage.children_default.insert(k, other_map.clone()); diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index d843bdc478c49..0bc39baad6641 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -25,7 +25,7 @@ use std::fmt::Display; use std::str::FromStr; #[cfg(feature = "std")] use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, Hasher, TypeId, RuntimeDebug}; +use sp_core::{self, InnerHasher, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, @@ -326,19 +326,20 @@ impl::Output> { +pub 
trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + + PartialEq + InnerHasher::Output> + Hasher { /// The hash type produced. type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { - ::hash(s) + ::hash(s) } /// Produce the hash of some codec-encodable value. fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, ::hash) + Encode::using_encoded(s, ::hash) } /// The ordered Patricia tree root of the given `input`. @@ -353,7 +354,7 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + Parti #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; -impl Hasher for BlakeTwo256 { +impl InnerHasher for BlakeTwo256 { type Out = sp_core::H256; type StdHasher = hash256_std_hasher::Hash256StdHasher; const LENGTH: usize = 32; @@ -375,6 +376,15 @@ impl Hash for BlakeTwo256 { } } +impl Hasher for BlakeTwo256 { + const EMPTY_ROOT: &'static [u8] = &[ + 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, + 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, + 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, + 19, 20, + ]; +} + /// Something that can be checked for equality and printed out to a debug channel if bad. pub trait CheckEqual { /// Perform the equality check. @@ -1416,4 +1426,10 @@ mod tests { assert!(signature.verify(msg, &pair.public())); assert!(signature.verify(msg, &pair.public())); } + + #[test] + fn empty_root_const() { + let empty = ::hash(&[0u8]); + assert_eq!(BlakeTwo256::EMPTY_ROOT, empty.as_ref()); + } } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index df8f810ceb7ce..a2fb1eb76f042 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,12 +17,12 @@ //! State machine backends. These manage the code and storage of contracts. 
use log::warn; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode}; -use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; +use sp_core::{traits::RuntimeCode, + storage::{ChildInfo, ChildChange, ChildrenMap, well_known_keys, PrefixedStorageKey}}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; - use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, @@ -132,6 +132,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -170,33 +171,43 @@ pub trait Backend: std::fmt::Debug { fn full_storage_root( &self, delta: I1, - child_deltas: I2) - -> (H::Out, Self::Transaction) + child_deltas: I2, + return_child_roots: bool, + ) -> (H::Out, Self::Transaction, Vec<(PrefixedStorageKey, Option)>) where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); + let mut result_child_roots: Vec<_> = Default::default(); // child first - for (child_info, child_delta) in child_deltas { + for (child_info, child_change, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&child_info, child_delta); + self.child_storage_root(&child_info, &child_change, child_delta); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { + if return_child_roots { + result_child_roots.push((prefixed_storage_key.clone(), None)); + } child_roots.push((prefixed_storage_key.into_inner(), None)); } else { - child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); + if return_child_roots { + child_roots.push((prefixed_storage_key.clone().into_inner(), Some(child_root.encode()))); + result_child_roots.push((prefixed_storage_key, 
Some(child_root))); + } else { + child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); + } } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); - (root, txs) + (root, txs, result_child_roots) } /// Register stats from overlay of state machine. @@ -282,13 +293,14 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator)>, H::Out: Ord, { - (*self).child_storage_root(child_info, delta) + (*self).child_storage_root(child_info, child_change, delta) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -318,8 +330,22 @@ impl Consolidate for () { } } +impl Consolidate for (A, B) { + fn consolidate(&mut self, (a, b): Self) { + self.0.consolidate(a); + self.1.consolidate(b); + } +} + +impl Consolidate for ChildChange { + fn consolidate(&mut self, c: Self) { + self.update(c); + } +} + impl Consolidate for Vec<( Option, + ChildChange, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { @@ -327,6 +353,24 @@ impl Consolidate for Vec<( } } +impl Consolidate for ChildrenMap { + fn consolidate(&mut self, other: Self) { + self.extend_with(other.into_iter(), Consolidate::consolidate) + } +} + +impl Consolidate for Option { + fn consolidate(&mut self, other: Self) { + if let Some(v) = self.as_mut() { + if let Some(other) = other { + v.consolidate(other); + } + } else { + *self = other; + } + } +} + impl> Consolidate for sp_trie::GenericMemoryDB { fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) @@ -339,7 +383,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Op H: Hasher, I: IntoIterator, { - let mut root = ::Out::default(); + let mut root = ::Out::default(); { let mut trie = TrieDBMut::::new(mdb, &mut root); for (key, value) in input { diff 
--git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 7f26085958e97..32faa32b3f4eb 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -201,6 +201,7 @@ impl Externalities for BasicExternalities { let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), + child_change: Default::default(), child_info: child_info.to_owned(), }); if let Some(value) = value { @@ -286,7 +287,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(&child.child_info, delta).0 + .child_storage_root(&child.child_info, &child.child_change, delta).0 } else { empty_child_trie_root::>() }.encode() @@ -360,7 +361,8 @@ mod tests { children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: child_info.to_owned(), + child_info: child_info.clone(), + child_change: Default::default(), } ] }); diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 45535204e0884..60dd538c42ceb 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use codec::{Decode, Encode}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use num_traits::One; use crate::{ StorageKey, @@ -29,10 +29,10 @@ use crate::{ changes_trie::{ AnchorBlockId, ConfigurationRange, Storage, BlockNumber, build_iterator::digest_build_iterator, - input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, + input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex, ChildIndexValue}, }, }; -use sp_core::storage::{ChildInfo, 
ChildType, PrefixedStorageKey}; +use sp_core::storage::{ChildInfo, ChildType, ChildChange, PrefixedStorageKey}; /// Prepare input pairs for building a changes trie of given block. /// @@ -46,7 +46,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( parent: &'a AnchorBlockId, ) -> Result<( impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, + Vec<(ChildIndex, (ChildChange, Option), impl Iterator> + 'a)>, Vec, ), String> where @@ -69,10 +69,11 @@ pub(crate) fn prepare_input<'a, B, H, Number>( )?; let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); - for (child_index, ext_iter) in children_extrinsics_input.into_iter() { + for (child_index, (child_change, ext_iter)) in children_extrinsics_input.into_iter() { let dig_iter = children_digest_input.remove(&child_index); children_digest.push(( child_index, + child_change, Some(ext_iter).into_iter().flatten() .chain(dig_iter.into_iter().flatten()), )); @@ -80,6 +81,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( for (child_index, dig_iter) in children_digest_input.into_iter() { children_digest.push(( child_index, + (ChildChange::Update, None), // default change type for digest None.into_iter().flatten() .chain(Some(dig_iter).into_iter().flatten()), )); @@ -98,7 +100,7 @@ fn prepare_extrinsics_input<'a, B, H, Number>( changes: &'a OverlayedChanges, ) -> Result<( impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, + BTreeMap, ((ChildChange, Option), impl Iterator> + 'a)>, ), String> where B: Backend, @@ -108,9 +110,9 @@ fn prepare_extrinsics_input<'a, B, H, Number>( let mut children_info = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() + for (_storage_key, child) in changes.prospective.children_default.iter() .chain(changes.committed.children_default.iter()) { - children_info.insert(child_info.clone()); + children_info.insert(child.info.clone()); } for child_info in children_info { 
let child_index = ChildIndex:: { @@ -123,8 +125,9 @@ fn prepare_extrinsics_input<'a, B, H, Number>( } let top = prepare_extrinsics_input_inner(backend, block, changes, None)?; + debug_assert!((top.0).0 == ChildChange::Update); - Ok((top, children_result)) + Ok((top.1, children_result)) } fn prepare_extrinsics_input_inner<'a, B, H, Number>( @@ -132,7 +135,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( block: &Number, changes: &'a OverlayedChanges, child_info: Option, -) -> Result> + 'a, String> +) -> Result<((ChildChange, Option), impl Iterator> + 'a), String> where B: Backend, H: Hasher, @@ -141,15 +144,28 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( let (committed, prospective) = if let Some(child_info) = child_info.as_ref() { match child_info.child_type() { ChildType::ParentKeyId => ( - changes.committed.children_default.get(child_info.storage_key()).map(|c| &c.0), - changes.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0), + changes.committed.children_default.get(child_info.storage_key()).map(|c| (c.change.clone(), &c.values)), + changes.prospective.children_default.get(child_info.storage_key()).map(|c| (c.change.clone(), &c.values)), ), } } else { - (Some(&changes.committed.top), Some(&changes.prospective.top)) + ( + Some(((ChildChange::Update, None), &changes.committed.top)), + Some(((ChildChange::Update, None), &changes.prospective.top)), + ) }; - committed.iter().flat_map(|c| c.iter()) - .chain(prospective.iter().flat_map(|c| c.iter())) + let mut change = (ChildChange::Update, None); + if let Some((child_change, _)) = prospective.as_ref().or_else(|| committed.as_ref()) { + match &child_change.0 { + ChildChange::BulkDelete(encoded_root) => { + change.0 = ChildChange::BulkDelete(encoded_root.clone()); + change.1 = child_change.1; + }, + ChildChange::Update => (), + } + } + committed.iter().flat_map(|c| c.1.iter()) + .chain(prospective.iter().flat_map(|c| c.1.iter())) .filter(|( _, v)| v.extrinsics.is_some()) 
.try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, v)| { match map.entry(k) { @@ -195,7 +211,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Ok(map) }) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) + .map(|pairs| (change, pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v)))) } @@ -280,6 +296,9 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } + // change trie content are all stored as top_trie (default child trie with empty keyspace) + let child_info = sp_core::storage::ChildInfo::top_trie(); + let child_info = &child_info; let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( @@ -287,21 +306,23 @@ fn prepare_digest_input<'a, H, Number>( trie_root, ); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| + trie_storage.for_key_values_with_prefix(child_info, &child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); + if let Ok(value) = ChildIndexValue::decode(&mut &value[..]) { + value.changes_root.as_ref().map(|root| { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&root[..]); + children_roots.insert(trie_key.storage_key, trie_root); + }); } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { 
insert_to_map(&mut map, trie_key.key); }); @@ -318,12 +339,12 @@ fn prepare_digest_input<'a, H, Number>( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); @@ -346,7 +367,7 @@ mod test { use crate::InMemoryBackend; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; - use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; + use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet, ChildChangeSet}; use super::*; fn prepare_for_build(zero: u64) -> ( @@ -429,18 +450,26 @@ mod test { }), ].into_iter().collect(), children_default: vec![ - (child_trie_key1.clone(), (vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }) - ].into_iter().collect(), child_info_1.to_owned())), - (child_trie_key2, (vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }) - ].into_iter().collect(), child_info_2.to_owned())), + (child_trie_key1.clone(), ChildChangeSet { + info: child_info_1.clone(), + change: Default::default(), + values: vec![ + (vec![100], OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + }) + ].into_iter().collect(), + }), + (child_trie_key2, ChildChangeSet { + info: child_info_2, + change: Default::default(), + values: vec![ + (vec![100], 
OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + }) + ].into_iter().collect(), + }), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -458,12 +487,16 @@ mod test { }), ].into_iter().collect(), children_default: vec![ - (child_trie_key1, (vec![ - (vec![100], OverlayedValue { - value: Some(vec![202]), - extrinsics: Some(vec![3].into_iter().collect()) - }) - ].into_iter().collect(), child_info_1.to_owned())), + (child_trie_key1, ChildChangeSet { + info: child_info_1, + change: Default::default(), + values: vec![ + (vec![100], OverlayedValue { + value: Some(vec![202]), + extrinsics: Some(vec![3].into_iter().collect()) + }) + ].into_iter().collect() + }), ].into_iter().collect(), }, collect_extrinsics: true, @@ -502,12 +535,12 @@ mod test { InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![103] }, vec![0, 1]), ]); assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, + .map(|(k, change, values)| (k, change, values.collect::>())).collect::>(), vec![ + (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), ]), - (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, + (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), ]), @@ -545,8 +578,8 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), ]); assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, + .map(|(k, change, values)| (k, change, 
values.collect::>())).collect::>(), vec![ + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -555,7 +588,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -593,14 +626,14 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![105] }, vec![zero + 4, zero + 8]), ]); assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, + .map(|(k, change, values)| (k, change, values.collect::>())).collect::>(), vec![ + (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), ]), - (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, + (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), ]), @@ -690,8 +723,8 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), ]); assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, 
storage_key: child_trie_key1.clone() }, + .map(|(k, change, values)| (k, change, values.collect::>())).collect::>(), vec![ + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -700,7 +733,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, (ChildChange::Update, None), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -760,7 +793,7 @@ mod test { let child_changes_tries_nodes = child_changes_tries_nodes .into_iter() - .map(|(k, i)| (k, i.collect::>())) + .map(|(k, _change, i)| (k, i.collect::>())) .collect::>(); assert_eq!( child_changes_tries_nodes.get(&ChildIndex { diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index f5a936069ba40..c577aebfb7280 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -20,14 +20,15 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; -use hash_db::Hasher; +use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; -use crate::changes_trie::input::ChildIndex; +use 
crate::changes_trie::input::{ChildIndex, ChildIndexValue}; use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; use crate::proving_backend::ProvingBackendRecorder; use crate::trie_backend_essence::{TrieBackendEssence}; @@ -68,6 +69,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: ChildInfo::top_trie(), }) } @@ -178,6 +180,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: ChildInfo::top_trie(), }.collect() } @@ -242,12 +245,12 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> storage_key: storage_key.clone(), }.encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? - .and_then(|v| >::decode(&mut &v[..]).ok()) - .map(|v| { + .and_then(|v| ChildIndexValue::decode(&mut &v[..]).ok()) + .and_then(|v| v.changes_root.as_ref().map(|root| { let mut hash = H::Out::default(); - hash.as_mut().copy_from_slice(&v[..]); + hash.as_mut().copy_from_slice(&root[..]); hash - }) { + })) { trie_root } else { continue; @@ -315,6 +318,10 @@ pub struct DrilldownIterator<'a, H, Number> H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, + /// This is always top trie info, but it cannot be + /// statically instantiated at the time (vec of null + /// size could be in theory). 
+ child_info: ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, N @@ -323,8 +330,11 @@ impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, N type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { + let child_info = &self.child_info; self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root) + .storage(child_info, key) + ) } } diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 4f0f3da40c52b..5b68c4aee97c1 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -21,7 +21,7 @@ use crate::{ StorageKey, StorageValue, changes_trie::BlockNumber }; -use sp_core::storage::PrefixedStorageKey; +use sp_core::storage::{ChildChange, PrefixedStorageKey}; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] pub struct ExtrinsicIndex { @@ -57,8 +57,14 @@ pub struct ChildIndex { pub type DigestIndexValue = Vec; /// Value of { changed key => block/digest block numbers } mapping. -/// That is the root of the child change trie. -pub type ChildIndexValue = Vec; +/// That is the root of the child change trie, together with any whole-trie change. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Encode, Decode)] +pub struct ChildIndexValue { + /// Root of the child change trie. + pub changes_root: Option>, + /// Possible change occurring on the whole trie. + pub child_change: Vec<(ChildChange, u32)>, +} /// Single input pair of changes trie.
#[derive(Clone, Debug, PartialEq, Eq)] diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index ee6c6778e0aad..b7bce326e4cff 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -67,7 +67,9 @@ pub use self::prune::prune; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; -use hash_db::{Hasher, Prefix}; +use hash_db::Prefix; +use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; @@ -160,16 +162,26 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Changes trie storage -> trie backend essence adapter. pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + debug_assert!(child_info.is_top_trie()); self.0.get(key, prefix) } } @@ -287,7 +299,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( let mut mdb = MemoryDB::default(); let mut child_roots = Vec::with_capacity(child_input_pairs.len()); - for (child_index, input_pairs) in child_input_pairs { + for (child_index, child_change, input_pairs) in child_input_pairs { let mut not_empty = false; let mut root = Default::default(); { @@ -310,8 +322,21 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: 
BlockNumber>( storage_changed_keys, ); } - if not_empty { - child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); + let changes_root = if not_empty { + Some(root.as_ref().to_vec()) + } else { + None + }; + let child_change = if let Some(index) = child_change.1 { + vec![(child_change.0, index)] + } else { + Vec::new() + }; + if not_empty || !child_change.is_empty() { + child_roots.push(input::InputPair::ChildIndex(child_index, input::ChildIndexValue { + changes_root, + child_change, + })); } } let mut root = Default::default(); diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 05555df305b7c..9842928fe1d65 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -16,7 +16,7 @@ //! Changes trie pruning-related functions. -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::Recorder; use log::warn; use num_traits::One; @@ -24,7 +24,7 @@ use crate::proving_backend::ProvingBackendRecorder; use crate::trie_backend_essence::TrieBackendEssence; use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; use crate::changes_trie::storage::TrieBackendAdapter; -use crate::changes_trie::input::{ChildIndex, InputKey}; +use crate::changes_trie::input::{ChildIndex, ChildIndexValue, InputKey}; use codec::{Decode, Codec}; /// Prune obsolete changes tries. 
Pruning happens at the same block, where highest @@ -65,12 +65,15 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { + let child_info = sp_core::storage::ChildInfo::top_trie(); + trie_storage.for_key_values_with_prefix(&child_info, &child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.push(trie_root); + if let Ok(value) = ChildIndexValue::decode(&mut &value[..]) { + value.changes_root.as_ref().map(|root| { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&root[..]); + children_roots.push(trie_root); + }); } } }); @@ -100,7 +103,7 @@ fn prune_trie( backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder: &mut proof_recorder, }; - trie.record_all_keys(); + trie.record_all_top_trie_keys(); } // all nodes of this changes trie should be pruned @@ -153,7 +156,10 @@ mod tests { let root3 = insert_into_memory_db::(&mut mdb3, vec![ (vec![13], vec![23]), (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), + (child_key, ChildIndexValue { + changes_root: Some(ch_root3[..].to_vec()), + child_change: Vec::new(), + }.encode()), ]).unwrap(); let mut mdb4 = MemoryDB::::default(); let root4 = insert_into_memory_db::( diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 81651dd2e719b..f3bc6cb49bfad 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,21 +17,23 @@ //! Changes trie storage utilities. 
use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use hash_db::{Prefix, EMPTY_PREFIX}; +use sp_core::Hasher; use sp_core::storage::PrefixedStorageKey; +use sp_core::storage::ChildInfo; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, - trie_backend_essence::TrieBackendStorage, + trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] -use crate::changes_trie::input::{InputPair, ChildIndex}; +use crate::changes_trie::input::{InputPair, ChildIndex, ChildIndexValue}; /// In-memory implementation of changes trie storage. pub struct InMemoryStorage { @@ -114,7 +116,10 @@ impl InMemoryStorage { }; top_inputs[ix].1.push(InputPair::ChildIndex( ChildIndex { block: block.clone(), storage_key: storage_key.clone() }, - root.as_ref().to_vec(), + ChildIndexValue { + changes_root: Some(root.as_ref().to_vec()), + child_change: Vec::new(), + }, )); } } @@ -188,8 +193,12 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) } } @@ -199,14 +208,20 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } } -impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> +impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Number> where Number: BlockNumber, H: Hasher, { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + debug_assert!(child_info.is_top_trie()); self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs 
b/primitives/state-machine/src/ext.rs index 399bfc69d864f..8fcb094431405 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -22,9 +22,9 @@ use crate::{ changes_trie::State as ChangesTrieState, }; -use hash_db::Hasher; use sp_core::{ - storage::{well_known_keys::is_child_storage_key, ChildInfo}, + Hasher, + storage::{well_known_keys::is_child_storage_key, ChildInfo, ChildChange}, traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; @@ -184,6 +184,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(child_info, key) @@ -208,6 +211,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option> { + if child_info.is_top_trie() { + return self.storage_hash(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(child_info, key) @@ -248,6 +254,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> bool { + if child_info.is_top_trie() { + return self.exists_storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = match self.overlay.child_storage(child_info, key) { @@ -286,13 +295,20 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { - let next_backend_key = self.backend - .next_child_storage_key(child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL); - let next_overlay_key_change = self.overlay.next_child_storage_key_change( + if child_info.is_top_trie() { + return self.next_storage_key(key); + } + let (next_overlay_key_change, is_deleted_child) = self.overlay.next_child_storage_key_change( child_info.storage_key(), key ); + let next_backend_key = if is_deleted_child { + None + } else { + self.backend + .next_child_storage_key(child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }; match (next_backend_key, 
next_overlay_key_change) { (Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key), @@ -330,6 +346,9 @@ where key: StorageKey, value: Option, ) { + if child_info.is_top_trie() { + return self.place_storage(key, value); + } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, HexDisplay::from(&child_info.storage_key()), @@ -346,6 +365,10 @@ where &mut self, child_info: &ChildInfo, ) { + if child_info.is_top_trie() { + trace!(target: "state-trace", "Ignoring kill_child_storage on top trie"); + return; + } trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), @@ -353,10 +376,14 @@ where let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_storage(child_info); - self.backend.for_keys_in_child_storage(child_info, |key| { - self.overlay.set_child_storage(child_info, key.to_vec(), None); - }); + match self.backend.storage(&child_info.prefixed_storage_key()) { + Ok(o_encoded_root) => self.overlay.kill_child_storage(child_info, o_encoded_root), + Err(e) => trace!( + target: "state-trace", + "kill_child_storage could not access child trie root {}", + e, + ), + } } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -382,6 +409,10 @@ where child_info: &ChildInfo, prefix: &[u8], ) { + if child_info.is_top_trie() { + return self.clear_prefix(prefix); + } + trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, HexDisplay::from(&child_info.storage_key()), @@ -423,45 +454,37 @@ where let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self - .storage(prefixed_storage_key.as_slice()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) + let root = self.storage_transaction_cache.transaction_child_storage_root + .get(&prefixed_storage_key) + .map(|root| root.encode()) 
.unwrap_or( - empty_child_trie_root::>() + empty_child_trie_root::>().encode() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, HexDisplay::from(&storage_key), HexDisplay::from(&root.as_ref()), ); - root.encode() + root } else { - - if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { - let (root, is_empty, _) = { + if let Some((child_info, child_change)) = self.overlay.default_child_info(storage_key) { + if let ChildChange::BulkDelete(..) = child_change { + return empty_child_trie_root::>().encode(); + } + let (root, _is_empty, _) = { let delta = self.overlay.committed.children_default.get(storage_key) .into_iter() - .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) + .flat_map(|child| child.values.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( self.overlay.prospective.children_default.get(storage_key) .into_iter() - .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) + .flat_map(|child| child.values.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(&child_info, delta) + self.backend.child_storage_root(child_info, child_change, delta) }; let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. - // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. 
- if is_empty { - self.overlay.set_storage(prefixed_storage_key.into_inner(), None); - } else { - self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); - } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, @@ -711,7 +734,8 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: child_info.to_owned(), + child_info: child_info.clone(), + child_change: Default::default(), } ], }.into(); @@ -741,6 +765,8 @@ mod tests { #[test] fn child_storage_works() { + use sp_core::InnerHasher; + let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; let mut cache = StorageTransactionCache::default(); @@ -756,7 +782,8 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], - child_info: child_info.to_owned(), + child_info: child_info.clone(), + child_change: Default::default(), } ], }.into(); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 83126abbf78e0..d7cdd5e6a33b1 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -23,12 +23,12 @@ use crate::{ stats::UsageInfo, }; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ - MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, ChildType, Storage}; +use sp_core::storage::{ChildInfo, ChildChange, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -48,7 +48,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. 
pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, (ChildChange, BTreeMap)>, // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -89,29 +89,30 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, ChildChange, StorageCollection)> >( &self, changes: T, ) -> Self { let mut inner = self.inner.clone(); - for (child_info, key_values) in changes.into_iter() { + for (child_info, child_change, key_values) in changes.into_iter() { let entry = inner.entry(child_info).or_default(); for (key, val) in key_values { match val { - Some(v) => { entry.insert(key, v); }, - None => { entry.remove(&key); }, + Some(v) => { entry.1.insert(key, v); }, + None => { entry.1.remove(&key); }, } } + entry.0.update(child_change); } inner.into() } } -impl From, BTreeMap>> +impl From, (ChildChange, BTreeMap)>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, (ChildChange, BTreeMap)>) -> Self { InMemory { inner, trie: None, @@ -122,9 +123,10 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); - inner.insert(None, inners.top); + let mut inner: HashMap, (ChildChange, BTreeMap)> + = inners.children_default.into_iter() + .map(|(_k, c)| (Some(c.child_info), (c.child_change, c.data))).collect(); + inner.insert(None, (ChildChange::Update, inners.top)); InMemory { inner, trie: None, @@ -136,7 +138,7 @@ impl From for InMemory { impl From> for InMemory { fn from(inner: BTreeMap) -> Self { let mut expanded = HashMap::new(); - expanded.insert(None, inner); + expanded.insert(None, (ChildChange::Update, inner)); InMemory { inner: expanded, trie: None, @@ -145,20 +147,21 @@ impl From> for InMemory { } } -impl From, 
StorageCollection)>> +impl From, ChildChange, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option, StorageCollection)>, + inner: Vec<(Option, ChildChange, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, (ChildChange, BTreeMap)> = HashMap::new(); - for (child_info, key_values) in inner { + for (child_info, child_change, key_values) in inner { let entry = expanded.entry(child_info).or_default(); for (key, value) in key_values { if let Some(value) = value { - entry.insert(key, value); + entry.1.insert(key, value); } } + entry.0 = child_change; } expanded.into() } @@ -175,12 +178,16 @@ impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( Option, + ChildChange, StorageCollection, )>; type TrieBackendStorage = MemoryDB; fn storage(&self, key: &[u8]) -> Result, Self::Error> { - Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) + Ok(self.inner.get(&None).and_then(|(change, map)| { + debug_assert!(!matches!(change, ChildChange::BulkDelete(..))); + map.get(key).map(Clone::clone) + })) } fn child_storage( @@ -189,20 +196,21 @@ impl Backend for InMemory where H::Out: Codec { key: &[u8], ) -> Result, Self::Error> { Ok(self.inner.get(&Some(child_info.to_owned())) - .and_then(|map| map.get(key).map(Clone::clone))) + .filter(|(change, _)| !matches!(change, ChildChange::BulkDelete(..))) + .and_then(|(_, map)| map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) + Ok(self.inner.get(&None).map(|map| map.1.get(key).is_some()).unwrap_or(false)) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { self.inner.get(&None) - .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); + .map(|map| map.1.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } fn for_key_values_with_prefix(&self, prefix: 
&[u8], mut f: F) { - self.inner.get(&None).map(|map| map.iter().filter(|(key, _val)| key.starts_with(prefix)) + self.inner.get(&None).map(|map| map.1.iter().filter(|(key, _val)| key.starts_with(prefix)) .for_each(|(k, v)| f(k, v))); } @@ -212,7 +220,8 @@ impl Backend for InMemory where H::Out: Codec { mut f: F, ) { self.inner.get(&Some(child_info.to_owned())) - .map(|map| map.keys().for_each(|k| f(&k))); + .filter(|(change, _)| !matches!(change, ChildChange::BulkDelete(..))) + .map(|(_, map)| map.keys().for_each(|k| f(&k))); } fn for_child_keys_with_prefix( @@ -222,17 +231,18 @@ impl Backend for InMemory where H::Out: Codec { f: F, ) { self.inner.get(&Some(child_info.to_owned())) - .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); + .filter(|(change, _)| !matches!(change, ChildChange::BulkDelete(..))) + .map(|(_, map)| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)>, - ::Out: Ord, + ::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = Layout::::trie_root(existing_pairs.chain(transaction.iter().cloned()) @@ -243,12 +253,13 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - (root, vec![(None, full_transaction)]) + (root, vec![(None, ChildChange::Update, full_transaction)]) } fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -257,32 +268,41 @@ impl Backend for InMemory where H::Out: Codec { { let child_type = child_info.child_type(); let child_info = Some(child_info.to_owned()); - - let existing_pairs = 
self.inner.get(&child_info) - .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - - let transaction: Vec<_> = delta.into_iter().collect(); - let root = child_trie_root::, _, _, _>( - existing_pairs.chain(transaction.iter().cloned()) - .collect::>() + let (root, full_transaction, is_default) = if let ChildChange::BulkDelete(..) = child_change { + let root = empty_child_trie_root::>(); + (root, Default::default(), true) + } else { + let existing_pairs = self.inner.get(&child_info) + .map(|(change, map)| { + debug_assert!(!matches!(change, ChildChange::BulkDelete(..))); + (change, map) + }) .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - ); + .flat_map(|(_, map)| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - let full_transaction = transaction.into_iter().collect(); + let transaction: Vec<_> = delta.into_iter().collect(); + let root = Layout::::trie_root( + existing_pairs.chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + ); + + let is_default = match child_type { + ChildType::ParentKeyId => root == empty_child_trie_root::>(), + }; - let is_default = match child_type { - ChildType::ParentKeyId => root == empty_child_trie_root::>(), + (root, transaction.into_iter().collect(), is_default) }; - (root, is_default, vec![(child_info, full_transaction)]) + + (root, is_default, vec![(child_info, child_change.clone(), full_transaction)]) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); let next_key = self.inner.get(&None) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); + .and_then(|map| map.1.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) } @@ -294,7 +314,8 @@ impl Backend for InMemory where H::Out: Codec { ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), 
ops::Bound::Unbounded); let next_key = self.inner.get(&Some(child_info.to_owned())) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); + .filter(|(change, _)| !matches!(change, ChildChange::BulkDelete(..))) + .and_then(|(_, map)| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) } @@ -302,14 +323,14 @@ impl Backend for InMemory where H::Out: Codec { fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { self.inner.get(&None) .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))) .collect() } fn keys(&self, prefix: &[u8]) -> Vec { self.inner.get(&None) .into_iter() - .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) + .flat_map(|map| map.1.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } @@ -319,8 +340,9 @@ impl Backend for InMemory where H::Out: Codec { prefix: &[u8], ) -> Vec { self.inner.get(&Some(child_info.to_owned())) + .filter(|(change, _)| !matches!(change, ChildChange::BulkDelete(..))) .into_iter() - .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) + .flat_map(|(_, map)| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } @@ -328,12 +350,18 @@ impl Backend for InMemory where H::Out: Codec { let mut mdb = MemoryDB::default(); let mut new_child_roots = Vec::new(); let mut root_map = None; - for (child_info, map) in &self.inner { + for (child_info, (child_change, map)) in &self.inner { if let Some(child_info) = child_info.as_ref() { - let prefix_storage_key = child_info.prefixed_storage_key(); - let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into())); + if let ChildChange::BulkDelete(..) = child_change { + // we do not need to delete the existing root because we start from + // an empty db and there is no possible existing value for it. 
+ } else { + let prefix_storage_key = child_info.prefixed_storage_key(); + let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; + new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into())); + } } else { + debug_assert!(!matches!(child_change, ChildChange::BulkDelete(..))); root_map = Some(map); } } @@ -376,6 +404,7 @@ mod tests { let mut storage = storage.update( vec![( Some(child_info.clone()), + ChildChange::Update, vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1c0007c5f9108..00a117c836d99 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -20,7 +20,7 @@ use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; use log::{warn, trace}; -use hash_db::Hasher; +pub use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode, Codec}; use sp_core::{ storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, @@ -69,7 +69,7 @@ pub use overlayed_changes::{ pub use proving_backend::{ create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, }; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; +pub use trie_backend_essence::{TrieBackendStorage, TrieBackendStorageRef, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::InMemory as InMemoryBackend; @@ -84,7 +84,7 @@ pub type DefaultHandler = fn(CallResult, CallResult) -> CallRe /// Type of changes trie transaction. pub type ChangesTrieTransaction = ( MemoryDB, - ChangesTrieCacheAction<::Out, N>, + ChangesTrieCacheAction<::Out, N>, ); /// Strategy for executing a call into the runtime. 
@@ -1090,39 +1090,4 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } - - #[test] - fn child_storage_uuid() { - - let child_info_1 = ChildInfo::new_default(b"sub_test1"); - let child_info_2 = ChildInfo::new_default(b"sub_test2"); - - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - assert!(!duplicate); - } } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index f57d13ee3ffec..3391bba6dc049 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,10 +29,11 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, + PrefixedStorageKey, ChildChange, ChildUpdate}; use std::{mem, ops}; -use hash_db::Hasher; +use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; @@ -44,7 +45,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. 
-pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +pub type ChildStorageCollection = Vec<(ChildInfo, ChildChange, StorageCollection)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -73,14 +74,30 @@ pub struct OverlayedValue { pub extrinsics: Option>, } +/// Overlay change set of a given child state. +#[derive(Debug, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub struct ChildChangeSet { + /// Child definition. + pub info: ChildInfo, + /// Kind of modification for this change, and + /// possibly extrinsic index of the change. + /// Currently it can only be a single extrinsic + /// for first bulk deletion. + pub change: (ChildChange, Option), + /// Deltas of modified values for this change. + /// The map key is the child storage key without the common prefix. + pub values: BTreeMap, +} + /// Prospective or committed overlayed change set. #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, - /// Child storage changes. The map key is the child storage key without the common prefix. - pub children_default: HashMap, ChildInfo)>, + /// Child storage changes. + pub children_default: HashMap, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -133,6 +150,8 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, + /// The storage child roots after applying the transaction. + pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. 
@@ -151,6 +170,7 @@ impl Default for StorageTransactionCache Self { transaction: None, transaction_storage_root: None, + transaction_child_storage_root: Default::default(), changes_trie_transaction: None, changes_trie_transaction_storage_root: None, } @@ -220,16 +240,22 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) { - if let Some(val) = map.0.get(key) { + if let Some(child) = self.prospective.children_default.get(child_info.storage_key()) { + if let ChildChange::BulkDelete(..) = child.change.0 { + return Some(None); + } + if let Some(val) = child.values.get(key) { let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); return Some(val.value.as_ref().map(AsRef::as_ref)); } } - if let Some(map) = self.committed.children_default.get(child_info.storage_key()) { - if let Some(val) = map.0.get(key) { + if let Some(child) = self.committed.children_default.get(child_info.storage_key()) { + if let ChildChange::BulkDelete(..) 
= child.change.0 { + return Some(None); + } + if let Some(val) = child.values.get(key) { let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); return Some(val.value.as_ref().map(AsRef::as_ref)); @@ -267,13 +293,14 @@ impl OverlayedChanges { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); - let storage_key = child_info.storage_key().to_vec(); - let map_entry = self.prospective.children_default.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); - debug_assert!(updatable); + let map_entry = self.get_or_init_prospective(child_info); + let updatable = map_entry.info.try_update(&map_entry.change.0, child_info); + debug_assert!(updatable != ChildUpdate::Incompatible); + if updatable == ChildUpdate::Ignore { + return; + } - let entry = map_entry.0.entry(key).or_default(); + let entry = map_entry.values.entry(key).or_default(); entry.value = val; if let Some(extrinsic) = extrinsic_index { @@ -284,44 +311,38 @@ impl OverlayedChanges { /// Clear child storage of given storage key. /// + /// If encoded child root is undefined, this indicates that the child trie only + /// exists in the overlay storage. The deletion is therefore stored with an + /// empty root to allow reverting a freshly created child, but those value needs + /// to be filtered from the resulting transactions (in root building and drain content). + /// Note that it will still be use by change trie (change trie do not use the previous + /// root). + /// /// NOTE that this doesn't take place immediately but written into the prospective /// change set, and still can be reverted by [`discard_prospective`]. 
/// /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_child_storage( + pub(crate) fn kill_child_storage( &mut self, child_info: &ChildInfo, + encoded_child_root: Option>, ) { let extrinsic_index = self.extrinsic_index(); - let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); - debug_assert!(updatable); - - map_entry.0.values_mut().for_each(|e| { - if let Some(extrinsic) = extrinsic_index { - e.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - e.value = None; - }); + let map_entry = self.get_or_init_prospective(child_info); + let updatable = map_entry.info.try_update(&map_entry.change.0, child_info); + debug_assert!(updatable != ChildUpdate::Incompatible); + if updatable == ChildUpdate::Ignore { + return; + } - if let Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) { - for (key, value) in committed_map.iter() { - if !map_entry.0.contains_key(key) { - map_entry.0.insert(key.clone(), OverlayedValue { - value: None, - extrinsics: extrinsic_index.map(|i| { - let mut e = value.extrinsics.clone() - .unwrap_or_else(|| BTreeSet::default()); - e.insert(i); - e - }), - }); - } - } + if let Some(encoded_child_root) = encoded_child_root { + map_entry.change = (ChildChange::BulkDelete(encoded_child_root), extrinsic_index); + } else { + map_entry.change = (ChildChange::BulkDelete(Vec::new()), extrinsic_index); + } + if !extrinsic_index.is_some() { + map_entry.values.clear(); } } @@ -368,13 +389,18 @@ impl OverlayedChanges { prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); - let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = 
map_entry.1.try_update(child_info); - debug_assert!(updatable); + let map_entry = Self::get_or_init_prospective_inner( + &mut self.prospective, + &self.committed, + child_info, + ); + let updatable = map_entry.info.try_update(&map_entry.change.0, child_info); + debug_assert!(updatable != ChildUpdate::Incompatible); + if updatable == ChildUpdate::Ignore { + return; + } - for (key, entry) in map_entry.0.iter_mut() { + for (key, entry) in map_entry.values.iter_mut() { if key.starts_with(prefix) { entry.value = None; @@ -385,12 +411,12 @@ impl OverlayedChanges { } } - if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) { + if let Some(child_committed) = self.committed.children_default.get(child_info.storage_key()) { // Then do the same with keys from committed changes. // NOTE that we are making changes in the prospective change set. - for key in child_committed.keys() { + for key in child_committed.values.keys() { if key.starts_with(prefix) { - let entry = map_entry.0.entry(key.clone()).or_default(); + let entry = map_entry.values.entry(key.clone()).or_default(); entry.value = None; if let Some(extrinsic) = extrinsic_index { @@ -422,12 +448,17 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - for (storage_key, (map, child_info)) in self.prospective.children_default.drain() { - let child_content = self.committed.children_default.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info)); + for (storage_key, child) in self.prospective.children_default.drain() { + let child_committed = self.committed.children_default.entry(storage_key) + .or_insert_with(|| ChildChangeSet { + info: child.info.clone(), + change: child.change.clone(), + values: Default::default() + }); + child_committed.change = child.change; // No update to child info at this point (will be needed for deletion). 
- for (key, val) in map.into_iter() { - let entry = child_content.0.entry(key).or_default(); + for (key, val) in child.values.into_iter() { + let entry = child_committed.values.entry(key).or_default(); entry.value = val.value; if let Some(prospective_extrinsics) = val.extrinsics { @@ -445,7 +476,7 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. fn drain_committed(&mut self) -> ( impl Iterator)>, - impl Iterator)>, ChildInfo))>, + impl Iterator)>)>, ) { assert!(self.prospective.is_empty()); ( @@ -454,7 +485,8 @@ impl OverlayedChanges { .map(|(k, v)| (k, v.value)), std::mem::replace(&mut self.committed.children_default, Default::default()) .into_iter() - .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), + .filter(|(_sk, child)| !matches!(&child.change.0, ChildChange::BulkDelete(root) if root.is_empty())) + .map(|(_sk, child)| (child.info, child.change.0, child.values.into_iter().map(|(k, v)| (k, v.value)))), ) } @@ -507,7 +539,8 @@ impl OverlayedChanges { Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), + child_storage_changes: child_storage_changes + .map(|(ci, cc, it)| (ci, cc, it.collect())).collect(), transaction, transaction_storage_root, changes_trie_transaction, @@ -551,29 +584,33 @@ impl OverlayedChanges { { let child_storage_keys = self.prospective.children_default.keys() .chain(self.committed.children_default.keys()); - let child_delta_iter = child_storage_keys.map(|storage_key| + let child_delta_iter = child_storage_keys.map(|storage_key| { + let (child_info, child_change) = self.default_child_info(storage_key) + .expect("child info initialized in either committed or prospective"); ( - self.default_child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), + child_info.clone(), + child_change.clone(), 
self.committed.children_default.get(storage_key) .into_iter() - .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) + .flat_map(|child| child.values.iter().map(|(k, v)| (k.clone(), v.value.clone()))) .chain( self.prospective.children_default.get(storage_key) .into_iter() - .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) + .flat_map(|child| child.values.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), ) - ); + }) + .filter(|(_, child_change, _)| !matches!(&child_change, ChildChange::BulkDelete(root) if root.is_empty())); // compute and memoize let delta = self.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) .chain(self.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); + let (root, transaction, child_roots) = backend.full_storage_root(delta, child_delta_iter, true); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); + cache.transaction_child_storage_root = child_roots.into_iter().collect(); root } @@ -609,12 +646,12 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
- pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { - return Some(&ci); + pub fn default_child_info(&self, storage_key: &[u8]) -> Option<(&ChildInfo, &ChildChange)> { + if let Some(child) = self.prospective.children_default.get(storage_key) { + return Some((&child.info, &child.change.0)); } - if let Some((_, ci)) = self.committed.children_default.get(storage_key) { - return Some(&ci); + if let Some(child) = self.committed.children_default.get(storage_key) { + return Some((&child.info, &child.change.0)); } None } @@ -645,28 +682,58 @@ impl OverlayedChanges { } /// Returns the next (in lexicographic order) child storage key in the overlayed alongside its - /// value. If no value is next then `None` is returned. + /// value. If no value is next then `None` is returned. + /// We also add a boolean indicating if the child trie has been deleted. pub fn next_child_storage_key_change( &self, storage_key: &[u8], key: &[u8] - ) -> Option<(&[u8], &OverlayedValue)> { + ) -> (Option<(&[u8], &OverlayedValue)>, bool) { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_prospective_key = self.prospective.children_default.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + let prospective = self.prospective.children_default.get(storage_key); + if let Some(ChildChange::BulkDelete(..)) = prospective.map(|child| &child.change.0) { + return (None, true); + } - let next_committed_key = self.committed.children_default.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + let next_prospective_key = prospective + .and_then(|child| child.values.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - match (next_committed_key, next_prospective_key) { + let committed = self.committed.children_default.get(storage_key); + if let 
Some(ChildChange::BulkDelete(..)) = committed.map(|child| &child.change.0) { + return (None, true); + } + + let next_committed_key = committed + .and_then(|child| child.values.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + + (match (next_committed_key, next_prospective_key) { // Committed is strictly less than prospective (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => Some(committed_key), (committed_key, None) => committed_key, // Prospective key is less or equal to committed or committed doesn't exist (_, prospective_key) => prospective_key, - } + }, false) + } + + fn get_or_init_prospective_inner<'a>( + prospective: &'a mut OverlayedChangeSet, + committed: &OverlayedChangeSet, + child_info: &ChildInfo, + ) -> &'a mut ChildChangeSet { + prospective.children_default.entry(child_info.storage_key().to_vec()) + .or_insert(ChildChangeSet { + info: child_info.to_owned(), + change: committed.children_default.get(child_info.storage_key()) + .map(|child| (child.change.0.clone(), None)) + .unwrap_or(Default::default()), + values: Default::default(), + }) + } + + fn get_or_init_prospective(&mut self, child_info: &ChildInfo) -> &mut ChildChangeSet { + Self::get_or_init_prospective_inner(&mut self.prospective, &self.committed, child_info) } } @@ -877,28 +944,28 @@ mod tests { overlay.set_child_storage(child_info, vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); + let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).0.unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value, Some(vec![10])); // next_committed < next_prospective - let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap(); + let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).0.unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value, Some(vec![20])); // next_committed == 
next_prospective - let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); + let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).0.unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value, None); // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); + let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).0.unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); + let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).0.unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value, Some(vec![50])); } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index deafeb902d8bf..40d476ecfe77e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -20,19 +20,21 @@ use std::sync::Arc; use parking_lot::RwLock; use codec::{Decode, Codec}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, record_all_keys, StorageProof, }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; +use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, + TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; use 
std::collections::HashMap; use crate::DBValue; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildChange}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -48,15 +50,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> { /// Produce proof for a key query. pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + read_trie_value_with::, _, BackendStorageDBRef>( &eph, self.backend.root(), key, @@ -68,36 +70,34 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub fn child_storage( &mut self, child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result>, String> { let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or(empty_child_trie_root::>()); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value_with::, _, _>( - child_info.keyspace(), + read_trie_value_with::, _, _>( &eph, - &root.as_ref(), + &root, key, &mut *self.proof_recorder ).map_err(map_e) } /// Produce proof for the whole backend. 
- pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + pub fn record_all_top_trie_keys(&mut self) { + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let mut iter = move || -> Result<(), Box>> { @@ -113,7 +113,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Global proof recorder, act as a layer over a hash db for recording queried /// data. -pub type ProofRecorder = Arc::Out, Option>>>; +pub type ProofRecorder = Arc::Out, Option>>>; /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. @@ -161,16 +161,23 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage +// proof run on a flatten storage of tries and currently only need implement a single +// trie backend storage api. 
+impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef for ProofRecorderBackend<'a, S, H> { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(child_info, key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } @@ -191,7 +198,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = Option; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -262,19 +269,37 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { - self.0.storage_root(delta) + let (root, mut tx) = self.0.storage_root(delta); + if let Some((change, tx)) = tx.remove(&ChildInfo::top_trie()) { + debug_assert!(change == ChildChange::Update); + (root, Some(tx)) + } else { + (root, None) + } } fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(child_info, delta) + let (root, is_empty, mut tx) = self.0.child_storage_root(child_info, child_change, delta); + if let Some((change, tx)) = tx.remove(child_info) { + match change { + ChildChange::Update => (root, is_empty, Some(tx)), + ChildChange::BulkDelete(_encoded_root) => { + // no need to keep change trie info contained in tx + (root, true, Some(Default::default())) + }, + } + } else { + (root, is_empty, None) + } } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) 
{ } @@ -349,16 +374,22 @@ mod tests { assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + let trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); + let mut trie_mdb = if let ChildChange::BulkDelete(..) = trie_mdb.0 { + Default::default() + } else { + trie_mdb.1 + }; + assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); } #[test] fn proof_recorded_and_checked() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); + let mut in_memory = in_memory.update(vec![(None, ChildChange::Update, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -383,17 +414,18 @@ mod tests { let child_info_1 = &child_info_1; let child_info_2 = &child_info_2; let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_1.clone()), + (None, ChildChange::Update, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_1.clone()), ChildChange::Update, (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), + (Some(child_info_2.clone()), ChildChange::Update, (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) + 
in_memory.child_storage_infos().map(|k|(k.to_owned(), ChildChange::Update, Vec::new())), + false, ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 70a96c623adae..7dce37b7fcb41 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -18,7 +18,7 @@ use std::any::{Any, TypeId}; use codec::Decode; -use hash_db::Hasher; +use sp_core::Hasher; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, StorageKey, StorageValue, @@ -32,7 +32,7 @@ use crate::{ use sp_core::{ storage::{ well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, - Storage, + Storage, ChildChange, }, }; use codec::Encode; @@ -113,7 +113,7 @@ impl TestExternalities /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend = self.backend.update(vec![(None, vec![(k, Some(v))])]); + self.backend = self.backend.update(vec![(None, ChildChange::Update, vec![(k, Some(v))])]); } /// Registers the given extension for this instance. @@ -131,16 +131,23 @@ impl TestExternalities let top: Vec<_> = self.overlay.committed.top.clone().into_iter() .chain(self.overlay.prospective.top.clone().into_iter()) .map(|(k, v)| (k, v.value)).collect(); - let mut transaction = vec![(None, top)]; + let mut transaction = vec![(None, ChildChange::Update, top)]; self.overlay.committed.children_default.clone().into_iter() .chain(self.overlay.prospective.children_default.clone().into_iter()) - .for_each(|(_storage_key, (map, child_info))| { - transaction.push(( - Some(child_info), - map.into_iter() + .for_each(|(_storage_key, child)| { + let data = match child.change.0 { + ChildChange::Update => child.values.into_iter() .map(|(k, v)| (k, v.value)) .collect::>(), + // no need for change trie content + ChildChange::BulkDelete(..) 
=> Vec::new(), + }; + + transaction.push(( + Some(child.info), + child.change.0, + data, )) }); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 08eea29360465..f8e57e8bbcce7 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -15,21 +15,24 @@ // along with Substrate. If not, see . //! Trie-based state machine backend. - use log::{warn, debug}; -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; +use sp_core::Hasher; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::storage::{ChildInfo, ChildChange, ChildType, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. +/// Patricia trie-based backend. Transaction type is overlays of changes to commit +/// for this trie and child tries. pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, + // storing child_info of top trie even if it is in + // theory a bit useless (no heap alloc on empty vec). 
+ top_trie: ChildInfo, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -37,6 +40,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), + top_trie: ChildInfo::top_trie(), } } @@ -71,11 +75,11 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = ChildrenMap<(ChildChange, S::Overlay)>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage(key) + self.essence.storage(&self.top_trie, key) } fn child_storage( @@ -83,11 +87,15 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(child_info, key) + if let Some(essence) = self.child_essence(child_info)? { + essence.storage(child_info, key) + } else { + Ok(None) + } } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.next_storage_key(key) + self.essence.next_storage_key(&self.top_trie, key) } fn next_child_storage_key( @@ -95,15 +103,19 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(child_info, key) + if let Some(essence) = self.child_essence(child_info)? 
{ + essence.next_storage_key(child_info, key) + } else { + Ok(None) + } } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(prefix, f) + self.essence.for_keys_with_prefix(&self.top_trie, prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_key_values_with_prefix(prefix, f) + self.essence.for_key_values_with_prefix(&self.top_trie, prefix, f) } fn for_keys_in_child_storage( @@ -111,7 +123,9 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(child_info, f) + if let Ok(Some(essence)) = self.child_essence(child_info) { + essence.for_keys(child_info, f) + } } fn for_child_keys_with_prefix( @@ -120,12 +134,13 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(child_info, prefix, f) + if let Ok(Some(essence)) = self.child_essence(child_info) { + essence.for_keys_with_prefix(child_info, prefix, f) + } } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -148,8 +163,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &[u8]) -> Vec { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -167,7 +181,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn 
storage_root(&self, delta: I) -> (H::Out, S::Overlay) + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -176,6 +190,7 @@ impl, H: Hasher> Backend for TrieBackend where { let mut eph = Ephemeral::new( self.essence.backend_storage(), + &self.top_trie, &mut write_overlay, ); @@ -184,13 +199,15 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - - (root, write_overlay) + let mut tx = ChildrenMap::default(); + tx.insert(self.top_trie.clone(), (ChildChange::Update, write_overlay)); + (root, tx) } fn child_storage_root( &self, child_info: &ChildInfo, + child_change: &ChildChange, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -201,37 +218,47 @@ impl, H: Hasher> Backend for TrieBackend where ChildType::ParentKeyId => empty_child_trie_root::>() }; - let mut write_overlay = S::Overlay::default(); - let prefixed_storage_key = child_info.prefixed_storage_key(); - let mut root = match self.storage(prefixed_storage_key.as_slice()) { - Ok(value) => - value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), - Err(e) => { - warn!(target: "trie", "Failed to read child storage root: {}", e); - default_root.clone() - }, - }; - - { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); - - match child_delta_trie_root::, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - root, - delta - ) { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + if let ChildChange::BulkDelete(_encoded_root) = &child_change { + let mut tx = ChildrenMap::default(); + tx.insert(child_info.clone(), (child_change.clone(), Default::default())); + (default_root, true, tx) + } else { + let mut write_overlay = S::Overlay::default(); + let prefixed_storage_key = child_info.prefixed_storage_key(); + let mut root = match 
self.storage(prefixed_storage_key.as_slice()) { + Ok(value) => + value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), + Err(e) => { + warn!(target: "trie", "Failed to read child storage root: {}", e); + default_root.clone() + }, + }; + + { + let storage = self.essence.backend_storage(); + // Do not write prefix in overlay. + let mut eph = Ephemeral::new( + storage, + child_info, + &mut write_overlay, + ); + + match delta_trie_root::, _, _, _, _>( + &mut eph, + root, + delta + ) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } } - } - let is_default = root == default_root; + let is_default = root == default_root; - (root, is_default, write_overlay) + let mut tx = ChildrenMap::default(); + tx.insert(child_info.clone(), (child_change.clone(), write_overlay)); + (root, is_default, tx) + } } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { @@ -245,12 +272,29 @@ impl, H: Hasher> Backend for TrieBackend where } } +impl, H: Hasher> TrieBackend where + H::Out: Ord + Codec, +{ + fn child_essence<'a>( + &'a self, + child_info: &ChildInfo, + ) -> Result>, >::Error> { + let root: Option = self.storage(&child_info.prefixed_storage_key()[..])? 
+ .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + Ok(if let Some(root) = root { + Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) + } else { + None + }) + } +} + #[cfg(test)] pub mod tests { use std::collections::HashSet; use sp_core::H256; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use sp_runtime::traits::BlakeTwo256; use super::*; @@ -261,7 +305,6 @@ pub mod tests { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -328,13 +371,20 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); + let tx = test_trie().storage_root(::std::iter::empty()).1; + for (_ct, mut tx) in tx.into_iter() { + assert_eq!(tx.0, ChildChange::Update); + assert!(tx.1.drain().is_empty()); + } } #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); + let (new_root, tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + for (_ct, mut tx) in tx.into_iter() { + assert_eq!(tx.0, ChildChange::Update); + assert!(!tx.1.drain().is_empty()); + } assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 28d1c68ca2e40..4c8cde131c440 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,11 
+19,13 @@ use std::ops::Deref; use std::sync::Arc; +use std::marker::PhantomData; use log::{debug, warn}; -use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; +use sp_core::Hasher; +use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; + check_if_empty_root, read_trie_value, + TrieDBIterator, for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -32,16 +34,21 @@ use codec::Encode; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + trie: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Encode { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -67,60 +74,10 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { - self.storage(child_info.prefixed_storage_key().as_slice()) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. 
- pub fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.child_root(child_info)? { - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. - fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option<&ChildInfo>, - key: &[u8], - ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } + pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); - let trie = TrieDB::::new(dyn_eph, root) + let trie = TrieDB::::new(&eph, &self.root) .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -150,63 +107,25 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of storage at given key. 
- pub fn storage(&self, key: &[u8]) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + pub fn storage(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); let map_e = |e| format!("Trie lookup error: {}", e); read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) } - /// Get the value of child storage at given key. - pub fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, String> { - let root = self.child_root(child_info)? - .unwrap_or(empty_child_trie_root::>().encode()); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Retrieve all entries keys of storage and call `f` for each of those keys. 
+ pub fn for_keys( &self, child_info: &ChildInfo, f: F, ) { - let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; + let eph = BackendStorageDBRef::new(&self.storage, child_info); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - child_info.keyspace(), + if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( &eph, - &root, + &self.root, f, ) { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -214,27 +133,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - - /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) } fn keys_values_with_prefix_inner( @@ -242,13 +142,9 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option<&ChildInfo>, + child_info: &ChildInfo, ) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage, child_info); let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -264,30 +160,41 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(()) }; - let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(&eph, child_info.keyspace()); - iter(&db) - } else { - iter(&eph) - }; - if let Err(e) = result { + if let Err(e) = iter(&eph) { debug!(target: "trie", "Error while iterating by prefix: {}", e); } } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, f, child_info) } } -pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { +pub(crate) struct Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ storage: &'a S, - overlay: &'a mut S::Overlay, + child_info: &'a ChildInfo, + overlay: &'a mut O, + _ph: PhantomData, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB - for Ephemeral<'a, S, H> +pub(crate) struct BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + storage: &'a S, + child_info: &'a ChildInfo, + _ph: PhantomData, +} + +impl<'a, S, H, O> hash_db::AsPlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { self } fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { @@ -295,41 +202,54 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB, H: 'a + Hasher> hash_db::AsHashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::AsHashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { +impl<'a, S, H, O> Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, 
+{ + pub fn new(storage: &'a S, child_info: &'a ChildInfo, overlay: &'a mut O) -> Self { Ephemeral { storage, + child_info, overlay, + _ph: PhantomData, } } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB - for Ephemeral<'a, S, H> +impl<'a, S, H> BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, { - fn get(&self, key: &H::Out) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { - Some(val) - } else { - match self.storage.get(&key, EMPTY_PREFIX) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } + pub fn new(storage: &'a S, child_info: &'a ChildInfo) -> Self { + BackendStorageDBRef { + storage, + child_info, + _ph: PhantomData, } } +} + +impl<'a, S, H, O> hash_db::PlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + fn get(&self, key: &H::Out) -> Option { + hash_db::PlainDBRef::get(self, key) + } fn contains(&self, key: &H::Out) -> bool { - hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() + hash_db::PlainDBRef::contains(self, key) } fn emplace(&mut self, key: H::Out, value: DBValue) { @@ -341,21 +261,16 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB, H: Hasher> hash_db::PlainDBRef - for Ephemeral<'a, S, H> -{ - fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } - fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } -} - -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + fn get(&self, key: &H::Out) -> Option { + if let 
Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - match self.storage.get(&key, prefix) { + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -365,8 +280,47 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() + } +} + +impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + fn get(&self, key: &H::Out) -> Option { + if check_if_empty_root::(key.as_ref()) { + return Some(vec![0u8]); + } + + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } + } + + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() + } +} + + +impl<'a, S, H, O> hash_db::HashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + hash_db::HashDBRef::get(self, key, prefix) + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() + hash_db::HashDBRef::contains(self, key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { @@ -382,48 +336,122 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + if let Some(val) = 
hash_db::HashDB::get(self.overlay, key, prefix) { + Some(val) + } else { + match self.storage.get(self.child_info, &key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } + } + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + hash_db::HashDBRef::get(self, key, prefix).is_some() + } +} + +impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + if check_if_empty_root::(key.as_ref()) { + return Some(vec![0u8]); + } + + match self.storage.get(self.child_info, &key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } /// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorage: Send + Sync { +pub trait TrieBackendStorageRef { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } -// This implementation is used by normal storage trie clients. -impl TrieBackendStorage for Arc> { +/// Key-value pairs storage that is used by trie backend essence. 
+pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync { } + +impl + Send + Sync> TrieBackendStorage for B {} + +impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Storage::::get(self.deref(), key, prefix) + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + Storage::::get(self.deref(), child_info, key, prefix) + } +} + +impl> TrieBackendStorageRef for &S { + type Overlay = >::Overlay; + + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + >::get(self, child_info, key, prefix) } } // This implementation is used by test storage trie clients. -impl TrieBackendStorage for PrefixedMemoryDB { +impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. Ok(hash_db::HashDB::get(self, key, prefix)) } } -impl TrieBackendStorage for MemoryDB { +impl TrieBackendStorageRef for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. 
Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -431,8 +459,10 @@ impl TrieBackendStorage for MemoryDB { #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; + use crate::trie_backend::TrieBackend; + use crate::backend::Backend; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -450,22 +480,13 @@ mod test { trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - // reuse of root_1 implicitly assert child trie root is same - // as top trie (contents must remain the same). - let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackend::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -474,7 +495,7 @@ mod test { assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 76174d13b03c7..069353d16c599 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -18,7 +18,8 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] 
} impl-serde = { version = "0.2.3", optional = true } ref-cast = "1.0.0" sp-debug-derive = { version = "2.0.0-dev", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "codec/std", "serde", "impl-serde" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index d2c4a73e23d59..5fb809783374a 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -20,11 +20,14 @@ #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; +#[cfg(feature = "std")] +use sp_std::collections::btree_map::BTreeMap; use sp_debug_derive::RuntimeDebug; use sp_std::vec::Vec; use sp_std::ops::{Deref, DerefMut}; use ref_cast::RefCast; +use codec::{Encode, Decode}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -90,7 +93,7 @@ pub struct StorageData( /// Map of data to use in a storage, it is a collection of /// byte key and values. #[cfg(feature = "std")] -pub type StorageMap = std::collections::BTreeMap, Vec>; +pub type StorageMap = BTreeMap, Vec>; #[cfg(feature = "std")] #[derive(Debug, PartialEq, Eq, Clone)] @@ -101,6 +104,10 @@ pub struct StorageChild { /// Associated child info for a child /// trie. pub child_info: ChildInfo, + /// Associated child change, not that + /// it does not always have a strict + /// change semantic. + pub child_change: ChildChange, } #[cfg(feature = "std")] @@ -166,6 +173,19 @@ pub enum ChildInfo { ParentKeyId(ChildTrieParentKeyId), } +/// How should I update between two child state. +#[derive(Debug, Clone)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +pub enum ChildUpdate { + /// Merge the new values. + Merge, + /// Ignore the new values, for instance on a bulk deleted + /// child state. + Ignore, + /// No update possible. 
+ Incompatible, +} + impl ChildInfo { /// Instantiates child information for a default child trie /// of kind `ChildType::ParentKeyId`, using an unprefixed parent @@ -182,11 +202,26 @@ impl ChildInfo { }) } - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: &ChildInfo) -> bool { + /// Try to update with another instance. + /// Passing current child change as parameter is needed + pub fn try_update(&mut self, self_change: &ChildChange, other: &ChildInfo) -> ChildUpdate { + match (self, self_change) { + (_, ChildChange::BulkDelete(_encoded_root)) => ChildUpdate::Ignore, + (ChildInfo::ParentKeyId(child_trie), ChildChange::Update) => child_trie.try_update(other), + } + } + + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn top_trie() -> Self { + Self::new_default(&[]) + } + + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn is_top_trie(&self) -> bool { match self { - ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => data.len() == 0, } } @@ -239,6 +274,14 @@ impl ChildInfo { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } + + /// Return `ChildChange` applicable for this state in the case of a bulk + /// content deletion. + pub fn bulk_delete_change(&self, encoded_root: Vec) -> ChildChange { + match self { + ChildInfo::ParentKeyId(..) => ChildChange::BulkDelete(encoded_root), + } + } } /// Type of child. @@ -324,15 +367,142 @@ pub struct ChildTrieParentKeyId { impl ChildTrieParentKeyId { /// Try to update with another instance, return false if both instance /// are not compatible. - fn try_update(&mut self, other: &ChildInfo) -> bool { + fn try_update(&mut self, other: &ChildInfo) -> ChildUpdate { match other { - ChildInfo::ParentKeyId(other) => self.data[..] 
== other.data[..], + ChildInfo::ParentKeyId(other) => if self.data[..] == other.data[..] { + ChildUpdate::Merge + } else { + ChildUpdate::Incompatible + }, } } } +#[cfg(feature = "std")] +#[derive(Clone, PartialEq, Eq, Debug)] +/// Type for storing a map of child trie related information. +/// A few utilities methods are defined. +pub struct ChildrenMap(pub BTreeMap); + +/// Type alias for storage of children related content. +pub type ChildrenVec = Vec<(ChildInfo, T)>; + +/// Type alias for storage of children related content. +pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for ChildrenMap { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::DerefMut for ChildrenMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::default::Default for ChildrenMap { + fn default() -> Self { + ChildrenMap(BTreeMap::new()) + } +} + +#[cfg(feature = "std")] +impl ChildrenMap { + /// Extend for `ChildrenMap` is usually about merging entries, + /// this method extends two maps, by applying a merge function + /// on each of its entries. + pub fn extend_with( + &mut self, + other: impl Iterator, + merge: impl Fn(&mut T, T), + ) { + use sp_std::collections::btree_map::Entry; + for (child_info, child_content) in other { + match self.0.entry(child_info) { + Entry::Occupied(mut entry) => { + merge(entry.get_mut(), child_content) + }, + Entry::Vacant(entry) => { + entry.insert(child_content); + }, + } + } + } + + /// Extends two maps, by extending entries with the same key. + pub fn extend_replace( + &mut self, + other: impl Iterator, + ) { + self.0.extend(other) + } + + /// Retains only the elements specified by the predicate. 
+ pub fn retain(&mut self, mut f: impl FnMut(&ChildInfo, &mut T) -> bool) { + let mut to_del = Vec::new(); + for (k, v) in self.0.iter_mut() { + if !f(k, v) { + // this clone can be avoid with unsafe code + to_del.push(k.clone()); + } + } + for k in to_del { + self.0.remove(&k); + } + } +} + +#[cfg(feature = "std")] +impl IntoIterator for ChildrenMap { + type Item = (ChildInfo, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; +/// Information related to change to apply on a whole child trie. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] +pub enum ChildChange { + /// Update to content of child trie. + Update, + /// The child trie allow bulk trie delete. + /// The inner data is the encoded root at the state where + /// it is been bulk deleted. + BulkDelete(Vec), +} + +impl ChildChange { + /// Update a child change with its new value. + /// This should only be call if `try_update` + /// rules did pass at child_info level. + pub fn update(&mut self, other: Self) { + if other != *self { + match self { + ChildChange::Update => *self = other, + ChildChange::BulkDelete(..) 
=> panic!("Bulk delete cannot be overwritten"), + } + } + } +} + +impl Default for ChildChange { + fn default() -> Self { + ChildChange::Update + } +} + #[test] fn test_prefix_default_child_info() { let child_info = ChildInfo::new_default(b"any key"); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 37fe928336337..db3a6440455d3 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -27,8 +27,9 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use hash_db::{Hasher, Prefix}; +use sp_core::{Hasher, InnerHasher}; use trie_db::proof::{generate_proof, verify_proof}; +pub use trie_db::TrieDBNodeIterator; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; @@ -49,16 +50,16 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; #[derive(Default)] /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData); +pub struct Layout(PhantomData); -impl TrieLayout for Layout { +impl TrieLayout for Layout { const USE_EXTENSION: bool = false; type Hash = H; type Codec = NodeCodec; } -impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where +impl TrieConfiguration for Layout { + fn trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -82,8 +83,8 @@ impl TrieConfiguration for Layout { /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for key only. 
@@ -106,7 +107,7 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. -pub type TrieHash = <::Hash as Hasher>::Out; +pub type TrieHash = <::Hash as InnerHasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. @@ -213,75 +214,28 @@ pub fn read_trie_value_with< /// Determine the empty child trie root. pub fn empty_child_trie_root( -) -> ::Out { +) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } -/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, -{ - L::trie_root(input) -} - -/// Determine a child trie root given a hash DB and delta values. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_delta_trie_root( - keyspace: &[u8], - db: &mut DB, - root_data: RD, - delta: I, -) -> Result<::Out, Box>> - where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_data.as_ref()); - - { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) +/// Test if this is an empty root node. 
+pub fn check_if_empty_root ( + root: &[u8], +) -> bool { + H::EMPTY_ROOT == root } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( - keyspace: &[u8], +pub fn for_keys_in_trie( db: &DB, - root_slice: &[u8], + root: &TrieHash, mut f: F ) -> Result<(), Box>> where DB: hash_db::HashDBRef + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; + let trie = TrieDB::::new(&*db, &root)?; let iter = trie.iter()?; for x in iter { @@ -315,141 +269,6 @@ pub fn record_all_keys( Ok(()) } -/// Read a value from the child trie. -pub fn read_child_trie_value( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) -} - -/// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) -} - -/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. 
-pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); - -/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -/// -/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); - -/// Utility function used to merge some byte data (keyspace) and `prefix` data -/// before calling key value database primitives. -fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) -} - -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } -} - -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: Hasher, - T: From<&'static [u8]>, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, 
derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } -} - /// Constants used into trie simplification codec. 
mod trie_constants { pub const EMPTY_TRIE: u8 = 0; @@ -464,7 +283,8 @@ mod tests { use super::*; use codec::{Encode, Compact}; use sp_core::Blake2Hasher; - use hash_db::{HashDB, Hasher}; + use hash_db::HashDB; + use sp_core::InnerHasher; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; @@ -648,7 +468,7 @@ mod tests { #[test] fn random_should_work() { - let mut seed = ::Out::zero(); + let mut seed = ::Out::zero(); for test_i in 0..10000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 4880b296c7048..b74d642bb4fc8 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -90,11 +90,26 @@ impl TestClientBuilder Self { + let backend = Arc::new(Backend::new_test_parity_db(std::u32::MAX, std::u64::MAX)); + Self::with_backend(backend) + } + /// Create new `TestClientBuilder` with default backend and pruning window size pub fn with_pruning_window(keep_blocks: u32) -> Self { let backend = Arc::new(Backend::new_test(keep_blocks, 0)); Self::with_backend(backend) } + + /// Create new `TestClientBuilder` with parity-db backend and pruning window size + #[cfg(feature = "parity-db")] + pub fn with_pruning_window_and_parity_db(keep_blocks: u32) -> Self { + let backend = Arc::new(Backend::new_test_parity_db(keep_blocks, 0)); + Self::with_backend(backend) + } + } impl TestClientBuilder { @@ -140,6 +155,7 @@ impl TestClientBuilder TestClientBuilder: Sized { .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.clone(), + child_change: Default::default(), }).data.insert(key, value.into()); self }