diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index b2410ed15edeb..70a9853fc6b15 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -42,7 +42,7 @@ mod tests { use state_machine::TestExternalities as CoreTestExternalities; use primitives::{ Blake2Hasher, NeverNativeValue, NativeOrEncoded, map, - traits::{CodeExecutor, Externalities}, storage::well_known_keys, + traits::{CodeExecutor, Externalities}, storage::{well_known_keys, Storage}, }; use sp_runtime::{ Fixed64, traits::{Header as HeaderT, Hash as HashT, Convert}, ApplyExtrinsicResult, @@ -143,20 +143,23 @@ mod tests { #[test] fn panic_execution_with_foreign_code_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, (map![ - >::hashed_key_for(alice()) => { - 69_u128.encode() - }, - >::hashed_key().to_vec() => { - 69_u128.encode() - }, - >::hashed_key().to_vec() => { - 0_u128.encode() - }, - >::hashed_key_for(0) => { - vec![0u8; 32] - } - ], map![])); + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + 69_u128.encode() + }, + >::hashed_key().to_vec() => { + 69_u128.encode() + }, + >::hashed_key().to_vec() => { + 0_u128.encode() + }, + >::hashed_key_for(0) => { + vec![0u8; 32] + } + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -179,20 +182,23 @@ mod tests { #[test] fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, (map![ - >::hashed_key_for(alice()) => { - 69_u128.encode() - }, - >::hashed_key().to_vec() => { - 69_u128.encode() - }, - >::hashed_key().to_vec() => { - 0_u128.encode() - }, - >::hashed_key_for(0) => { - vec![0u8; 32] - } - ], map![])); + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + 69_u128.encode() + }, + >::hashed_key().to_vec() => { + 69_u128.encode() + }, + >::hashed_key().to_vec() => { + 0_u128.encode() + }, + >::hashed_key_for(0) => { + vec![0u8; 32] + } + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -215,16 +221,19 @@ mod tests { #[test] fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, (map![ - >::hashed_key_for(alice()) => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => vec![0u8; 16], - >::hashed_key_for(0) => vec![0u8; 32] - ], map![])); + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => vec![0u8; 16], + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -254,16 +263,19 @@ mod tests { #[test] fn successful_execution_with_foreign_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, (map![ - >::hashed_key_for(alice()) => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => vec![0u8; 16], - >::hashed_key_for(0) => vec![0u8; 32] - ], map![])); + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() 
=> vec![0u8; 16], + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -828,16 +840,19 @@ mod tests { #[test] fn panic_execution_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, (map![ - >::hashed_key_for(alice()) => { - 0_u128.encode() - }, - >::hashed_key().to_vec() => { - 0_u128.encode() - }, - >::hashed_key().to_vec() => vec![0u8; 16], - >::hashed_key_for(0) => vec![0u8; 32] - ], map![])); + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + 0_u128.encode() + }, + >::hashed_key().to_vec() => { + 0_u128.encode() + }, + >::hashed_key().to_vec() => vec![0u8; 16], + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -860,16 +875,19 @@ mod tests { #[test] fn successful_execution_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, (map![ - >::hashed_key_for(alice()) => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => vec![0u8; 16], - >::hashed_key_for(0) => vec![0u8; 32] - ], map![])); + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => vec![0u8; 16], + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }); let r = executor_call:: _>( &mut t, @@ -1037,19 +1055,22 @@ mod tests { // - 1 MILLICENTS in substrate node. // - 1 milli-dot based on current polkadot runtime. // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, (map![ - >::hashed_key_for(alice()) => { - (100 * DOLLARS).encode() - }, - >::hashed_key_for(bob()) => { - (10 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => { - (110 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => vec![0u8; 16], - >::hashed_key_for(0) => vec![0u8; 32] - ], map![])); + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + top: map![ + >::hashed_key_for(alice()) => { + (100 * DOLLARS).encode() + }, + >::hashed_key_for(bob()) => { + (10 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => { + (110 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => vec![0u8; 16], + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }); let tip = 1_000_000; let xt = sign(CheckedExtrinsic { diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index b865a407fac4d..a6964b39156d0 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -42,7 +42,7 @@ pub struct GenesisParameters { } impl test_client::GenesisInit for GenesisParameters { - fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay) { + fn genesis_storage(&self) -> Storage { crate::genesis::config(self.support_changes_trie, None).build_storage().unwrap() } } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 488cab6bb01d8..a71ffff74b56e 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use std::collections::HashMap; use primitives::ChangesTrieConfiguration; use primitives::offchain::OffchainStorage; -use sp_runtime::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; +use sp_runtime::{generic::BlockId, 
Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use state_machine::backend::Backend as StateBackend; use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction}; @@ -134,7 +134,7 @@ pub trait BlockImportOperation where fn update_db_storage(&mut self, update: >::Transaction) -> sp_blockchain::Result<()>; /// Inject storage data into the database replacing any existing data. - fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> sp_blockchain::Result; + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; /// Set storage changes. fn update_storage( diff --git a/client/api/src/light.rs b/client/api/src/light.rs index c368fdd108340..2c52aeca83534 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use primitives::{ChangesTrieConfiguration}; +use primitives::ChangesTrieConfiguration; use state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -81,6 +81,11 @@ pub struct RemoteReadChildRequest { pub header: Header, /// Storage key for child. pub storage_key: Vec, + /// Child trie source information. + pub child_info: Vec, + /// Child type, its required to resolve `child_info` + /// content and choose child implementation. + pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 2ebd814c03275..3c4cc7a54abb7 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -22,8 +22,8 @@ use std::fs::File; use std::path::PathBuf; use std::rc::Rc; use serde::{Serialize, Deserialize}; -use primitives::storage::{StorageKey, StorageData}; -use sp_runtime::{BuildStorage, StorageOverlay, ChildrenStorageOverlay}; +use primitives::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}; +use sp_runtime::BuildStorage; use serde_json as json; use crate::RuntimeGenesis; use network::Multiaddr; @@ -71,36 +71,62 @@ impl GenesisSource { } impl<'a, G: RuntimeGenesis, E> BuildStorage for &'a ChainSpec { - fn build_storage(&self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { + fn build_storage(&self) -> Result { match self.genesis.resolve()? 
{ Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(map, children_map) => Ok(( - map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children_map.into_iter().map(|(sk, map)| ( - sk.0, - map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - )).collect(), - )), + Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { + top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + children: children_map.into_iter().map(|(sk, child_content)| { + let child_info = ChildInfo::resolve_child_info( + child_content.child_type, + child_content.child_info.as_slice(), + ).expect("chainspec contains correct content").to_owned(); + ( + sk.0, + StorageChild { + data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + child_info, + }, + ) + }).collect(), + }), } } fn assimilate_storage( &self, - _: &mut (StorageOverlay, ChildrenStorageOverlay) + _: &mut Storage, ) -> Result<(), String> { Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) } } +type GenesisStorage = HashMap; + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +struct ChildRawStorage { + data: GenesisStorage, + child_info: Vec, + child_type: u32, +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +/// Storage content for genesis block. +struct RawGenesis { + pub top: GenesisStorage, + pub children: HashMap, +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] enum Genesis { Runtime(G), - Raw( - HashMap, - HashMap>, - ), + Raw(RawGenesis), } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -255,19 +281,26 @@ impl ChainSpec { let genesis = match (raw, self.genesis.resolve()?) { (true, Genesis::Runtime(g)) => { let storage = g.build_storage()?; - let top = storage.0.into_iter() + let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); - let children = storage.1.into_iter() - .map(|(sk, child)| ( + let children = storage.children.into_iter() + .map(|(sk, child)| { + let info = child.child_info.as_ref(); + let (info, ci_type) = info.info(); + ( StorageKey(sk), - child.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - )) + ChildRawStorage { + data: child.data.into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + child_info: info.to_vec(), + child_type: ci_type, + }, + )}) .collect(); - Genesis::Raw(top, children) + Genesis::Raw(RawGenesis { top, children }) }, (_, genesis) => genesis, }; @@ -290,9 +323,9 @@ mod tests { impl BuildStorage for Genesis { fn assimilate_storage( &self, - storage: &mut (StorageOverlay, ChildrenStorageOverlay), + storage: &mut Storage, ) -> Result<(), String> { - storage.0.extend( + storage.top.extend( self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())) ); Ok(()) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9f1316253b87e..a7320ec1c3d95 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -52,9 +52,9 @@ use kvdb::{KeyValueDB, DBTransaction}; use trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use parking_lot::{Mutex, RwLock}; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash, traits::CodeExecutor}; -use primitives::storage::well_known_keys; +use primitives::storage::{well_known_keys, ChildInfo}; use sp_runtime::{ - generic::{BlockId, DigestItem}, Justification, StorageOverlay, ChildrenStorageOverlay, + generic::{BlockId, DigestItem}, 
Justification, Storage, BuildStorage, }; use sp_runtime::traits::{ @@ -139,24 +139,39 @@ impl StateBackend for RefTrackingState { self.state.storage_hash(key) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.state.child_storage(storage_key, key) + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_storage(storage_key, child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { - self.state.exists_child_storage(storage_key, key) + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.state.exists_child_storage(storage_key, child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { self.state.next_storage_key(key) } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, key) + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.next_child_storage_key(storage_key, child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -167,12 +182,23 @@ impl StateBackend for RefTrackingState { self.state.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.state.for_keys_in_child_storage(storage_key, f) + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.state.for_keys_in_child_storage(storage_key, child_info, f) } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - self.state.for_child_keys_with_prefix(storage_key, prefix, f) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H256, Self::Transaction) @@ -182,11 +208,16 @@ impl StateBackend for RefTrackingState { self.state.storage_root(delta) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H256, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H256, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, delta) + self.state.child_storage_root(storage_key, child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -197,8 +228,13 @@ impl StateBackend for RefTrackingState { self.state.keys(prefix) } - fn child_keys(&self, child_key: &[u8], prefix: &[u8]) -> Vec> { - self.state.child_keys(child_key, prefix) + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { + self.state.child_keys(storage_key, child_info, prefix) } fn as_trie_backend( @@ -523,26 +559,26 @@ impl client_api::backend::BlockImportOperation fn reset_storage( &mut self, - top: StorageOverlay, - children: ChildrenStorageOverlay + storage: Storage, ) -> ClientResult { - if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { + if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { return Err(sp_blockchain::Error::GenesisInvalid.into()); } - for child_key in children.keys() { + for child_key in 
storage.children.keys() { if !well_known_keys::is_child_storage_key(&child_key) { return Err(sp_blockchain::Error::GenesisInvalid.into()); } } - let child_delta = children.into_iter() - .map(|(storage_key, child_overlay)| - (storage_key, child_overlay.into_iter().map(|(k, v)| (k, Some(v))))); + let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( + storage_key, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), + ); let (root, transaction) = self.old_state.full_storage_root( - top.into_iter().map(|(k, v)| (k, Some(v))), + storage.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta ); @@ -903,7 +939,8 @@ impl> Backend { }; let mut op = inmem.begin_operation().unwrap(); op.set_block_data(header, body, justification, new_block_state).unwrap(); - op.update_db_storage(state.into_iter().map(|(k, v)| (None, k, Some(v))).collect()).unwrap(); + op.update_db_storage(vec![(None, state.into_iter().map(|(k, v)| (k, Some(v))).collect())]) + .unwrap(); inmem.commit_operation(op).unwrap(); } @@ -1711,7 +1748,10 @@ mod tests { ).0.into(); let hash = header.hash(); - op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap(); + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children: Default::default(), + }).unwrap(); op.set_block_data( header.clone(), Some(vec![]), @@ -1793,7 +1833,10 @@ mod tests { ).0.into(); let hash = header.hash(); - op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap(); + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children: Default::default(), + }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); op.set_block_data( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 2b8e356d9cc82..9053491b17123 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -23,6 +23,7 @@ use linked_hash_map::{LinkedHashMap, Entry}; use hash_db::Hasher; use sp_runtime::traits::{Block as BlockT, Header}; use primitives::hexdisplay::HexDisplay; +use primitives::storage::ChildInfo; use state_machine::{backend::Backend as StateBackend, TrieBackend}; use log::trace; use client_api::backend::{StorageCollection, ChildStorageCollection}; @@ -516,7 +517,12 @@ impl, B: BlockT> StateBackend for CachingState< Ok(hash) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { let key = (storage_key.to_vec(), key.to_vec()); let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { @@ -531,7 +537,7 @@ impl, B: BlockT> StateBackend for CachingState< } } trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(storage_key, &key.1[..])?; + let value = self.state.child_storage(storage_key, child_info, &key.1[..])?; RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone()); Ok(value) } @@ -540,20 +546,35 @@ impl, B: BlockT> StateBackend for CachingState< Ok(self.storage(key)?.is_some()) } - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { - self.state.exists_child_storage(storage_key, key) + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.state.exists_child_storage(storage_key, child_info, key) } - fn next_storage_key(&self, key: 
&[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.state.for_keys_in_child_storage(storage_key, child_info, f) } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, key) + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.next_storage_key(key) } - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.state.for_keys_in_child_storage(storage_key, f) + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.next_child_storage_key(storage_key, child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -564,8 +585,14 @@ impl, B: BlockT> StateBackend for CachingState< self.state.for_key_values_with_prefix(prefix, f) } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - self.state.for_child_keys_with_prefix(storage_key, prefix, f) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -576,12 +603,17 @@ impl, B: BlockT> StateBackend for CachingState< self.state.storage_root(delta) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { - self.state.child_storage_root(storage_key, delta) + self.state.child_storage_root(storage_key, child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -592,8 +624,13 @@ impl, B: BlockT> StateBackend for CachingState< self.state.keys(prefix) } - fn child_keys(&self, child_key: &[u8], prefix: &[u8]) -> Vec> { - self.state.child_keys(child_key, prefix) + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { + self.state.child_keys(storage_key, child_info, prefix) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { diff --git a/client/executor/src/deprecated_host_interface.rs b/client/executor/src/deprecated_host_interface.rs index 7ea3687bb99ef..f585659d9e34f 100644 --- a/client/executor/src/deprecated_host_interface.rs +++ b/client/executor/src/deprecated_host_interface.rs @@ -217,38 +217,6 @@ impl_wasm_host_interface! 
{ Ok(sp_io::storage::set(&key, &value)) } - ext_set_child_storage( - storage_key_data: Pointer, - storage_key_len: WordSize, - key_data: Pointer, - key_len: WordSize, - value_data: Pointer, - value_len: WordSize, - ) { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_set_child_storage")?; - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_set_child_storage")?; - let value = context.read_memory(value_data, value_len) - .map_err(|_| "Invalid attempt to determine value in ext_set_child_storage")?; - - Ok(sp_io::storage::child_set(&storage_key, &key, &value)) - } - - ext_clear_child_storage( - storage_key_data: Pointer, - storage_key_len: WordSize, - key_data: Pointer, - key_len: WordSize, - ) { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_clear_child_storage")?; - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_clear_child_storage")?; - - Ok(sp_io::storage::child_clear(&storage_key, &key)) - } - ext_clear_storage(key_data: Pointer, key_len: WordSize) { let key = context.read_memory(key_data, key_len) .map_err(|_| "Invalid attempt to determine key in ext_clear_storage")?; @@ -261,45 +229,12 @@ impl_wasm_host_interface! { Ok(if sp_io::storage::exists(&key) { 1 } else { 0 }) } - ext_exists_child_storage( - storage_key_data: Pointer, - storage_key_len: WordSize, - key_data: Pointer, - key_len: WordSize, - ) -> u32 { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_exists_child_storage")?; - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_exists_child_storage")?; - - Ok(if sp_io::storage::child_exists(&storage_key, &key) { 1 } else { 0 }) - } - ext_clear_prefix(prefix_data: Pointer, prefix_len: WordSize) { let prefix = context.read_memory(prefix_data, prefix_len) .map_err(|_| "Invalid attempt to determine prefix in ext_clear_prefix")?; Ok(sp_io::storage::clear_prefix(&prefix)) } - ext_clear_child_prefix( - storage_key_data: Pointer, - storage_key_len: WordSize, - prefix_data: Pointer, - prefix_len: WordSize, - ) { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_clear_child_prefix")?; - let prefix = context.read_memory(prefix_data, prefix_len) - .map_err(|_| "Invalid attempt to determine prefix in ext_clear_child_prefix")?; - Ok(sp_io::storage::child_clear_prefix(&storage_key, &prefix)) - } - - ext_kill_child_storage(storage_key_data: Pointer, storage_key_len: WordSize) { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_kill_child_storage")?; - Ok(sp_io::storage::child_storage_kill(&storage_key)) - } - ext_get_allocated_storage( key_data: Pointer, key_len: WordSize, @@ -322,32 +257,6 @@ impl_wasm_host_interface! 
{ } } - ext_get_allocated_child_storage( - storage_key_data: Pointer, - storage_key_len: WordSize, - key_data: Pointer, - key_len: WordSize, - written_out: Pointer, - ) -> Pointer { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_get_allocated_child_storage")?; - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_get_allocated_child_storage")?; - - if let Some(value) = sp_io::storage::child_get(&storage_key, &key) { - let offset = context.allocate_memory(value.len() as u32)?; - context.write_memory(offset, &value) - .map_err(|_| "Invalid attempt to set memory in ext_get_allocated_child_storage")?; - context.write_primitive(written_out, value.len() as u32) - .map_err(|_| "Invalid attempt to write written_out in ext_get_allocated_child_storage")?; - Ok(offset) - } else { - context.write_primitive(written_out, u32::max_value()) - .map_err(|_| "Invalid attempt to write failed written_out in ext_get_allocated_child_storage")?; - Ok(Pointer::null()) - } - } - ext_get_storage_into( key_data: Pointer, key_len: WordSize, @@ -369,53 +278,11 @@ impl_wasm_host_interface! { } } - ext_get_child_storage_into( - storage_key_data: Pointer, - storage_key_len: WordSize, - key_data: Pointer, - key_len: WordSize, - value_data: Pointer, - value_len: WordSize, - value_offset: WordSize, - ) -> WordSize { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_get_child_storage_into")?; - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to get key in ext_get_child_storage_into")?; - - if let Some(value) = sp_io::storage::child_get(&storage_key, &key) { - let data = &value[value.len().min(value_offset as usize)..]; - let written = std::cmp::min(value_len as usize, data.len()); - context.write_memory(value_data, &data[..written]) - .map_err(|_| "Invalid attempt to get value in ext_get_child_storage_into")?; - Ok(value.len() as u32) - } else { - Ok(u32::max_value()) - } - } - ext_storage_root(result: Pointer) { context.write_memory(result, sp_io::storage::root().as_ref()) .map_err(|_| "Invalid attempt to set memory in ext_storage_root".into()) } - ext_child_storage_root( - storage_key_data: Pointer, - storage_key_len: WordSize, - written_out: Pointer, - ) -> Pointer { - let storage_key = context.read_memory(storage_key_data, storage_key_len) - .map_err(|_| "Invalid attempt to determine storage_key in ext_child_storage_root")?; - let value = sp_io::storage::child_root(&storage_key); - - let offset = context.allocate_memory(value.len() as u32)?; - context.write_memory(offset, &value) - .map_err(|_| "Invalid attempt to set memory in ext_child_storage_root")?; - context.write_primitive(written_out, value.len() as u32) - .map_err(|_| "Invalid attempt to write written_out in ext_child_storage_root")?; - Ok(offset) - } - ext_storage_changes_root( parent_hash_data: Pointer, _len: WordSize, diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 3cdb9fdafe264..f02b532619830 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -128,11 +128,14 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new((map![ + let expected = 
TestExternalities::new(primitives::storage::Storage {
+		top: map![
 			b"input".to_vec() => b"Hello world".to_vec(),
 			b"foo".to_vec() => b"bar".to_vec(),
 			b"baz".to_vec() => b"bar".to_vec()
-	], map![]));
+		],
+		children: map![],
+	});
 	assert_eq!(ext, expected);
 }
@@ -162,11 +165,14 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) {
 		assert_eq!(output, b"all ok!".to_vec().encode());
 	}
-	let expected = TestExternalities::new((map![
+	let expected = TestExternalities::new(primitives::storage::Storage {
+		top: map![
 			b"aaa".to_vec() => b"1".to_vec(),
 			b"aab".to_vec() => b"2".to_vec(),
 			b"bbb".to_vec() => b"5".to_vec()
-	], map![]));
+		],
+		children: map![],
+	});
 	assert_eq!(expected, ext);
 }
diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs
index ece9bf167b8d1..51721fba8ee7f 100644
--- a/client/finality-grandpa/src/tests.rs
+++ b/client/finality-grandpa/src/tests.rs
@@ -290,7 +290,7 @@ impl AuthoritySetForFinalityProver for TestApi {
 	fn prove_authorities(&self, block: &BlockId) -> Result {
 		let authorities = self.authorities(block)?;
 		let backend = >::from(vec![
-			(None, b"authorities".to_vec(), Some(authorities.encode()))
+			(None, vec![(b"authorities".to_vec(), Some(authorities.encode()))])
 		]);
 		let proof = prove_read(backend, vec![b"authorities"])
 			.expect("failure proving read from in-memory storage backend");
diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs
index 671bdb27ca8c4..59fd15bbf1e68 100644
--- a/client/network/src/chain.rs
+++ b/client/network/src/chain.rs
@@ -23,7 +23,8 @@ use consensus::{BlockImport, BlockStatus, Error as ConsensusError};
 use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 use sp_runtime::generic::{BlockId};
 use sp_runtime::Justification;
-use primitives::{H256, Blake2Hasher, storage::StorageKey};
+use primitives::{H256, Blake2Hasher};
+use primitives::storage::{StorageKey, ChildInfo};
 /// Local client abstraction for the network.
pub trait Client: Send + Sync { @@ -57,6 +58,7 @@ pub trait Client: Send + Sync { &self, block: &Block::Hash, storage_key: &[u8], + child_info: ChildInfo, keys: &[Vec], ) -> Result; @@ -135,10 +137,11 @@ impl Client for SubstrateClient where &self, block: &Block::Hash, storage_key: &[u8], + child_info: ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) - .read_child_proof(&BlockId::Hash(block.clone()), storage_key, keys) + .read_child_proof(&BlockId::Hash(block.clone()), storage_key, child_info, keys) } fn execution_proof(&self, block: &Block::Hash, method: &str, data: &[u8]) -> Result<(Vec, StorageProof), Error> { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 88bc1ede52275..bf83416ae7c1a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -24,7 +24,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use primitives::storage::StorageKey; +use primitives::storage::{StorageKey, ChildInfo}; use consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -239,12 +239,16 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id: RequestId, block: ::Hash, storage_key: Vec, + child_info: Vec, + child_type: u32, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { id, block, storage_key, + child_info, + child_type, keys, }); @@ -1518,23 +1522,37 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = match self.context_data.chain.read_child_proof( - &request.block, - &request.storage_key, - &request.keys, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() + let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + match self.context_data.chain.read_child_proof( + &request.block, + &request.storage_key, + child_info, + &request.keys, + ) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + request.storage_key.to_hex::(), + keys_str(), + request.block, + error + ); + StorageProof::empty() + } } + } else { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + request.storage_key.to_hex::(), + keys_str(), + request.block, + "invalid child info and type", + ); + + StorageProof::empty() }; self.send_message( &who, diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 869fd703cab71..047961250f004 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -69,6 +69,8 @@ pub trait LightDispatchNetwork { id: RequestId, block: ::Hash, storage_key: Vec, + child_info: Vec, + child_type: u32, keys: Vec>, ); @@ -622,6 +624,8 @@ impl Request { self.id, data.block, data.storage_key.clone(), + data.child_info.clone(), + data.child_type, 
 					data.keys.clone(),
 				),
 			RequestData::RemoteCall(ref data, _) =>
@@ -677,6 +681,7 @@ pub mod tests {
 	use std::sync::Arc;
 	use std::time::Instant;
 	use futures::{Future, sync::oneshot};
+	use primitives::storage::ChildInfo;
 	use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT};
 	use sp_blockchain::{Error as ClientError, Result as ClientResult};
 	use client_api::{FetchChecker, RemoteHeaderRequest,
@@ -808,7 +813,7 @@ pub mod tests {
 		fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {}
 		fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {}
 		fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec,
-			_: Vec>) {}
+			_: Vec, _: u32, _: Vec>) {}
 		fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {}
 		fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: Option>, _: Vec) {}
@@ -1027,10 +1032,14 @@ pub mod tests {
 		light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000);
 		let (tx, response) = oneshot::channel();
+		let child_info = ChildInfo::new_default(b"unique_id_1");
+		let (child_info, child_type) = child_info.info();
 		light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest {
 			header: dummy_header(),
 			block: Default::default(),
 			storage_key: b":child_storage:sub".to_vec(),
+			child_info: child_info.to_vec(),
+			child_type,
 			keys: vec![b":key".to_vec()],
 			retry_count: None,
 		}, tx));
diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs
index a05a254f16817..b7267f376f8fc 100644
--- a/client/network/src/protocol/message.rs
+++ b/client/network/src/protocol/message.rs
@@ -368,6 +368,11 @@ pub mod generic {
 		pub block: H,
 		/// Child Storage key.
 		pub storage_key: Vec,
+		/// Child trie source information.
+		pub child_info: Vec,
+		/// Child type, it's required to resolve `child_info`
+		/// content and choose child implementation.
 		/// Storage key.
pub keys: Vec>, } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 0d06092ca1659..9a549b00c4304 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -60,6 +60,8 @@ pub trait StateApi { fn child_storage_keys( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -69,6 +71,8 @@ pub trait StateApi { fn child_storage( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -78,6 +82,8 @@ pub trait StateApi { fn child_storage_hash( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -87,6 +93,8 @@ pub trait StateApi { fn child_storage_size( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index df9a6709b9bca..53aabaf699db3 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -95,6 +95,8 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -103,6 +105,8 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -111,6 +115,8 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -119,9 +125,11 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, key) + Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -256,37 +264,45 @@ impl StateApi for State fn child_storage( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, key) + self.backend.child_storage(block, child_storage_key, child_info, child_type, key) } fn child_storage_keys( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, key_prefix) + self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) } fn child_storage_hash( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, key) + self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) } fn child_storage_size( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, key) + self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) } fn metadata(&self, block: Option) -> FutureResult { @@ -335,3 +351,9 @@ impl 
StateApi for State fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } + +const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; + +fn child_resolution_error() -> sp_blockchain::Error { + sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) +} diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 582d3a0e7efe7..d5de8d64413bf 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -36,7 +36,8 @@ use client::{ Client, CallExecutor, BlockchainEvents, }; use primitives::{ - H256, Blake2Hasher, Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet}, + H256, Blake2Hasher, Bytes, + storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; use runtime_version::RuntimeVersion; use state_machine::ExecutionStrategy; @@ -47,7 +48,7 @@ use sp_runtime::{ use sp_api::Metadata; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -287,11 +288,19 @@ impl StateBackend for FullState, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_keys(&BlockId::Hash(block), &child_storage_key, &prefix)) + .and_then(|block| self.client.child_storage_keys( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &prefix, + )) .map_err(client_err))) } @@ -299,11 +308,19 @@ impl StateBackend for FullState, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage(&BlockId::Hash(block), &child_storage_key, &key)) + .and_then(|block| self.client.child_storage( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &key, + )) .map_err(client_err))) } @@ -311,11 +328,19 @@ impl StateBackend for FullState, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_hash(&BlockId::Hash(block), &child_storage_key, &key)) + .and_then(|block| self.client.child_storage_hash( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &key, + )) .map_err(client_err))) } diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 62d3404d8e001..d90ef02c3dc3b 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -236,6 +236,8 @@ impl StateBackend for LightState, _child_storage_key: StorageKey, + _child_info: StorageKey, + _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -245,6 +247,8 @@ impl StateBackend for LightState, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -255,6 +259,8 @@ impl 
StateBackend for LightState StateBackend for LightState, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, child_storage_key, key) + .child_storage(block, child_storage_key, child_info, child_type, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| Blake2Hasher::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 2ae22df1a0058..c0f09fafeb3cd 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -21,7 +21,7 @@ use self::error::Error; use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use primitives::storage::well_known_keys; +use primitives::storage::{well_known_keys, ChildInfo}; use primitives::hash::H256; use sp_io::hashing::blake2_256; use test_client::{ @@ -30,6 +30,8 @@ use test_client::{ runtime, }; +const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); + #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; @@ -40,12 +42,14 @@ fn should_return_storage() { let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); + let (child_info, child_type) = CHILD_INFO.info(); + let child_info = StorageKey(child_info.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -63,7 +67,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, key, Some(genesis_hash).into()) + client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -73,27 +77,48 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { + let (child_info, child_type) = CHILD_INFO.info(); + let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(test_client::TestClientBuilder::new() - .add_child_storage("test", "key", vec![42_u8]) + .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect()); + let child_key = StorageKey( + well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect() + ); let key = StorageKey(b"key".to_vec()); assert_matches!( - client.child_storage(child_key.clone(), key.clone(), Some(genesis_hash).into()).wait(), + client.child_storage( + child_key.clone(), + child_info.clone(), + child_type, + key.clone(), + Some(genesis_hash).into(), + ).wait(), Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.child_storage_hash(child_key.clone(), key.clone(), Some(genesis_hash).into()) - .wait().map(|x| x.is_some()), + client.child_storage_hash( + child_key.clone(), + child_info.clone(), + child_type, + 
key.clone(), + Some(genesis_hash).into(), + ).wait().map(|x| x.is_some()), Ok(true) ); assert_matches!( - client.child_storage_size(child_key.clone(), key.clone(), None).wait(), + client.child_storage_size( + child_key.clone(), + child_info.clone(), + child_type, + key.clone(), + None, + ).wait(), Ok(Some(1)) ); } diff --git a/client/src/cht.rs b/client/src/cht.rs index 15a3b7718c0a0..389560223a40f 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -98,9 +98,9 @@ pub fn build_proof( { let transaction = build_pairs::(cht_size, cht_num, hashes)? .into_iter() - .map(|(k, v)| (None, k, Some(v))) + .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryState::::default().update(transaction); + let mut storage = InMemoryState::::default().update(vec![(None, transaction)]); let trie_storage = storage.as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( diff --git a/client/src/client.rs b/client/src/client.rs index 88ecfecd2157f..fe3dfbead346b 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -28,7 +28,7 @@ use hash_db::{Hasher, Prefix}; use primitives::{ Blake2Hasher, H256, ChangesTrieConfiguration, convert_hash, NeverNativeValue, ExecutionContext, NativeOrEncoded, - storage::{StorageKey, StorageData, well_known_keys}, + storage::{StorageKey, StorageData, well_known_keys, ChildInfo}, traits::CodeExecutor, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; @@ -199,10 +199,10 @@ impl Client where execution_extensions: ExecutionExtensions, ) -> sp_blockchain::Result { if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?; + let genesis_storage = build_genesis_storage.build_storage()?; let mut op = backend.begin_operation()?; backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; - let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?; + let state_root = op.reset_storage(genesis_storage)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!("Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), @@ -267,10 +267,11 @@ impl Client where &self, id: &BlockId, child_storage_key: &StorageKey, + child_info: ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? - .child_keys(&child_storage_key.0, &key_prefix.0) + .child_keys(&child_storage_key.0, child_info, &key_prefix.0) .into_iter() .map(StorageKey) .collect(); @@ -281,11 +282,13 @@ impl Client where pub fn child_storage( &self, id: &BlockId, - child_storage_key: &StorageKey, + storage_key: &StorageKey, + child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage(&child_storage_key.0, &key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .child_storage(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) } @@ -293,11 +296,13 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, - child_storage_key: &StorageKey, + storage_key: &StorageKey, + child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage_hash(&child_storage_key.0, &key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ .child_storage_hash(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? ) } @@ -334,13 +339,14 @@ impl Client where &self, id: &BlockId, storage_key: &[u8], + child_info: ChildInfo, keys: I, ) -> sp_blockchain::Result where I: IntoIterator, I::Item: AsRef<[u8]>, { self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, keys) + .and_then(|state| prove_child_read(state, storage_key, child_info, keys) .map_err(Into::into)) } @@ -1012,7 +1018,7 @@ impl Client where overlay.commit_prospective(); let (top, children) = overlay.into_committed(); - let children = children.map(|(sk, it)| (sk, it.collect())).collect(); + let children = children.map(|(sk, it)| (sk, it.0.collect())).collect(); if import_headers.post().state_root() != &storage_update.1 { return Err(sp_blockchain::Error::InvalidStateRoot); } diff --git a/client/src/genesis.rs b/client/src/genesis.rs index d2743167422aa..a080a87cf46c5 100644 --- a/client/src/genesis.rs +++ b/client/src/genesis.rs @@ -53,7 +53,7 @@ mod tests { runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest}, AccountKeyring, Sr25519Keyring, }; - use primitives::{Blake2Hasher, map}; + use primitives::Blake2Hasher; use hex_literal::*; native_executor_instance!( @@ -154,8 +154,7 @@ mod tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, None, - map![], - map![], + Default::default(), ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); @@ -183,8 +182,7 @@ mod tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, None, - map![], - map![], + Default::default(), ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); @@ -212,8 +210,7 @@ mod tests { vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 68, None, - map![], - map![], + Default::default(), ).genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index e5964669c9aa9..85bdd954c80ba 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -25,7 +25,7 @@ use primitives::offchain::storage::{ }; use sp_runtime::generic::{BlockId, DigestItem}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor}; -use sp_runtime::{Justification, StorageOverlay, ChildrenStorageOverlay}; +use sp_runtime::{Justification, Storage}; use state_machine::backend::{Backend as StateBackend, InMemory}; use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId, ChangesTrieTransaction}; use hash_db::{Hasher, Prefix}; @@ -505,15 +505,15 @@ where Ok(()) } - fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> sp_blockchain::Result { - check_genesis_storage(&top, &children)?; + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { + check_genesis_storage(&storage)?; - let child_delta = children.into_iter() - .map(|(storage_key, child_overlay)| - (storage_key, child_overlay.into_iter().map(|(k, v)| (k, Some(v))))); + let child_delta = storage.children.into_iter() + .map(|(storage_key, child_content)| + (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); let (root, transaction) = self.old_state.full_storage_root( - top.into_iter().map(|(k, v)| (k, Some(v))), + storage.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta ); @@ -796,12 +796,12 @@ impl state_machine::ChangesTrieStorage> for Change } /// Check that genesis storage is valid. 
-pub fn check_genesis_storage(top: &StorageOverlay, children: &ChildrenStorageOverlay) -> sp_blockchain::Result<()> {
-	if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
+pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
+	if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
 		return Err(sp_blockchain::Error::GenesisInvalid.into());
 	}
-	if children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
+	if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
 		return Err(sp_blockchain::Error::GenesisInvalid.into());
 	}
diff --git a/client/src/lib.rs b/client/src/lib.rs
index 00a0d8c26900c..364733f1a4155 100644
--- a/client/src/lib.rs
+++ b/client/src/lib.rs
@@ -48,7 +48,7 @@
 //! use std::sync::Arc;
 //! use sc_client::{Client, in_mem::Backend, LocalCallExecutor};
 //! use primitives::Blake2Hasher;
-//! use sp_runtime::{StorageOverlay, ChildrenStorageOverlay};
+//! use sp_runtime::Storage;
 //! use executor::{NativeExecutor, WasmExecutionMethod};
 //!
 //! // In this example, we're using the `Block` and `RuntimeApi` types from the
 //!         NativeExecutor::::new(WasmExecutionMethod::Interpreted, None),
 //!     ),
 //!     // This parameter provides the storage for the chain genesis.
-//!     <(StorageOverlay, ChildrenStorageOverlay)>::default(),
+//!     ::default(),
 //!     Default::default(),
 //!     Default::default(),
 //! );
diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs
index 0d974411a9d38..5f770394fc180 100644
--- a/client/src/light/backend.rs
+++ b/client/src/light/backend.rs
@@ -21,11 +21,12 @@ use std::collections::HashMap;
 use std::sync::Arc;
 use parking_lot::RwLock;
+use primitives::storage::{ChildInfo, OwnedChildInfo};
 use state_machine::{
 	Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState, ChangesTrieTransaction
 };
 use primitives::offchain::storage::InMemOffchainStorage;
-use sp_runtime::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay};
+use sp_runtime::{generic::BlockId, Justification, Storage};
 use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header};
 use crate::in_mem::{self, check_genesis_storage};
 use sp_blockchain::{ Error as ClientError, Result as ClientResult };
@@ -280,22 +281,21 @@ where
 		Ok(())
 	}
-	fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> ClientResult {
-		check_genesis_storage(&top, &children)?;
+	fn reset_storage(&mut self, input: Storage) -> ClientResult {
+		check_genesis_storage(&input)?;
 		// this is only called when genesis block is imported => shouldn't be performance bottleneck
-		let mut storage: HashMap>, StorageOverlay> = HashMap::new();
-		storage.insert(None, top);
+		let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new();
+		storage.insert(None, input.top);
 		// create a list of children keys to re-compute roots for
-		let child_delta = children.keys()
-			.cloned()
-			.map(|storage_key| (storage_key, None))
+		let child_delta = input.children.iter()
+			.map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone()))
 			.collect::>();
 		// make sure to persist the child storage
-		for (child_key, child_storage) in children {
-			storage.insert(Some(child_key), child_storage);
+		for (child_key, storage_child) in input.children {
+			storage.insert(Some((child_key, storage_child.child_info)), storage_child.data);
 		}
 		let storage_update: InMemoryState = storage.into();
@@ -357,10 +357,15 @@ impl 
StateBackend for GenesisOrUnavailableState } } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> ClientResult>> { + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(storage_key, key).expect(IN_MEMORY_EXPECT_PROOF)), + Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -373,10 +378,17 @@ impl StateBackend for GenesisOrUnavailableState } } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_child_storage_key(storage_key, key).expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Genesis(ref state) => Ok( + state.next_child_storage_key(storage_key, child_info, key) + .expect(IN_MEMORY_EXPECT_PROOF) + ), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -395,10 +407,15 @@ impl StateBackend for GenesisOrUnavailableState } } - - fn for_keys_in_child_storage(&self, storage_key: &[u8], action: A) { + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + action: A, + ) { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_keys_in_child_storage(storage_key, action), + GenesisOrUnavailableState::Genesis(ref state) => + state.for_keys_in_child_storage(storage_key, child_info, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -406,12 +423,13 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, storage_key: &[u8], + child_info: ChildInfo, prefix: &[u8], action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(storage_key, prefix, action), + state.for_child_keys_with_prefix(storage_key, child_info, prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -427,13 +445,18 @@ impl StateBackend for GenesisOrUnavailableState } } - fn child_storage_root(&self, key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)> { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(key, delta); + let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); (root, is_equal, ()) }, GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, ()), @@ -478,7 +501,7 @@ mod tests { let backend: Backend<_, Blake2Hasher> = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default(), Default::default()).unwrap(); + op.reset_storage(Default::default()).unwrap(); backend.commit_operation(op).unwrap(); match backend.state_at(BlockId::Number(0)).unwrap() { diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 9b2c308668531..babd83cfc907b 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -339,11 +339,13 @@ pub 
mod tests { use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; use primitives::{blake2_256, Blake2Hasher, H256}; - use primitives::storage::{well_known_keys, StorageKey}; + use primitives::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_runtime::generic::BlockId; use state_machine::Backend; use super::*; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + type TestChecker = LightDataChecker< NativeExecutor, Blake2Hasher, @@ -394,8 +396,12 @@ pub mod tests { use test_client::TestClientBuilderExt; // prepare remote client let remote_client = test_client::TestClientBuilder::new() - .add_extra_child_storage(b":child_storage:default:child1".to_vec(), b"key1".to_vec(), b"value1".to_vec()) - .build(); + .add_extra_child_storage( + b":child_storage:default:child1".to_vec(), + CHILD_INFO_1, + b"key1".to_vec(), + b"value1".to_vec(), + ).build(); let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -406,12 +412,14 @@ pub mod tests { let child_value = remote_client.child_storage( &remote_block_id, &StorageKey(b":child_storage:default:child1".to_vec()), + CHILD_INFO_1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, b":child_storage:default:child1", + CHILD_INFO_1, &[b"key1"], ).unwrap(); @@ -487,11 +495,14 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); + let child_infos = CHILD_INFO_1.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b":child_storage:default:child1".to_vec(), + child_info: child_infos.0.to_vec(), + child_type: child_infos.1, keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 2971f46f375c2..3f345f043f057 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -173,13 +173,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id); + child::kill_storage(&info.trie_id, info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id); + child::kill_storage(&info.trie_id, info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -217,14 +217,18 @@ impl AccountDb for DirectAccountDb { } for (k, v) in changed.storage.into_iter() { - if let Some(value) = child::get_raw(&new_info.trie_id[..], &blake2_256(&k)) { + if let Some(value) = child::get_raw( + &new_info.trie_id[..], + new_info.child_trie_unique_id(), + &blake2_256(&k), + ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], &blake2_256(&k), &value[..]); + child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], &blake2_256(&k)); + child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f342a36a7ab87..0642049478203 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -223,6 +223,19 @@ pub struct RawAliveContractInfo { pub last_write: Option, } +impl RawAliveContractInfo { + /// Associated child trie unique id is built from the hash part of the trie id. + pub fn child_trie_unique_id(&self) -> child::ChildInfo { + trie_unique_id(&self.trie_id[..]) + } +} + +/// Associated child trie unique id is built from the hash part of the trie id. 
+pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { + let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); + child::ChildInfo::new_default(&trie_id[start ..]) +} + pub type TombstoneContractInfo = RawTombstoneContractInfo<::Hash, ::Hashing>; @@ -793,8 +806,17 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { - child::get_raw(&origin_contract.trie_id, &blake2_256(key)).map(|value| { - child::kill(&origin_contract.trie_id, &blake2_256(key)); + child::get_raw( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + ).map(|value| { + child::kill( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + ); + (key, value) }) }) @@ -803,13 +825,20 @@ impl Module { let tombstone = >::new( // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. - &sp_io::storage::child_root(&origin_contract.trie_id)[..], + &child::child_root( + &origin_contract.trie_id, + )[..], code_hash, ); if tombstone != dest_tombstone { for (key, value) in key_values_taken { - child::put_raw(&origin_contract.trie_id, &blake2_256(key), &value); + child::put_raw( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + &value, + ); } return Err("Tombstones don't match"); @@ -887,7 +916,7 @@ decl_storage! { impl OnFreeBalanceZero for Module { fn on_free_balance_zero(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id); + child::kill_storage(&info.trie_id, info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index cf96ee2c1b441..d4ca5ec7f7237 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -19,6 +19,7 @@ use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul, Saturating, Zero, SaturatedConversion}; use support::traits::{Currency, ExistenceRequirement, Get, WithdrawReason, OnUnbalanced}; use support::StorageMap; +use support::storage::child; #[derive(PartialEq, Eq, Copy, Clone)] #[must_use] @@ -99,7 +100,7 @@ fn try_evict_or_and_pay_rent( if balance < subsistence_threshold { // The contract cannot afford to leave a tombstone, so remove the contract info altogether. >::remove(account); - sp_io::storage::child_storage_kill(&contract.trie_id); + child::kill_storage(&contract.trie_id, contract.child_trie_unique_id()); return (RentOutcome::Evicted, None); } @@ -146,7 +147,9 @@ fn try_evict_or_and_pay_rent( // threshold, so it leaves a tombstone. // Note: this operation is heavy. 
- let child_storage_root = sp_io::storage::child_root(&contract.trie_id); + let child_storage_root = child::child_root( + &contract.trie_id, + ); let tombstone = >::new( &child_storage_root[..], @@ -155,7 +158,7 @@ fn try_evict_or_and_pay_rent( let tombstone_info = ContractInfo::Tombstone(tombstone); >::insert(account, &tombstone_info); - sp_io::storage::child_storage_kill(&contract.trie_id); + child::kill_storage(&contract.trie_id, contract.child_trie_unique_id()); return (RentOutcome::Evicted, Some(tombstone_info)); } diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 4102513cce852..4e91ccd1df13e 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -139,13 +139,10 @@ fn impl_build_storage( #[cfg(feature = "std")] impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { pub fn build_storage #fn_generic (&self) -> std::result::Result< - ( - #scrate::sp_runtime::StorageOverlay, - #scrate::sp_runtime::ChildrenStorageOverlay, - ), + #scrate::sp_runtime::Storage, String > #fn_where_clause { - let mut storage = (Default::default(), Default::default()); + let mut storage = Default::default(); self.assimilate_storage::<#fn_traitinstance>(&mut storage)?; Ok(storage) } @@ -153,12 +150,9 @@ fn impl_build_storage( /// Assimilate the storage for this module into pre-existing overlays. pub fn assimilate_storage #fn_generic ( &self, - tuple_storage: &mut ( - #scrate::sp_runtime::StorageOverlay, - #scrate::sp_runtime::ChildrenStorageOverlay, - ), + storage: &mut #scrate::sp_runtime::Storage, ) -> std::result::Result<(), String> #fn_where_clause { - #scrate::BasicExternalities::execute_with_storage(tuple_storage, || { + #scrate::BasicExternalities::execute_with_storage(storage, || { #( #builder_blocks )* Ok(()) }) @@ -171,10 +165,7 @@ fn impl_build_storage( { fn build_module_genesis_storage( &self, - storage: &mut ( - #scrate::sp_runtime::StorageOverlay, - #scrate::sp_runtime::ChildrenStorageOverlay, - ), + storage: &mut #scrate::sp_runtime::Storage, ) -> std::result::Result<(), String> { self.assimilate_storage::<#fn_traitinstance> (storage) } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 6fb8ce87b7951..abd1a208c69b1 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -19,14 +19,29 @@ //! This module is a currently only a variant of unhashed with additional `storage_key`. //! Note that `storage_key` must be unique and strong (strong in the sense of being long enough to //! avoid collision from a resistant hash function (which unique implies)). +//! +//! A **key collision free** unique id is required as parameter to avoid key collision +//! between child tries. +//! This unique id management and generation responsability is delegated to pallet module. // NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>). use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; +pub use primitives::storage::ChildInfo; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. 
-pub fn get(storage_key: &[u8], key: &[u8]) -> Option { - sp_io::storage::child_get(storage_key, key).and_then(|v| { +pub fn get( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> Option { + let (data, child_type) = child_info.info(); + sp_io::storage::child_get( + storage_key, + data, + child_type, + key, + ).and_then(|v| { Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); @@ -37,83 +52,178 @@ pub fn get(storage_key: &[u8], key: &[u8]) -> Option { /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. -pub fn get_or_default(storage_key: &[u8], key: &[u8]) -> T { - get(storage_key, key).unwrap_or_else(Default::default) +pub fn get_or_default( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> T { + get(storage_key, child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. -pub fn get_or(storage_key: &[u8], key: &[u8], default_value: T) -> T { - get(storage_key, key).unwrap_or(default_value) +pub fn get_or( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: T, +) -> T { + get(storage_key, child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( storage_key: &[u8], + child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - get(storage_key, key).unwrap_or_else(default_value) + get(storage_key, child_info, key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. -pub fn put(storage_key: &[u8], key: &[u8], value: &T) { - value.using_encoded(|slice| sp_io::storage::child_set(storage_key, key, slice)); +pub fn put( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + value: &T, +) { + let (data, child_type) = child_info.info(); + value.using_encoded(|slice| + sp_io::storage::child_set( + storage_key, + data, + child_type, + key, + slice, + ) + ); } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. -pub fn take(storage_key: &[u8], key: &[u8]) -> Option { - let r = get(storage_key, key); +pub fn take( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> Option { + let r = get(storage_key, child_info, key); if r.is_some() { - kill(storage_key, key); + kill(storage_key, child_info, key); } r } /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. -pub fn take_or_default(storage_key: &[u8], key: &[u8]) -> T { - take(storage_key, key).unwrap_or_else(Default::default) +pub fn take_or_default( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> T { + take(storage_key, child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. 
-pub fn take_or(storage_key: &[u8],key: &[u8], default_value: T) -> T { - take(storage_key, key).unwrap_or(default_value) +pub fn take_or( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: T, +) -> T { + take(storage_key, child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( storage_key: &[u8], + child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - take(storage_key, key).unwrap_or_else(default_value) + take(storage_key, child_info, key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. -pub fn exists(storage_key: &[u8], key: &[u8]) -> bool { - sp_io::storage::child_read(storage_key, key, &mut [0;0][..], 0).is_some() +pub fn exists( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> bool { + let (data, child_type) = child_info.info(); + sp_io::storage::child_read( + storage_key, data, child_type, + key, &mut [0;0][..], 0, + ).is_some() } /// Remove all `storage_key` key/values -pub fn kill_storage(storage_key: &[u8]) { - sp_io::storage::child_storage_kill(storage_key) +pub fn kill_storage( + storage_key: &[u8], + child_info: ChildInfo, +) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_storage_kill( + storage_key, + data, + child_type, + ) } /// Ensure `key` has no explicit entry in storage. -pub fn kill(storage_key: &[u8], key: &[u8]) { - sp_io::storage::child_clear(storage_key, key); +pub fn kill( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_clear( + storage_key, + data, + child_type, + key, + ); } /// Get a Vec of bytes from storage. -pub fn get_raw(storage_key: &[u8], key: &[u8]) -> Option> { - sp_io::storage::child_get(storage_key, key) +pub fn get_raw( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], +) -> Option> { + let (data, child_type) = child_info.info(); + sp_io::storage::child_get( + storage_key, + data, + child_type, + key, + ) } /// Put a raw byte slice into storage. -pub fn put_raw(storage_key: &[u8], key: &[u8], value: &[u8]) { - sp_io::storage::child_set(storage_key, key, value) +pub fn put_raw( + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + value: &[u8], +) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_set( + storage_key, + data, + child_type, + key, + value, + ) +} + +/// Calculate current child root value. +pub fn child_root( + storage_key: &[u8], +) -> Vec { + sp_io::storage::child_root( + storage_key, + ) } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index c2ba694f7de6d..0525589afa596 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -300,7 +300,10 @@ fn new_test_ext() -> sp_io::TestExternalities { #[test] fn storage_instance_independance() { - let mut storage = Default::default(); + let mut storage = primitives::storage::Storage { + top: std::collections::BTreeMap::new(), + children: std::collections::HashMap::new() + }; state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); module2::Value::::put(0); @@ -320,7 +323,7 @@ fn storage_instance_independance() { module2::DoubleMap::::insert(&0, &0, &0); }); // 16 storage values + 4 linked_map head. 
- assert_eq!(storage.0.len(), 16 + 4); + assert_eq!(storage.top.len(), 16 + 4); } #[test] diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index d7485db096838..b8786e59a5604 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -703,11 +703,14 @@ impl Module { /// Get the basic externalities for this module, useful for tests. #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { - TestExternalities::new((map![ - >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), - >::hashed_key().to_vec() => T::BlockNumber::one().encode(), - >::hashed_key().to_vec() => [69u8; 32].encode() - ], map![])) + TestExternalities::new(primitives::storage::Storage { + top: map![ + >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), + >::hashed_key().to_vec() => T::BlockNumber::one().encode(), + >::hashed_key().to_vec() => [69u8; 32].encode() + ], + children: map![], + }) } /// Set the block number to something in particular. Can be used as an alternative to diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 05121f34d3dec..e79d6a2e3d627 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -24,7 +24,7 @@ use std::any::{Any, TypeId}; -use primitives_storage::ChildStorageKey; +use primitives_storage::{ChildStorageKey, ChildInfo}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -45,7 +45,12 @@ pub trait Externalities: ExtensionStore { /// Get child storage value hash. This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option>; + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; /// Read original runtime storage, ignoring any overlayed changes. fn original_storage(&self, key: &[u8]) -> Option>; @@ -53,7 +58,12 @@ pub trait Externalities: ExtensionStore { /// Read original runtime child storage, ignoring any overlayed changes. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn original_child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option>; + fn original_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; /// Get original storage value hash, ignoring any overlayed changes. /// This may be optimized for large values. @@ -68,13 +78,19 @@ pub trait Externalities: ExtensionStore { fn original_child_storage_hash( &self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: &[u8], ) -> Option>; /// Read child runtime storage. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option>; + fn child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec, value: Vec) { @@ -82,8 +98,14 @@ pub trait Externalities: ExtensionStore { } /// Set child storage entry `key` of current contract being called (effective immediately). 
- fn set_child_storage(&mut self, storage_key: ChildStorageKey, key: Vec, value: Vec) { - self.place_child_storage(storage_key, key, Some(value)) + fn set_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: Vec, + value: Vec, + ) { + self.place_child_storage(storage_key, child_info, key, Some(value)) } /// Clear a storage entry (`key`) of current contract being called (effective immediately). @@ -92,8 +114,13 @@ pub trait Externalities: ExtensionStore { } /// Clear a child storage entry (`key`) of current contract being called (effective immediately). - fn clear_child_storage(&mut self, storage_key: ChildStorageKey, key: &[u8]) { - self.place_child_storage(storage_key, key.to_vec(), None) + fn clear_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) { + self.place_child_storage(storage_key, child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -102,24 +129,39 @@ pub trait Externalities: ExtensionStore { } /// Whether a child storage entry exists. - fn exists_child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> bool { - self.child_storage(storage_key, key).is_some() + fn exists_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> bool { + self.child_storage(storage_key, child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. fn next_storage_key(&self, key: &[u8]) -> Option>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option>; + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey); + fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); /// Clear child storage entries which keys are start with the given prefix. - fn clear_child_prefix(&mut self, storage_key: ChildStorageKey, prefix: &[u8]); + fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + prefix: &[u8], + ); /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). fn place_storage(&mut self, key: Vec, value: Option>); @@ -128,6 +170,7 @@ pub trait Externalities: ExtensionStore { fn place_child_storage( &mut self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: Vec, value: Option>, ); @@ -147,7 +190,10 @@ pub trait Externalities: ExtensionStore { /// storage keys in the top-level storage map. /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. - fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec; + fn child_storage_root( + &mut self, + storage_key: ChildStorageKey, + ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. /// `parent` is expects a SCALE endcoded hash. 
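Taken together, the `Externalities` changes above mean a bare child storage key is no longer sufficient: every child read, write, kill, and prefix-clear now also carries the trie's `ChildInfo`. As a minimal sketch (not part of this patch), here is roughly what pallet code calling the child helpers reworked earlier in frame/support/src/storage/child.rs looks like afterwards; the storage key and unique id are illustrative placeholders, and the function is assumed to run inside externalities (e.g. `TestExternalities::execute_with`):

// Sketch only: the ChildInfo-carrying child storage helpers from
// frame/support/src/storage/child.rs. `EXAMPLE_CHILD_INFO` and the storage key
// are hypothetical values, not taken from this patch.
use support::storage::child::{self, ChildInfo};

const EXAMPLE_CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"example_unique_id");
const EXAMPLE_STORAGE_KEY: &[u8] = b":child_storage:default:example";

fn child_storage_roundtrip() {
    // Reads and writes now take the child storage key *and* its `ChildInfo`.
    child::put(EXAMPLE_STORAGE_KEY, EXAMPLE_CHILD_INFO, b"key", &42u32);
    let value: Option<u32> = child::get(EXAMPLE_STORAGE_KEY, EXAMPLE_CHILD_INFO, b"key");
    assert_eq!(value, Some(42u32));

    // Removing an entire child trie likewise requires the `ChildInfo`.
    child::kill_storage(EXAMPLE_STORAGE_KEY, EXAMPLE_CHILD_INFO);
}

Internally these helpers split the `ChildInfo` into its `(child_definition, child_type)` pair via `ChildInfo::info()` before crossing the runtime interface, which is what the reworked `sp_io::storage::child_*` host functions in the next file expect.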
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ef4334808a7e3..5941a8fff8061 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -39,7 +39,7 @@ use primitives::{ traits::KeystoreExt, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::ChildStorageKey, + storage::{ChildStorageKey, ChildInfo}, }; use primitives::{ @@ -91,10 +91,28 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } - /// Returns the data for `key` in the child storage or `None` if the key can not be found. - fn child_get(&self, child_storage_key: &[u8], key: &[u8]) -> Option> { + /// All Child api uses : + /// - A `child_storage_key` to define the anchor point for the child proof + /// (commonly the location where the child root is stored in its parent trie). + /// - A `child_storage_types` to identify the kind of the child type and how its + /// `child definition` parameter is encoded. + /// - A `child_definition_parameter` which is the additional information required + /// to use the child trie. For instance defaults child tries requires this to + /// contain a collision free unique id. + /// + /// This function specifically returns the data for `key` in the child storage or `None` + /// if the key can not be found. + fn child_get( + &self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - self.child_storage(storage_key, key).map(|s| s.to_vec()) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -117,15 +135,21 @@ pub trait Storage { /// doesn't exist at all. /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. + /// + /// See `child_get` for common child api parameters. fn child_read( &self, child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { let storage_key = child_storage_key_or_panic(child_storage_key); - self.child_storage(storage_key, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -141,9 +165,20 @@ pub trait Storage { } /// Set `key` to `value` in the child storage denoted by `child_storage_key`. - fn child_set(&mut self, child_storage_key: &[u8], key: &[u8], value: &[u8]) { + /// + /// See `child_get` for common child api parameters. + fn child_set( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value: &[u8], + ) { let storage_key = child_storage_key_or_panic(child_storage_key); - self.set_child_storage(storage_key, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. @@ -152,15 +187,34 @@ pub trait Storage { } /// Clear the given child storage of the given `key` and its value. 
- fn child_clear(&mut self, child_storage_key: &[u8], key: &[u8]) { + /// + /// See `child_get` for common child api parameters. + fn child_clear( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) { let storage_key = child_storage_key_or_panic(child_storage_key); - self.clear_child_storage(storage_key, key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(storage_key, child_info, key); } /// Clear an entire child storage. - fn child_storage_kill(&mut self, child_storage_key: &[u8]) { + /// + /// See `child_get` for common child api parameters. + fn child_storage_kill( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + ) { let storage_key = child_storage_key_or_panic(child_storage_key); - self.kill_child_storage(storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(storage_key, child_info); } /// Check whether the given `key` exists in storage. @@ -169,9 +223,19 @@ pub trait Storage { } /// Check whether the given `key` exists in storage. - fn child_exists(&self, child_storage_key: &[u8], key: &[u8]) -> bool { + /// + /// See `child_get` for common child api parameters. + fn child_exists( + &self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> bool { let storage_key = child_storage_key_or_panic(child_storage_key); - self.exists_child_storage(storage_key, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(storage_key, child_info, key) } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -180,9 +244,19 @@ pub trait Storage { } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. - fn child_clear_prefix(&mut self, child_storage_key: &[u8], prefix: &[u8]) { + /// + /// See `child_get` for common child api parameters. + fn child_clear_prefix( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + prefix: &[u8], + ) { let storage_key = child_storage_key_or_panic(child_storage_key); - self.clear_child_prefix(storage_key, prefix); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(storage_key, child_info, prefix); } /// "Commit" all existing operations and compute the resulting storage root. @@ -199,7 +273,12 @@ pub trait Storage { /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. - fn child_root(&mut self, child_storage_key: &[u8]) -> Vec { + /// + /// See `child_get` for common child api parameters. + fn child_root( + &mut self, + child_storage_key: &[u8], + ) -> Vec { let storage_key = child_storage_key_or_panic(child_storage_key); self.child_storage_root(storage_key) } @@ -220,9 +299,17 @@ pub trait Storage { } /// Get the next key in storage after the given one in lexicographic order in child storage. 
- fn child_next_key(&mut self, child_storage_key: &[u8], key: &[u8]) -> Option> { + fn child_next_key( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - self.next_child_storage_key(storage_key, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(storage_key, child_info, key) } } @@ -817,6 +904,7 @@ mod tests { use super::*; use primitives::map; use sp_state_machine::BasicExternalities; + use primitives::storage::Storage; #[test] fn storage_works() { @@ -829,7 +917,10 @@ mod tests { storage::set(b"foo", &[1, 2, 3][..]); }); - t = BasicExternalities::new(map![b"foo".to_vec() => b"bar".to_vec()], map![]); + t = BasicExternalities::new(Storage { + top: map![b"foo".to_vec() => b"bar".to_vec()], + children: map![], + }); t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); @@ -839,10 +930,10 @@ mod tests { #[test] fn read_storage_works() { - let mut t = BasicExternalities::new( - map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], - map![], - ); + let mut t = BasicExternalities::new(Storage { + top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], + children: map![], + }); t.execute_with(|| { let mut v = [0u8; 4]; @@ -856,15 +947,15 @@ mod tests { #[test] fn clear_prefix_works() { - let mut t = BasicExternalities::new( - map![ + let mut t = BasicExternalities::new(Storage { + top: map![ b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], - map![], - ); + children: map![], + }); t.execute_with(|| { storage::clear_prefix(b":abc"); diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 426e29c407881..8bfd4834c5e77 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -38,7 +38,7 @@ pub use paste; pub use app_crypto; #[cfg(feature = "std")] -pub use primitives::storage::{StorageOverlay, ChildrenStorageOverlay}; +pub use primitives::storage::{Storage, StorageChild}; use sp_std::prelude::*; use sp_std::convert::TryFrom; @@ -121,15 +121,15 @@ use crate::traits::IdentifyAccount; #[cfg(feature = "std")] pub trait BuildStorage: Sized { /// Build the storage out of this builder. - fn build_storage(&self) -> Result<(StorageOverlay, ChildrenStorageOverlay), String> { - let mut storage = (Default::default(), Default::default()); + fn build_storage(&self) -> Result { + let mut storage = Default::default(); self.assimilate_storage(&mut storage)?; Ok(storage) } /// Assimilate the storage for this module into pre-existing overlays. fn assimilate_storage( &self, - storage: &mut (StorageOverlay, ChildrenStorageOverlay), + storage: &mut primitives::storage::Storage, ) -> Result<(), String>; } @@ -139,23 +139,26 @@ pub trait BuildModuleGenesisStorage: Sized { /// Create the module genesis storage into the given `storage` and `child_storage`. 
fn build_module_genesis_storage( &self, - storage: &mut (StorageOverlay, ChildrenStorageOverlay), + storage: &mut primitives::storage::Storage, ) -> Result<(), String>; } #[cfg(feature = "std")] -impl BuildStorage for (StorageOverlay, ChildrenStorageOverlay) { +impl BuildStorage for primitives::storage::Storage { fn assimilate_storage( &self, - storage: &mut (StorageOverlay, ChildrenStorageOverlay), + storage: &mut primitives::storage::Storage, )-> Result<(), String> { - storage.0.extend(self.0.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.1.iter() { + storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); + for (k, other_map) in self.children.iter() { let k = k.clone(); - if let Some(map) = storage.1.get_mut(&k) { - map.extend(other_map.iter().map(|(k, v)| (k.clone(), v.clone()))); + if let Some(map) = storage.children.get_mut(&k) { + map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); + if !map.child_info.try_update(other_map.child_info.as_ref()) { + return Err("Incompatible child info update".to_string()); + } } else { - storage.1.insert(k, other_map.clone()); + storage.children.insert(k, other_map.clone()); } } Ok(()) @@ -532,7 +535,7 @@ macro_rules! impl_outer_config { impl $crate::BuildStorage for $main { fn assimilate_storage( &self, - storage: &mut ($crate::StorageOverlay, $crate::ChildrenStorageOverlay), + storage: &mut $crate::Storage, ) -> std::result::Result<(), String> { $( if let Some(ref extra) = self.[< $snake $(_ $instance )? >] { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 78e08df6b9f74..8a6ba2573172a 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,6 +26,7 @@ use trie::{ trie_types::{TrieDBMut, Layout}, }; use codec::{Encode, Codec}; +use primitives::storage::{ChildInfo, OwnedChildInfo, Storage}; /// A state backend is used to read state data and can have changes committed /// to it. @@ -50,11 +51,21 @@ pub trait Backend: std::fmt::Debug { } /// Get keyed child storage or None if there is nothing associated. - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error>; + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. - fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result, Self::Error> { - self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v))) + fn child_storage_hash( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. @@ -63,8 +74,13 @@ pub trait Backend: std::fmt::Debug { } /// true if a key exists in child storage. - fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { - Ok(self.child_storage(storage_key, key)?.is_some()) + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + Ok(self.child_storage(storage_key, child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. 
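For context, here is a minimal sketch (assuming the crate paths used by this patch's own tests) of how a caller exercises the updated `Backend::child_storage`: build an `InMemory` backend from the new `Storage` type and pass the matching `ChildInfo` on every access. The storage key and unique id below are illustrative placeholders.

// Sketch only: reading a child value back through the reworked Backend trait.
use primitives::{Blake2Hasher, map};
use primitives::storage::{ChildInfo, Storage, StorageChild};
use state_machine::{Backend, backend::InMemory};

const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"example_unique_id");

fn read_child_through_backend() {
    let storage = Storage {
        top: map![],
        children: map![
            b":child_storage:default:example".to_vec() => StorageChild {
                data: map![ b"key".to_vec() => b"value".to_vec() ],
                child_info: CHILD_INFO.to_owned(),
            }
        ],
    };
    let backend = InMemory::<Blake2Hasher>::from(storage);
    // The ChildInfo must accompany the child storage key on every access.
    let value = backend
        .child_storage(b":child_storage:default:example", CHILD_INFO, b"key")
        .unwrap();
    assert_eq!(value, Some(b"value".to_vec()));
}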
@@ -74,11 +90,17 @@ pub trait Backend: std::fmt::Debug { fn next_child_storage_key( &self, storage_key: &[u8], + child_info: ChildInfo, key: &[u8] ) -> Result>, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F); + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ); /// Retrieve all entries keys which start with the given prefix and /// call `f` for each of those keys. @@ -93,7 +115,13 @@ pub trait Backend: std::fmt::Debug { /// Retrieve all child entries keys which start with the given prefix and /// call `f` for each of those keys. - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F); + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ); /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. @@ -106,7 +134,12 @@ pub trait Backend: std::fmt::Debug { /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. The second argument /// is true if child storage root equals default storage root. - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord; @@ -122,9 +155,14 @@ pub trait Backend: std::fmt::Debug { } /// Get all keys of child storage with given prefix - fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec> { + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { let mut all = Vec::new(); - self.for_child_keys_with_prefix(child_storage_key, prefix, |k| all.push(k.to_vec())); + self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); all } @@ -144,15 +182,15 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator, Option>)>, I2i: IntoIterator, Option>)>, - I2: IntoIterator, I2i)>, + I2: IntoIterator, I2i, OwnedChildInfo)>, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (storage_key, child_delta) in child_deltas { + for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_delta); + self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); @@ -177,28 +215,49 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).storage(key) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - (*self).child_storage(storage_key, key) + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + (*self).child_storage(storage_key, child_info, key) } - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - (*self).next_storage_key(key) + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + (*self).for_keys_in_child_storage(storage_key, child_info, f) } - fn next_child_storage_key(&self, storage_key: 
&[u8], key: &[u8]) -> Result>, Self::Error> { - (*self).next_child_storage_key(storage_key, key) + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + (*self).next_storage_key(key) } - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - (*self).for_keys_in_child_storage(storage_key, f) + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + (*self).next_child_storage_key(storage_key, child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { (*self).for_keys_with_prefix(prefix, f) } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - (*self).for_child_keys_with_prefix(storage_key, prefix, f) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -209,12 +268,17 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).storage_root(delta) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord, { - (*self).child_storage_root(storage_key, delta) + (*self).child_storage_root(storage_key, child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -238,7 +302,10 @@ impl Consolidate for () { } } -impl Consolidate for Vec<(Option>, Vec, Option>)> { +impl Consolidate for Vec<( + Option<(Vec, OwnedChildInfo)>, + Vec<(Vec, Option>)>, + )> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } @@ -268,7 +335,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap>, BTreeMap, Vec>>, + inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>>, // This field is only needed for returning reference in `as_trie_backend`. 
trie: Option, H>>, _hasher: PhantomData, @@ -310,19 +377,21 @@ impl InMemory where H::Out: Codec { /// Copy the state, with applied updates pub fn update(&self, changes: >::Transaction) -> Self { let mut inner = self.inner.clone(); - for (storage_key, key, val) in changes { - match val { - Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, - None => { inner.entry(storage_key).or_default().remove(&key); }, + for (child_info, key_values) in changes { + let entry = inner.entry(child_info).or_default(); + for (key, val) in key_values { + match val { + Some(v) => { entry.insert(key, v); }, + None => { entry.remove(&key); }, + } } } - inner.into() } } -impl From>, BTreeMap, Vec>>> for InMemory { - fn from(inner: HashMap>, BTreeMap, Vec>>) -> Self { +impl From, OwnedChildInfo)>, BTreeMap, Vec>>> for InMemory { + fn from(inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>>) -> Self { InMemory { inner: inner, trie: None, @@ -331,17 +400,11 @@ impl From>, BTreeMap, Vec>>> for I } } -impl From<( - BTreeMap, Vec>, - HashMap, BTreeMap, Vec>>, -)> for InMemory { - fn from(inners: ( - BTreeMap, Vec>, - HashMap, BTreeMap, Vec>>, - )) -> Self { - let mut inner: HashMap>, BTreeMap, Vec>> - = inners.1.into_iter().map(|(k, v)| (Some(k), v)).collect(); - inner.insert(None, inners.0); +impl From for InMemory { + fn from(inners: Storage) -> Self { + let mut inner: HashMap, OwnedChildInfo)>, BTreeMap, Vec>> + = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); + inner.insert(None, inners.top); InMemory { inner: inner, trie: None, @@ -362,12 +425,19 @@ impl From, Vec>> for InMemory { } } -impl From>, Vec, Option>)>> for InMemory { - fn from(inner: Vec<(Option>, Vec, Option>)>) -> Self { - let mut expanded: HashMap>, BTreeMap, Vec>> = HashMap::new(); - for (child_key, key, value) in inner { - if let Some(value) = value { - expanded.entry(child_key).or_default().insert(key, value); +impl From, OwnedChildInfo)>, Vec<(Vec, Option>)>)>> + for InMemory { + fn from( + inner: Vec<(Option<(Vec, OwnedChildInfo)>, Vec<(Vec, Option>)>)>, + ) -> Self { + let mut expanded: HashMap, OwnedChildInfo)>, BTreeMap, Vec>> + = HashMap::new(); + for (child_info, key_values) in inner { + let entry = expanded.entry(child_info).or_default(); + for (key, value) in key_values { + if let Some(value) = value { + entry.insert(key, value); + } } } expanded.into() @@ -376,22 +446,33 @@ impl From>, Vec, Option>)>> for InMem impl InMemory { /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..])) + pub fn child_storage_keys(&self) -> impl Iterator { + self.inner.iter().filter_map(|item| + item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) + ) } } impl Backend for InMemory where H::Out: Codec { type Error = Void; - type Transaction = Vec<(Option>, Vec, Option>)>; + type Transaction = Vec<( + Option<(Vec, OwnedChildInfo)>, + Vec<(Vec, Option>)>, + )>; type TrieBackendStorage = MemoryDB; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .and_then(|map| 
map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -406,9 +487,14 @@ impl Backend for InMemory where H::Out: Codec { Ok(next_key) } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&Some(storage_key.to_vec())) + let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) @@ -423,12 +509,24 @@ impl Backend for InMemory where H::Out: Codec { .for_each(|(k, v)| f(k, v))); } - fn for_keys_in_child_storage(&self, storage_key: &[u8], mut f: F) { - self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + mut f: F, + ) { + self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .map(|map| map.keys().for_each(|k| f(&k))); } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - self.inner.get(&Some(storage_key.to_vec())) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } @@ -448,19 +546,26 @@ impl Backend for InMemory where H::Out: Codec { .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); - let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); + let full_transaction = transaction.into_iter().collect(); - (root, full_transaction) + (root, vec![(None, full_transaction)]) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { let storage_key = storage_key.to_vec(); + let child_info = Some((storage_key.clone(), child_info.to_owned())); - let existing_pairs = self.inner.get(&Some(storage_key.clone())) + + let existing_pairs = self.inner.get(&child_info) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); @@ -473,11 +578,11 @@ impl Backend for InMemory where H::Out: Codec { .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); - let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); + let full_transaction = transaction.into_iter().collect(); let is_default = root == default_child_trie_root::>(&storage_key); - (root, is_default, full_transaction) + (root, is_default, vec![(child_info, full_transaction)]) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -494,8 +599,13 @@ impl Backend for InMemory where H::Out: Codec { .collect() } - fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec> { - self.inner.get(&Some(storage_key.to_vec())) + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { + self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() @@ -505,8 +615,10 @@ impl Backend for InMemory 
where H::Out: Codec { let mut mdb = MemoryDB::default(); let mut new_child_roots = Vec::new(); let mut root_map = None; - for (storage_key, map) in &self.inner { - if let Some(storage_key) = storage_key.as_ref() { + for (child_info, map) in &self.inner { + if let Some((storage_key, _child_info)) = child_info.as_ref() { + // no need to use child_info at this point because we use a MemoryDB for + // proof (with PrefixedMemoryDB it would be needed). let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; new_child_roots.push((storage_key.clone(), ch.as_ref().into())); } else { @@ -556,11 +668,16 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); + let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); let mut storage = storage.update( - vec![(Some(b"1".to_vec()), b"2".to_vec(), Some(b"3".to_vec()))] + vec![( + Some((b"1".to_vec(), child_info.clone())), + vec![(b"2".to_vec(), Some(b"3".to_vec()))] + )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", b"2").unwrap(), Some(b"3".to_vec())); + assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + Some(b"3".to_vec())); assert!(trie_backend.storage(b"1").unwrap().is_some()); } } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index deae7f2852592..62503bdee1f6c 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -17,7 +17,7 @@ //! Basic implementation for Externalities. use std::{ - collections::{HashMap, BTreeMap}, any::{TypeId, Any}, iter::FromIterator, mem, ops::Bound + collections::BTreeMap, any::{TypeId, Any}, iter::FromIterator, ops::Bound }; use crate::backend::{Backend, InMemory}; use hash_db::Hasher; @@ -25,54 +25,47 @@ use trie::{TrieConfiguration, default_child_trie_root}; use trie::trie_types::Layout; use primitives::{ storage::{ - well_known_keys::is_child_storage_key, ChildStorageKey, StorageOverlay, - ChildrenStorageOverlay + well_known_keys::is_child_storage_key, ChildStorageKey, Storage, + ChildInfo, StorageChild, }, traits::Externalities, Blake2Hasher, }; use log::warn; use codec::Encode; -/// Simple HashMap-based Externalities impl. +/// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - top: StorageOverlay, - children: ChildrenStorageOverlay, + inner: Storage, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` - pub fn new(top: StorageOverlay, children: ChildrenStorageOverlay) -> Self { - BasicExternalities { - top, - children, - } + pub fn new(inner: Storage) -> Self { + BasicExternalities { inner } } /// Insert key/value pub fn insert(&mut self, k: Vec, v: Vec) -> Option> { - self.top.insert(k, v) + self.inner.top.insert(k, v) } /// Consume self and returns inner storages - pub fn into_storages(self) -> ( - BTreeMap, Vec>, - HashMap, BTreeMap, Vec>>, - ) { - (self.top, self.children) + pub fn into_storages(self) -> Storage { + self.inner } /// Execute the given closure `f` with the externalities set and initialized with `storage`. /// /// Returns the result of the closure and updates `storage` with all changes. 
pub fn execute_with_storage( - storage: &mut (StorageOverlay, ChildrenStorageOverlay), + storage: &mut primitives::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let mut ext = Self { - top: mem::replace(&mut storage.0, BTreeMap::default()), - children: mem::replace(&mut storage.1, HashMap::default()), - }; + let mut ext = Self { inner: Storage { + top: std::mem::replace(&mut storage.top, Default::default()), + children: std::mem::replace(&mut storage.children, Default::default()), + }}; let r = ext.execute_with(f); @@ -91,34 +84,35 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.top.eq(&other.top) && self.children.eq(&other.children) + self.inner.top.eq(&other.inner.top) + && self.inner.children.eq(&other.inner.children) } } impl FromIterator<(Vec, Vec)> for BasicExternalities { fn from_iter, Vec)>>(iter: I) -> Self { let mut t = Self::default(); - t.top.extend(iter); + t.inner.top.extend(iter); t } } impl Default for BasicExternalities { - fn default() -> Self { Self::new(Default::default(), Default::default()) } + fn default() -> Self { Self::new(Default::default()) } } impl From, Vec>> for BasicExternalities { fn from(hashmap: BTreeMap, Vec>) -> Self { - BasicExternalities { + BasicExternalities { inner: Storage { top: hashmap, children: Default::default(), - } + }} } } impl Externalities for BasicExternalities { fn storage(&self, key: &[u8]) -> Option> { - self.top.get(key).cloned() + self.inner.top.get(key).cloned() } fn storage_hash(&self, key: &[u8]) -> Option> { @@ -133,35 +127,56 @@ impl Externalities for BasicExternalities { self.storage_hash(key) } - fn child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { - self.children.get(storage_key.as_ref()).and_then(|child| child.get(key)).cloned() + fn child_storage( + &self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + key: &[u8], + ) -> Option> { + self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() } - fn child_storage_hash(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { - self.child_storage(storage_key, key).map(|v| Blake2Hasher::hash(&v).encode()) + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { + self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn original_child_storage_hash( &self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: &[u8], ) -> Option> { - self.child_storage_hash(storage_key, key) + self.child_storage_hash(storage_key, child_info, key) } - fn original_child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { - Externalities::child_storage(self, storage_key, key) + fn original_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { + Externalities::child_storage(self, storage_key, child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Option> { let range = (Bound::Excluded(key), Bound::Unbounded); - self.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() + self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() } - fn next_child_storage_key(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + key: &[u8], + ) -> Option> { let range = (Bound::Excluded(key), Bound::Unbounded); - 
self.children.get(storage_key.as_ref()) - .and_then(|child| child.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) + self.inner.children.get(storage_key.as_ref()) + .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } fn place_storage(&mut self, key: Vec, maybe_value: Option>) { @@ -171,27 +186,36 @@ impl Externalities for BasicExternalities { } match maybe_value { - Some(value) => { self.top.insert(key, value); } - None => { self.top.remove(&key); } + Some(value) => { self.inner.top.insert(key, value); } + None => { self.inner.top.remove(&key); } } } fn place_child_storage( &mut self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: Vec, value: Option>, ) { - let child_map = self.children.entry(storage_key.into_owned()).or_default(); + let child_map = self.inner.children.entry(storage_key.into_owned()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }); if let Some(value) = value { - child_map.insert(key, value); + child_map.data.insert(key, value); } else { - child_map.remove(&key); + child_map.data.remove(&key); } } - fn kill_child_storage(&mut self, storage_key: ChildStorageKey) { - self.children.remove(storage_key.as_ref()); + fn kill_child_storage( + &mut self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + ) { + self.inner.children.remove(storage_key.as_ref()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -203,27 +227,32 @@ impl Externalities for BasicExternalities { return; } - let to_remove = self.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + let to_remove = self.inner.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() .collect::>(); for key in to_remove { - self.top.remove(&key); + self.inner.top.remove(&key); } } - fn clear_child_prefix(&mut self, storage_key: ChildStorageKey, prefix: &[u8]) { - if let Some(child) = self.children.get_mut(storage_key.as_ref()) { - let to_remove = child.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + prefix: &[u8], + ) { + if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { + let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() .collect::>(); for key in to_remove { - child.remove(&key); + child.data.remove(&key); } } } @@ -231,8 +260,8 @@ impl Externalities for BasicExternalities { fn chain_id(&self) -> u64 { 42 } fn storage_root(&mut self) -> Vec { - let mut top = self.top.clone(); - let keys: Vec<_> = self.children.keys().map(|k| k.to_vec()).collect(); + let mut top = self.inner.top.clone(); + let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. @@ -243,20 +272,24 @@ impl Externalities for BasicExternalities { .expect("Map only feed by valid keys; qed"), ); if &empty_hash[..] == &child_root[..] 
{ - top.remove(&storage_key); + top.remove(storage_key.as_slice()); } else { top.insert(storage_key, child_root); } } - Layout::::trie_root(self.top.clone()).as_ref().into() + Layout::::trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec { - if let Some(child) = self.children.get(storage_key.as_ref()) { - let delta = child.clone().into_iter().map(|(k, v)| (k, Some(v))); + fn child_storage_root( + &mut self, + storage_key: ChildStorageKey, + ) -> Vec { + if let Some(child) = self.inner.children.get(storage_key.as_ref()) { + let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); - InMemory::::default().child_storage_root(storage_key.as_ref(), delta).0 + InMemory::::default() + .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 } else { default_child_trie_root::>(storage_key.as_ref()) }.encode() @@ -278,9 +311,12 @@ impl externalities::ExtensionStore for BasicExternalities { mod tests { use super::*; use primitives::map; + use primitives::storage::{Storage, StorageChild}; use primitives::storage::well_known_keys::CODE; use hex_literal::hex; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -306,37 +342,35 @@ mod tests { fn children_works() { let child_storage = b":child_storage:default:test".to_vec(); - let mut ext = BasicExternalities::new( - Default::default(), - map![ - child_storage.clone() => map![ - b"doe".to_vec() => b"reindeer".to_vec() - ] + let mut ext = BasicExternalities::new(Storage { + top: Default::default(), + children: map![ + child_storage.clone() => StorageChild { + data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], + child_info: CHILD_INFO_1.to_owned(), + } ] - ); + }); let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - assert_eq!(ext.child_storage(child(), b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), b"dog"); - assert_eq!(ext.child_storage(child(), b"dog"), None); + ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); - ext.kill_child_storage(child()); - assert_eq!(ext.child_storage(child(), b"doe"), None); + ext.kill_child_storage(child(), CHILD_INFO_1); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); } #[test] fn basic_externalities_is_empty() { // Make sure no values are set by default in `BasicExternalities`. 
- let (storage, child_storage) = BasicExternalities::new( - Default::default(), - Default::default(), - ).into_storages(); - assert!(storage.is_empty()); - assert!(child_storage.is_empty()); + let storage = BasicExternalities::new(Default::default()).into_storages(); + assert!(storage.top.is_empty()); + assert!(storage.children.is_empty()); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 7e082ad83276b..4bb7b6c0aa3ae 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -133,10 +133,15 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( H: Hasher, Number: BlockNumber, { - let (committed, prospective) = if let Some(sk) = storage_key.as_ref() { - (changes.committed.children.get(sk), changes.prospective.children.get(sk)) + let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { + let child_info = changes.child_info(sk).cloned(); + ( + changes.committed.children.get(sk).map(|c| &c.0), + changes.prospective.children.get(sk).map(|c| &c.0), + child_info, + ) } else { - (Some(&changes.committed.top), Some(&changes.prospective.top)) + (Some(&changes.committed.top), Some(&changes.prospective.top), None) }; committed.iter().flat_map(|c| c.iter()) .chain(prospective.iter().flat_map(|c| c.iter())) @@ -148,8 +153,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND are not in storage at the beginning of operation if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_child_storage(sk, k).map_err(|e| format!("{}", e))? { - return Ok(map); + if let Some(child_info) = child_info.as_ref() { + if !backend.exists_child_storage(sk, child_info.as_ref(), k) + .map_err(|e| format!("{}", e))? 
{ + return Ok(map); + } } } } else { @@ -332,12 +340,16 @@ mod test { use codec::Encode; use primitives::Blake2Hasher; use primitives::storage::well_known_keys::{EXTRINSIC_INDEX}; + use primitives::storage::ChildInfo; use crate::backend::InMemory; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + fn prepare_for_build(zero: u64) -> ( InMemory, InMemoryStorage, @@ -416,18 +428,18 @@ mod test { }), ].into_iter().collect(), children: vec![ - (child_trie_key1.clone(), vec![ + (child_trie_key1.clone(), (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect()), - (child_trie_key2, vec![ + ].into_iter().collect(), CHILD_INFO_1.to_owned())), + (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect()), + ].into_iter().collect(), CHILD_INFO_2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -445,12 +457,12 @@ mod test { }), ].into_iter().collect(), children: vec![ - (child_trie_key1, vec![ + (child_trie_key1, (vec![ (vec![100], OverlayedValue { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect()), + ].into_iter().collect(), CHILD_INFO_1.to_owned())), ].into_iter().collect(), }, changes_trie_config: Some(config.clone()), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3ac8c19048451..9e7baea45eba3 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -25,7 +25,7 @@ use crate::{ use hash_db::Hasher; use primitives::{ - storage::{ChildStorageKey, well_known_keys::is_child_storage_key}, + storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, hash::H256, }; use trie::{trie_types::Layout, MemoryDB, default_child_trie_root}; @@ -229,13 +229,19 @@ where result.map(|r| r.encode()) } - fn child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { + fn child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { let _guard = panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| - self.backend.child_storage(storage_key.as_ref(), key).expect(EXT_NOT_ALLOWED_TO_FAIL) + self.backend.child_storage(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", @@ -248,7 +254,12 @@ where result } - fn child_storage_hash(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + key: &[u8], + ) -> Option> { let _guard = panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) @@ -267,10 +278,15 @@ where result.map(|r| r.encode()) } - fn original_child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { + fn 
original_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { let _guard = panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage(storage_key.as_ref(), key) + .child_storage(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{:04x}: ChildOriginal({}) {}={:?}", @@ -285,11 +301,12 @@ where fn original_child_storage_hash( &self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: &[u8], ) -> Option> { let _guard = panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage_hash(storage_key.as_ref(), key) + .child_storage_hash(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{}: ChildHashOriginal({}) {}={:?}", @@ -317,13 +334,18 @@ where } - fn exists_child_storage(&self, storage_key: ChildStorageKey, key: &[u8]) -> bool { + fn exists_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> bool { let _guard = panic_handler::AbortGuard::force_abort(); let result = match self.overlay.child_storage(storage_key.as_ref(), key) { Some(x) => x.is_some(), _ => self.backend - .exists_child_storage(storage_key.as_ref(), key) + .exists_child_storage(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; @@ -351,8 +373,14 @@ where } } - fn next_child_storage_key(&self, storage_key: ChildStorageKey, key: &[u8]) -> Option> { - let next_backend_key = self.backend.next_child_storage_key(storage_key.as_ref(), key) + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { + let next_backend_key = self.backend + .next_child_storage_key(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); let next_overlay_key_change = self.overlay.next_child_storage_key_change( storage_key.as_ref(), @@ -365,7 +393,11 @@ where (_, Some(overlay_key)) => if overlay_key.1.value.is_some() { Some(overlay_key.0.to_vec()) } else { - self.next_child_storage_key(storage_key, &overlay_key.0[..]) + self.next_child_storage_key( + storage_key, + child_info, + &overlay_key.0[..], + ) }, } } @@ -389,6 +421,7 @@ where fn place_child_storage( &mut self, storage_key: ChildStorageKey, + child_info: ChildInfo, key: Vec, value: Option>, ) { @@ -401,10 +434,14 @@ where let _guard = panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key.into_owned(), key, value); + self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value); } - fn kill_child_storage(&mut self, storage_key: ChildStorageKey) { + fn kill_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -412,9 +449,9 @@ where let _guard = panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_storage(storage_key.as_ref()); - self.backend.for_keys_in_child_storage(storage_key.as_ref(), |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), key.to_vec(), None); + self.overlay.clear_child_storage(storage_key.as_ref(), child_info); + self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { + self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); }); } @@ -436,7 +473,12 @@ where }); } - fn 
clear_child_prefix(&mut self, storage_key: ChildStorageKey, prefix: &[u8]) { + fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + prefix: &[u8], + ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -445,9 +487,9 @@ where let _guard = panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_prefix(storage_key.as_ref(), prefix); - self.backend.for_child_keys_with_prefix(storage_key.as_ref(), prefix, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), key.to_vec(), None); + self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix); + self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { + self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); }); } @@ -465,16 +507,23 @@ where return root.encode(); } - let child_storage_keys = - self.overlay.prospective.children.keys() + let child_storage_keys = self.overlay.prospective.children.keys() .chain(self.overlay.committed.children.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| - (storage_key.clone(), self.overlay.committed.children.get(storage_key) - .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) - .chain(self.overlay.prospective.children.get(storage_key) + ( + storage_key.clone(), + self.overlay.committed.children.get(storage_key) .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.value.clone())))))); + .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) + .chain( + self.overlay.prospective.children.get(storage_key) + .into_iter() + .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) + ), + self.overlay.child_info(storage_key).cloned() + .expect("child info initialized in either committed or prospective"), + ) + ); // compute and memoize @@ -490,7 +539,10 @@ where root.encode() } - fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec { + fn child_storage_root( + &mut self, + storage_key: ChildStorageKey, + ) -> Vec { let _guard = panic_handler::AbortGuard::force_abort(); if self.storage_transaction.is_some() { let root = self @@ -508,29 +560,53 @@ where } else { let storage_key = storage_key.as_ref(); - let (root, is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) - .into_iter() - .flat_map(|map| map.clone().into_iter().map(|(k, v)| (k, v.value))) - .chain(self.overlay.prospective.children.get(storage_key) - .into_iter() - .flat_map(|map| map.clone().into_iter().map(|(k, v)| (k, v.value)))); - - self.backend.child_storage_root(storage_key, delta) - }; - - if is_empty { - self.overlay.set_storage(storage_key.into(), None); + if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + let (root, is_empty, _) = { + let delta = self.overlay.committed.children.get(storage_key) + .into_iter() + .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) + .chain( + self.overlay.prospective.children.get(storage_key) + .into_iter() + .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) + ); + + self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + }; + + let root = root.encode(); + // We store update in the overlay in order to be able to use 'self.storage_transaction' + // cache. This is brittle as it rely on Ext only querying the trie backend for + // storage root. 
+ // A better design would be to manage 'child_storage_transaction' in a + // similar way as 'storage_transaction' but for each child trie. + if is_empty { + self.overlay.set_storage(storage_key.into(), None); + } else { + self.overlay.set_storage(storage_key.into(), Some(root.clone())); + } + + trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&root.as_ref()), + ); + root } else { - self.overlay.set_storage(storage_key.into(), Some(root.encode())); + // empty overlay + let root = self + .storage(storage_key.as_ref()) + .and_then(|k| Decode::decode(&mut &k[..]).ok()) + .unwrap_or( + default_child_trie_root::>(storage_key.as_ref()) + ); + trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&root.as_ref()), + ); + root.encode() } - - trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), - ); - root.encode() } } @@ -579,13 +655,14 @@ mod tests { use super::*; use hex_literal::hex; use codec::Encode; - use primitives::{Blake2Hasher, storage::well_known_keys::EXTRINSIC_INDEX}; + use primitives::{Blake2Hasher, storage::well_known_keys::EXTRINSIC_INDEX, map}; use crate::{ changes_trie::{ Configuration as ChangesTrieConfiguration, InMemoryStorage as InMemoryChangesTrieStorage, }, backend::InMemory, overlayed_changes::OverlayedValue, }; + use primitives::storage::{Storage, StorageChild}; type TestBackend = InMemory; type TestChangesTrieStorage = InMemoryChangesTrieStorage; @@ -659,11 +736,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let backend = vec![ - (None, vec![10], Some(vec![10])), - (None, vec![20], Some(vec![20])), - (None, vec![40], Some(vec![40])), - ].into(); + let backend = Storage { + top: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + children: map![] + }.into(); let ext = TestExt::new(&mut overlay, &backend, None, None); @@ -689,35 +769,50 @@ mod tests { #[test] fn next_child_storage_key_works() { - let child = || ChildStorageKey::from_slice(b":child_storage:default:Child1").unwrap(); + const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; + + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + + + let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), vec![30], Some(vec![31])); - let backend = vec![ - (Some(child().as_ref().to_vec()), vec![10], Some(vec![10])), - (Some(child().as_ref().to_vec()), vec![20], Some(vec![20])), - (Some(child().as_ref().to_vec()), vec![40], Some(vec![40])), - ].into(); + overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + let backend = Storage { + top: map![], + children: map![ + child().as_ref().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + child_info: CHILD_INFO_1.to_owned(), + } + ], + }.into(); + let ext = TestExt::new(&mut overlay, &backend, None, None); // next_backend < next_overlay - 
assert_eq!(ext.next_child_storage_key(child(), &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), vec![50], Some(vec![50])); + overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 594e539b25fe9..e8043829e7881 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,7 +23,7 @@ use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use primitives::{ - storage::well_known_keys, NativeOrEncoded, NeverNativeValue, + storage::{well_known_keys, ChildInfo}, NativeOrEncoded, NeverNativeValue, traits::CodeExecutor, hexdisplay::HexDisplay, hash::H256, }; use overlayed_changes::OverlayedChangeSet; @@ -562,6 +562,7 @@ where pub fn prove_child_read( mut backend: B, storage_key: &[u8], + child_info: ChildInfo, keys: I, ) -> Result> where @@ -573,7 +574,7 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, keys) + prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) } /// Generate storage read proof on pre-created trie backend. @@ -601,6 +602,7 @@ where pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, storage_key: &[u8], + child_info: ChildInfo, keys: I, ) -> Result> where @@ -613,7 +615,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, key.as_ref()) + .child_storage(storage_key, child_info.clone(), key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -688,7 +690,9 @@ where H: Hasher, H::Out: Ord + Codec, { - proving_backend.child_storage(storage_key, key).map_err(|e| Box::new(e) as Box) + // Not a prefixed memory db, using empty unique id and include root resolution. + proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + .map_err(|e| Box::new(e) as Box) } /// Sets overlayed changes' changes trie configuration. 
Returns error if configuration @@ -750,6 +754,8 @@ mod tests { fallback_succeeds: bool, } + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -982,22 +988,26 @@ mod tests { ext.set_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap() + ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, b"abc" ), None @@ -1033,6 +1043,7 @@ mod tests { let remote_proof = prove_child_read( remote_backend, b":child_storage:default:sub1", + CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( @@ -1081,6 +1092,40 @@ mod tests { assert!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).is_err()); } + #[test] + fn child_storage_uuid() { + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + use crate::trie_backend::tests::test_trie; + let mut overlay = OverlayedChanges::default(); + + let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); + let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); + let mut transaction = { + let backend = test_trie(); + let changes_trie_storage = InMemoryChangesTrieStorage::::new(); + let mut ext = Ext::new( + &mut overlay, + &backend, + Some(&changes_trie_storage), + None, + ); + ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.storage_root(); + (ext.transaction().0).0 + }; + let mut duplicate = false; + for (k, (value, rc)) in transaction.drain().iter() { + // look for a key inserted twice: transaction rc is 2 + if *rc == 2 { + duplicate = true; + println!("test duplicate for {:?} {:?}", k, value); + } + } + assert!(!duplicate); + } + #[test] fn cannot_change_changes_trie_config_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index d61d14961da36..0714120ae3a00 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -21,7 +21,7 @@ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::Decode; use crate::changes_trie::{NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig}; -use primitives::storage::well_known_keys::EXTRINSIC_INDEX; +use primitives::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; use std::{mem, ops}; /// The overlayed changes to state to be queried on top of the backend. @@ -57,7 +57,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, OverlayedValue>, /// Child storage changes. 
- pub children: HashMap, BTreeMap, OverlayedValue>>, + pub children: HashMap, (BTreeMap, OverlayedValue>, OwnedChildInfo)>, } #[cfg(test)] @@ -119,13 +119,13 @@ impl OverlayedChanges { /// value has been set. pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { if let Some(map) = self.prospective.children.get(storage_key) { - if let Some(val) = map.get(key) { + if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } if let Some(map) = self.committed.children.get(storage_key) { - if let Some(val) = map.get(key) { + if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } @@ -150,10 +150,20 @@ impl OverlayedChanges { /// Inserts the given key-value pair into the prospective child change set. /// /// `None` can be used to delete a value specified by the given key. - pub(crate) fn set_child_storage(&mut self, storage_key: Vec, key: Vec, val: Option>) { + pub(crate) fn set_child_storage( + &mut self, + storage_key: Vec, + child_info: ChildInfo, + key: Vec, + val: Option>, + ) { let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key).or_default(); - let entry = map_entry.entry(key).or_default(); + let map_entry = self.prospective.children.entry(storage_key) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = map_entry.1.try_update(child_info); + debug_assert!(updatable); + + let entry = map_entry.0.entry(key).or_default(); entry.value = val; if let Some(extrinsic) = extrinsic_index { @@ -168,11 +178,18 @@ impl OverlayedChanges { /// change set, and still can be reverted by [`discard_prospective`]. /// /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8]) { + pub(crate) fn clear_child_storage( + &mut self, + storage_key: &[u8], + child_info: ChildInfo, + ) { let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()).or_default(); + let map_entry = self.prospective.children.entry(storage_key.to_vec()) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = map_entry.1.try_update(child_info); + debug_assert!(updatable); - map_entry.values_mut().for_each(|e| { + map_entry.0.values_mut().for_each(|e| { if let Some(extrinsic) = extrinsic_index { e.extrinsics.get_or_insert_with(Default::default) .insert(extrinsic); @@ -181,10 +198,10 @@ impl OverlayedChanges { e.value = None; }); - if let Some(committed_map) = self.committed.children.get(storage_key) { + if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { for (key, value) in committed_map.iter() { - if !map_entry.contains_key(key) { - map_entry.insert(key.clone(), OverlayedValue { + if !map_entry.0.contains_key(key) { + map_entry.0.insert(key.clone(), OverlayedValue { value: None, extrinsics: extrinsic_index.map(|i| { let mut e = value.extrinsics.clone() @@ -235,11 +252,19 @@ impl OverlayedChanges { } } - pub(crate) fn clear_child_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) { + pub(crate) fn clear_child_prefix( + &mut self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) { let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()).or_default(); + let map_entry = self.prospective.children.entry(storage_key.to_vec()) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = 
map_entry.1.try_update(child_info); + debug_assert!(updatable); - for (key, entry) in map_entry.iter_mut() { + for (key, entry) in map_entry.0.iter_mut() { if key.starts_with(prefix) { entry.value = None; @@ -250,12 +275,12 @@ impl OverlayedChanges { } } - if let Some(child_committed) = self.committed.children.get(storage_key) { + if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { // Then do the same with keys from commited changes. // NOTE that we are making changes in the prospective change set. for key in child_committed.keys() { if key.starts_with(prefix) { - let entry = map_entry.entry(key.clone()).or_default(); + let entry = map_entry.0.entry(key.clone()).or_default(); entry.value = None; if let Some(extrinsic) = extrinsic_index { @@ -287,10 +312,12 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - for (storage_key, map) in self.prospective.children.drain() { - let map_dest = self.committed.children.entry(storage_key).or_default(); + for (storage_key, (map, child_info)) in self.prospective.children.drain() { + let child_content = self.committed.children.entry(storage_key) + .or_insert_with(|| (Default::default(), child_info)); + // No update to child info at this point (will be needed for deletion). for (key, val) in map.into_iter() { - let entry = map_dest.entry(key).or_default(); + let entry = child_content.0.entry(key).or_default(); entry.value = val.value; if let Some(prospective_extrinsics) = val.extrinsics { @@ -308,12 +335,12 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. pub fn into_committed(self) -> ( impl Iterator, Option>)>, - impl Iterator, impl Iterator, Option>)>)>, + impl Iterator, (impl Iterator, Option>)>, OwnedChildInfo))>, ){ assert!(self.prospective.is_empty()); (self.committed.top.into_iter().map(|(k, v)| (k, v.value)), self.committed.children.into_iter() - .map(|(sk, v)| (sk, v.into_iter().map(|(k, v)| (k, v.value))))) + .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci)))) } /// Inserts storage entry responsible for current extrinsic index. @@ -342,6 +369,18 @@ impl OverlayedChanges { } } + /// Get child info for a storage key. + /// Take the latest value so prospective first. + pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + if let Some((_, ci)) = self.prospective.children.get(storage_key) { + return Some(&ci); + } + if let Some((_, ci)) = self.committed.children.get(storage_key) { + return Some(&ci); + } + None + } + /// Returns the next (in lexicographic order) storage key in the overlayed alongside its value. /// If no value is next then `None` is returned. 
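As a sketch of the bookkeeping above: each child-trie mutation now records an `OwnedChildInfo` next to its change map, and the recorded info can be read back later (prospective entries first, then committed); the helpers below are crate-internal, names as in this patch:

use primitives::storage::ChildInfo;

const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_example");

fn overlay_records_child_info() {
    let mut overlay = OverlayedChanges::default();
    let storage_key = b":child_storage:default:example".to_vec();
    // `set_child_storage` is `pub(crate)`: the child info is stored on first
    // use and `try_update` debug-asserts that later calls stay consistent.
    overlay.set_child_storage(storage_key.clone(), CHILD_INFO, b"k".to_vec(), Some(b"v".to_vec()));
    overlay.commit_prospective();
    // The recorded `OwnedChildInfo` is available again, e.g. for child roots.
    assert!(overlay.child_info(&storage_key).is_some());
}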
pub fn next_storage_key_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { @@ -377,10 +416,10 @@ impl OverlayedChanges { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); let next_prospective_key = self.prospective.children.get(storage_key) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); let next_committed_key = self.committed.children.get(storage_key) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); match (next_committed_key, next_prospective_key) { // Committed is strictly less than prospective @@ -636,13 +675,14 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); + let child_info = ChildInfo::new_default(b"uniqueid"); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), vec![40], Some(vec![40])); + overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), vec![30], None); + overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child.clone(), child_info, vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); @@ -664,7 +704,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), vec![50], Some(vec![50])); + overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 2a9146dfced4a..3809ac8446405 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -32,6 +32,7 @@ use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStor use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; +use primitives::storage::ChildInfo; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -143,6 +144,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub fn child_storage( &mut self, storage_key: &[u8], + child_info: ChildInfo, key: &[u8] ) -> Result>, String> { let root = self.storage(storage_key)? 
@@ -159,6 +161,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> read_child_trie_value_with::, _, _>( storage_key, + child_info.keyspace(), &eph, &root.as_ref(), key, @@ -268,20 +271,35 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.storage(key) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.0.child_storage(storage_key, key) + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.child_storage(storage_key, child_info, key) } - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.next_storage_key(key) + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.0.for_keys_in_child_storage(storage_key, child_info, f) } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.0.next_child_storage_key(storage_key, key) + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.0.next_storage_key(key) } - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.0.for_keys_in_child_storage(storage_key, f) + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.next_child_storage_key(storage_key, child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -292,8 +310,14 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.for_key_values_with_prefix(prefix, f) } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - self.0.for_child_keys_with_prefix(storage_key, prefix, f) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -304,8 +328,13 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.keys(prefix) } - fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec> { - self.0.child_keys(child_storage_key, prefix) + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { + self.0.child_keys(storage_key, child_info, prefix) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -314,12 +343,17 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.storage_root(delta) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, delta) + self.0.child_storage_root(storage_key, child_info, delta) } } @@ -363,6 +397,9 @@ mod tests { use primitives::{Blake2Hasher, storage::ChildStorageKey}; use crate::proving_backend::create_proof_check_backend; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { @@ -408,9 +445,9 @@ mod tests { #[test] fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))).collect::>(); + let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = 
InMemory::::default(); - let mut in_memory = in_memory.update(contents); + let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -434,26 +471,29 @@ mod tests { let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = subtrie1.into_owned(); let own2 = subtrie2.into_owned(); - let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))) - .chain((28..65).map(|i| (Some(own1.clone()), vec![i], Some(vec![i])))) - .chain((10..15).map(|i| (Some(own2.clone()), vec![i], Some(vec![i])))) - .collect::>(); + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + ]; let in_memory = InMemory::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.to_vec(), Vec::new())) + in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], &[i]).unwrap().unwrap(), + in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], &[i]).unwrap().unwrap(), + in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -481,7 +521,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -489,7 +529,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], &[64]).unwrap().unwrap(), + proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 85c9260fba3db..a37cd8caef0fb 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -16,7 +16,7 @@ //! Test implementation for Externalities. -use std::{collections::{HashMap, BTreeMap}, any::{Any, TypeId}}; +use std::any::{Any, TypeId}; use hash_db::Hasher; use crate::{ backend::{InMemory, Backend}, OverlayedChanges, @@ -28,15 +28,14 @@ use crate::{ }; use primitives::{ storage::{ - well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key} + well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, + Storage, }, hash::H256, Blake2Hasher, }; use codec::Encode; use externalities::{Extensions, Extension}; -type StorageTuple = (BTreeMap, Vec>, HashMap, BTreeMap, Vec>>); - /// Simple HashMap-based Externalities impl. pub struct TestExternalities=Blake2Hasher, N: ChangesTrieBlockNumber=u64> { overlay: OverlayedChanges, @@ -57,42 +56,37 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { } /// Create a new instance of `TestExternalities` with storage. 
- pub fn new(storage: StorageTuple) -> Self { + pub fn new(storage: Storage) -> Self { Self::new_with_code(&[], storage) } /// Create a new instance of `TestExternalities` with code and storage. - pub fn new_with_code(code: &[u8], mut storage: StorageTuple) -> Self { + pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { let mut overlay = OverlayedChanges::default(); - assert!(storage.0.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.1.keys().all(|key| is_child_storage_key(key))); + assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); + assert!(storage.children.keys().all(|key| is_child_storage_key(key))); super::set_changes_trie_config( &mut overlay, - storage.0.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), + storage.top.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), false, ).expect("changes trie configuration is correct in test env; qed"); - storage.0.insert(HEAP_PAGES.to_vec(), 8u64.encode()); - storage.0.insert(CODE.to_vec(), code.to_vec()); - - let backend: HashMap<_, _> = storage.1.into_iter() - .map(|(keyspace, map)| (Some(keyspace), map)) - .chain(Some((None, storage.0)).into_iter()) - .collect(); + storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); + storage.top.insert(CODE.to_vec(), code.to_vec()); TestExternalities { overlay, changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: backend.into(), + backend: storage.into(), extensions: Default::default(), } } /// Insert key/value into backend pub fn insert(&mut self, k: Vec, v: Vec) { - self.backend = self.backend.update(vec![(None, k, Some(v))]); + self.backend = self.backend.update(vec![(None, vec![(k, Some(v))])]); } /// Registers the given extension for this instance. @@ -107,19 +101,23 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { /// Return a new backend with all pending value. pub fn commit_all(&self) -> InMemory { - let top = self.overlay.committed.top.clone().into_iter() + let top: Vec<_> = self.overlay.committed.top.clone().into_iter() .chain(self.overlay.prospective.top.clone().into_iter()) - .map(|(k, v)| (None, k, v.value)); + .map(|(k, v)| (k, v.value)).collect(); + let mut transaction = vec![(None, top)]; - let children = self.overlay.committed.children.clone().into_iter() + self.overlay.committed.children.clone().into_iter() .chain(self.overlay.prospective.children.clone().into_iter()) - .flat_map(|(keyspace, map)| { - map.into_iter() - .map(|(k, v)| (Some(keyspace.clone()), k, v.value)) - .collect::>() + .for_each(|(keyspace, (map, child_info))| { + transaction.push(( + Some((keyspace, child_info)), + map.into_iter() + .map(|(k, v)| (k, v.value)) + .collect::>(), + )) }); - self.backend.update(top.chain(children).collect()) + self.backend.update(transaction) } /// Execute the given closure while `self` is set as externalities. 
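A brief usage sketch of the reworked test helper, assuming the `map!` macro and `Storage` struct used elsewhere in this patch:

use primitives::{map, storage::Storage, Blake2Hasher};

// `TestExternalities` as defined in this module (state-machine testing).
fn fresh_test_externalities() -> TestExternalities<Blake2Hasher> {
    // The old `(StorageOverlay, ChildrenStorageOverlay)` tuple becomes a
    // `Storage` struct; top-level keys must not be child-storage keys and
    // child entries must use `:child_storage:`-prefixed keys.
    TestExternalities::new(Storage {
        top: map![ b"key".to_vec() => b"value".to_vec() ],
        children: map![],
    })
}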
@@ -149,8 +147,8 @@ impl, N: ChangesTrieBlockNumber> Default for TestExternaliti fn default() -> Self { Self::new(Default::default()) } } -impl, N: ChangesTrieBlockNumber> From for TestExternalities { - fn from(storage: StorageTuple) -> Self { +impl, N: ChangesTrieBlockNumber> From for TestExternalities { + fn from(storage: Storage) -> Self { Self::new(storage) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index f24c47c891bf9..5286f0e5053a8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -22,6 +22,7 @@ use trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root use trie::trie_types::{TrieDB, TrieError, Layout}; use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}; use crate::Backend; +use primitives::storage::ChildInfo; use codec::{Codec, Decode}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. @@ -75,16 +76,26 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.storage(key) } - fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.essence.child_storage(storage_key, key) + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.essence.child_storage(storage_key, child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { self.essence.next_storage_key(key) } - fn next_child_storage_key(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { - self.essence.next_child_storage_key(storage_key, key) + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.essence.next_child_storage_key(storage_key, child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -95,12 +106,23 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.for_key_values_with_prefix(prefix, f) } - fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { - self.essence.for_keys_in_child_storage(storage_key, f) + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.essence.for_keys_in_child_storage(storage_key, child_info, f) } - fn for_child_keys_with_prefix(&self, storage_key: &[u8], prefix: &[u8], f: F) { - self.essence.for_child_keys_with_prefix(storage_key, prefix, f) + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -168,7 +190,12 @@ impl, H: Hasher> Backend for TrieBackend where (root, write_overlay) } - fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (H::Out, bool, Self::Transaction) + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord, @@ -193,6 +220,7 @@ impl, H: Hasher> Backend for TrieBackend where match child_delta_trie_root::, _, _, _, _, _>( storage_key, + child_info.keyspace(), &mut eph, root, delta @@ -217,13 +245,19 @@ pub mod tests { use std::collections::HashSet; use primitives::{Blake2Hasher, H256}; use codec::Encode; - use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; + use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, 
KeySpacedDBMut}; use super::*; + const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; + + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + fn test_db() -> (PrefixedMemoryDB, H256) { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { + let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -233,7 +267,7 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b":child_storage:default:sub1", &sub_root).expect("insert failed"); + trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -255,6 +289,15 @@ pub mod tests { assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); } + #[test] + fn read_from_child_storage_returns_some() { + let test_trie = test_trie(); + assert_eq!( + test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + Some(vec![142u8]), + ); + } + #[test] fn read_from_storage_returns_none() { assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index f071d19a946a8..cd8f686a929ae 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,9 +23,10 @@ use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie}; + for_keys_in_child_trie, KeySpacedDB}; use trie::trie_types::{TrieDB, TrieError, Layout}; use crate::backend::Consolidate; +use primitives::storage::ChildInfo; use codec::Encode; /// Patricia trie-based storage trait. @@ -67,7 +68,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. pub fn next_storage_key(&self, key: &[u8]) -> Result>, String> { - self.next_storage_key_from_root(&self.root, key) + self.next_storage_key_from_root(&self.root, None, key) } /// Return the next key in the child trie i.e. the minimum key that is strictly superior to @@ -75,6 +76,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn next_child_storage_key( &self, storage_key: &[u8], + child_info: ChildInfo, key: &[u8], ) -> Result>, String> { let child_root = match self.storage(storage_key)? { @@ -90,13 +92,14 @@ impl, H: Hasher> TrieBackendEssence where H::Out: // note: child_root and hash must be same size, panics otherwise. hash.as_mut().copy_from_slice(&child_root[..]); - self.next_storage_key_from_root(&hash, key) + self.next_storage_key_from_root(&hash, Some(child_info), key) } /// Return next key from main trie or child trie by providing corresponding root. 
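For context on the keyspacing used here: child-trie nodes are now written under the child's keyspace, so two child tries with identical content but different unique ids no longer map to the same database entries. A write-side sketch along the lines of the `test_db` helper in this patch (illustrative values):

use primitives::{Blake2Hasher, H256};
use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut};

fn write_keyspaced_child_trie() -> (PrefixedMemoryDB<Blake2Hasher>, H256) {
    let mut root = H256::default();
    let mut mdb = PrefixedMemoryDB::<Blake2Hasher>::default();
    {
        // Wrap the database so every node of this child trie is stored under
        // the child's unique keyspace rather than alongside top-trie nodes.
        let mut ks_mdb = KeySpacedDBMut::new(&mut mdb, b"unique_id_1");
        let mut trie = TrieDBMut::new(&mut ks_mdb, &mut root);
        trie.insert(b"value3", &[142]).expect("insert failed");
    }
    (mdb, root)
}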
fn next_storage_key_from_root( &self, root: &H::Out, + child_info: Option, key: &[u8], ) -> Result>, String> { let mut read_overlay = S::Overlay::default(); @@ -104,8 +107,16 @@ impl, H: Hasher> TrieBackendEssence where H::Out: storage: &self.storage, overlay: &mut read_overlay, }; + let dyn_eph: &dyn hash_db::HashDBRef<_, _>; + let keyspace_eph; + if let Some(child_info) = child_info.as_ref() { + keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); + dyn_eph = &keyspace_eph; + } else { + dyn_eph = &eph; + } - let trie = TrieDB::::new(&eph, root) + let trie = TrieDB::::new(dyn_eph, root) .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -148,7 +159,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of child storage at given key. - pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, String> { + pub fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, String> { let root = self.storage(storage_key)? .unwrap_or(default_child_trie_root::>(storage_key).encode()); @@ -160,11 +176,17 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(storage_key, &eph, &root, key).map_err(map_e) + read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) + .map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + pub fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { let root = match self.storage(storage_key) { Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), Err(e) => { @@ -181,6 +203,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( storage_key, + child_info.keyspace(), &eph, &root, f, @@ -193,6 +216,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn for_child_keys_with_prefix( &self, storage_key: &[u8], + child_info: ChildInfo, prefix: &[u8], mut f: F, ) { @@ -205,13 +229,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k)) + self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) } /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k)) + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) } @@ -220,15 +243,16 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, prefix: &[u8], mut f: F, + child_info: Option, ) { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, }; - - let mut iter = move || -> Result<(), Box>> { - let trie = TrieDB::::new(&eph, root)?; + + let mut iter = move |db| -> Result<(), Box>> { + let trie = TrieDB::::new(db, root)?; let mut iter = trie.iter()?; iter.seek(prefix)?; @@ -246,14 +270,20 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(()) }; - if let Err(e) = iter() { + let result = if let Some(child_info) = child_info { + let db = KeySpacedDB::new(&eph, child_info.keyspace()); + iter(&db) + } else { + iter(&eph) + }; + if let Err(e) = result { debug!(target: "trie", "Error while iterating by prefix: {}", e); } } /// Execute given closure for all key and values starting with prefix. pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f) + self.keys_values_with_prefix_inner(&self.root, prefix, f, None) } } @@ -419,11 +449,12 @@ impl TrieBackendStorage for MemoryDB { #[cfg(test)] mod test { use primitives::{Blake2Hasher, H256}; - use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; + use trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; #[test] fn next_storage_key_and_next_child_storage_key_work() { + let child_info = ChildInfo::new_default(b"uniqueid"); // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -436,6 +467,15 @@ mod test { trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } + { + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); + // reuse of root_1 implicitly assert child trie root is same + // as top trie (contents must remain the same). 
+ let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", &[1]).expect("insert failed"); + } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); @@ -452,10 +492,20 @@ mod test { let mdb = essence_1.into_storage(); let essence_2 = TrieBackendEssence::new(mdb, root_2); - assert_eq!(essence_2.next_child_storage_key(b"MyChild", b"2"), Ok(Some(b"3".to_vec()))); - assert_eq!(essence_2.next_child_storage_key(b"MyChild", b"3"), Ok(Some(b"4".to_vec()))); - assert_eq!(essence_2.next_child_storage_key(b"MyChild", b"4"), Ok(Some(b"6".to_vec()))); - assert_eq!(essence_2.next_child_storage_key(b"MyChild", b"5"), Ok(Some(b"6".to_vec()))); - assert_eq!(essence_2.next_child_storage_key(b"MyChild", b"6"), Ok(None)); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + ); } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 9d10d692e5ce0..491a157ebe916 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -40,13 +40,31 @@ pub struct StorageData( pub Vec, ); -/// A set of key value pairs for storage. +/// Map of data to use in a storage, it is a collection of +/// byte key and values. #[cfg(feature = "std")] -pub type StorageOverlay = std::collections::BTreeMap, Vec>; +pub type StorageMap = std::collections::BTreeMap, Vec>; -/// A set of key value pairs for children storage; #[cfg(feature = "std")] -pub type ChildrenStorageOverlay = std::collections::HashMap, StorageOverlay>; +#[derive(Debug, PartialEq, Eq, Clone)] +/// Child trie storage data. +pub struct StorageChild { + /// Child data for storage. + pub data: StorageMap, + /// Associated child info for a child + /// trie. + pub child_info: OwnedChildInfo, +} + +#[cfg(feature = "std")] +#[derive(Default, Debug, Clone)] +/// Struct containing data needed for a storage. +pub struct Storage { + /// Top trie storage data. + pub top: StorageMap, + /// Children trie storage data by storage key. + pub children: std::collections::HashMap, StorageChild>, +} /// Storage change set #[derive(RuntimeDebug)] @@ -156,3 +174,132 @@ impl<'a> ChildStorageKey<'a> { self.storage_key.into_owned() } } + +#[derive(Clone, Copy)] +/// Information related to a child state. +pub enum ChildInfo<'a> { + Default(ChildTrie<'a>), +} + +/// Owned version of `ChildInfo`. +/// To be use in persistence layers. +#[derive(Debug, Clone)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +pub enum OwnedChildInfo { + Default(OwnedChildTrie), +} + +impl<'a> ChildInfo<'a> { + /// Instantiates information for a default child trie. + pub const fn new_default(unique_id: &'a[u8]) -> Self { + ChildInfo::Default(ChildTrie { + data: unique_id, + }) + } + + /// Instantiates a owned version of this child info. 
+ pub fn to_owned(&self) -> OwnedChildInfo {
+ match self {
+ ChildInfo::Default(ChildTrie { data })
+ => OwnedChildInfo::Default(OwnedChildTrie {
+ data: data.to_vec(),
+ }),
+ }
+ }
+
+ /// Create child info from a linear, byte-packed value and a given type.
+ pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option<Self> {
+ match child_type {
+ x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)),
+ _ => None,
+ }
+ }
+
+ /// Return the packed child info content and its child info type.
+ /// This can be used as input for `resolve_child_info`.
+ pub fn info(&self) -> (&[u8], u32) {
+ match self {
+ ChildInfo::Default(ChildTrie {
+ data,
+ }) => (data, ChildType::CryptoUniqueId as u32),
+ }
+ }
+
+ /// Return the byte sequence (keyspace) that can be used by the underlying db to isolate keys.
+ /// This is a unique id of the child trie. The collision resistance of this value
+ /// depends on the type of child info in use. For `ChildInfo::Default` it is, and needs to be, collision free.
+ pub fn keyspace(&self) -> &[u8] {
+ match self {
+ ChildInfo::Default(ChildTrie {
+ data,
+ }) => &data[..],
+ }
+ }
+}
+
+/// Type of child.
+/// It does not strictly define different child types; it can also
+/// reflect technical considerations or API variants.
+#[repr(u32)]
+pub enum ChildType {
+ /// Default: uses a cryptographically strong unique id as input.
+ CryptoUniqueId = 1,
+}
+
+impl OwnedChildInfo {
+ /// Instantiates info for a default child trie.
+ pub fn new_default(unique_id: Vec<u8>) -> Self {
+ OwnedChildInfo::Default(OwnedChildTrie {
+ data: unique_id,
+ })
+ }
+
+ /// Try to update with another instance; returns false if the two
+ /// instances are not compatible.
+ pub fn try_update(&mut self, other: ChildInfo) -> bool {
+ match self {
+ OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other),
+ }
+ }
+
+ /// Get a `ChildInfo` reference to this owned child info.
+ pub fn as_ref(&self) -> ChildInfo {
+ match self {
+ OwnedChildInfo::Default(OwnedChildTrie { data })
+ => ChildInfo::Default(ChildTrie {
+ data: data.as_slice(),
+ }),
+ }
+ }
+}
+
+/// A child trie of default type.
+/// Default is the same implementation as the top trie.
+/// It shares its trie node storage with any kind of key,
+/// and its unique id needs to be collision free (e.g. a strong
+/// crypto hash).
+#[derive(Clone, Copy)]
+pub struct ChildTrie<'a> {
+ /// Data containing the unique id.
+ /// The unique id must be unique and free of any possible key collision
+ /// (depending on its storage behavior).
+ data: &'a[u8],
+}
+
+/// Owned version of the default child trie `ChildTrie`.
+#[derive(Debug, Clone)]
+#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))]
+pub struct OwnedChildTrie {
+ /// See `ChildTrie` reference field documentation.
+ data: Vec<u8>,
+}
+
+impl OwnedChildTrie {
+ /// Try to update with another instance; returns false if the two
+ /// instances are not compatible.
+ fn try_update(&mut self, other: ChildInfo) -> bool {
+ match other {
+ ChildInfo::Default(other) => self.data[..] == other.data[..],
+ }
+ }
+}
diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs
index 711ce302f0ebb..244752f44bee6 100644
--- a/primitives/trie/src/lib.rs
+++ b/primitives/trie/src/lib.rs
@@ -24,8 +24,9 @@ mod node_codec;
 mod trie_stream;
 use sp_std::boxed::Box;
+use sp_std::marker::PhantomData;
 use sp_std::vec::Vec;
-use hash_db::Hasher;
+use hash_db::{Hasher, Prefix};
 /// Our `NodeCodec`-specific error.
pub use error::Error; /// The Substrate format implementation of `TrieStream`. @@ -191,6 +192,7 @@ pub fn child_trie_root( /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( _storage_key: &[u8], + keyspace: &[u8], db: &mut DB, root_data: RD, delta: I, @@ -208,7 +210,8 @@ pub fn child_delta_trie_root( root.as_mut().copy_from_slice(root_data.as_ref()); { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + let mut db = KeySpacedDBMut::new(&mut *db, keyspace); + let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; for (key, change) in delta { match change { @@ -224,6 +227,7 @@ pub fn child_delta_trie_root( /// Call `f` for all keys in a child trie. pub fn for_keys_in_child_trie( _storage_key: &[u8], + keyspace: &[u8], db: &DB, root_slice: &[u8], mut f: F @@ -236,7 +240,8 @@ pub fn for_keys_in_child_trie( // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); - let trie = TrieDB::::new(&*db, &root)?; + let db = KeySpacedDB::new(&*db, keyspace); + let trie = TrieDB::::new(&db, &root)?; let iter = trie.iter()?; for x in iter { @@ -273,6 +278,7 @@ pub fn record_all_keys( /// Read a value from the child trie. pub fn read_child_trie_value( _storage_key: &[u8], + keyspace: &[u8], db: &DB, root_slice: &[u8], key: &[u8] @@ -285,12 +291,14 @@ pub fn read_child_trie_value( // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); - Ok(TrieDB::::new(&*db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the child trie with given query. pub fn read_child_trie_value_with, DB>( _storage_key: &[u8], + keyspace: &[u8], db: &DB, root_slice: &[u8], key: &[u8], @@ -304,7 +312,104 @@ pub fn read_child_trie_value_with::new(&*db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) +} + +/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the +/// prefix of every key value. +pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); + +/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the +/// prefix of every key value. +/// +/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. +pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); + +/// Utility function used to merge some byte data (keyspace) and `prefix` data +/// before calling key value database primitives. 
+fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { + let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; + result[..ks.len()].copy_from_slice(ks); + result[ks.len()..].copy_from_slice(prefix.0); + (result, prefix.1) +} + +impl<'a, DB, H> KeySpacedDB<'a, DB, H> where + H: Hasher, +{ + /// instantiate new keyspaced db + pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { + KeySpacedDB(db, ks, PhantomData) + } +} + +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where + H: Hasher, +{ + /// instantiate new keyspaced db + pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { + KeySpacedDBMut(db, ks, PhantomData) + } +} + +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where + DB: hash_db::HashDBRef, + H: Hasher, + T: From<&'static [u8]>, +{ + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } +} + +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +{ + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.insert((&derived_prefix.0, derived_prefix.1), value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) + } +} + +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +{ + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + &mut *self + } } /// Constants used into trie simplification codec. 
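The `KeySpacedDB`/`KeySpacedDBMut` wrappers above isolate each child trie by prepending its keyspace (the child trie's unique id) to every database prefix before the backing store is touched. Below is a minimal, self-contained sketch of that prefixing idea only: `ToyPrefix`, `ToyDb`, `insert` and `keyspace_as_prefix` are illustrative stand-ins, not code from this patch, and the shape of `hash_db::Prefix` is an assumption of the sketch.

```rust
use std::collections::HashMap;

/// Simplified mirror of a prefix: leading prefix bytes plus an optional trailing nibble.
type ToyPrefix<'a> = (&'a [u8], Option<u8>);

/// Same idea as `keyspace_as_prefix_alloc`: keyspace bytes first, then the
/// original prefix bytes; the trailing nibble (if any) is kept unchanged.
fn keyspace_as_prefix(ks: &[u8], prefix: ToyPrefix) -> (Vec<u8>, Option<u8>) {
    let mut buf = Vec::with_capacity(ks.len() + prefix.0.len());
    buf.extend_from_slice(ks);
    buf.extend_from_slice(prefix.0);
    (buf, prefix.1)
}

/// Toy stand-in for the backing hash database, keyed by (derived prefix, node key).
type ToyDb = HashMap<(Vec<u8>, Vec<u8>), Vec<u8>>;

fn insert(db: &mut ToyDb, ks: &[u8], prefix: ToyPrefix, key: &[u8], value: &[u8]) {
    let (derived, _nibble) = keyspace_as_prefix(ks, prefix);
    db.insert((derived, key.to_vec()), value.to_vec());
}

fn main() {
    let mut db = ToyDb::new();
    let prefix: ToyPrefix = (&b""[..], None);
    // Two child tries write a node under the same key and the same prefix...
    insert(&mut db, b"unique_id_1", prefix, b"node", b"a");
    insert(&mut db, b"unique_id_2", prefix, b"node", b"b");
    // ...but their keyspaces keep the entries apart in the shared store.
    assert_eq!(db.len(), 2);
}
```

Two child tries can therefore reuse identical node keys without clobbering each other, which is why `read_child_trie_value`, `child_delta_trie_root` and friends now take an explicit `keyspace` argument.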
diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 29370ba16c634..c6067e5d7c557 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -32,13 +32,13 @@ pub use keyring::{ sr25519::Keyring as Sr25519Keyring, }; pub use primitives::{Blake2Hasher, traits::BareCryptoStorePtr}; -pub use sp_runtime::{StorageOverlay, ChildrenStorageOverlay}; +pub use sp_runtime::{Storage, StorageChild}; pub use state_machine::ExecutionStrategy; use std::sync::Arc; use std::collections::HashMap; use hash_db::Hasher; -use primitives::storage::well_known_keys; +use primitives::storage::{well_known_keys, ChildInfo}; use sp_runtime::traits::Block as BlockT; use client::LocalCallExecutor; @@ -51,11 +51,11 @@ pub type LightBackend = client::light::backend::Backend< /// A genesis storage initialisation trait. pub trait GenesisInit: Default { /// Construct genesis storage. - fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay); + fn genesis_storage(&self) -> Storage; } impl GenesisInit for () { - fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay) { + fn genesis_storage(&self) -> Storage { Default::default() } } @@ -64,7 +64,7 @@ impl GenesisInit for () { pub struct TestClientBuilder { execution_strategies: ExecutionStrategies, genesis_init: G, - child_storage_extension: HashMap, Vec<(Vec, Vec)>>, + child_storage_extension: HashMap, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, keystore: Option, @@ -136,10 +136,15 @@ impl TestClientBuilder mut self, key: impl AsRef<[u8]>, child_key: impl AsRef<[u8]>, + child_info: ChildInfo, value: impl AsRef<[u8]>, ) -> Self { - let entry = self.child_storage_extension.entry(key.as_ref().to_vec()).or_insert_with(Vec::new); - entry.push((child_key.as_ref().to_vec(), value.as_ref().to_vec())); + let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }); + entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); self } @@ -180,10 +185,13 @@ impl TestClientBuilder let mut storage = self.genesis_init.genesis_storage(); // Add some child storage keys. 
- for (key, value) in self.child_storage_extension { - storage.1.insert( + for (key, child_content) in self.child_storage_extension { + storage.children.insert( well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().cloned().chain(key).collect(), - value.into_iter().collect(), + StorageChild { + data: child_content.data.into_iter().collect(), + child_info: child_content.child_info, + }, ); } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 8511750bc2c72..13d9b19553700 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -23,12 +23,13 @@ pub mod trait_tests; mod block_builder_ext; use std::sync::Arc; -use std::collections::{HashMap, BTreeMap}; +use std::collections::HashMap; pub use block_builder_ext::BlockBuilderExt; pub use generic_test_client::*; pub use runtime; use primitives::sr25519; +use primitives::storage::{ChildInfo, Storage, StorageChild}; use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor}; use client::{ @@ -97,8 +98,7 @@ pub type LightExecutor = client::light::call_executor::GenesisCallExecutor< pub struct GenesisParameters { support_changes_trie: bool, heap_pages_override: Option, - extra_storage: BTreeMap, Vec>, - child_extra_storage: HashMap, BTreeMap, Vec>>, + extra_storage: Storage, } impl GenesisParameters { @@ -118,27 +118,26 @@ impl GenesisParameters { 1000, self.heap_pages_override, self.extra_storage.clone(), - self.child_extra_storage.clone(), ) } } impl generic_test_client::GenesisInit for GenesisParameters { - fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay) { + fn genesis_storage(&self) -> Storage { use codec::Encode; let mut storage = self.genesis_config().genesis_map(); - let child_roots = storage.1.iter().map(|(sk, child_map)| { + let child_roots = storage.children.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_map.clone().into_iter().collect() + child_content.data.clone().into_iter().collect() ); (sk.clone(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.0.clone().into_iter().chain(child_roots).collect() + storage.top.clone().into_iter().chain(child_roots).collect() ); let block: runtime::Block = client::genesis::construct_genesis_block(state_root); - storage.0.extend(additional_storage_with_genesis(&block)); + storage.top.extend(additional_storage_with_genesis(&block)); storage } @@ -189,6 +188,7 @@ pub trait TestClientBuilderExt: Sized { fn add_extra_child_storage>, K: Into>, V: Into>>( self, storage_key: SK, + child_info: ChildInfo, key: K, value: V, ) -> Self; @@ -228,13 +228,14 @@ impl TestClientBuilderExt for TestClientBuilder< fn add_extra_storage>, V: Into>>(mut self, key: K, value: V) -> Self { let key = key.into(); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.insert(key, value.into()); + self.genesis_init_mut().extra_storage.top.insert(key, value.into()); self } fn add_extra_child_storage>, K: Into>, V: Into>>( mut self, storage_key: SK, + child_info: ChildInfo, key: K, value: V, ) -> Self { @@ -242,10 +243,12 @@ impl TestClientBuilderExt for TestClientBuilder< let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().child_extra_storage + self.genesis_init_mut().extra_storage.children .entry(storage_key) - .or_insert_with(Default::default) - 
.insert(key, value.into()); + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }).data.insert(key, value.into()); self } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 5b65b54dced3e..85d513c2cffdd 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -16,11 +16,12 @@ //! Tool for creating the genesis block. -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use sp_io::hashing::{blake2_256, twox_128}; use super::{AuthorityId, AccountId, WASM_BINARY, system}; use codec::{Encode, KeyedVec, Joiner}; -use primitives::{ChangesTrieConfiguration, map, storage::well_known_keys}; +use primitives::{ChangesTrieConfiguration, map}; +use primitives::storage::{well_known_keys, Storage}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; /// Configuration of a general Substrate test genesis block. @@ -30,8 +31,7 @@ pub struct GenesisConfig { balances: Vec<(AccountId, u64)>, heap_pages_override: Option, /// Additional storage key pairs that will be added to the genesis map. - extra_storage: BTreeMap, Vec>, - child_extra_storage: HashMap, BTreeMap, Vec>>, + extra_storage: Storage, } impl GenesisConfig { @@ -41,8 +41,7 @@ impl GenesisConfig { endowed_accounts: Vec, balance: u64, heap_pages_override: Option, - extra_storage: BTreeMap, Vec>, - child_extra_storage: HashMap, BTreeMap, Vec>>, + extra_storage: Storage, ) -> Self { GenesisConfig { changes_trie_config: match support_changes_trie { @@ -53,14 +52,10 @@ impl GenesisConfig { balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, extra_storage, - child_extra_storage, } } - pub fn genesis_map(&self) -> ( - BTreeMap, Vec>, - HashMap, BTreeMap, Vec>>, - ) { + pub fn genesis_map(&self) -> Storage { let wasm_runtime = WASM_BINARY.to_vec(); let mut map: BTreeMap, Vec> = self.balances.iter() .map(|&(ref account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance))) @@ -78,10 +73,10 @@ impl GenesisConfig { } map.insert(twox_128(&b"sys:auth"[..])[..].to_vec(), self.authorities.encode()); // Add the extra storage entries. - map.extend(self.extra_storage.clone().into_iter()); + map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
- let mut storage = (map, self.child_extra_storage.clone()); + let mut storage = Storage { top: map, children: self.extra_storage.children.clone()}; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); @@ -91,23 +86,22 @@ impl GenesisConfig { } pub fn insert_genesis_block( - storage: &mut ( - BTreeMap, Vec>, - HashMap, BTreeMap, Vec>>, - ) + storage: &mut Storage, ) -> primitives::hash::H256 { - let child_roots = storage.1.iter().map(|(sk, child_map)| { + let child_roots = storage.children.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_map.clone().into_iter().collect(), + child_content.data.clone().into_iter().collect(), ); (sk.clone(), state_root.encode()) }); + // add child roots to storage + storage.top.extend(child_roots); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.0.clone().into_iter().chain(child_roots).collect() + storage.top.clone().into_iter().collect() ); let block: crate::Block = sc_client::genesis::construct_genesis_block(state_root); let genesis_hash = block.header.hash(); - storage.0.extend(additional_storage_with_genesis(&block)); + storage.top.extend(additional_storage_with_genesis(&block)); genesis_hash } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index dcb76e27efbd6..785d8dec5bece 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -50,6 +50,7 @@ use runtime_version::NativeVersion; use runtime_support::{impl_outer_origin, parameter_types, weights::Weight}; use inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; +use primitives::storage::ChildType; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use babe_primitives::AuthorityId; @@ -910,21 +911,37 @@ fn test_read_storage() { fn test_read_child_storage() { const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; + const UNIQUE_ID: &[u8] = b":unique_id"; const KEY: &[u8] = b":read_child_storage"; - sp_io::storage::child_set(CHILD_KEY, KEY, b"test"); + sp_io::storage::child_set( + CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, + KEY, + b"test", + ); let mut v = [0u8; 4]; let r = sp_io::storage::child_read( CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, KEY, &mut v, - 0 + 0, ); assert_eq!(r, Some(4)); assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read(CHILD_KEY, KEY, &mut v, 8); + let r = sp_io::storage::child_read( + CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, + KEY, + &mut v, + 8, + ); assert_eq!(r, Some(4)); assert_eq!(&v, &[0, 0, 0, 0]); } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index d305220d5f873..aec909f8da7db 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -361,16 +361,16 @@ mod tests { ]; TestExternalities::new_with_code( WASM_BINARY, - ( - map![ + primitives::storage::Storage { + top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], - map![], - ) + children: map![], + }, ) }
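The genesis changes above compute each child trie root from its `StorageChild` data, fold those roots into `top` under the child storage keys, and only then derive the state root from `top` alone. The sketch below mirrors that two-step flow under simplifying assumptions: the local `Storage`/`StorageChild` types omit `child_info`, and `mock_root` is a hypothetical stand-in for the runtime's `trie_root` hashing.

```rust
use std::collections::{BTreeMap, HashMap};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type StorageMap = BTreeMap<Vec<u8>, Vec<u8>>;

// Simplified local stand-ins for the `primitives::storage` types.
struct StorageChild { data: StorageMap }

struct Storage {
    top: StorageMap,
    children: HashMap<Vec<u8>, StorageChild>,
}

/// Stand-in for `trie_root`: hash the ordered key/value pairs.
fn mock_root(map: &StorageMap) -> Vec<u8> {
    let mut h = DefaultHasher::new();
    for (k, v) in map {
        k.hash(&mut h);
        v.hash(&mut h);
    }
    h.finish().to_be_bytes().to_vec()
}

fn genesis_state_root(storage: &mut Storage) -> Vec<u8> {
    // Step 1: each child trie contributes its root under its storage key.
    let child_roots: Vec<(Vec<u8>, Vec<u8>)> = storage.children.iter()
        .map(|(sk, child)| (sk.clone(), mock_root(&child.data)))
        .collect();
    storage.top.extend(child_roots);
    // Step 2: the state root is computed over the top map only.
    mock_root(&storage.top)
}

fn main() {
    let mut child = StorageChild { data: StorageMap::new() };
    child.data.insert(b"value3".to_vec(), vec![142]);
    let mut storage = Storage { top: StorageMap::new(), children: HashMap::new() };
    storage.top.insert(b"key".to_vec(), b"value".to_vec());
    storage.children.insert(b":child_storage:default:sub1".to_vec(), child);
    let root = genesis_state_root(&mut storage);
    assert_eq!(root.len(), 8);
}
```

Folding the child roots into `top` before hashing is what keeps the old and new code paths equivalent: previously the child roots were chained into the iterator passed to `trie_root`, while the new `insert_genesis_block` extends `storage.top` first and hashes `top` by itself.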