diff --git a/Cargo.lock b/Cargo.lock index f7f85106414ac..608c0fa9e3a43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5518,6 +5518,26 @@ dependencies = [ "rust-argon2", ] +[[package]] +name = "ref-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" +dependencies = [ + "proc-macro2", + "quote 1.0.3", + "syn 1.0.17", +] + [[package]] name = "regex" version = "1.3.6" @@ -7695,6 +7715,7 @@ name = "sp-storage" version = "2.0.0-dev" dependencies = [ "impl-serde 0.2.3", + "ref-cast", "serde", "sp-debug-derive", "sp-std", diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 7ed98239b54b6..3612d7284faba 100644 --- a/bin/node/cli/res/flaming-fir.json +++ b/bin/node/cli/res/flaming-fir.json @@ -134,7 +134,7 @@ "0x5f3e4907f716ac89b6347d15ececedca0b6a45321efae92aea15e0740ec7afe7": "0x00000000", "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98e54094c2d5af8ae10b91e1288f4f59f2946d7738f2c509b7effd909e5e9ba0ad": "0x00" }, - "children": {} + "childrenDefault": {} } } } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 7fdf4e9c596a4..5b3d8f20e9df7 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -170,7 +170,7 @@ fn panic_execution_with_foreign_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -206,7 +206,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -240,7 +240,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -282,7 +282,7 @@ fn successful_execution_with_foreign_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -704,7 +704,7 @@ fn panic_execution_gives_error() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -738,7 +738,7 @@ fn successful_execution_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 32fef3b326615..91c58d68fca46 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -143,7 +143,7 @@ fn transaction_fee_is_correct_ultimate() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let tip = 1_000_000; diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index d10e62cc54920..33a370c7cb2c5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -26,7 +26,7 @@ use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; -use sp_storage::{StorageData, StorageKey, ChildInfo}; +use sp_storage::{StorageData, StorageKey, 
PrefixedStorageKey, ChildInfo}; use crate::{ blockchain::{ Backend as BlockchainBackend, well_known_cache_keys } }; @@ -280,6 +280,7 @@ impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where Some(StorageKey(next_key)) } } + /// Provides access to storage primitives pub trait StorageProvider> { /// Given a `BlockId` and a key, return the value under the key in that block. @@ -310,8 +311,7 @@ pub trait StorageProvider> { fn child_storage( &self, id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result>; @@ -319,8 +319,7 @@ pub trait StorageProvider> { fn child_storage_keys( &self, id: &BlockId, - child_storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result>; @@ -328,8 +327,7 @@ pub trait StorageProvider> { fn child_storage_hash( &self, id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result>; @@ -351,7 +349,7 @@ pub trait StorageProvider> { &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>>; } diff --git a/client/api/src/light.rs b/client/api/src/light.rs index c0bebc1740a8a..30e6d14d557f1 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use sp_core::ChangesTrieConfigurationRange; +use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; use sp_state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -81,12 +81,7 @@ pub struct RemoteReadChildRequest { /// Header of block at which read is performed. pub header: Header, /// Storage key for child. - pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, + pub storage_key: PrefixedStorageKey, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. @@ -110,7 +105,7 @@ pub struct RemoteChangesRequest { /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. pub tries_roots: (Header::Number, Header::Hash, Vec), /// Optional Child Storage key to read. - pub storage_key: Option>, + pub storage_key: Option, /// Storage key to read. pub key: Vec, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 2d9876f7ad278..93160855eaebe 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,7 +19,7 @@ use sp_runtime/{ traits::{Block as BlockT}, }; use crate::{StorageProof, ChangesProof}; -use sp_storage::{ChildInfo, StorageKey}; +use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; /// Interface for providing block proving utilities.
pub trait ProofProvider { @@ -35,8 +35,7 @@ pub trait ProofProvider { fn read_child_proof( &self, id: &BlockId, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -65,7 +64,7 @@ pub trait ProofProvider { last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result>; } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 9f3a10ee89769..fbe7b7e7a8eff 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -74,17 +74,14 @@ impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); + children_default: children_map.into_iter().map(|(storage_key, child_content)| { + let child_info = ChildInfo::new_default(storage_key.0.as_slice()); ( - sk.0, + storage_key.0, StorageChild { - data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, }, ) @@ -103,22 +100,13 @@ impl BuildStorage for ChainSpec { type GenesisStorage = HashMap; -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -struct ChildRawStorage { - data: GenesisStorage, - child_info: Vec, - child_type: u32, -} - #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] /// Storage content for genesis block. 
struct RawGenesis { top: GenesisStorage, - children: HashMap, + children_default: HashMap, } #[derive(Serialize, Deserialize)] @@ -291,23 +279,16 @@ impl ChainSpec { let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); - let children = storage.children.into_iter() - .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) + let children_default = storage.children_default.into_iter() + .map(|(sk, child)| ( + StorageKey(sk), + child.data.into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + )) .collect(); - Genesis::Raw(RawGenesis { top, children }) + Genesis::Raw(RawGenesis { top, children_default }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index ddac2109d7542..9d6f595498bd0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -77,10 +77,9 @@ impl BenchmarkingState { }; state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, + let child_delta = genesis.children_default.into_iter().map(|(_storage_key, child_content)| ( + child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), - child_content.child_info )); let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), @@ -129,11 +128,10 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -142,11 +140,10 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -155,11 +152,10 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -176,24 +172,22 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(storage_key, child_info, f) + state.for_keys_in_child_storage(child_info, f) } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { if let Some(ref state) = *self.state.borrow() { - 
state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + state.for_child_keys_with_prefix(child_info, prefix, f) } } @@ -205,13 +199,12 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(storage_key, child_info, delta)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -224,11 +217,10 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } fn as_trie_backend(&mut self) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 5447e8b72567f..985251f403d62 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -26,6 +26,7 @@ use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_core::storage::PrefixedStorageKey; use sp_database::Transaction; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, @@ -482,7 +483,7 @@ where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.build_cache.read().with_changed_keys(root, functor) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 782e0f6db2a62..579ea2db4ad8d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -159,11 +159,10 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.child_storage(storage_key, child_info, key) + self.state.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -172,11 +171,10 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -185,11 +183,10 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -202,21 +199,19 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, 
f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -228,14 +223,13 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -248,11 +242,10 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) @@ -631,16 +624,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - for child_key in storage.children.keys() { - if !well_known_keys::is_child_storage_key(&child_key) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - } - - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, - child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), - ); + let child_delta = storage.children_default.into_iter().map(|(_storage_key, child_content)|( + child_content.child_info, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + )); let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( @@ -1808,7 +1795,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); op.set_block_data( header.clone(), @@ -1894,7 +1881,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 6326899263241..66ac74afa4f2a 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -542,11 +542,10 @@ impl>, B: BlockT> StateBackend> for Cachin fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - let key = (storage_key.to_vec(), key.to_vec()); + let key = (child_info.storage_key().to_vec(), key.to_vec()); let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { trace!("Found in local cache: {:?}", key); @@ -564,7 +563,7 @@ impl>, B: BlockT> StateBackend> for Cachin } } trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(storage_key, child_info, &key.1[..])?; + let value = self.state.child_storage(child_info, &key.1[..])?; // just pass it through the usage counter let value = self.usage.tally_child_key_read(&key, value, false); @@ -579,20 +578,18 @@ impl>, B: BlockT> StateBackend> for Cachin fn exists_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, 
key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -601,11 +598,10 @@ impl>, B: BlockT> StateBackend> for Cachin fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -618,12 +614,11 @@ impl>, B: BlockT> StateBackend> for Cachin fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -635,14 +630,13 @@ impl>, B: BlockT> StateBackend> for Cachin fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -655,11 +649,10 @@ impl>, B: BlockT> StateBackend> for Cachin fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { @@ -758,11 +751,10 @@ impl>, B: BlockT> StateBackend> for Syncin fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.caching_state().child_storage(storage_key, child_info, key) + self.caching_state().child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -771,20 +763,18 @@ impl>, B: BlockT> StateBackend> for Syncin fn exists_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { - self.caching_state().exists_child_storage(storage_key, child_info, key) + self.caching_state().exists_child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - self.caching_state().for_keys_in_child_storage(storage_key, child_info, f) + self.caching_state().for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -793,11 +783,10 @@ impl>, B: BlockT> StateBackend> for Syncin fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.caching_state().next_child_storage_key(storage_key, child_info, key) + self.caching_state().next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -810,12 +799,11 @@ impl>, B: BlockT> StateBackend> for Syncin fn for_child_keys_with_prefix( &self, - 
storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.caching_state().for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.caching_state().for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -827,14 +815,13 @@ impl>, B: BlockT> StateBackend> for Syncin fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.caching_state().child_storage_root(storage_key, child_info, delta) + self.caching_state().child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -847,11 +834,10 @@ impl>, B: BlockT> StateBackend> for Syncin fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.caching_state().child_keys(storage_key, child_info, prefix) + self.caching_state().child_keys(child_info, prefix) } fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 72055b77884a6..e1ef18a0853ff 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -186,7 +186,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(ext, expected); } @@ -220,7 +220,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { b"aab".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"5".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(expected, ext); } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 49479aa2d4fb0..84b913b284c62 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -24,7 +24,7 @@ use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use sp_core::{ - storage::{StorageKey, ChildInfo}, + storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType}, hexdisplay::HexDisplay }; use sp_consensus::{ @@ -1520,37 +1520,28 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &request.storage_key, - child_info, - &mut request.keys.iter().map(AsRef::as_ref), - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match ChildType::from_prefixed_key(prefixed_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), + }; + let proof = match child_info.and_then(|child_info| 
self.context_data.chain.read_child_proof( + &BlockId::Hash(request.block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref), + )) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + HexDisplay::from(&request.storage_key), + keys_str(), + request.block, + error + ); + StorageProof::empty() } - } else { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - "invalid child info and type", - ); - - StorageProof::empty() }; self.send_message( &who, @@ -1608,14 +1599,16 @@ impl Protocol { request.first, request.last ); - let storage_key = request.storage_key.map(|sk| StorageKey(sk)); let key = StorageKey(request.key); + let prefixed_key = request.storage_key.as_ref() + .map(|storage_key| PrefixedStorageKey::new_ref(storage_key)); + let (first, last, min, max) = (request.first, request.last, request.min, request.max); let proof = match self.context_data.chain.key_changes_proof( - request.first, - request.last, - request.min, - request.max, - storage_key.as_ref(), + first, + last, + min, + max, + prefixed_key, &key, ) { Ok(proof) => proof, @@ -1623,8 +1616,8 @@ impl Protocol { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", request.id, who, - if let Some(sk) = storage_key { - format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) + if let Some(sk) = request.storage_key.as_ref() { + format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&key.0)) } else { HexDisplay::from(&key.0).to_string() }, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 85312b0803234..f4e877d675e79 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -58,7 +58,7 @@ use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; use sp_core::{ - storage::{ChildInfo, StorageKey}, + storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, hexdisplay::HexDisplay, }; use smallvec::SmallVec; @@ -617,35 +617,27 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof( - &BlockId::Hash(block), - &request.storage_key, - info, - &mut request.keys.iter().map(AsRef::as_ref) - ) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match ChildType::from_prefixed_key(prefixed_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), + }; + let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( + &BlockId::Hash(block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref) + )) { + Ok(proof) => proof, + Err(error) => { log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, 
HexDisplay::from(&request.storage_key), fmt_keys(request.keys.first(), request.keys.last()), request.block, - "invalid child info and type" - ); + error); StorageProof::empty() - }; + } + }; let response = { let r = api::v1::light::RemoteReadResponse { proof: proof.encode() }; @@ -704,23 +696,18 @@ where let min = Decode::decode(&mut request.min.as_ref())?; let max = Decode::decode(&mut request.max.as_ref())?; let key = StorageKey(request.key.clone()); - let storage_key = - if request.storage_key.is_empty() { - None - } else { - Some(StorageKey(request.storage_key.clone())) - }; + let storage_key = if request.storage_key.is_empty() { + None + } else { + Some(PrefixedStorageKey::new_ref(&request.storage_key)) + }; - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) { + let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { Ok(proof) => proof, Err(error) => { log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", peer, - if let Some(sk) = storage_key { - format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) - } else { - HexDisplay::from(&key.0).to_string() - }, + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), request.first, request.last, error); @@ -1092,9 +1079,7 @@ fn serialize_request(request: &Request) -> Result, prost::E Request::ReadChild { request, .. } => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), - storage_key: request.storage_key.clone(), - child_type: request.child_type.clone(), - child_info: request.child_info.clone(), + storage_key: request.storage_key.clone().into_inner(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -1113,7 +1098,8 @@ fn serialize_request(request: &Request) -> Result, prost::E last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().unwrap_or_default(), + storage_key: request.storage_key.clone().map(|s| s.into_inner()) + .unwrap_or_default(), key: request.key.clone(), }; api::v1::light::request::Request::RemoteChangesRequest(r) @@ -1343,8 +1329,6 @@ mod tests { use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; use void::Void; - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler; type Swarm = libp2p::swarm::Swarm; @@ -1894,15 +1878,13 @@ mod tests { #[test] fn receives_remote_read_child_response() { - let info = CHILD_INFO.info(); let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; issue_request(Request::ReadChild { request, sender: chan.0 }); @@ -1997,15 +1979,13 @@ mod tests { #[test] fn send_receive_read_child() { - let info = CHILD_INFO.info(); let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: 
b":child_storage:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; send_receive(Request::ReadChild { request, sender: chan.0 }); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ae83b49e60f57..8638e9afc59b9 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -477,11 +477,6 @@ pub mod generic { pub block: H, /// Child Storage key. pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index 1c98d49730cf9..9b5d47719dc28 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -67,13 +67,9 @@ message RemoteReadResponse { message RemoteReadChildRequest { // Block at which to perform call. bytes block = 2; - // Child Storage key. + // Child Storage key, this is relative + // to the child type storage location. bytes storage_key = 3; - // Child trie source information. - bytes child_info = 4; - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - uint32 child_type = 5; // Storage keys. repeated bytes keys = 6; } diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs new file mode 100644 index 0000000000000..a46269cad6c0c --- /dev/null +++ b/client/rpc-api/src/child_state/mod.rs @@ -0,0 +1,69 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Substrate state API. + +use jsonrpc_derive::rpc; +use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData}; +use crate::state::error::FutureResult; + +pub use self::gen_client::Client as ChildStateClient; + +/// Substrate child state API +/// +/// Note that all `PrefixedStorageKey` are desierialized +/// from json and not guaranted valid. +#[rpc] +pub trait ChildStateApi { + /// RPC Metadata + type Metadata; + + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + #[rpc(name = "childstate_getKeys")] + fn storage_keys( + &self, + child_storage_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + #[rpc(name = "childstate_getStorage")] + fn storage( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. 
+ #[rpc(name = "childstate_getStorageHash")] + fn storage_hash( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + #[rpc(name = "childstate_getStorageSize")] + fn storage_size( + &self, + child_storage_key: PrefixedStorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; +} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 2d541be2a705a..f742d73b692c0 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -34,4 +34,5 @@ pub mod author; pub mod chain; pub mod offchain; pub mod state; +pub mod child_state; pub mod system; diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index d29e46a4b5637..243a33c18d482 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -72,50 +72,6 @@ pub trait StateApi { #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getChildStorage")] - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - /// Returns the runtime metadata as an opaque blob. 
#[rpc(name = "state_getMetadata")] fn metadata(&self, hash: Option) -> FutureResult; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 2747405c04fca..e6962824333dc 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -28,7 +28,7 @@ use rpc::{Result as RpcResult, futures::{Future, future::result}}; use sc_rpc_api::Subscriptions; use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; -use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}}; +use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}}; use sp_version::RuntimeVersion; use sp_runtime::traits::Block as BlockT; @@ -37,6 +37,7 @@ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; +pub use sc_rpc_api::child_state::*; use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; @@ -103,49 +104,6 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - fn child_storage_size( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } - /// Returns the runtime metadata as an opaque blob. fn metadata(&self, block: Option) -> FutureResult; @@ -204,7 +162,7 @@ pub trait StateBackend: Send + Sync + 'static pub fn new_full( client: Arc, subscriptions: Subscriptions, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -214,9 +172,11 @@ pub fn new_full( + ProvideRuntimeApi + Send + Sync + 'static, Client::Api: Metadata, { - State { - backend: Box::new(self::state_full::FullState::new(client, subscriptions)), - } + let child_backend = Box::new( + self::state_full::FullState::new(client.clone(), subscriptions.clone()) + ); + let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + (State { backend }, ChildState { backend: child_backend }) } /// Create new state API that works on light node. 
@@ -225,7 +185,7 @@ pub fn new_light>( subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -235,14 +195,20 @@ pub fn new_light>( + Send + Sync + 'static, F: Send + Sync + 'static, { - State { - backend: Box::new(self::state_light::LightState::new( + let child_backend = Box::new(self::state_light::LightState::new( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + fetcher.clone(), + )); + + let backend = Box::new(self::state_light::LightState::new( client, subscriptions, remote_blockchain, fetcher, - )), - } + )); + (State { backend }, ChildState { backend: child_backend }) } /// State API with subscriptions support. @@ -307,50 +273,6 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key_prefix: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) - } - - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) - } - fn metadata(&self, block: Option) -> FutureResult { self.backend.metadata(block) } @@ -402,12 +324,98 @@ impl StateApi for State } } -fn client_err(err: sp_blockchain::Error) -> Error { - Error::Client(Box::new(err)) +/// Child state backend API. +pub trait ChildStateBackend: Send + Sync + 'static + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + /// Returns the keys with prefix from a child storage, + /// leave prefix empty to get all the keys. + fn storage_keys( + &self, + block: Option, + storage_key: PrefixedStorageKey, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + fn storage( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + fn storage_hash( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + fn storage_size( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(self.storage(block, storage_key, key) + .map(|x| x.map(|x| x.0.len() as u64))) + } } -const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; +/// Child state API with subscriptions support. 
+pub struct ChildState { + backend: Box>, +} + +impl ChildStateApi for ChildState + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + type Metadata = crate::metadata::Metadata; + + fn storage( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage(block, storage_key, key) + } + + fn storage_keys( + &self, + storage_key: PrefixedStorageKey, + key_prefix: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_keys(block, storage_key, key_prefix) + } -fn child_resolution_error() -> sp_blockchain::Error { - sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) + fn storage_hash( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_hash(block, storage_key, key) + } + + fn storage_size( + &self, + storage_key: PrefixedStorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_size(block, storage_key, key) + } +} + +fn client_err(err: sp_blockchain::Error) -> Error { + Error::Client(Box::new(err)) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index bf80820543102..a9767c34fcf8e 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -29,7 +29,8 @@ use sc_client_api::backend::Backend; use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; use sc_client::BlockchainEvents; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, + ChildInfo, ChildType, PrefixedStorageKey}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -38,7 +39,7 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; @@ -308,66 +309,6 @@ impl StateBackend for FullState, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) - .map_err(client_err))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) - .map_err(client_err))) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - 
.ok_or_else(child_resolution_error)?, - &key, - )) - .map_err(client_err))) - } - fn metadata(&self, block: Option) -> FutureResult { Box::new(result( self.block_or_best(block) @@ -493,7 +434,7 @@ impl StateBackend for FullState StateBackend for FullState ChildStateBackend for FullState where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + HeaderBackend + + HeaderMetadata + BlockchainEvents + + CallApiAt + ProvideRuntimeApi + + Send + Sync + 'static, + Client::Api: Metadata, +{ + fn storage_keys( + &self, + block: Option, + storage_key: PrefixedStorageKey, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage_keys( + &BlockId::Hash(block), + &child_info, + &prefix, + ) + }) + .map_err(client_err))) + } + + fn storage( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } + + fn storage_hash( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage_hash( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } +} + /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 092419ad0129e..27adbcd69191f 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -48,17 +48,19 @@ use sc_client::{ }, }; use sp_core::{ - Bytes, OpaqueMetadata, storage::{StorageKey, StorageData, StorageChangeSet}, + Bytes, OpaqueMetadata, + storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}, }; use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, error::{FutureResult, Error}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; /// State API backend for light nodes. 
+#[derive(Clone)] pub struct LightState, Client> { client: Arc, subscriptions: Subscriptions, @@ -233,69 +235,7 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(self - .storage(block, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) - } - - fn child_storage_keys( - &self, - _block: Option, - _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key: child_storage_key.0, - child_info: child_info.0, - child_type, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }); - - Box::new(child_storage.boxed().compat()) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self - .child_storage(block, child_storage_key, child_info, child_type, key) + Box::new(StateBackend::storage(self, block, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) ) @@ -518,6 +458,65 @@ impl StateBackend for LightState ChildStateBackend for LightState + where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static +{ + fn storage_keys( + &self, + _block: Option, + _storage_key: PrefixedStorageKey, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + + fn storage( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult> { + let block = self.block_or_best(block); + let fetcher = self.fetcher.clone(); + let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) + .then(move |result| match result { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { + block, + header, + storage_key, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }).then(move |result| ready(result + .map(|mut data| data + .remove(&key.0) + .expect("successful result has entry for all keys; qed") + .map(StorageData) + ) + .map_err(client_err) + ))), + Err(error) => Either::Right(ready(Err(error))), + }); + + Box::new(child_storage.boxed().compat()) + } + + fn storage_hash( + &self, + block: Option, + storage_key: PrefixedStorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(ChildStateBackend::storage(self, block, storage_key, key) + .and_then(|maybe_storage| + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) + ) + ) + } +} + /// Resolve header by hash. 
fn resolve_header>( remote_blockchain: &dyn RemoteBlockchain, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 0f2358a3ed1a1..f9c6982929b4d 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -21,7 +21,7 @@ use self::error::Error; use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration}; +use sp_core::{storage::ChildInfo, ChangesTrieConfiguration}; use sp_core::hash::H256; use sc_block_builder::BlockBuilderProvider; use sp_io::hashing::blake2_256; @@ -32,26 +32,28 @@ use substrate_test_runtime_client::{ }; use sp_runtime::generic::BlockId; -const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); +const STORAGE_KEY: &[u8] = b"child"; + +fn prefixed_storage_key() -> PrefixedStorageKey { + let child_info = ChildInfo::new_default(&STORAGE_KEY[..]); + child_info.prefixed_storage_key() +} #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; + let child_info = ChildInfo::new_default(STORAGE_KEY); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); + let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -69,7 +71,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into()) + child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -79,45 +81,36 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); + let child_info = ChildInfo::new_default(STORAGE_KEY); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .add_child_storage(&child_info, "key", vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey( - well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect() - ); + let (_client, child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); assert_matches!( - client.child_storage( + child.storage( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait(), Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); 
assert_matches!( - client.child_storage_hash( + child.storage_hash( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), Ok(true) ); assert_matches!( - client.child_storage_size( + child.storage_size( child_key.clone(), - child_info.clone(), - child_type, key.clone(), None, ).wait(), @@ -130,7 +123,7 @@ fn should_call_contract() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let (client, _child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); assert_matches!( client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), @@ -146,7 +139,7 @@ fn should_notify_about_storage_changes() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); api.subscribe_storage(Default::default(), subscriber, None.into()); @@ -179,7 +172,7 @@ fn should_send_initial_storage_changes_and_notifications() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -215,7 +208,7 @@ fn should_send_initial_storage_changes_and_notifications() { fn should_query_storage() { fn run_tests(mut client: Arc, has_changes_trie_config: bool) { let core = tokio::runtime::Runtime::new().unwrap(); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let mut add_block = |nonce| { let mut builder = client.new_block(Default::default()).unwrap(); @@ -434,7 +427,7 @@ fn should_return_runtime_version() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ @@ -458,7 +451,7 @@ fn should_notify_on_runtime_version_initially() { { let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); api.subscribe_runtime_version(Default::default(), subscriber); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 200707dad441a..1cdebe8b14bf3 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1015,7 +1015,7 @@ ServiceBuilder< let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); - let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = + let (chain, state, child_state) = if let (Some(remote_backend), Some(on_demand)) = (remote_backend.as_ref(), on_demand.as_ref()) { // Light clients let chain = 
sc_rpc::chain::new_light( @@ -1024,19 +1024,19 @@ ServiceBuilder< remote_backend.clone(), on_demand.clone() ); - let state = sc_rpc::state::new_light( + let (state, child_state) = sc_rpc::state::new_light( client.clone(), subscriptions.clone(), remote_backend.clone(), on_demand.clone() ); - (chain, state) + (chain, state, child_state) } else { // Full nodes let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); - (chain, state) + let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); + (chain, state, child_state) }; let author = sc_rpc::author::Author::new( @@ -1059,6 +1059,7 @@ ServiceBuilder< sc_rpc_server::rpc_handler(( state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), chain::ChainApi::to_delegate(chain), maybe_offchain_rpc, author::AuthorApi::to_delegate(author), diff --git a/client/src/client.rs b/client/src/client.rs index a71d6bf964ef4..2a8040febf3ef 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -25,8 +25,8 @@ use parking_lot::{Mutex, RwLock}; use codec::{Encode, Decode}; use hash_db::Prefix; use sp_core::{ - ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, - NativeOrEncoded, storage::{StorageKey, StorageData, well_known_keys, ChildInfo}, + ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, NativeOrEncoded, + storage::{StorageKey, PrefixedStorageKey, StorageData, well_known_keys, ChildInfo}, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::{ @@ -344,7 +344,7 @@ impl Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, cht_size: NumberFor, ) -> sp_blockchain::Result> { @@ -393,7 +393,7 @@ impl Client where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.storage.with_cached_changed_keys(root, functor) } @@ -438,7 +438,7 @@ impl Client where number: last_number, }, max_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0, ) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; @@ -1109,12 +1109,11 @@ impl ProofProvider for Client where fn read_child_proof( &self, id: &BlockId, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) + .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) } @@ -1156,7 +1155,7 @@ impl ProofProvider for Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result> { self.key_changes_proof_with_cht_size( @@ -1286,46 +1285,40 @@ impl StorageProvider for Client wher ) } - fn child_storage_keys( &self, id: &BlockId, - child_storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? 
- .child_keys(&child_storage_key.0, child_info, &key_prefix.0) + .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) .collect(); Ok(keys) } - fn child_storage( &self, id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage(&storage_key.0, child_info, &key.0) + .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) } - fn child_storage_hash( &self, id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage_hash(&storage_key.0, child_info, &key.0) + .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? ) } @@ -1361,7 +1354,7 @@ impl StorageProvider for Client wher &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; @@ -1392,7 +1385,7 @@ impl StorageProvider for Client wher range_first, &range_anchor, best_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0) .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 3672da1822df1..20b227e790f6e 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -516,9 +516,9 @@ impl backend::BlockImportOperation for BlockImportOperatio fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children.into_iter() - .map(|(storage_key, child_content)| - (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); + let child_delta = storage.children_default.into_iter() + .map(|(_storage_key, child_content)| + (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); let (root, transaction) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), @@ -725,8 +725,9 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { return Err(sp_blockchain::Error::GenesisInvalid.into()); } - if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + if storage.children_default.keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { + return Err(sp_blockchain::Error::GenesisInvalid.into()); } Ok(()) diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 0b334d48b7a25..01e9854864062 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -312,17 +312,17 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis 
block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + let mut storage: HashMap, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for - let child_delta = input.children.iter() - .map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone())) + let child_delta = input.children_default.iter() + .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) .collect::>(); // make sure to persist the child storage - for (child_key, storage_child) in input.children { - storage.insert(Some((child_key, storage_child.child_info)), storage_child.data); + for (_child_key, storage_child) in input.children_default { + storage.insert(Some(storage_child.child_info), storage_child.data); } let storage_update = InMemoryBackend::from(storage); @@ -386,13 +386,12 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), + Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -407,13 +406,12 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { GenesisOrUnavailableState::Genesis(ref state) => Ok( - state.next_child_storage_key(storage_key, child_info, key) + state.next_child_storage_key(child_info, key) .expect(IN_MEMORY_EXPECT_PROOF) ), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), @@ -436,27 +434,25 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(storage_key, child_info, action), + state.for_keys_in_child_storage(child_info, action), GenesisOrUnavailableState::Unavailable => (), } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(storage_key, child_info, prefix, action), + state.for_child_keys_with_prefix(child_info, prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -474,8 +470,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -483,7 +478,7 @@ impl StateBackend for GenesisOrUnavailableState { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 0ae0e68e0c878..ef6a062cf3c07 100644 --- a/client/src/light/fetcher.rs +++ 
b/client/src/light/fetcher.rs @@ -23,6 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; +use sp_core::storage::{ChildInfo, ChildType}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -135,7 +136,7 @@ impl> LightDataChecker { number: request.last_block.0, }, remote_max_block, - request.storage_key.as_ref().map(Vec::as_slice), + request.storage_key.as_ref(), &request.key) .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; result.extend(result_range); @@ -242,10 +243,14 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child type".into()), + }; read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - &request.storage_key, + &child_info, request.keys.iter(), ).map_err(Into::into) } @@ -360,8 +365,6 @@ pub mod tests { use sc_client_api::{StorageProvider, ProofProvider}; use sc_block_builder::BlockBuilderProvider; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - type TestChecker = LightDataChecker< NativeExecutor, BlakeTwo256, @@ -411,11 +414,12 @@ pub mod tests { fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; + let child_info = ChildInfo::new_default(b"child1"); + let child_info = &child_info; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( - b":child_storage:default:child1".to_vec(), - CHILD_INFO_1, + child_info, b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -428,15 +432,13 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, - &StorageKey(b":child_storage:default:child1".to_vec()), - CHILD_INFO_1, + child_info, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, - b":child_storage:default:child1", - CHILD_INFO_1, + child_info, &mut std::iter::once("key1".as_bytes()), ).unwrap(); @@ -510,20 +512,18 @@ pub mod tests { #[test] fn storage_child_read_proof_is_generated_and_checked() { + let child_info = ChildInfo::new_default(&b"child1"[..]); let ( local_checker, remote_block_header, remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), - child_info: child_infos.0.to_vec(), - child_type: child_infos.1, + storage_key: child_info.prefixed_storage_key(), keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 165581e67646d..aae853d2ff996 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -167,13 +167,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -212,17 +212,16 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.trie_id[..], - new_info.child_trie_unique_id(), + &new_info.child_trie_info(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.child_trie_info(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 91f06d5607c8b..00b23cad19915 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -125,12 +125,11 @@ use sp_runtime::{ use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child, - parameter_types, IsSubType, + Parameter, decl_module, decl_event, decl_storage, decl_error, + parameter_types, IsSubType, storage::child::{self, ChildInfo}, }; use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; -use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; pub type CodeHash = ::Hash; @@ -229,15 +228,14 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. 
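The contracts changes above stop prepending `:child_storage:default:` to trie ids; a trie id is now used directly as the unprefixed child storage key via `child_trie_info`. A minimal sketch of touching one contract storage cell under that scheme (the helper name and arguments are illustrative, and it must run inside externalities):

    use frame_support::storage::child::{self, ChildInfo};
    use sp_io::hashing::blake2_256;

    fn write_and_read_cell(trie_id: &[u8], location: &[u8], value: &[u8]) -> Option<Vec<u8>> {
        // The trie id itself is the unprefixed child storage key.
        let info: ChildInfo = ChildInfo::new_default(trie_id);
        let hashed = blake2_256(location);
        child::put_raw(&info, &hashed, value);
        child::get_raw(&info, &hashed)
    }
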
- pub fn child_trie_unique_id(&self) -> child::ChildInfo { - trie_unique_id(&self.trie_id[..]) + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. -pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { - let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) +pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { + ChildInfo::new_default(trie_id) } pub type TombstoneContractInfo = @@ -270,10 +268,6 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. - /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. - /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -297,13 +291,7 @@ where let mut buf = Vec::new(); buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - - // TODO: see https://github.com/paritytech/substrate/issues/2325 - CHILD_STORAGE_KEY_PREFIX.iter() - .chain(b"default:") - .chain(T::Hashing::hash(&buf[..]).as_ref().iter()) - .cloned() - .collect() + T::Hashing::hash(&buf[..]).as_ref().into() } } @@ -824,13 +812,11 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ).map(|value| { child::kill( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ); @@ -842,8 +828,8 @@ impl Module { let tombstone = >::new( // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. - &child::child_root( - &origin_contract.trie_id, + &child::root( + &origin_contract.child_trie_info(), )[..], code_hash, ); @@ -851,8 +837,7 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), &value, ); diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 8b6825419c81e..1aa52fff31435 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,8 +223,7 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -235,7 +234,9 @@ fn enact_verdict( } // Note: this operation is heavy. 
- let child_storage_root = child::child_root(&alive_contract_info.trie_id); + let child_storage_root = child::root( + &alive_contract_info.child_trie_info(), + ); let tombstone = >::new( &child_storage_root[..], @@ -245,8 +246,7 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 2bcd708904b41..f7170e9172eb4 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -200,10 +200,7 @@ impl TrieIdGenerator for DummyTrieIdGenerator { *v }); - // TODO: see https://github.com/paritytech/substrate/issues/2325 let mut res = vec![]; - res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX); - res.extend_from_slice(b"default:"); res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); res diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index d4d046a9d4245..658908d258a2f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -16,100 +16,90 @@ //! Operation on runtime child storages. //! -//! This module is a currently only a variant of unhashed with additional `storage_key`. -//! Note that `storage_key` must be unique and strong (strong in the sense of being long enough to -//! avoid collision from a resistant hash function (which unique implies)). -//! -//! A **key collision free** unique id is required as parameter to avoid key collision -//! between child tries. -//! This unique id management and generation responsibility is delegated to pallet module. -// NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>). +//! This module is a currently only a variant of unhashed with additional `child_info`. +// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info +// of null length parent storage key). use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::ChildInfo; +pub use sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ).and_then(|v| { - Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { - // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); - None - }) - }) + match child_info.child_type() { + ChildType::ParentKeyId => { + let storage_key = child_info.storage_key(); + sp_io::default_child_storage::get( + storage_key, + key, + ).and_then(|v| { + Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { + // TODO #3700: error should be handleable. + runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); + None + }) + }) + }, + } } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. 
pub fn get_or_default( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { - get(storage_key, child_info, key).unwrap_or_else(Default::default) + get(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { - get(storage_key, child_info, key).unwrap_or(default_value) + get(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { - get(storage_key, child_info, key).unwrap_or_else(default_value) + get(child_info, key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. pub fn put( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &T, ) { - let (data, child_type) = child_info.info(); - value.using_encoded(|slice| - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - slice, - ) - ); + match child_info.child_type() { + ChildType::ParentKeyId => value.using_encoded(|slice| + sp_io::default_child_storage::set( + child_info.storage_key(), + key, + slice, + ) + ), + } } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { - let r = get(storage_key, child_info, key); + let r = get(child_info, key); if r.is_some() { - kill(storage_key, child_info, key); + kill(child_info, key); } r } @@ -117,113 +107,106 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { - take(storage_key, child_info, key).unwrap_or_else(Default::default) + take(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { - take(storage_key, child_info, key).unwrap_or(default_value) + take(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { - take(storage_key, child_info, key).unwrap_or_else(default_value) + take(child_info, key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. 
pub fn exists( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { - let (data, child_type) = child_info.info(); - sp_io::storage::child_read( - storage_key, data, child_type, - key, &mut [0;0][..], 0, - ).is_some() + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::read( + child_info.storage_key(), + key, &mut [0;0][..], 0, + ).is_some(), + } } /// Remove all `storage_key` key/values pub fn kill_storage( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_storage_kill( - storage_key, - data, - child_type, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( + child_info.storage_key(), + ), + } } /// Ensure `key` has no explicit entry in storage. pub fn kill( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_clear( - storage_key, - data, - child_type, - key, - ); + match child_info.child_type() { + ChildType::ParentKeyId => { + sp_io::default_child_storage::clear( + child_info.storage_key(), + key, + ); + }, + } } /// Get a Vec of bytes from storage. pub fn get_raw( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::get( + child_info.storage_key(), + key, + ), + } } /// Put a raw byte slice into storage. pub fn put_raw( - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - value, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::set( + child_info.storage_key(), + key, + value, + ), + } } /// Calculate current child root value. 
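With the rewrite above, every helper in `frame/support/src/storage/child.rs` is keyed by a `&ChildInfo` alone and dispatches on `child_type()` internally. A small sketch of the typed API, assuming it runs inside externalities; the child name and the `counter` key are made up:

    use frame_support::storage::child::{self, ChildInfo};

    fn bump_counter(info: &ChildInfo) -> u32 {
        let next = child::get_or_default::<u32>(info, b"counter") + 1;
        child::put(info, b"counter", &next);
        next
    }

    fn reset_counter(info: &ChildInfo) {
        // Removes one key; `child::kill_storage(info)` would drop the whole child trie.
        child::kill(info, b"counter");
    }
    // Callers construct the info once, e.g. `ChildInfo::new_default(b"my-child")`.
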
-pub fn child_root( - storage_key: &[u8], +pub fn root( + child_info: &ChildInfo, ) -> Vec { - sp_io::storage::child_root( - storage_key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::root( + child_info.storage_key(), + ), + } } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 00b110ffb90e1..779460ce31338 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -301,7 +301,7 @@ fn new_test_ext() -> sp_io::TestExternalities { fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children: std::collections::HashMap::new() + children_default: std::collections::HashMap::new() }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 50f2b089f2074..d1bc3c1a51eab 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -930,7 +930,7 @@ impl Module { >::hashed_key().to_vec() => T::BlockNumber::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], - children: map![], + children_default: map![], }) } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 2c0f50cd74312..96f17579a29f5 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -24,7 +24,7 @@ use std::any::{Any, TypeId}; -use sp_storage::{ChildStorageKey, ChildInfo}; +use sp_storage::ChildInfo; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -62,8 +62,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -72,8 +71,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -85,12 +83,11 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). fn set_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Vec, ) { - self.place_child_storage(storage_key, child_info, key, Some(value)) + self.place_child_storage(child_info, key, Some(value)) } /// Clear a storage entry (`key`) of current contract being called (effective immediately). @@ -101,11 +98,10 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { - self.place_child_storage(storage_key, child_info, key.to_vec(), None) + self.place_child_storage(child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -116,11 +112,10 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. 
fn exists_child_storage( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { - self.child_storage(storage_key, child_info, key).is_some() + self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. @@ -129,13 +124,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + fn kill_child_storage(&mut self, child_info: &ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -143,8 +137,7 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ); @@ -154,8 +147,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. fn place_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Option>, ); @@ -178,7 +170,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + child_info: &ChildInfo, ) -> Vec; /// Get the changes trie root of the current storage overlay at a block with given `parent`. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ee146dbc29224..b5827202333e2 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt, TaskExecutorExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + storage::ChildInfo, }; use sp_core::{ @@ -74,19 +74,6 @@ pub enum EcdsaVerifyError { BadSignature, } -/// Returns a `ChildStorageKey` if the given `storage_key` slice is a valid storage -/// key or panics otherwise. -/// -/// Panicking here is aligned with what the `without_std` environment would do -/// in the case of an invalid child storage key. -#[cfg(feature = "std")] -fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { - match ChildStorageKey::from_slice(storage_key) { - Some(storage_key) => storage_key, - None => panic!("child storage key is invalid"), - } -} - /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -95,30 +82,6 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. - /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. 
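The `Externalities` trait above follows the same pattern: the separate `storage_key` parameter is gone and every child operation takes a `&ChildInfo`. A sketch of a caller holding any externalities implementation, mirroring the updated `BasicExternalities` tests later in this diff:

    use sp_core::{storage::ChildInfo, traits::Externalities};

    fn touch_child(ext: &mut dyn Externalities) {
        let child_info = ChildInfo::new_default(b"example");
        ext.set_child_storage(&child_info, b"dog".to_vec(), b"puppy".to_vec());
        assert_eq!(ext.child_storage(&child_info, b"dog"), Some(b"puppy".to_vec()));
        ext.kill_child_storage(&child_info);
        assert_eq!(ext.child_storage(&child_info, b"dog"), None);
    }
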
- fn child_get( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) - } - /// Get `key` from storage, placing the value into `value_out` and return the number of /// bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -134,26 +97,87 @@ pub trait Storage { }) } + /// Set `key` to `value` in the storage. + fn set(&mut self, key: &[u8], value: &[u8]) { + self.set_storage(key.to_vec(), value.to_vec()); + } + + /// Clear the storage of the given `key` and its value. + fn clear(&mut self, key: &[u8]) { + self.clear_storage(key) + } + + /// Check whether the given `key` exists in storage. + fn exists(&self, key: &[u8]) -> bool { + self.exists_storage(key) + } + + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix(&mut self, prefix: &[u8]) { + Externalities::clear_prefix(*self, prefix) + } + + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root() + } + + /// "Commit" all existing operations and get the resulting storage change root. + /// `parent_hash` is a SCALE encoded hash. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns an `Some(_)` which holds the SCALE encoded hash or `None` when + /// changes trie is disabled. + fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { + self.storage_changes_root(parent_hash) + .expect("Invalid `parent_hash` given to `changes_root`.") + } + + /// Get the next key in storage after the given one in lexicographic order. + fn next_key(&mut self, key: &[u8]) -> Option> { + self.next_storage_key(&key) + } +} + +/// Interface for accessing the child storage for default child trie, +/// from within the runtime. +#[runtime_interface] +pub trait DefaultChildStorage { + + /// Get a default child storage value for a given key. + /// + /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + /// Result is `None` if the value for `key` in the child storage can not be found. + fn get( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Option> { + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key).map(|s| s.to_vec()) + } + + /// Allocation efficient variant of `get`. + /// /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. /// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - /// - /// See `child_get` for common child api parameters. 
- fn child_read( + fn read( &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -163,159 +187,91 @@ pub trait Storage { }) } - /// Set `key` to `value` in the storage. - fn set(&mut self, key: &[u8], value: &[u8]) { - self.set_storage(key.to_vec(), value.to_vec()); - } - - /// Set `key` to `value` in the child storage denoted by `child_storage_key`. + /// Set a child storage value. /// - /// See `child_get` for common child api parameters. - fn child_set( + /// Set `key` to `value` in the child storage denoted by `storage_key`. + fn set( &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], key: &[u8], value: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::new_default(storage_key); + self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) - } - - /// Clear the given child storage of the given `key` and its value. + /// Clear a child storage key. /// - /// See `child_get` for common child api parameters. - fn child_clear( + /// For the default child storage at `storage_key`, clear value at `key`. + fn clear ( &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], key: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_storage(&child_info, key); } /// Clear an entire child storage. /// - /// See `child_get` for common child api parameters. - fn child_storage_kill( + /// If it exists, the child storage for `storage_key` + /// is removed. + fn storage_kill( &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); - } - - /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { - self.exists_storage(key) + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info); } - /// Check whether the given `key` exists in storage. + /// Check a child storage key. /// - /// See `child_get` for common child api parameters. - fn child_exists( + /// Check whether the given `key` exists in default child defined at `storage_key`. 
+ fn exists( &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], key: &[u8], ) -> bool { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) - } - - /// Clear the storage of each key-value pair where the key starts with the given `prefix`. - fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) + let child_info = ChildInfo::new_default(storage_key); + self.exists_child_storage(&child_info, key) } - /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + /// Clear child default key by prefix. /// - /// See `child_get` for common child api parameters. - fn child_clear_prefix( + /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix( &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], prefix: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_prefix(&child_info, prefix); } - /// "Commit" all existing operations and compute the resulting storage root. + /// Default child root calculation. /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns the SCALE encoded hash. - fn root(&mut self) -> Vec { - self.storage_root() - } - /// "Commit" all existing operations and compute the resulting child storage root. - /// /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. - /// - /// See `child_get` for common child api parameters. - fn child_root( + fn root( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], ) -> Vec { - let storage_key = child_storage_key_or_panic(child_storage_key); - self.child_storage_root(storage_key) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info) } - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. + /// Child storage key iteration. /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns an `Some(_)` which holds the SCALE encoded hash or `None` when - /// changes trie is disabled. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") - } - - /// Get the next key in storage after the given one in lexicographic order. - fn next_key(&mut self, key: &[u8]) -> Option> { - self.next_storage_key(&key) - } - /// Get the next key in storage after the given one in lexicographic order in child storage. 
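Each `DefaultChildStorage` function above takes the unprefixed `storage_key` and builds the `ChildInfo` internally, so callers never handle the `:child_storage:default:` prefix themselves. A rough std-side sketch of exercising the interface directly (it is normally reached through `frame_support::storage::child`); `TestExternalities::default()` is assumed to provide the backing state:

    fn default_child_storage_smoke() {
        let mut ext = sp_io::TestExternalities::default();
        ext.execute_with(|| {
            sp_io::default_child_storage::set(b"child", b"key", b"value");
            assert_eq!(
                sp_io::default_child_storage::get(b"child", b"key"),
                Some(b"value".to_vec()),
            );
            let _root = sp_io::default_child_storage::root(b"child");
            sp_io::default_child_storage::storage_kill(b"child");
            assert!(sp_io::default_child_storage::get(b"child", b"key").is_none());
        });
    }
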
- fn child_next_key( + fn next_key( &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, + storage_key: &[u8], key: &[u8], ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.next_child_storage_key(&child_info, key) } } @@ -1019,6 +975,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities b"bar".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -1063,7 +1020,7 @@ mod tests { fn read_storage_works() { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -1085,7 +1042,7 @@ mod tests { b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], - children: map![], + children_default: map![], }); t.execute_with(|| { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e69f892626bca..0f609b0050018 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,15 +136,15 @@ impl BuildStorage for sp_core::storage::Storage { storage: &mut sp_core::storage::Storage, )-> Result<(), String> { storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.children.iter() { + for (k, other_map) in self.children_default.iter() { let k = k.clone(); - if let Some(map) = storage.children.get_mut(&k) { + if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { + if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { - storage.children.insert(k, other_map.clone()); + storage.children_default.insert(k, other_map.clone()); } } Ok(()) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 94144fdb90fda..df8f810ceb7ce 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,7 +20,7 @@ use log::warn; use hash_db::Hasher; use codec::{Decode, Encode}; -use sp_core::{traits::RuntimeCode, storage::{ChildInfo, OwnedChildInfo, well_known_keys}}; +use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ @@ -54,19 +54,17 @@ pub trait Backend: std::fmt::Debug { /// Get keyed child storage or None if there is nothing associated. fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) + self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. @@ -77,11 +75,10 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. 
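The state-machine `Backend` trait above adopts the same keying, so a child root is computed from a `&ChildInfo` plus a delta of key/value changes. A minimal sketch with the in-memory backend (the child name and delta are illustrative):

    use sp_core::{storage::ChildInfo, Blake2Hasher};
    use sp_state_machine::{Backend, InMemoryBackend};

    fn example_child_root() {
        let backend = InMemoryBackend::<Blake2Hasher>::default();
        let child_info = ChildInfo::new_default(b"example");
        let delta = vec![(b"key".to_vec(), Some(b"value".to_vec()))];
        let (child_root, is_default, _tx) = backend.child_storage_root(&child_info, delta);
        // A non-empty delta produces a root different from the empty child root.
        assert!(!is_default);
        let _ = child_root;
    }
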
fn exists_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage(storage_key, child_info, key)?.is_some()) + Ok(self.child_storage(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -90,16 +87,14 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ); @@ -118,8 +113,7 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ); @@ -137,8 +131,7 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -158,12 +151,11 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); - self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); + self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec())); all } @@ -183,20 +175,21 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (storage_key, child_delta, child_info) in child_deltas { + for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + self.child_storage_root(&child_info, child_delta); + let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((storage_key, None)); + child_roots.push((prefixed_storage_key.into_inner(), None)); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( @@ -239,20 +232,18 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).child_storage(storage_key, child_info, key) + (*self).child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - (*self).for_keys_in_child_storage(storage_key, child_info, f) + (*self).for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -261,11 +252,10 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: 
&ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).next_child_storage_key(storage_key, child_info, key) + (*self).next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -274,12 +264,11 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) + (*self).for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -292,15 +281,14 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator)>, H::Out: Ord, { - (*self).child_storage_root(storage_key, child_info, delta) + (*self).child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -331,7 +319,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index b49913418a4a9..7f26085958e97 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -21,11 +21,11 @@ use std::{ }; use crate::{Backend, InMemoryBackend, StorageKey, StorageValue}; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, default_child_trie_root}; +use sp_trie::{TrieConfiguration, empty_child_trie_root}; use sp_trie::trie_types::Layout; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildStorageKey, Storage, + well_known_keys::is_child_storage_key, Storage, ChildInfo, StorageChild, }, traits::Externalities, Blake2Hasher, @@ -83,7 +83,7 @@ impl BasicExternalities { let mut ext = Self { inner: Storage { top: std::mem::replace(&mut storage.top, Default::default()), - children: std::mem::replace(&mut storage.children, Default::default()), + children_default: std::mem::replace(&mut storage.children_default, Default::default()), }, extensions: Default::default(), }; @@ -111,7 +111,7 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { self.inner.top.eq(&other.inner.top) - && self.inner.children.eq(&other.inner.children) + && self.inner.children_default.eq(&other.inner.children_default) } } @@ -132,7 +132,7 @@ impl From> for BasicExternalities { BasicExternalities { inner: Storage { top: hashmap, - children: Default::default(), + children_default: Default::default(), }, extensions: Default::default(), } @@ -150,20 +150,19 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { - self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() + self.inner.children_default.get(child_info.storage_key()) + .and_then(|child| child.data.get(key)).cloned() } fn child_storage_hash( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { - self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) + self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn 
next_storage_key(&self, key: &[u8]) -> Option { @@ -173,12 +172,11 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key.as_ref()) + self.inner.children_default.get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } @@ -196,12 +194,11 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { - let child_map = self.inner.children.entry(storage_key.into_owned()) + let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -215,10 +212,9 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, + child_info: &ChildInfo, ) { - self.inner.children.remove(storage_key.as_ref()); + self.inner.children_default.remove(child_info.storage_key()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -243,11 +239,10 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { - if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { + if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) @@ -264,20 +259,19 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); + let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { + (v.child_info.prefixed_storage_key(), v.child_info.clone()) + }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = default_child_trie_root::>(&[]); - for storage_key in keys { - let child_root = self.child_storage_root( - ChildStorageKey::from_slice(storage_key.as_slice()) - .expect("Map only feed by valid keys; qed"), - ); + let empty_hash = empty_child_trie_root::>(); + for (prefixed_storage_key, child_info) in prefixed_keys { + let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] 
{ - top.remove(storage_key.as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(storage_key, child_root); + top.insert(prefixed_storage_key.into_inner(), child_root); } } @@ -286,15 +280,15 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + child_info: &ChildInfo, ) -> Vec { - if let Some(child) = self.inner.children.get(storage_key.as_ref()) { + if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 + .child_storage_root(&child.child_info, delta).0 } else { - default_child_trie_root::>(storage_key.as_ref()) + empty_child_trie_root::>() }.encode() } @@ -336,8 +330,6 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -361,30 +353,28 @@ mod tests { #[test] fn children_works() { - let child_storage = b":child_storage:default:test".to_vec(); - + let child_info = ChildInfo::new_default(b"storage_key"); + let child_info = &child_info; let mut ext = BasicExternalities::new(Storage { top: Default::default(), - children: map![ - child_storage.clone() => StorageChild { - data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: CHILD_INFO_1.to_owned(), + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], + child_info: child_info.to_owned(), } ] }); - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child_info, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child_info, b"dog"); + assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child_info); + assert_eq!(ext.child_storage(child_info, b"doe"), None); } #[test] @@ -392,6 +382,6 @@ mod tests { // Make sure no values are set by default in `BasicExternalities`. 
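// Editor's sketch (not part of the diff): how the reworked `BasicExternalities`
// is driven after this change. Hedged: assumes `BasicExternalities` and the
// `Externalities` trait are importable from `sp-state-machine`/`sp-core` as at
// this revision; the child-trie name `b"storage_key"` is illustrative only.
use sp_core::storage::{ChildInfo, Storage, StorageChild};
use sp_core::traits::Externalities;
use sp_state_machine::BasicExternalities;

fn basic_externalities_child_sketch() {
    // A default child trie is identified by a `ChildInfo` built from the
    // unprefixed storage key; the ":child_storage:default:" prefix is derived
    // on demand via `prefixed_storage_key()`.
    let child_info = ChildInfo::new_default(b"storage_key");

    let mut ext = BasicExternalities::new(Storage {
        top: Default::default(),
        // `children_default` is keyed by the unprefixed storage key.
        children_default: vec![(
            child_info.storage_key().to_vec(),
            StorageChild {
                data: vec![(b"doe".to_vec(), b"reindeer".to_vec())].into_iter().collect(),
                child_info: child_info.to_owned(),
            },
        )].into_iter().collect(),
    });

    // Child accessors now take `&ChildInfo` instead of a
    // (`storage_key`, `child_info`) pair.
    assert_eq!(ext.child_storage(&child_info, b"doe"), Some(b"reindeer".to_vec()));
    ext.set_child_storage(&child_info, b"dog".to_vec(), b"puppy".to_vec());
    ext.clear_child_storage(&child_info, b"dog");
    ext.kill_child_storage(&child_info);
}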
let storage = BasicExternalities::new_empty().into_storages(); assert!(storage.top.is_empty()); - assert!(storage.children.is_empty()); + assert!(storage.children_default.is_empty()); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 39ad81ed59a0e..45535204e0884 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -32,6 +32,7 @@ use crate::{ input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, }, }; +use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey}; /// Prepare input pairs for building a changes trie of given block. /// @@ -105,19 +106,19 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_keys = BTreeSet::::new(); + let mut children_info = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (storage_key, _) in changes.prospective.children.iter() - .chain(changes.committed.children.iter()) { - children_keys.insert(storage_key.clone()); + for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() + .chain(changes.committed.children_default.iter()) { + children_info.insert(child_info.clone()); } - for storage_key in children_keys { + for child_info in children_info { let child_index = ChildIndex:: { block: block.clone(), - storage_key: storage_key.clone(), + storage_key: child_info.prefixed_storage_key(), }; - let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?; + let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(child_info))?; children_result.insert(child_index, iter); } @@ -130,22 +131,22 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( backend: &'a B, block: &Number, changes: &'a OverlayedChanges, - storage_key: Option, + child_info: Option, ) -> Result> + 'a, String> where B: Backend, H: Hasher, Number: BlockNumber, { - let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); - ( - changes.committed.children.get(sk).map(|c| &c.0), - changes.prospective.children.get(sk).map(|c| &c.0), - child_info, - ) + let (committed, prospective) = if let Some(child_info) = child_info.as_ref() { + match child_info.child_type() { + ChildType::ParentKeyId => ( + changes.committed.children_default.get(child_info.storage_key()).map(|c| &c.0), + changes.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0), + ), + } } else { - (Some(&changes.committed.top), Some(&changes.prospective.top), None) + (Some(&changes.committed.top), Some(&changes.prospective.top)) }; committed.iter().flat_map(|c| c.iter()) .chain(prospective.iter().flat_map(|c| c.iter())) @@ -155,13 +156,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Entry::Vacant(entry) => { // ignore temporary values (values that have null value at the end of operation // AND are not in storage at the beginning of operation - if let Some(sk) = storage_key.as_ref() { - if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) - .map_err(|e| format!("{}", e))? { - return Ok(map); - } + if let Some(child_info) = child_info.as_ref() { + if !changes.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_child_storage(&child_info, k) + .map_err(|e| format!("{}", e))? 
{ + return Ok(map); } } } else { @@ -281,7 +280,7 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } - let mut children_roots = BTreeMap::::new(); + let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), @@ -344,22 +343,20 @@ mod test { use codec::Encode; use sp_core::Blake2Hasher; use sp_core::storage::well_known_keys::EXTRINSIC_INDEX; - use sp_core::storage::ChildInfo; use crate::InMemoryBackend; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - fn prepare_for_build(zero: u64) -> ( InMemoryBackend, InMemoryStorage, OverlayedChanges, Configuration, ) { + let child_info_1 = ChildInfo::new_default(b"storage_key1"); + let child_info_2 = ChildInfo::new_default(b"storage_key2"); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -368,8 +365,9 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = b"1".to_vec(); - let child_trie_key2 = b"2".to_vec(); + let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); + let child_trie_key1 = child_info_1.storage_key().to_vec(); + let child_trie_key2 = child_info_2.storage_key().to_vec(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -403,7 +401,7 @@ mod test { ]), (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], vec![(child_trie_key1.clone(), vec![ + ], vec![(prefixed_child_trie_key1.clone(), vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), @@ -430,19 +428,19 @@ mod test { extrinsics: Some(vec![0, 1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1.clone(), (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), + ].into_iter().collect(), child_info_2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -459,13 +457,13 @@ mod test { extrinsics: Some(vec![1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1, (vec![ (vec![100], OverlayedValue { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, @@ -487,6 +485,8 @@ mod test { #[test] fn 
build_changes_trie_nodes_on_non_digest_block() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; let changes_trie_nodes = prepare_input( @@ -503,11 +503,11 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), ]), - (ChildIndex { block: zero + 5, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), ]), @@ -523,6 +523,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; let changes_trie_nodes = prepare_input( @@ -544,7 +546,7 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -553,7 +555,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -568,6 +570,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; let changes_trie_nodes = prepare_input( @@ -590,13 +594,13 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), ]), - (ChildIndex { block: zero + 16, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), ]), @@ 
-657,6 +661,8 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, mut changes, config) = prepare_for_build(zero); // 110: missing from backend, set to None in overlay @@ -685,7 +691,7 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -694,7 +700,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -709,6 +715,8 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; @@ -728,8 +736,8 @@ mod test { let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) .set_digest_input_blocks(vec![1, 2, 3]) .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) + .insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) + .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) .complete(4, &trie_root4); storage.cache_mut().perform(cached_data4); @@ -755,7 +763,10 @@ mod test { .map(|(k, i)| (k, i.collect::>())) .collect::>(); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"1".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { + block: 16u64, + storage_key: child_trie_key1.clone(), + }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), @@ -764,7 +775,7 @@ mod test { ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"2".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index 9d0dbb4c1f310..aebebf3a17f59 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -19,6 +19,7 @@ use std::collections::{HashMap, HashSet}; use crate::StorageKey; +use sp_core::storage::PrefixedStorageKey; /// Changes trie build cache. 
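// Editor's sketch of the two key forms the changes-trie code now juggles:
// `children_default` maps stay keyed by the unprefixed storage key, while
// `ChildIndex::storage_key`, the build cache below, and top-trie entries use
// the prefixed form. Hedged: assumes the prefixed key is the default
// child-storage prefix followed by the unprefixed key.
use sp_core::storage::{ChildInfo, PrefixedStorageKey};

fn child_key_forms_sketch() {
    let child_info = ChildInfo::new_default(b"storage_key1");

    // Unprefixed form: what overlays and in-memory backends index by.
    let unprefixed: Vec<u8> = child_info.storage_key().to_vec();

    // Prefixed form: the full top-trie key, wrapped in the new
    // `PrefixedStorageKey` newtype.
    let prefixed: PrefixedStorageKey = child_info.prefixed_storage_key();

    // `into_inner()` unwraps it where a plain Vec<u8> is required, e.g. when
    // writing a child root into the top trie.
    let top_key: Vec<u8> = prefixed.into_inner();
    assert!(top_key.ends_with(&unprefixed));
}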
/// @@ -38,7 +39,7 @@ pub struct BuildCache { /// The `Option>` in inner `HashMap` stands for the child storage key. /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. - changed_keys: HashMap, HashSet>>, + changed_keys: HashMap, HashSet>>, } /// The action to perform when block-with-changes-trie is imported. @@ -56,7 +57,7 @@ pub struct CachedBuildData { block: N, trie_root: H, digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } /// The action to perform when block-with-changes-trie is imported. @@ -72,7 +73,7 @@ pub(crate) enum IncompleteCacheAction { #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } impl BuildCache @@ -89,7 +90,7 @@ impl BuildCache } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -98,7 +99,7 @@ impl BuildCache pub fn with_changed_keys( &self, root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { match self.changed_keys.get(&root) { Some(changed_keys) => { @@ -164,7 +165,7 @@ impl IncompleteCacheAction { /// Insert changed keys of given storage into cached data. pub(crate) fn insert( self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { match self { @@ -200,7 +201,7 @@ impl IncompleteCachedBuildData { fn insert( mut self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { self.changed_keys.insert(storage_key, changed_keys); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 685786218c75f..f5a936069ba40 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -22,6 +22,7 @@ use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; use hash_db::Hasher; use num_traits::Zero; +use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; @@ -40,7 +41,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &'a AnchorBlockId, max: Number, - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], ) -> Result, String> { // we can't query any roots before root @@ -79,7 +80,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8], ) -> Result>, String> where H::Out: Codec { // we can't query any roots before root @@ -127,7 +128,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { key_changes_proof_check_with_db( @@ -150,7 +151,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( 
begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { // we can't query any roots before root @@ -188,7 +189,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number> Number: BlockNumber, H::Out: 'a, { - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], roots_storage: &'a dyn RootsStorage, storage: &'a dyn Storage, @@ -238,7 +239,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> let trie_root = if let Some(storage_key) = self.storage_key { let child_key = ChildIndex { block: block.clone(), - storage_key: storage_key.to_vec(), + storage_key: storage_key.clone(), }.encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? .and_then(|v| >::decode(&mut &v[..]).ok()) @@ -382,6 +383,11 @@ mod tests { use sp_runtime::traits::BlakeTwo256; use super::*; + fn child_key() -> PrefixedStorageKey { + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + child_info.prefixed_storage_key() + } + fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; let backend = InMemoryStorage::with_inputs(vec![ @@ -418,7 +424,7 @@ mod tests { (16, vec![ InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), ]), - ], vec![(b"1".to_vec(), vec![ + ], vec![(child_key(), vec![ (1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), ]), @@ -535,7 +541,7 @@ mod tests { 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, - Some(&b"1"[..]), + Some(&child_key()), &[42], ).and_then(|i| i.collect::, _>>()).is_err()); } @@ -577,7 +583,7 @@ mod tests { let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof_child = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap(); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap(); // happens on local light node: @@ -592,7 +598,7 @@ mod tests { local_storage.clear_storage(); let local_result_child = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 4a1420f8486f9..4f0f3da40c52b 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -21,6 +21,7 @@ use crate::{ StorageKey, StorageValue, changes_trie::BlockNumber }; +use sp_core::storage::PrefixedStorageKey; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] @@ -49,7 +50,7 @@ pub struct ChildIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub storage_key: StorageKey, + pub storage_key: PrefixedStorageKey, } /// Value of { changed key => block/digest block numbers } mapping. 
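// Editor's sketch of the caller-side change for the `key_changes*` family
// above: where a raw child storage key such as `Some(&b"1"[..])` used to be
// passed, callers now derive a `PrefixedStorageKey` from the `ChildInfo`,
// matching the new `Option<&PrefixedStorageKey>` parameter. Hedged: the helper
// name below is invented for illustration.
use sp_core::storage::{ChildInfo, PrefixedStorageKey};

fn child_changes_storage_key(child_info: Option<&ChildInfo>) -> Option<PrefixedStorageKey> {
    child_info.map(|info| info.prefixed_storage_key())
}

fn changes_query_sketch() {
    let child_info = ChildInfo::new_default(b"1");
    let storage_key = child_changes_storage_key(Some(&child_info));

    // `storage_key.as_ref()` is then what `key_changes(config, storage, begin,
    // end, max, storage_key, key)` expects for its child-trie argument.
    let _param: Option<&PrefixedStorageKey> = storage_key.as_ref();
}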
@@ -178,7 +179,7 @@ impl Decode for InputKey { })), 3 => Ok(InputKey::ChildIndex(ChildIndex { block: Decode::decode(input)?, - storage_key: Decode::decode(input)?, + storage_key: PrefixedStorageKey::new(Decode::decode(input)?), })), _ => Err("Invalid input key variant".into()), } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d614992df3033..ee6c6778e0aad 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; +use sp_core::storage::PrefixedStorageKey; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -156,7 +157,7 @@ pub trait Storage: RootsStorage { fn with_cached_changed_keys( &self, root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 87923dc2f593c..05555df305b7c 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -137,7 +137,8 @@ mod tests { #[test] fn prune_works() { fn prepare_storage() -> InMemoryStorage { - let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode(); + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode(); let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::( &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb418672872b..81651dd2e719b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -18,6 +18,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use sp_core::storage::PrefixedStorageKey; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -96,7 +97,7 @@ impl InMemoryStorage { #[cfg(test)] pub fn with_inputs( mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, + children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec>)>)>, ) -> Self { let mut mdb = MemoryDB::default(); let mut roots = BTreeMap::new(); @@ -182,7 +183,7 @@ impl Storage for InMemoryStorage, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { self.cache.with_changed_keys(root, functor) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3a6b5442906d2..399bfc69d864f 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,10 +24,10 @@ use crate::{ use hash_db::Hasher; use sp_core::{ - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, + storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; -use sp_trie::{trie_types::Layout, default_child_trie_root}; +use sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::{Extensions, Extension}; use codec::{Decode, Encode}; @@ -181,22 +181,21 @@ where fn 
child_storage( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| - self.backend.child_storage(storage_key.as_ref(), child_info, key) + self.backend.child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result.as_ref().map(HexDisplay::from) ); @@ -206,22 +205,21 @@ where fn child_storage_hash( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| - self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) + self.backend.child_storage_hash(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -247,22 +245,21 @@ where fn exists_child_storage( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.child_storage(storage_key.as_ref(), key) { + let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), _ => self.backend - .exists_child_storage(storage_key.as_ref(), child_info, key) + .exists_child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -286,15 +283,14 @@ where fn next_child_storage_key( &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend - .next_child_storage_key(storage_key.as_ref(), child_info, key) + .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); let next_overlay_key_change = self.overlay.next_child_storage_key_change( - storage_key.as_ref(), + child_info.storage_key(), key ); @@ -305,7 +301,6 @@ where Some(overlay_key.0.to_vec()) } else { self.next_child_storage_key( - storage_key, child_info, &overlay_key.0[..], ) @@ -331,38 +326,36 @@ where fn place_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value); + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + 
child_info: &ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_storage(storage_key.as_ref(), child_info); - self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_storage(child_info); + self.backend.for_keys_in_child_storage(child_info, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -386,21 +379,20 @@ where fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&prefix), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix); - self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_prefix(child_info, prefix); + self.backend.for_child_keys_with_prefix(child_info, prefix, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -425,37 +417,38 @@ where fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + child_info: &ChildInfo, ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); + let storage_key = child_info.storage_key(); + let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { let root = self - .storage(storage_key.as_ref()) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&storage_key), HexDisplay::from(&root.as_ref()), ); root.encode() } else { - let storage_key = storage_key.as_ref(); - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { let (root, is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) + let delta = self.overlay.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( - self.overlay.prospective.children.get(storage_key) + self.overlay.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + self.backend.child_storage_root(&child_info, delta) }; let root = root.encode(); @@ -465,9 +458,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. 
if is_empty { - self.overlay.set_storage(storage_key.into(), None); + self.overlay.set_storage(prefixed_storage_key.into_inner(), None); } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", @@ -479,10 +472,10 @@ where } else { // empty overlay let root = self - .storage(storage_key.as_ref()) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, @@ -591,11 +584,6 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { prospective: vec![ @@ -680,7 +668,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children: map![] + children_default: map![] }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -707,26 +695,23 @@ mod tests { #[test] fn next_child_storage_key_works() { - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ - child().as_ref().to_vec() => StorageChild { + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![40] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); @@ -735,65 +720,65 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, 
vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ - child().as_ref().to_vec() => StorageChild { + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![30] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + ext.child_storage_hash(child_info, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); + assert_eq!(ext.child_storage(child_info, &[20]), None); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), + ext.child_storage_hash(child_info, &[20]), None, ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); + assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + ext.child_storage_hash(child_info, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); - } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ecd4532cf2640..83126abbf78e0 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -25,10 +25,10 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; +use sp_core::storage::{ChildInfo, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -48,7 +48,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. 
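// Editor's sketch of the `empty_child_trie_root` change used by `Ext` and
// `BasicExternalities` above: the "default" child root no longer depends on a
// per-trie keyspace, so a single key-independent empty root is compared
// against. Hedged: the in-memory backend is used purely for illustration.
use sp_core::{storage::ChildInfo, Blake2Hasher};
use sp_state_machine::{Backend, InMemoryBackend};
use sp_trie::{empty_child_trie_root, trie_types::Layout};

fn empty_child_root_sketch() {
    let child_info = ChildInfo::new_default(b"any_key");
    let backend = InMemoryBackend::<Blake2Hasher>::default();

    // Root of a child trie with no content.
    let (root, is_default, _tx) = backend.child_storage_root(
        &child_info,
        std::iter::empty::<(Vec<u8>, Option<Vec<u8>>)>(),
    );

    // It equals the key-independent empty root, and the backend flags it as
    // default so callers can drop the child-root entry from the top trie.
    assert_eq!(root, empty_child_trie_root::<Layout<Blake2Hasher>>());
    assert!(is_default);
}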
trie: Option, H>>, _hasher: PhantomData, @@ -89,7 +89,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -108,10 +108,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -122,8 +122,8 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); + let mut inner: HashMap, BTreeMap> + = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { inner, @@ -145,12 +145,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, + inner: Vec<(Option, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -165,18 +165,16 @@ impl From, StorageCollectio } impl InMemory { - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) - ) + /// Child storage infos iterator. + pub fn child_storage_infos(&self) -> impl Iterator { + self.inner.iter().filter_map(|item| item.0.as_ref()) } } impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -187,11 +185,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + Ok(self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.get(key).map(Clone::clone))) } @@ -211,22 +208,20 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, mut f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().for_each(|k| f(&k))); } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } @@ -253,16 +248,15 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { - let storage_key = storage_key.to_vec(); - let child_info = Some((storage_key.clone(), child_info.to_owned())); + let child_type = child_info.child_type(); + let child_info = Some(child_info.to_owned()); let existing_pairs = 
self.inner.get(&child_info) .into_iter() @@ -270,7 +264,6 @@ impl Backend for InMemory where H::Out: Codec { let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::, _, _, _>( - &storage_key, existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() @@ -279,7 +272,9 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - let is_default = root == default_child_trie_root::>(&storage_key); + let is_default = match child_type { + ChildType::ParentKeyId => root == empty_child_trie_root::>(), + }; (root, is_default, vec![(child_info, full_transaction)]) } @@ -294,12 +289,11 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + let next_key = self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) @@ -321,11 +315,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() @@ -336,11 +329,10 @@ impl Backend for InMemory where H::Out: Codec { let mut new_child_roots = Vec::new(); let mut root_map = None; for (child_info, map) in &self.inner { - if let Some((storage_key, _child_info)) = child_info.as_ref() { - // no need to use child_info at this point because we use a MemoryDB for - // proof (with PrefixedMemoryDB it would be needed). + if let Some(child_info) = child_info.as_ref() { + let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((storage_key.clone(), ch.as_ref().into())); + new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into())); } else { root_map = Some(map); } @@ -379,16 +371,18 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let child_info = ChildInfo::new_default(b"1"); + let child_info = &child_info; let mut storage = storage.update( vec![( - Some((b"1".to_vec(), child_info.clone())), + Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); - assert!(trie_backend.storage(b"1").unwrap().is_some()); + let storage_key = child_info.prefixed_storage_key(); + assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 9a2dc52cca20a..1c0007c5f9108 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -606,8 +606,7 @@ where /// Generate child storage read proof. 
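// Editor's sketch of the reworked child-delta plumbing on the `Backend` trait
// (the provided root-building method edited near the top of this diff,
// `full_storage_root` in upstream Substrate): child deltas are `(ChildInfo,
// delta)` pairs and the prefixed top-trie key is derived internally. Hedged:
// the method name and argument order are taken from upstream, not shown
// verbatim in the hunk above.
use sp_core::{storage::ChildInfo, Blake2Hasher};
use sp_state_machine::{Backend, InMemoryBackend};

fn full_storage_root_sketch() {
    let child_info = ChildInfo::new_default(b"1");
    let backend = InMemoryBackend::<Blake2Hasher>::default();

    let top_delta = vec![(b"top_key".to_vec(), Some(b"top_value".to_vec()))];
    let child_delta = vec![(b"2".to_vec(), Some(b"3".to_vec()))];

    // No more `(storage_key, delta, child_info)` triples: the storage key is
    // read off the `ChildInfo`, and the child root lands in the top trie under
    // `child_info.prefixed_storage_key()`.
    let (_root, _transaction) = backend.full_storage_root(
        top_delta,
        vec![(child_info, child_delta)],
    );
}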
pub fn prove_child_read( mut backend: B, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -619,7 +618,7 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) + prove_child_read_on_trie_backend(trie_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. @@ -646,8 +645,7 @@ where /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -660,7 +658,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) + .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -691,7 +689,7 @@ where pub fn read_child_proof_check( root: H::Out, proof: StorageProof, - storage_key: &[u8], + child_info: &ChildInfo, keys: I, ) -> Result, Option>>, Box> where @@ -705,7 +703,7 @@ where for key in keys.into_iter() { let value = read_child_proof_check_on_proving_backend( &proving_backend, - storage_key, + child_info, key.as_ref(), )?; result.insert(key.as_ref().to_vec(), value); @@ -728,15 +726,14 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend, H>, - storage_key: &[u8], + child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where H: Hasher, H::Out: Ord + Codec, { - // Not a prefixed memory db, using empty unique id and include root resolution. 
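// Editor's sketch of the child read-proof round trip with the new signatures:
// both the prover and the verifier take `&ChildInfo` rather than the prefixed
// storage key. Hedged: the backend construction and values are illustrative;
// only the call shapes mirror the updated tests further down.
use sp_core::storage::{ChildInfo, Storage, StorageChild};
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::{prove_child_read, read_child_proof_check, Backend, InMemoryBackend};

fn child_proof_sketch() {
    let child_info = ChildInfo::new_default(b"sub1");

    // "Full node" state with a single child-trie entry.
    let mut remote_backend: InMemoryBackend<BlakeTwo256> = Storage {
        top: Default::default(),
        children_default: vec![(
            child_info.storage_key().to_vec(),
            StorageChild {
                data: vec![(b"value3".to_vec(), vec![142])].into_iter().collect(),
                child_info: child_info.to_owned(),
            },
        )].into_iter().collect(),
    }.into();

    // Root of the full state: child roots are folded into the top trie under
    // their prefixed storage keys when the trie backend is materialized.
    let remote_root = remote_backend.as_trie_backend().unwrap()
        .storage_root(std::iter::empty()).0;

    let proof = prove_child_read(remote_backend, &child_info, &[b"value3"]).unwrap();

    // "Light client" side: verify against the root only.
    let checked = read_child_proof_check::<BlakeTwo256, _>(
        remote_root,
        proof,
        &child_info,
        &[b"value3"],
    ).unwrap();
    assert_eq!(checked.get(&b"value3"[..]), Some(&Some(vec![142u8])));
}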
- proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + proving_backend.child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } @@ -748,7 +745,7 @@ mod tests { use super::*; use super::ext::Ext; use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{map, traits::{Externalities, RuntimeCode}, storage::ChildStorageKey}; + use sp_core::{map, traits::{Externalities, RuntimeCode}}; use sp_runtime::traits::BlakeTwo256; #[derive(Clone)] @@ -759,8 +756,6 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -1003,6 +998,8 @@ mod tests { #[test] fn set_child_storage_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1016,27 +1013,23 @@ mod tests { ); ext.set_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info, ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info, b"abc" ), None @@ -1045,6 +1038,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1071,20 +1066,19 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - b":child_storage:default:sub1", - CHILD_INFO_1, + child_info, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + child_info, &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + child_info, &[b"value2"], ).unwrap(); assert_eq!( @@ -1099,13 +1093,13 @@ mod tests { #[test] fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + + let child_info_1 = ChildInfo::new_default(b"sub_test1"); + let child_info_2 = ChildInfo::new_default(b"sub_test2"); + use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); @@ -1116,8 +1110,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + 
ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index ab50c61391bdb..f57d13ee3ffec 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,7 +29,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; use hash_db::Hasher; @@ -79,8 +79,8 @@ pub struct OverlayedValue { pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, - /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + /// Child storage changes. The map key is the child storage key without the common prefix. + pub children_default: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -174,7 +174,7 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { fn from_iter>(iter: T) -> Self { Self { top: iter.into_iter().collect(), - children: Default::default(), + children_default: Default::default(), } } } @@ -182,13 +182,13 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { impl OverlayedChangeSet { /// Whether the change set is empty. pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children.is_empty() + self.top.is_empty() && self.children_default.is_empty() } /// Clear the change set. pub fn clear(&mut self) { self.top.clear(); - self.children.clear(); + self.children_default.clear(); } } @@ -219,8 +219,8 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. - pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children.get(storage_key) { + pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); @@ -228,7 +228,7 @@ impl OverlayedChanges { } } - if let Some(map) = self.committed.children.get(storage_key) { + if let Some(map) = self.committed.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_read_modified(size_read); @@ -260,15 +260,15 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. 
pub(crate) fn set_child_storage( &mut self, - storage_key: StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, val: Option, ) { let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key) + let storage_key = child_info.storage_key().to_vec(); + let map_entry = self.prospective.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -290,11 +290,11 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let storage_key = child_info.storage_key(); + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -308,7 +308,7 @@ impl OverlayedChanges { e.value = None; }); - if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { + if let Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) { for (key, value) in committed_map.iter() { if !map_entry.0.contains_key(key) { map_entry.0.insert(key.clone(), OverlayedValue { @@ -364,12 +364,12 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let storage_key = child_info.storage_key(); + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -385,7 +385,7 @@ impl OverlayedChanges { } } - if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { + if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) { // Then do the same with keys from committed changes. // NOTE that we are making changes in the prospective change set. for key in child_committed.keys() { @@ -422,8 +422,8 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - for (storage_key, (map, child_info)) in self.prospective.children.drain() { - let child_content = self.committed.children.entry(storage_key) + for (storage_key, (map, child_info)) in self.prospective.children_default.drain() { + let child_content = self.committed.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info)); // No update to child info at this point (will be needed for deletion). for (key, val) in map.into_iter() { @@ -445,14 +445,14 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. 
fn drain_committed(&mut self) -> ( impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, + impl Iterator)>, ChildInfo))>, ) { assert!(self.prospective.is_empty()); ( std::mem::replace(&mut self.committed.top, Default::default()) .into_iter() .map(|(k, v)| (k, v.value)), - std::mem::replace(&mut self.committed.children, Default::default()) + std::mem::replace(&mut self.committed.children_default, Default::default()) .into_iter() .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), ) @@ -549,21 +549,20 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let child_storage_keys = self.prospective.children.keys() - .chain(self.committed.children.keys()); + let child_storage_keys = self.prospective.children_default.keys() + .chain(self.committed.children_default.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( - storage_key.clone(), - self.committed.children.get(storage_key) + self.default_child_info(storage_key).cloned() + .expect("child info initialized in either committed or prospective"), + self.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) .chain( - self.prospective.children.get(storage_key) + self.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), ) ); @@ -610,11 +609,11 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. - pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { + pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { + if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { return Some(&ci); } - if let Some((_, ci)) = self.committed.children.get(storage_key) { + if let Some((_, ci)) = self.committed.children_default.get(storage_key) { return Some(&ci); } None @@ -654,10 +653,10 @@ impl OverlayedChanges { ) -> Option<(&[u8], &OverlayedValue)> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_prospective_key = self.prospective.children.get(storage_key) + let next_prospective_key = self.prospective.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - let next_committed_key = self.committed.children.get(storage_key) + let next_committed_key = self.committed.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); match (next_committed_key, next_prospective_key) { @@ -866,39 +865,40 @@ mod tests { #[test] fn next_child_storage_key_change_works() { - let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + 
overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); + let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value, Some(vec![10])); // next_committed < next_prospective - let next_to_10 = overlay.next_child_storage_key_change(&child, &[10]).unwrap(); + let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value, Some(vec![20])); // next_committed == next_prospective - let next_to_20 = overlay.next_child_storage_key_change(&child, &[20]).unwrap(); + let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value, None); // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(&child, &[30]).unwrap(); + let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); + let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value, Some(vec![50])); } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 747872af831ac..deafeb902d8bf 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -22,7 +22,7 @@ use codec::{Decode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProof, }; pub use sp_trie::Recorder; @@ -67,13 +67,13 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. pub fn child_storage( &mut self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result>, String> { + let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(storage_key)); + .unwrap_or(empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( @@ -84,7 +84,6 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); read_child_trie_value_with::, _, _>( - storage_key, child_info.keyspace(), &eph, &root.as_ref(), @@ -201,20 +200,18 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.child_storage(storage_key, child_info, key) + self.0.child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(storage_key, child_info, f) + self.0.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -223,11 +220,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.next_child_storage_key(storage_key, child_info, key) + self.0.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -240,12 +236,11 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.0.for_child_keys_with_prefix( child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -258,11 +253,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.0.child_keys(storage_key, child_info, prefix) + self.0.child_keys(child_info, prefix) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -273,15 +267,14 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, child_info, delta) + self.0.child_storage_root(child_info, delta) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } @@ -314,14 +307,10 @@ mod tests { use crate::InMemoryBackend; use crate::trie_backend::tests::test_trie; use super::*; - use sp_core::storage::ChildStorageKey; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; use sp_runtime::traits::BlakeTwo256; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - fn test_proving<'a>( trie_backend: &'a TrieBackend,BlakeTwo256>, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { @@ -389,33 +378,33 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); + let 
child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -443,7 +432,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -451,7 +440,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 2b971d816a5b8..70a96c623adae 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -93,7 +93,7 @@ impl TestExternalities overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children.keys().all(|key| is_child_storage_key(key))); + assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); @@ -133,11 +133,11 @@ impl TestExternalities .map(|(k, v)| (k, v.value)).collect(); let mut transaction = vec![(None, top)]; - self.overlay.committed.children.clone().into_iter() - .chain(self.overlay.prospective.children.clone().into_iter()) - .for_each(|(keyspace, (map, child_info))| { + self.overlay.committed.children_default.clone().into_iter() + .chain(self.overlay.prospective.children_default.clone().into_iter()) + .for_each(|(_storage_key, (map, child_info))| { transaction.push(( - Some((keyspace, child_info)), + Some(child_info), map.into_iter() .map(|(k, v)| (k, v.value)) .collect::>(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index f88e306a2fc3d..08eea29360465 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -18,9 +18,9 @@ use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, 
default_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -80,11 +80,10 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) + self.essence.child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -93,11 +92,10 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) + self.essence.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -110,21 +108,19 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) + self.essence.for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.essence.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -194,18 +190,20 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where I: IntoIterator)>, H::Out: Ord, { - let default_root = default_child_trie_root::>(storage_key); + let default_root = match child_info.child_type() { + ChildType::ParentKeyId => empty_child_trie_root::>() + }; let mut write_overlay = S::Overlay::default(); - let mut root = match self.storage(storage_key) { + let prefixed_storage_key = child_info.prefixed_storage_key(); + let mut root = match self.storage(prefixed_storage_key.as_slice()) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -221,7 +219,6 @@ impl, H: Hasher> Backend for TrieBackend where ); match child_delta_trie_root::, _, _, _, _, _>( - storage_key, child_info.keyspace(), &mut eph, root, @@ -257,16 +254,14 @@ pub mod tests { use sp_runtime::traits::BlakeTwo256; use super::*; - const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_KEY_1: &[u8] = b"sub1"; fn test_db() -> (PrefixedMemoryDB, H256) { + let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -276,7 +271,8 @@ pub mod tests { let mut 
sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed"); + trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) + .expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -302,7 +298,7 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 125a823f57fc1..28d1c68ca2e40 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, + empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; @@ -71,15 +71,19 @@ impl, H: Hasher> TrieBackendEssence where H::Out: self.next_storage_key_from_root(&self.root, None, key) } + /// Access the root of the child storage in its parent trie + fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + self.storage(child_info.prefixed_storage_key().as_slice()) + } + /// Return the next key in the child trie i.e. the minimum key that is strictly superior to /// `key` in lexicographic order. pub fn next_child_storage_key( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.storage(storage_key)? { + let child_root = match self.child_root(child_info)? { Some(child_root) => child_root, None => return Ok(None), }; @@ -87,7 +91,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut hash = H::Out::default(); if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); } // note: child_root and hash must be same size, panics otherwise. hash.as_mut().copy_from_slice(&child_root[..]); @@ -99,7 +103,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: fn next_storage_key_from_root( &self, root: &H::Out, - child_info: Option, + child_info: Option<&ChildInfo>, key: &[u8], ) -> Result, String> { let mut read_overlay = S::Overlay::default(); @@ -161,12 +165,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of child storage at given key. pub fn child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); + let root = self.child_root(child_info)? 
+ .unwrap_or(empty_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -176,19 +179,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) + read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) .map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. pub fn for_keys_in_child_storage( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -202,7 +204,6 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - storage_key, child_info.keyspace(), &eph, &root, @@ -215,13 +216,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Execute given closure for all keys starting with prefix. pub fn for_child_keys_with_prefix( &self, - storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], mut f: F, ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -242,7 +242,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option, + child_info: Option<&ChildInfo>, ) { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -436,7 +436,8 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::new_default(b"MyChild"); + let child_info = &child_info; // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -460,7 +461,8 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); + trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) + .expect("insert failed"); }; let essence_1 = TrieBackendEssence::new(mdb, root_1); @@ -475,19 +477,19 @@ mod test { let essence_2 = TrieBackendEssence::new(mdb, root_2); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + 
essence_2.next_child_storage_key(child_info, b"6"), Ok(None) ); } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index d7ae342b2dfdd..76174d13b03c7 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-std = { version = "2.0.0-dev", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } +ref-cast = "1.0.0" sp-debug-derive = { version = "2.0.0-dev", path = "../debug-derive" } [features] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 76fd4baac9cf3..d2c4a73e23d59 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -22,7 +22,9 @@ use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::vec::Vec; +use sp_std::ops::{Deref, DerefMut}; +use ref_cast::RefCast; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -32,6 +34,51 @@ pub struct StorageKey( pub Vec, ); +/// Storage key of a child trie, it contains the prefix to the key. +#[derive(PartialEq, Eq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[repr(transparent)] +#[derive(RefCast)] +pub struct PrefixedStorageKey( + #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] + Vec, +); + +impl Deref for PrefixedStorageKey { + type Target = Vec; + + fn deref(&self) -> &Vec { + &self.0 + } +} + +impl DerefMut for PrefixedStorageKey { + fn deref_mut(&mut self) -> &mut Vec { + &mut self.0 + } +} + +impl PrefixedStorageKey { + /// Create a prefixed storage key from its byte array + /// representation. + pub fn new(inner: Vec) -> Self { + PrefixedStorageKey(inner) + } + + /// Create a prefixed storage key reference. + pub fn new_ref(inner: &Vec) -> &Self { + PrefixedStorageKey::ref_cast(inner) + } + + /// Get inner key, this should + /// only be needed when writing + /// into parent trie to avoid an + /// allocation. + pub fn into_inner(self) -> Vec { + self.0 + } +} + /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] @@ -53,7 +100,7 @@ pub struct StorageChild { pub data: StorageMap, /// Associated child info for a child /// trie. - pub child_info: OwnedChildInfo, + pub child_info: ChildInfo, } #[cfg(feature = "std")] @@ -62,8 +109,11 @@ pub struct StorageChild { pub struct Storage { /// Top trie storage data. pub top: StorageMap, - /// Children trie storage data by storage key. - pub children: std::collections::HashMap, StorageChild>, + /// Children trie storage data. + /// The key does not including prefix, for the `default` + /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` + /// tries. + pub children_default: std::collections::HashMap, StorageChild>, } /// Storage change set @@ -106,133 +156,87 @@ pub mod well_known_keys { // Other code might depend on this, so be careful changing this. key.starts_with(CHILD_STORAGE_KEY_PREFIX) } - - /// Determine whether a child trie key is valid. - /// - /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. - /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. 
- pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); - if has_right_prefix { - // This is an attempt to catch a change of `is_child_storage_key`, which - // just checks if the key has prefix `:child_storage:` at the moment of writing. - debug_assert!( - is_child_storage_key(&storage_key), - "`is_child_trie_key_valid` is a subset of `is_child_storage_key`", - ); - } - has_right_prefix - } -} - -/// A wrapper around a child storage key. -/// -/// This wrapper ensures that the child storage key is correct and properly used. It is -/// impossible to create an instance of this struct without providing a correct `storage_key`. -pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, } -impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. - fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. - /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } -} - -#[derive(Clone, Copy)] /// Information related to a child state. -pub enum ChildInfo<'a> { - Default(ChildTrie<'a>), -} - -/// Owned version of `ChildInfo`. -/// To be use in persistence layers. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub enum OwnedChildInfo { - Default(OwnedChildTrie), +pub enum ChildInfo { + /// This is the one used by default. + ParentKeyId(ChildTrieParentKeyId), } -impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. - pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, +impl ChildInfo { + /// Instantiates child information for a default child trie + /// of kind `ChildType::ParentKeyId`, using an unprefixed parent + /// storage key. + pub fn new_default(storage_key: &[u8]) -> Self { + let data = storage_key.to_vec(); + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) + } + + /// Same as `new_default` but with `Vec` as input. + pub fn new_default_from_vec(storage_key: Vec) -> Self { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { + data: storage_key, }) } - /// Instantiates a owned version of this child info. - pub fn to_owned(&self) -> OwnedChildInfo { + /// Try to update with another instance, return false if both instance + /// are not compatible. 
+ pub fn try_update(&mut self, other: &ChildInfo) -> bool { match self { - ChildInfo::Default(ChildTrie { data }) => OwnedChildInfo::Default(OwnedChildTrie { data: data.to_vec(), }), + ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), } } - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), - _ => None, + /// Returns the byte sequence (keyspace) that can be used by the underlying db to isolate keys. + /// This is a unique id of the child trie. The collision resistance of this value + /// depends on the type of child info used. For `ChildInfo::ParentKeyId` it is, and needs to be. + pub fn keyspace(&self) -> &[u8] { + match self { + ChildInfo::ParentKeyId(..) => self.storage_key(), } } - /// Return a single byte vector containing packed child info content and its child info type. - /// This can be use as input for `resolve_child_info`. - pub fn info(&self) -> (&[u8], u32) { + /// Returns a reference to the location in the direct parent of + /// this trie but without the common prefix for this kind of + /// child trie. + pub fn storage_key(&self) -> &[u8] { match self { - ChildInfo::Default(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, - }) => (data, ChildType::CryptoUniqueId as u32), + }) => &data[..], } } - /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. - /// This is a unique id of the child trie. The collision resistance of this value - /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. - pub fn keyspace(&self) -> &[u8] { + /// Returns the full location in the direct parent of + /// this trie. + pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { - ChildInfo::Default(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, - }) => &data[..], + }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + } + } + + /// Returns the full location in the direct parent of + /// this trie, consuming `self`. + pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { + match self { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { + mut data, + }) => { + ChildType::ParentKeyId.do_prefix_key(&mut data); + PrefixedStorageKey(data) + }, + } + } + + /// Returns the type for this child info. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } } @@ -241,65 +245,98 @@ impl<'a> ChildInfo<'a> { /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] +#[derive(Clone, Copy, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] pub enum ChildType { - /// Default, it uses a cryptographic strong unique id as input. - CryptoUniqueId = 1, + /// If the runtime module ensures that the child key is a unique id that will + /// only be used once, its parent key is used as the child trie unique id. + ParentKeyId = 1, } -impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, +impl ChildType { + /// Try to get a child type from its `u32` representation.
+ pub fn new(repr: u32) -> Option<ChildType> { + Some(match repr { + r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId, + _ => return None, }) } - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), + /// Transform a prefixed key into a tuple of the child type + /// and the unprefixed representation of the key. + pub fn from_prefixed_key<'a>(storage_key: &'a PrefixedStorageKey) -> Option<(Self, &'a [u8])> { + let match_type = |storage_key: &'a [u8], child_type: ChildType| { + let prefix = child_type.parent_prefix(); + if storage_key.starts_with(prefix) { + Some((child_type, &storage_key[prefix.len()..])) + } else { + None + } + }; + match_type(storage_key, ChildType::ParentKeyId) + } + + /// Produce a prefixed key for a given child type. + fn new_prefixed_key(&self, key: &[u8]) -> PrefixedStorageKey { + let parent_prefix = self.parent_prefix(); + let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); + result.extend_from_slice(parent_prefix); + result.extend_from_slice(key); + PrefixedStorageKey(result) + } + + /// Prefixes a vec with the prefix for this child type. + fn do_prefix_key(&self, key: &mut Vec<u8>) { + let parent_prefix = self.parent_prefix(); + let key_len = key.len(); + if parent_prefix.len() > 0 { + key.resize(key_len + parent_prefix.len(), 0); + key.copy_within(..key_len, parent_prefix.len()); + key[..parent_prefix.len()].copy_from_slice(parent_prefix); } } - /// Get `ChildInfo` reference to this owned child info. - pub fn as_ref(&self) -> ChildInfo { + /// Returns the location reserved for this child trie in its parent trie, if there + /// is one. + pub fn parent_prefix(&self) -> &'static [u8] { match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), + &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } } /// A child trie of default type. -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Clone, Copy)] -pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], -} - -/// Owned version of default child trie `ChildTrie`. +/// It uses the same default implementation as the top trie, +/// the top trie being a child trie with no keyspace and no storage key. +/// Its keyspace is the variable (unprefixed) part of its storage key. +/// It shares its trie node backend storage with every other +/// child trie, so its storage key needs to be a unique id +/// that will be used only once. +/// These unique ids are also required to be long enough that no +/// unique id is a prefix of another one. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. +pub struct ChildTrieParentKeyId { + /// Data is the storage key without the prefix. data: Vec, } -impl OwnedChildTrie { +impl ChildTrieParentKeyId { /// Try to update with another instance, return false if both instance /// are not compatible.
- fn try_update(&mut self, other: ChildInfo) -> bool { + fn try_update(&mut self, other: &ChildInfo) -> bool { match other { - ChildInfo::Default(other) => self.data[..] == other.data[..], + ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..], } } } + +const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; + +#[test] +fn test_prefix_default_child_info() { + let child_info = ChildInfo::new_default(b"any key"); + let prefix = child_info.child_type().parent_prefix(); + assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); + assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); +} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 80570a9792b10..37fe928336337 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -211,9 +211,8 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } -/// Determine the default child trie root. -pub fn default_child_trie_root( - _storage_key: &[u8], +/// Determine the empty child trie root. +pub fn empty_child_trie_root( ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -221,7 +220,6 @@ pub fn default_child_trie_root( /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_trie_root( - _storage_key: &[u8], input: I, ) -> ::Out where @@ -235,7 +233,6 @@ pub fn child_trie_root( /// Determine a child trie root given a hash DB and delta values. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( - _storage_key: &[u8], keyspace: &[u8], db: &mut DB, root_data: RD, @@ -270,7 +267,6 @@ pub fn child_delta_trie_root( /// Call `f` for all keys in a child trie. pub fn for_keys_in_child_trie( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -321,7 +317,6 @@ pub fn record_all_keys( /// Read a value from the child trie. pub fn read_child_trie_value( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -341,7 +336,6 @@ pub fn read_child_trie_value( /// Read a value from the child trie with given query. pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d04e85fd10c23..4880b296c7048 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -40,7 +40,7 @@ pub use self::client_ext::{ClientExt, ClientBlockImportExt}; use std::sync::Arc; use std::collections::HashMap; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, BlakeTwo256}; use sc_client::LocalCallExecutor; @@ -66,6 +66,8 @@ impl GenesisInit for () { pub struct TestClientBuilder { execution_strategies: ExecutionStrategies, genesis_init: G, + /// The key is an unprefixed storage key, this only contains + /// default child trie content. 
child_storage_extension: HashMap, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, @@ -129,17 +131,17 @@ impl TestClientBuilder, - child_key: impl AsRef<[u8]>, - child_info: ChildInfo, value: impl AsRef<[u8]>, ) -> Self { - let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) + let storage_key = child_info.storage_key(); + let entry = self.child_storage_extension.entry(storage_key.to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }); - entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); + entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self } @@ -189,8 +191,8 @@ impl TestClientBuilder::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - (sk.clone(), state_root.encode()) + let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); + (prefixed_storage_key.into_inner(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() @@ -192,22 +193,21 @@ pub trait TestClientBuilderExt: Sized { /// # Panics /// /// Panics if the key is empty. - fn add_extra_child_storage>, K: Into>, V: Into>>( + fn add_extra_child_storage>, V: Into>>( mut self, - storage_key: SK, - child_info: ChildInfo, + child_info: &ChildInfo, key: K, value: V, ) -> Self { - let storage_key = storage_key.into(); + let storage_key = child_info.storage_key().to_vec(); let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children + self.genesis_init_mut().extra_storage.children_default .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }).data.insert(key, value.into()); self } @@ -311,7 +311,10 @@ impl Fetcher for LightFetcher { unimplemented!() } - fn remote_read_child(&self, _: RemoteReadChildRequest) -> Self::RemoteReadResult { + fn remote_read_child( + &self, + _: RemoteReadChildRequest, + ) -> Self::RemoteReadResult { unimplemented!() } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 25d9a807ccee1..b9de3ab3f4cb0 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -73,7 +73,7 @@ impl GenesisConfig { map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
- let mut storage = Storage { top: map, children: self.extra_storage.children.clone()}; + let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()}; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); @@ -85,7 +85,7 @@ impl GenesisConfig { pub fn insert_genesis_block( storage: &mut Storage, ) -> sp_core::hash::H256 { - let child_roots = storage.children.iter().map(|(sk, child_content)| { + let child_roots = storage.children_default.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect(), ); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 65fbf300bb8a5..cfafb76521ad3 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -50,7 +50,6 @@ use sp_version::NativeVersion; use frame_support::{impl_outer_origin, parameter_types, weights::{Weight, RuntimeDbWeight}}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; -use sp_core::storage::ChildType; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AuthorityId, SlotNumber}; @@ -923,22 +922,17 @@ fn test_read_storage() { } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; - const UNIQUE_ID: &[u8] = b":unique_id"; + const STORAGE_KEY: &[u8] = b"unique_id_1"; const KEY: &[u8] = b":read_child_storage"; - sp_io::storage::child_set( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + sp_io::default_child_storage::set( + STORAGE_KEY, KEY, b"test", ); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 0, @@ -947,10 +941,8 @@ fn test_read_child_storage() { assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 8, diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index c35850ae950e9..9cfec187ddb79 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -373,7 +373,7 @@ mod tests { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], - children: map![], + children_default: map![], }, ) }
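The new `ChildInfo` introduced in primitives/storage/src/lib.rs above is an owned value that derives both its keyspace and its prefixed location in the parent trie from the unprefixed storage key. A minimal sketch of the round trip, written against the items added in this diff; the `sp_core::storage` re-export path is assumed, as used elsewhere in the patch:

use sp_core::storage::{well_known_keys, ChildInfo, ChildType, PrefixedStorageKey};

fn child_info_round_trip() {
	// A default child trie is identified by its unprefixed parent storage key.
	let child_info = ChildInfo::new_default(b"sub1");

	// For `ChildType::ParentKeyId` tries the storage key doubles as the keyspace.
	assert_eq!(child_info.storage_key(), &b"sub1"[..]);
	assert_eq!(child_info.keyspace(), &b"sub1"[..]);

	// The prefixed storage key is where the child root lives in the top trie:
	// the `:child_storage:default:` prefix followed by the unprefixed key.
	let prefixed: PrefixedStorageKey = child_info.prefixed_storage_key();
	assert!(prefixed.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX));
	assert!(prefixed.starts_with(child_info.child_type().parent_prefix()));

	// `from_prefixed_key` strips the prefix again and recovers the child type.
	let (child_type, unprefixed) = ChildType::from_prefixed_key(&prefixed)
		.expect("the prefix was written by `prefixed_storage_key`");
	assert!(child_type == ChildType::ParentKeyId);
	assert_eq!(unprefixed, &b"sub1"[..]);
}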
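`PrefixedStorageKey` is `#[repr(transparent)]` over `Vec<u8>`, so `ref-cast` (the new dependency added to sp-storage's Cargo.toml) lets an existing key vector be viewed as a `&PrefixedStorageKey` without copying, while `into_inner` recovers the raw bytes for a parent-trie write. A short sketch under the same path assumption as above:

use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey};

fn prefixed_key_views() {
	// An owned prefixed key, e.g. as it would be read out of the top trie.
	let raw: Vec<u8> = b":child_storage:default:sub1".to_vec();

	// Borrow it as a `PrefixedStorageKey` without allocating.
	let prefixed: &PrefixedStorageKey = PrefixedStorageKey::new_ref(&raw);
	assert!(ChildType::from_prefixed_key(prefixed).is_some());

	// Or build one from a `ChildInfo` and unwrap it back into plain bytes.
	let owned = ChildInfo::new_default(b"sub1").into_prefixed_storage_key();
	assert_eq!(owned.into_inner(), raw);
}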
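`ChildType::do_prefix_key` prefixes a key in place: grow the vector, shift the original bytes towards the back with `copy_within`, then overwrite the front with the parent prefix. A standalone re-implementation of that technique (not the code from the patch) makes the index arithmetic easier to follow:

/// Prefix `key` with `prefix` in place, mirroring the technique used by
/// `ChildType::do_prefix_key`.
fn prefix_in_place(prefix: &[u8], key: &mut Vec<u8>) {
	let key_len = key.len();
	if !prefix.is_empty() {
		// Make room for the prefix at the front.
		key.resize(key_len + prefix.len(), 0);
		// Move the original bytes to the back ...
		key.copy_within(..key_len, prefix.len());
		// ... and write the prefix over the freed-up front.
		key[..prefix.len()].copy_from_slice(prefix);
	}
}

fn main() {
	let mut key = b"sub1".to_vec();
	prefix_in_place(b":child_storage:default:", &mut key);
	assert_eq!(key, b":child_storage:default:sub1".to_vec());
}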
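At the externalities layer every child-trie call now takes only `&ChildInfo`; the storage key, keyspace and prefix are derived from it. A sketch of the calling convention exercised by the updated `set_child_storage_works` test above, written as a generic helper (the generic bound is an assumption; the test itself uses a concrete `Ext`):

use sp_core::{storage::ChildInfo, traits::Externalities};

/// Write, read back and remove a child-trie value through the new API.
fn child_storage_demo<E: Externalities>(ext: &mut E) {
	let child_info = ChildInfo::new_default(b"sub1");

	ext.set_child_storage(&child_info, b"abc".to_vec(), b"def".to_vec());
	assert_eq!(ext.child_storage(&child_info, b"abc"), Some(b"def".to_vec()));

	ext.kill_child_storage(&child_info);
	assert_eq!(ext.child_storage(&child_info, b"abc"), None);
}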
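On the runtime side, the `sp_io::storage::child_*` host functions are replaced by a dedicated `sp_io::default_child_storage` module that takes the unprefixed storage key alone, as the updated `test_read_child_storage` shows. A minimal sketch of the two calls that appear in this diff; it has to run with externalities set (for example inside `TestExternalities::execute_with`), and the `Option`-returning result of `read` is an assumption based on its use here:

fn default_child_storage_demo() {
	let storage_key = b"unique_id_1";

	// Write into the default child trie identified by `storage_key`.
	sp_io::default_child_storage::set(storage_key, b"key", b"value");

	// Read the value back into a caller-provided buffer, starting at offset 0.
	let mut out = [0u8; 5];
	let read = sp_io::default_child_storage::read(storage_key, b"key", &mut out, 0);
	assert!(read.is_some());
	assert_eq!(&out, b"value");
}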
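Genesis and test storage now key child tries by their unprefixed storage key and carry the `ChildInfo` inside each `StorageChild`, which is what the renamed `children_default` fields hold. A sketch of building such a `Storage` value; `StorageMap` is assumed to be the `BTreeMap` alias used for `StorageChild::data`, re-exported through `sp_core::storage`:

use std::collections::HashMap;
use sp_core::storage::{ChildInfo, Storage, StorageChild, StorageMap};

fn build_genesis_storage() -> Storage {
	let child_info = ChildInfo::new_default(b"sub1");

	let mut data = StorageMap::new();
	data.insert(b"key".to_vec(), b"value".to_vec());

	let mut children_default = HashMap::new();
	// The map key is the unprefixed storage key, i.e. `child_info.storage_key()`.
	children_default.insert(
		child_info.storage_key().to_vec(),
		StorageChild { data, child_info },
	);

	Storage { top: StorageMap::new(), children_default }
}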