implement iterator for change child trie.
cheme committed Jul 15, 2019
commit a5483cb6fc065f47cc0d9344acb201482ce2cba2
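At the API level, the effect of this commit is that `key_changes` and `key_changes_proof` now take an optional child storage key in front of the key itself: `None` keeps the old top-level-trie behaviour, `Some(..)` selects a child trie. A minimal sketch of the two call shapes, assuming a `client` handle and hypothetical `first`, `last`, `first_hash`, `last_hash`, `min`, `max`, `child_storage_key` and `key` values already in scope, inside a function returning `error::Result`:

// Illustrative sketch only; all identifiers other than the method names are assumptions.
// Top-level trie query: behaviour unchanged, now with an explicit `None`.
let top_changes = client.key_changes(first, last, None, &key)?;
// Child trie query: pass the child trie's storage key as `Some(..)`.
let child_changes = client.key_changes(first, last, Some(&child_storage_key), &key)?;
// Proof generation follows the same pattern.
let proof = client.key_changes_proof(first_hash, last_hash, min, max, Some(&child_storage_key), &key)?;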
10 changes: 8 additions & 2 deletions core/client/src/client.rs
@@ -531,6 +531,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
key: &StorageKey
) -> error::Result<Vec<(NumberFor<Block>, u32)>> {
let (config, storage) = self.require_changes_trie()?;
@@ -546,6 +547,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
number: last_number,
},
self.backend.blockchain().info().best_number,
storage_key.as_ref().map(|sk| sk.0.as_slice()),
&key.0)
.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::<Result<_, _>>())
.map_err(|err| error::Error::ChangesTrieAccessFailed(err))
Expand All @@ -563,13 +565,15 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &StorageKey
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> error::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
first,
last,
min,
max,
storage_key,
key,
cht::size(),
)
@@ -582,6 +586,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
key: &StorageKey,
cht_size: NumberFor<Block>,
) -> error::Result<ChangesProof<Block::Header>> {
@@ -648,7 +653,8 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
number: last_number,
},
max_number,
&key.0
storage_key.as_ref().map(|sk| sk.0.as_slice()),
&key.0,
)
.map_err(|err| error::Error::from(error::Error::ChangesTrieAccessFailed(err)))?;

3 changes: 3 additions & 0 deletions core/client/src/light/fetcher.rs
@@ -107,6 +108,8 @@ pub struct RemoteChangesRequest<Header: HeaderT> {
/// Known changes trie roots for the range of blocks [tries_roots.0..max_block].
/// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node.
pub tries_roots: (Header::Number, Header::Hash, Vec<Header::Hash>),
/// Optional child storage key to read.
pub storage_key: Option<Vec<u8>>,
/// Storage key to read.
pub key: Vec<u8>,
/// Number of times to retry request. None means that default RETRY_COUNT is used.
@@ -297,6 +299,7 @@ impl<E, H, B: BlockT, S: BlockchainStorage<B>, F> LightDataChecker<E, H, B, S, F
number: request.last_block.0,
},
remote_max_block,
request.storage_key.as_ref().map(Vec::as_slice),
&request.key)
.map_err(|err| ClientError::ChangesTrieAccessFailed(err))
}
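On the light-client side, only the new field of `RemoteChangesRequest` differs between a top-level and a child-trie query. A hedged sketch, assuming a hypothetical `base_request` already filled for a top-level query and hypothetical child-key bytes:

// Sketch only: `base_request` and `child_storage_key_bytes` are assumed values.
let child_request = RemoteChangesRequest {
    storage_key: Some(child_storage_key_bytes),
    ..base_request
};
// A plain top-level query would instead leave `storage_key: None`.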
6 changes: 4 additions & 2 deletions core/network/src/chain.rs
@@ -61,6 +61,7 @@ pub trait Client<Block: BlockT>: Send + Sync {
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
key: &StorageKey
) -> Result<ChangesProof<Block::Header>, Error>;

@@ -123,9 +124,10 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
key: &StorageKey
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> Result<ChangesProof<Block::Header>, Error> {
(self as &SubstrateClient<B, E, Block, RA>).key_changes_proof(first, last, min, max, key)
(self as &SubstrateClient<B, E, Block, RA>).key_changes_proof(first, last, min, max, storage_key, key)
}

fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error> {
20 changes: 16 additions & 4 deletions core/network/src/protocol.rs
@@ -228,14 +228,16 @@ impl<'a, B: BlockT> OnDemandNetwork<B> for OnDemandIn<'a, B> {
last: <B as BlockT>::Hash,
min: <B as BlockT>::Hash,
max: <B as BlockT>::Hash,
key: Vec<u8>
storage_key: Option<Vec<u8>>,
key: Vec<u8>,
) {
let message = message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest {
id,
first,
last,
min,
max,
storage_key,
key,
});

@@ -1379,24 +1381,34 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})",
request.id,
who,
request.key.to_hex::<String>(),
if let Some(sk) = request.storage_key.as_ref() {
format!("{} : {}", sk.to_hex::<String>(), request.key.to_hex::<String>())
} else {
request.key.to_hex::<String>()
},
request.first,
request.last
);
let storage_key = request.storage_key.map(|sk| StorageKey(sk));
let key = StorageKey(request.key);
let proof = match self.context_data.chain.key_changes_proof(
request.first,
request.last,
request.min,
request.max,
&key
storage_key.as_ref(),
&key,
) {
Ok(proof) => proof,
Err(error) => {
trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}",
request.id,
who,
key.0.to_hex::<String>(),
if let Some(sk) = storage_key {
format!("{} : {}", sk.0.to_hex::<String>(), key.0.to_hex::<String>())
} else {
key.0.to_hex::<String>()
},
request.first,
request.last,
error
2 changes: 2 additions & 0 deletions core/network/src/protocol/message.rs
@@ -345,6 +345,8 @@ pub mod generic {
pub min: H,
/// Hash of the last block that we can use when querying changes.
pub max: H,
/// Child storage key for which changes are requested.
pub storage_key: Option<Vec<u8>>,
/// Storage key which changes are requested.
pub key: Vec<u8>,
}
6 changes: 4 additions & 2 deletions core/network/src/protocol/on_demand.rs
@@ -81,7 +81,8 @@ pub trait OnDemandNetwork<B: BlockT> {
last: <B as BlockT>::Hash,
min: <B as BlockT>::Hash,
max: <B as BlockT>::Hash,
key: Vec<u8>
storage_key: Option<Vec<u8>>,
key: Vec<u8>,
);

/// Send to `who` a body request.
@@ -601,6 +602,7 @@ impl<Block: BlockT> Request<Block> {
data.last_block.1.clone(),
data.tries_roots.1.clone(),
data.max_block.1.clone(),
data.storage_key.clone(),
data.key.clone(),
),
RequestData::RemoteBody(ref data, _) =>
@@ -757,7 +759,7 @@ pub mod tests {
_: Vec<u8>) {}
fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: <B as BlockT>::Hash, _: String, _: Vec<u8>) {}
fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: <B as BlockT>::Hash, _: <B as BlockT>::Hash,
_: <B as BlockT>::Hash, _: <B as BlockT>::Hash, _: Vec<u8>) {}
_: <B as BlockT>::Hash, _: <B as BlockT>::Hash, _: Option<Vec<u8>>, _: Vec<u8>) {}
fn send_body_request(&mut self, _: &PeerId, _: RequestId, _: BlockAttributes, _: FromBlock<<B as BlockT>::Hash,
<<B as BlockT>::Header as HeaderT>::Number>, _: Option<B::Hash>, _: Direction, _: Option<u32>) {}
}
2 changes: 1 addition & 1 deletion core/rpc/src/state/mod.rs
@@ -305,7 +305,7 @@ impl<B, E, Block: BlockT, RA> State<B, E, Block, RA> where
for key in keys {
let mut last_block = None;
let mut last_value = last_values.get(key).cloned().unwrap_or_default();
for (block, _) in self.client.key_changes(begin, end, key)?.into_iter().rev() {
for (block, _) in self.client.key_changes(begin, end, None, key)?.into_iter().rev() {
if last_block == Some(block) {
continue;
}
30 changes: 13 additions & 17 deletions core/state-machine/src/changes_trie/build.rs
@@ -63,7 +63,8 @@ pub fn prepare_input<'a, B, S, H, Number>(
config,
storage)?;
top.extend(prepared.0);
// TODO EMCH should zip with extrinsics (let's wait optim merge).
// zipping with extrinsics could be faster, but requires the iterator
// to stay ordered.
for (storage_key, child) in prepared.1 {
if let Some(ix) = children.iter().position(|v| v.0 == storage_key) {
children[ix].1.extend(child);
@@ -259,17 +260,12 @@ fn prepare_digest_input<'a, S, H, Number>(
mod test {
use parity_codec::Encode;
use primitives::Blake2Hasher;
use primitives::storage::well_known_keys::{EXTRINSIC_INDEX, CHILD_STORAGE_KEY_PREFIX};
use primitives::storage::well_known_keys::{EXTRINSIC_INDEX};
use crate::backend::InMemory;
use crate::changes_trie::storage::InMemoryStorage;
use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet};
use super::*;

fn child_trie_key(k: &[u8]) -> Vec<u8> {
let mut res = CHILD_STORAGE_KEY_PREFIX.to_vec();
res.extend_from_slice(k);
res
}
fn prepare_for_build() -> (InMemory<Blake2Hasher>, InMemoryStorage<Blake2Hasher, u64>, OverlayedChanges) {
let backend: InMemory<_> = vec![
(vec![100], vec![255]),
@@ -279,8 +275,8 @@ mod test {
(vec![104], vec![255]),
(vec![105], vec![255]),
].into_iter().collect::<::std::collections::HashMap<_, _>>().into();
let child_trie_key1 = child_trie_key(b"1");
let child_trie_key2 = child_trie_key(b"2");
let child_trie_key1 = b"1".to_vec();
let child_trie_key2 = b"2".to_vec();
let storage = InMemoryStorage::with_inputs(vec![
(1, vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![100] }, vec![1, 3]),
@@ -401,11 +397,11 @@ mod test {
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![101] }, vec![1]),
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![103] }, vec![0, 1]),
], vec![
(ChildIndex { block: 5, storage_key: child_trie_key(&b"1"[..]) },
(ChildIndex { block: 5, storage_key: b"1".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![100] }, vec![0, 2, 3]),
]),
(ChildIndex { block: 5, storage_key: child_trie_key(&b"2"[..]) },
(ChildIndex { block: 5, storage_key: b"2".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![100] }, vec![0, 2]),
]),
@@ -433,13 +429,13 @@ mod test {
InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]),
InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]),
], vec![
(ChildIndex { block: 4, storage_key: child_trie_key(&b"1"[..]) },
(ChildIndex { block: 4, storage_key: b"1".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]),

InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]),
]),
(ChildIndex { block: 4, storage_key: child_trie_key(&b"2"[..]) },
(ChildIndex { block: 4, storage_key: b"2".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2]),
]),
@@ -468,13 +464,13 @@ mod test {
InputPair::DigestIndex(DigestIndex { block: 16, key: vec![103] }, vec![4]),
InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![4, 8]),
], vec![
(ChildIndex { block: 16, storage_key: child_trie_key(&b"1"[..]) },
(ChildIndex { block: 16, storage_key: b"1".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]),

InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]),
]),
(ChildIndex { block: 16, storage_key: child_trie_key(&b"2"[..]) },
(ChildIndex { block: 16, storage_key: b"2".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2]),
]),
@@ -509,13 +505,13 @@ mod test {
InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]),
InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]),
], vec![
(ChildIndex { block: 4, storage_key: child_trie_key(&b"1"[..]) },
(ChildIndex { block: 4, storage_key: b"1".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]),

InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]),
]),
(ChildIndex { block: 4, storage_key: child_trie_key(&b"2"[..]) },
(ChildIndex { block: 4, storage_key: b"2".to_vec() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2]),
]),