This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Changes from 5 commits
3 changes: 2 additions & 1 deletion client/service/src/client/client.rs
@@ -1190,7 +1190,8 @@ where
 				state, size_limit, start_key,
 			)?;
 			// This is read proof only, we can use either LayoutV0 or LayoutV1.
-			let proof = sp_trie::encode_compact::<sp_trie::LayoutV0<HashFor<Block>>>(proof, root)
+			let proof = proof
+				.into_compact_proof::<sp_trie::LayoutV0<HashFor<Block>>>(root)
 				.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
 			Ok((proof, count))
 		}
8 changes: 4 additions & 4 deletions primitives/state-machine/src/lib.rs
@@ -1939,13 +1939,13 @@ mod tests {
 		let (proof, count) =
 			prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap();
 		// Always contains at least some nodes.
-		assert_eq!(proof.into_memory_db::<BlakeTwo256>().drain().len(), 3);
+		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 3);
 		assert_eq!(count, 1);
 
 		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
 		let (proof, count) =
 			prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap();
-		assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 9);
+		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 9);
 		assert_eq!(count, 85);
 		let (results, completed) = read_range_proof_check::<BlakeTwo256>(
 			remote_root,
@@ -1968,7 +1968,7 @@ mod tests {
 		let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
 		let (proof, count) =
 			prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap();
-		assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 11);
+		assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 11);
 		assert_eq!(count, 132);
 		let (results, completed) =
 			read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, None, None)
@@ -2053,7 +2053,7 @@ mod tests {
 		)
 		.unwrap();
 		// Always contains at least some nodes.
-		assert!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len() > 0);
+		assert!(proof.to_memory_db::<BlakeTwo256>().drain().len() > 0);
 		assert!(count < 3); // when doing child we include parent and first child key.
 
 		let (result, completed_depth) = read_range_proof_check_with_child::<BlakeTwo256>(
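For context, a minimal sketch of why the `.clone()` calls disappear in these tests (not part of the diff): the new `to_memory_db` borrows the proof instead of consuming it, so the `StorageProof` stays usable for the `read_range_proof_check` call that follows. `BlakeTwo256` and the helper name are illustrative; the method call mirrors the test lines above.

	use sp_runtime::traits::BlakeTwo256;
	use sp_trie::StorageProof;

	fn node_count(proof: &StorageProof) -> usize {
		// Before: proof.clone().into_memory_db::<BlakeTwo256>() consumed a clone of the proof.
		// After: to_memory_db builds the MemoryDB from a &StorageProof, leaving `proof` intact.
		proof.to_memory_db::<BlakeTwo256>().drain().len()
	}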
4 changes: 2 additions & 2 deletions primitives/trie/src/lib.rs
@@ -57,10 +57,10 @@ pub use trie_db::{
 pub use trie_stream::TrieStream;
 
 /// substrate trie layout
-pub struct LayoutV0<H>(sp_std::marker::PhantomData<H>);
+pub struct LayoutV0<H>(PhantomData<H>);
 
 /// substrate trie layout, with external value nodes.
-pub struct LayoutV1<H>(sp_std::marker::PhantomData<H>);
+pub struct LayoutV1<H>(PhantomData<H>);
 
 impl<H> TrieLayout for LayoutV0<H>
 where
64 changes: 40 additions & 24 deletions primitives/trie/src/storage_proof.rs
@@ -18,7 +18,11 @@
 use codec::{Decode, Encode};
 use hash_db::{HashDB, Hasher};
 use scale_info::TypeInfo;
-use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec};
+use sp_std::{
+	collections::btree_set::{BTreeSet, IntoIter, Iter},
+	iter::IntoIterator,
+	vec::Vec,
+};
 // Note that `LayoutV1` usage here (proof compaction) is compatible
 // with `LayoutV0`.
 use crate::LayoutV1 as Layout;
@@ -54,10 +58,16 @@ impl StorageProof {
 		self.trie_nodes.is_empty()
 	}
 
+	/// Convert into an iterator over encoded trie nodes in lexicographical order constructed
+	/// from the proof.
+	pub fn iter_nodes(self) -> IntoIter<Vec<u8>> {

Contributor: This could be renamed `into_iter_nodes`.
Member: It should be!

+		self.trie_nodes.into_iter()
+	}
+
 	/// Create an iterator over encoded trie nodes in lexicographical order constructed
 	/// from the proof.
-	pub fn iter_nodes(self) -> StorageProofNodeIterator {
-		StorageProofNodeIterator::new(self)
+	pub fn iter(&self) -> Iter<'_, Vec<u8>> {

Contributor: This could be renamed `iter_nodes`.
Member: Yeah, this rename doesn't make any sense.
Member: And here, return `impl Iterator`.
Contributor (author, @yjhmelody, Oct 4, 2022), replying to the above: Yeah, but I think users may want to use more trait methods here. Would `DoubleEndedIterator` be better? Because there is a lexicographical-order semantic, a double-ended iterator is meaningful IMO.
Member: We didn't support this before, so I'm not sure we need it now.

+		self.trie_nodes.iter()
 	}
 
 	/// Convert into plain node vector.
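A self-contained sketch of the alternative return type discussed in the thread above (not code from this PR): because the nodes are stored in a `BTreeSet`, a borrowing iterator can be exposed as `impl DoubleEndedIterator` and still yield nodes in lexicographical order. The `Proof` type here is a stand-in for `StorageProof`.

	use std::collections::BTreeSet;

	struct Proof {
		trie_nodes: BTreeSet<Vec<u8>>,
	}

	impl Proof {
		// Hiding the concrete `btree_set::Iter` behind `impl DoubleEndedIterator`
		// keeps the API flexible while still allowing reverse traversal.
		fn iter_nodes(&self) -> impl DoubleEndedIterator<Item = &Vec<u8>> {
			self.trie_nodes.iter()
		}
	}

	fn main() {
		let proof = Proof { trie_nodes: BTreeSet::from([b"a".to_vec(), b"b".to_vec()]) };
		// Forward: lexicographically smallest node first; backward: largest first.
		assert_eq!(proof.iter_nodes().next(), Some(&b"a".to_vec()));
		assert_eq!(proof.iter_nodes().next_back(), Some(&b"b".to_vec()));
	}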
@@ -70,6 +80,11 @@ impl StorageProof {
 		self.into()
 	}
 
+	/// Creates a [`MemoryDB`](crate::MemoryDB) from `Self` reference.
+	pub fn to_memory_db<H: Hasher>(&self) -> crate::MemoryDB<H> {
+		self.into()
+	}
+
 	/// Merges multiple storage proofs covering potentially different sets of keys into one proof
 	/// covering all keys. The merged proof output may be smaller than the aggregate size of the
 	/// input proofs due to deduplication of trie nodes.
@@ -89,7 +104,17 @@ impl StorageProof {
 		self,
 		root: H::Out,
 	) -> Result<CompactProof, crate::CompactProofError<H::Out, crate::Error<H::Out>>> {
-		crate::encode_compact::<Layout<H>>(self, root)
+		let db = self.into_memory_db();
+		crate::encode_compact::<Layout<H>, crate::MemoryDB<H>>(db, root)
 	}
 
+	/// Encode as a compact proof with default trie layout.
+	pub fn to_compact_proof<H: Hasher>(
+		&self,
+		root: H::Out,
+	) -> Result<CompactProof, crate::CompactProofError<H::Out, crate::Error<H::Out>>> {
+		let db = self.to_memory_db();
+		crate::encode_compact::<Layout<H>, crate::MemoryDB<H>>(db, root)
+	}
+
 	/// Returns the estimated encoded size of the compact proof.
@@ -114,6 +139,17 @@ impl<H: Hasher> From<StorageProof> for crate::MemoryDB<H> {
 	}
 }
 
+impl<H: Hasher> From<&StorageProof> for crate::MemoryDB<H> {
+	fn from(proof: &StorageProof) -> Self {
+		let mut db = crate::MemoryDB::default();
+		proof.iter().for_each(|n| {
+			db.insert(crate::EMPTY_PREFIX, &n);
+		});
+
+		db
+	}
+}
+
 /// Storage proof in compact form.
 #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
 pub struct CompactProof {
@@ -169,23 +205,3 @@ impl CompactProof {
 		Ok((db, root))
 	}
 }
-
-/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to
-/// be traversed in any particular order.
-pub struct StorageProofNodeIterator {
-	inner: <BTreeSet<Vec<u8>> as IntoIterator>::IntoIter,
-}
-
-impl StorageProofNodeIterator {
-	fn new(proof: StorageProof) -> Self {
-		StorageProofNodeIterator { inner: proof.trie_nodes.into_iter() }
-	}
-}
-
-impl Iterator for StorageProofNodeIterator {
-	type Item = Vec<u8>;
-
-	fn next(&mut self) -> Option<Self::Item> {
-		self.inner.next()
-	}
-}
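Taken together, a short usage sketch of the borrowing API added in this file, using the method names as of this commit (the review above suggests renames) and `BlakeTwo256` as an example hasher; `proof` and `root` are assumed to come from an earlier proving step.

	use sp_core::H256;
	use sp_runtime::traits::BlakeTwo256;
	use sp_trie::{CompactProof, MemoryDB, StorageProof};

	fn inspect(proof: &StorageProof, root: H256) {
		// Borrowing iteration over the encoded nodes, in lexicographical order.
		let node_count = proof.iter().count();

		// Build a MemoryDB without consuming the proof (via the new From<&StorageProof> impl).
		let db: MemoryDB<BlakeTwo256> = proof.to_memory_db();

		// Compact-encode against the known state root, again without consuming the proof.
		let compact: Result<CompactProof, _> = proof.to_compact_proof::<BlakeTwo256>(root);

		let _ = (node_count, db, compact);
	}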
8 changes: 4 additions & 4 deletions primitives/trie/src/trie_codec.rs
@@ -20,7 +20,7 @@
 //! This uses compact proof from trie crate and extends
 //! it to substrate specific layout and child trie system.
 
-use crate::{CompactProof, HashDBT, StorageProof, TrieConfiguration, TrieHash, EMPTY_PREFIX};
+use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX};
 use sp_std::{boxed::Box, vec::Vec};
 use trie_db::{CError, Trie};
 
@@ -149,15 +149,15 @@ where
 /// Then parse all child trie root and compress main trie content first
 /// then all child trie contents.
 /// Child trie are ordered by the order of their roots in the top trie.
-pub fn encode_compact<L>(
-	proof: StorageProof,
+pub fn encode_compact<L, DB>(
+	partial_db: DB,
 	root: TrieHash<L>,
 ) -> Result<CompactProof, Error<TrieHash<L>, CError<L>>>
 where
 	L: TrieConfiguration,
+	DB: HashDBT<L::Hash, trie_db::DBValue> + hash_db::HashDBRef<L::Hash, trie_db::DBValue>,
 {
 	let mut child_tries = Vec::new();
-	let partial_db = proof.into_memory_db();
 	let mut compact_proof = {
 		let trie = crate::TrieDBBuilder::<L>::new(&partial_db, &root).build();
 
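A sketch of calling the generalized `encode_compact` directly, which now accepts any backing database satisfying the `HashDBT`/`HashDBRef` bounds rather than a `StorageProof`. Here the database is still rebuilt from a proof via `to_memory_db`, and `LayoutV1`/`BlakeTwo256` plus the function name are illustrative choices, not code from this PR.

	use sp_runtime::traits::BlakeTwo256;
	use sp_trie::{CompactProof, LayoutV1, MemoryDB, StorageProof};

	fn compact_from_proof(proof: &StorageProof, root: sp_core::H256) -> Option<CompactProof> {
		// Any HashDB-like store works now; a MemoryDB rebuilt from the proof is the common case.
		let db: MemoryDB<BlakeTwo256> = proof.to_memory_db();
		sp_trie::encode_compact::<LayoutV1<BlakeTwo256>, MemoryDB<BlakeTwo256>>(db, root).ok()
	}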