Merged

Changes from 34 of 37 commits
0af325f
Make local-cache unlimited
alexggh Feb 20, 2025
f326e8c
Remove unrelated changes
alexggh Feb 24, 2025
7799138
Create unlimited cache only in onchain context
alexggh Feb 24, 2025
a8dc8c1
Move writing back to the shared_cache from drop to a worker thread
alexggh Feb 25, 2025
c03011a
Fix build
alexggh Mar 11, 2025
6be7f6d
Add manual trie flush
alexggh Mar 12, 2025
94c7f8d
Fix on import
alexggh Mar 12, 2025
37a5a87
Merge branch 'master' into alexggh/local_cache_unlimited
alexggh Mar 12, 2025
c90d5eb
Some fixes
alexggh Mar 12, 2025
96fab66
Cosmetic changes
alexggh Mar 13, 2025
296e6f5
Merge remote-tracking branch 'origin/master' into alexggh/local_cache…
alexggh Apr 3, 2025
ad1aa3f
Remove unneeded changes
alexggh Apr 3, 2025
c267986
Add metrics to the trie cache
alexggh Apr 8, 2025
596eaa2
Voluntarily give the lock in untrusted paths
alexggh Apr 8, 2025
b1ad910
Add unittest for cache
alexggh Apr 9, 2025
39c0ad4
Merge remote-tracking branch 'origin/master' into alexggh/local_cache…
alexggh Apr 9, 2025
99c758a
Update substrate/client/api/src/backend.rs
alexggh Apr 16, 2025
3e495e3
Update substrate/client/api/src/backend.rs
alexggh Apr 16, 2025
3ad7962
Update substrate/client/api/src/backend.rs
alexggh Apr 16, 2025
6c3964f
Update substrate/primitives/trie/src/cache/shared_cache.rs
alexggh Apr 16, 2025
bc17fa0
Merge remote-tracking branch 'origin/master' into alexggh/local_cache…
alexggh Apr 16, 2025
d1e73c5
Make cache trusted
alexggh Apr 17, 2025
ca8f97e
Rename local_cache to untrusted
alexggh Apr 17, 2025
5228fbf
Rename default in untrusted
alexggh Apr 17, 2025
9e2e204
Review suggestions
alexggh Apr 17, 2025
f14dbbd
Move snapshots structs in metrics
alexggh Apr 17, 2025
8245a80
Use a trusted local cache
alexggh Apr 17, 2025
d4504d0
Drop cli flag use_trusted_local_cache
alexggh Apr 17, 2025
c983020
Update basic_authorship.rs
alexggh Apr 17, 2025
0906d98
Make cargo fmt happy
alexggh Apr 17, 2025
418ab9c
Make taplo happy
alexggh Apr 17, 2025
d2661f9
Add prdoc
alexggh Apr 17, 2025
858b9dc
Merge remote-tracking branch 'origin/master' into alexggh/local_cache…
alexggh Apr 23, 2025
448b498
Merge branch 'master' into alexggh/local_cache_unlimited
alexggh Apr 25, 2025
9ca77ea
Update substrate/primitives/trie/src/cache/shared_cache.rs
alexggh Apr 28, 2025
08c74e5
Address review feedback
alexggh Apr 28, 2025
f37537a
Merge branch 'master' into alexggh/local_cache_unlimited
alexggh May 14, 2025
2 changes: 2 additions & 0 deletions Cargo.lock

(Generated lockfile; diff not rendered.)

6 changes: 3 additions & 3 deletions cumulus/client/relay-chain-inprocess-interface/src/lib.rs
@@ -40,7 +40,7 @@ use polkadot_service::{
use sc_cli::{RuntimeVersion, SubstrateCli};
use sc_client_api::{
blockchain::BlockStatus, Backend, BlockchainEvents, HeaderBackend, ImportNotifications,
-StorageProof,
+StorageProof, TrieCacheContext,
};
use sc_telemetry::TelemetryWorkerHandle;
use sp_api::{CallApiAt, CallApiAtParams, CallContext, ProvideRuntimeApi};
@@ -217,7 +217,7 @@ impl RelayChainInterface for RelayChainInProcessInterface {
relay_parent: PHash,
key: &[u8],
) -> RelayChainResult<Option<StorageValue>> {
-let state = self.backend.state_at(relay_parent)?;
+let state = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?;
state.storage(key).map_err(RelayChainError::GenericError)
}

@@ -226,7 +226,7 @@ impl RelayChainInterface for RelayChainInProcessInterface {
relay_parent: PHash,
relevant_keys: &Vec<Vec<u8>>,
) -> RelayChainResult<StorageProof> {
-let state_backend = self.backend.state_at(relay_parent)?;
+let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?;

sp_state_machine::prove_read(state_backend, relevant_keys)
.map_err(RelayChainError::StateMachineError)
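The two hunks above move the relay-chain storage read and proof generation onto the untrusted cache context. As a rough sketch only (the `read_untrusted` helper and its signature below are made up for illustration; the imports and trait methods are the ones shown in this diff), the same pattern against the updated `sc_client_api::Backend` trait looks like this:

```rust
use sc_client_api::{Backend, StateBackend, TrieCacheContext};
use sp_runtime::traits::Block as BlockT;

/// Hypothetical helper mirroring the hunks above: RPC-driven reads use the
/// untrusted context so the local trie cache stays bounded.
fn read_untrusted<B: BlockT, BE: Backend<B>>(
	backend: &BE,
	hash: B::Hash,
	key: &[u8],
) -> Option<Vec<u8>> {
	// `state_at` now takes the cache context as a second argument.
	let state = backend.state_at(hash, TrieCacheContext::Untrusted).ok()?;
	// `BE::State` implements `StateBackend`, so raw storage reads work as before.
	state.storage(key).ok()?
}
```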
@@ -198,11 +198,11 @@ mod tests {
let recorder_for_test: SizeOnlyRecorderProvider<sp_core::Blake2Hasher> =
SizeOnlyRecorderProvider::default();
let reference_cache: SharedTrieCache<sp_core::Blake2Hasher> =
-SharedTrieCache::new(CacheSize::new(1024 * 5));
+SharedTrieCache::new(CacheSize::new(1024 * 5), None);
let cache_for_test: SharedTrieCache<sp_core::Blake2Hasher> =
-SharedTrieCache::new(CacheSize::new(1024 * 5));
+SharedTrieCache::new(CacheSize::new(1024 * 5), None);
{
-let local_cache = cache_for_test.local_cache();
+let local_cache = cache_for_test.local_cache_untrusted();
let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root);
let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root);
let reference_trie =
@@ -211,7 +211,7 @@
.with_cache(&mut trie_cache_for_reference)
.build();

-let local_cache_for_test = reference_cache.local_cache();
+let local_cache_for_test = reference_cache.local_cache_untrusted();
let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root);
let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root);
let test_trie =
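For orientation, here is a small sketch (not from the diff) of the shared/local trie cache split these tests now exercise. The cache size is arbitrary, the second constructor argument is simply the `None` passed at the changed call sites (its exact type is not visible in this excerpt), and the trusted counterpart of `local_cache_untrusted` is only implied by this PR, so it is not called here:

```rust
use sp_core::Blake2Hasher;
use sp_trie::cache::{CacheSize, SharedTrieCache};

fn build_caches() {
	// One shared cache per node; the second argument is new in this PR and is
	// passed as `None` at the call sites shown above.
	let shared: SharedTrieCache<Blake2Hasher> =
		SharedTrieCache::new(CacheSize::new(1024 * 1024), None);

	// Bounded local cache for untrusted paths (RPC, offchain calls). The
	// trusted, unbounded variant used on authoring/import is not shown here.
	let _local = shared.local_cache_untrusted();
}
```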
32 changes: 32 additions & 0 deletions prdoc/pr_7682.prdoc
@@ -0,0 +1,32 @@
title: Make SharedTrieCache/LocalTrieCache work with entire state in memory
doc:
- audience: Node Dev
description: |-
Extended LocalTrieCache with a trusted configuration that expands to hold everything in memory and
then propagates all the data back to the SharedTrieCache. This new configuration is used on authoring
and import paths.
crates:
- name: sp-state-machine
bump: major
- name: sp-trie
bump: major
- name: sc-client-api
bump: major
- name: sc-client-db
bump: major
- name: cumulus-pallet-parachain-system
bump: major
- name: sc-cli
bump: major
- name: sc-service
bump: major
- name: frame-benchmarking-cli
bump: major
- name: substrate-state-trie-migration-rpc
bump: major
- name: sc-consensus-beefy
bump: major
- name: sc-basic-authorship
bump: major
- name: cumulus-relay-chain-inprocess-interface
bump: major
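The prdoc above is the whole behavioural summary: trusted paths (block authoring and import) get an unbounded local trie cache whose contents flow back into the shared cache, while everything else stays bounded. A minimal sketch of what that means for callers of the updated `Backend::state_at` (the wrapper function and its `importing_or_authoring` flag are illustrative, not part of the PR):

```rust
use sc_client_api::{Backend, TrieCacheContext};
use sp_runtime::traits::Block as BlockT;

/// Illustrative wrapper: pick the cache context from the kind of work being done.
fn state_for<B: BlockT, BE: Backend<B>>(
	backend: &BE,
	hash: B::Hash,
	importing_or_authoring: bool,
) -> sp_blockchain::Result<BE::State> {
	let ctx = if importing_or_authoring {
		// Unbounded local cache, flushed back to the shared cache afterwards.
		TrieCacheContext::Trusted
	} else {
		// Bounded local cache; untrusted callers cannot blow up memory.
		TrieCacheContext::Untrusted
	};
	backend.state_at(hash, ctx)
}
```

In the actual changes, call sites on authoring/import paths pass `Trusted` directly (see the benchmark hunks further down), while anything reachable from RPC or offchain code passes `Untrusted`.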
1 change: 1 addition & 0 deletions substrate/bin/node/testing/src/bench.rs
@@ -389,6 +389,7 @@ impl BenchDb {
state_pruning: Some(PruningMode::ArchiveAll),
source: database_type.into_settings(dir.into()),
blocks_pruning: sc_client_db::BlocksPruning::KeepAll,
metrics_registry: None,
};
let task_executor = TaskExecutor::new();

35 changes: 33 additions & 2 deletions substrate/client/api/src/backend.rs
@@ -22,6 +22,7 @@ use std::collections::HashSet;

use parking_lot::RwLock;

use sp_api::CallContext;
use sp_consensus::BlockOrigin;
use sp_core::offchain::OffchainStorage;
use sp_runtime::{
@@ -492,6 +493,32 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
) -> sp_blockchain::Result<Option<MerkleValue<Block::Hash>>>;
}

/// Specify the desired trie cache context when calling [`Backend::state_at`].
///
/// This is used to determine the size of the local trie cache.
#[derive(Debug, Clone, Copy)]
pub enum TrieCacheContext {
/// This is used when calling [`Backend::state_at`] in a trusted context.
///
/// A trusted context is for example the building or importing of a block.
/// In this case, the local trie cache can grow without limit and all the cached data
/// will be propagated back to the shared trie cache.
Trusted,
/// This is used when calling [`Backend::state_at`] from an untrusted context.
///
/// The local trie cache will be bounded by its preconfigured size.
Untrusted,
}

impl From<CallContext> for TrieCacheContext {
fn from(call_context: CallContext) -> Self {
match call_context {
CallContext::Onchain => TrieCacheContext::Trusted,
CallContext::Offchain => TrieCacheContext::Untrusted,
}
}
}

/// Client backend.
///
/// Manages the data layer.
@@ -584,11 +611,15 @@ pub trait Backend<Block: BlockT>: AuxStore + Send + Sync {

/// Returns true if state for given block is available.
fn have_state_at(&self, hash: Block::Hash, _number: NumberFor<Block>) -> bool {
-self.state_at(hash).is_ok()
+self.state_at(hash, TrieCacheContext::Untrusted).is_ok()
}

/// Returns state backend with post-state of given block.
-fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Self::State>;
+fn state_at(
+	&self,
+	hash: Block::Hash,
+	trie_cache_context: TrieCacheContext,
+) -> sp_blockchain::Result<Self::State>;

/// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to
/// revert past any finalized block, this is unsafe and can potentially leave the node in an
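The `From<CallContext>` impl added above ties the runtime call context to the cache context, so runtime calls pick the right behaviour automatically. A short usage sketch (the `demo` function is illustrative; only the conversion itself comes from the diff):

```rust
use sc_client_api::TrieCacheContext;
use sp_api::CallContext;

fn demo() {
	// On-chain execution (authoring/import) maps to the trusted, unbounded cache.
	let trusted: TrieCacheContext = CallContext::Onchain.into();
	// Off-chain execution (RPC, offchain workers) stays bounded.
	let untrusted: TrieCacheContext = CallContext::Offchain.into();

	assert!(matches!(trusted, TrieCacheContext::Trusted));
	assert!(matches!(untrusted, TrieCacheContext::Untrusted));
}
```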
12 changes: 8 additions & 4 deletions substrate/client/api/src/in_mem.rs
@@ -42,7 +42,7 @@ use crate::{
backend::{self, NewBlockState},
blockchain::{self, BlockStatus, HeaderBackend},
leaves::LeafSet,
-UsageInfo,
+TrieCacheContext, UsageInfo,
};

struct PendingBlock<B: BlockT> {
@@ -652,7 +652,7 @@ impl<Block: BlockT> backend::Backend<Block> for Backend<Block> {
type OffchainStorage = OffchainStorage;

fn begin_operation(&self) -> sp_blockchain::Result<Self::BlockImportOperation> {
-let old_state = self.state_at(Default::default())?;
+let old_state = self.state_at(Default::default(), TrieCacheContext::Untrusted)?;
Ok(BlockImportOperation {
pending_block: None,
old_state,
@@ -668,7 +668,7 @@ impl<Block: BlockT> backend::Backend<Block> for Backend<Block> {
operation: &mut Self::BlockImportOperation,
block: Block::Hash,
) -> sp_blockchain::Result<()> {
-operation.old_state = self.state_at(block)?;
+operation.old_state = self.state_at(block, TrieCacheContext::Untrusted)?;
Ok(())
}

@@ -734,7 +734,11 @@ impl<Block: BlockT> backend::Backend<Block> for Backend<Block> {
None
}

-fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result<Self::State> {
+fn state_at(
+	&self,
+	hash: Block::Hash,
+	_trie_cache_context: TrieCacheContext,
+) -> sp_blockchain::Result<Self::State> {
if hash == Default::default() {
return Ok(Self::State::default())
}
4 changes: 2 additions & 2 deletions substrate/client/basic-authorship/src/basic_authorship.rs
@@ -606,7 +606,7 @@ mod tests {

use futures::executor::block_on;
use parking_lot::Mutex;
-use sc_client_api::Backend;
+use sc_client_api::{Backend, TrieCacheContext};
use sc_transaction_pool::BasicPool;
use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource};
use sp_api::Core;
@@ -777,7 +777,7 @@ mod tests {
let api = client.runtime_api();
api.execute_block(genesis_hash, proposal.block).unwrap();

-let state = backend.state_at(genesis_hash).unwrap();
+let state = backend.state_at(genesis_hash, TrieCacheContext::Untrusted).unwrap();

let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();

1 change: 1 addition & 0 deletions substrate/client/cli/src/commands/chain_info_cmd.rs
@@ -77,6 +77,7 @@ impl ChainInfoCmd {
state_pruning: config.state_pruning.clone(),
source: config.database.clone(),
blocks_pruning: config.blocks_pruning,
metrics_registry: None,
};
let backend = sc_service::new_db_backend::<B>(db_config)?;
let info: ChainInfo<B> = backend.blockchain().info().into();
4 changes: 2 additions & 2 deletions substrate/client/consensus/beefy/src/import.rs
@@ -28,7 +28,7 @@ use sp_runtime::{
EncodedJustification,
};

-use sc_client_api::backend::Backend;
+use sc_client_api::{backend::Backend, TrieCacheContext};
use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult};

use crate::{
@@ -149,7 +149,7 @@ where
// Run inner block import.
let inner_import_result = self.inner.import_block(block).await?;

-match self.backend.state_at(hash) {
+match self.backend.state_at(hash, TrieCacheContext::Untrusted) {
Ok(_) => {},
Err(_) => {
// The block is imported as part of some chain sync.
1 change: 1 addition & 0 deletions substrate/client/db/Cargo.toml
@@ -32,6 +32,7 @@ linked-hash-map = { workspace = true }
log = { workspace = true, default-features = true }
parity-db = { workspace = true }
parking_lot = { workspace = true, default-features = true }
prometheus-endpoint = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
sc-state-db = { workspace = true, default-features = true }
schnellru = { workspace = true }
22 changes: 13 additions & 9 deletions substrate/client/db/benches/state_access.rs
@@ -18,7 +18,9 @@

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use rand::{distributions::Uniform, rngs::StdRng, Rng, SeedableRng};
-use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBackend};
+use sc_client_api::{
+	Backend as _, BlockImportOperation, NewBlockState, StateBackend, TrieCacheContext,
+};
use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode};
use sp_core::H256;
use sp_runtime::{
@@ -83,10 +85,11 @@ fn insert_blocks(db: &Backend<Block>, storage: Vec<(Vec<u8>, Vec<u8>)>) -> H256
.map(|(k, v)| (k.clone(), Some(v.clone())))
.collect::<Vec<_>>();

-let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root(
-	changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())),
-	StateVersion::V1,
-);
+let (state_root, tx) =
+	db.state_at(parent_hash, TrieCacheContext::Trusted).unwrap().storage_root(
+		changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())),
+		StateVersion::V1,
+	);
header.state_root = state_root;

op.update_db_storage(tx).unwrap();
@@ -122,6 +125,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend<Block>
state_pruning: Some(PruningMode::ArchiveAll),
source: DatabaseSource::ParityDb { path },
blocks_pruning: BlocksPruning::KeepAll,
metrics_registry: None,
};

Backend::new(settings, 100).expect("Creates backend")
@@ -175,7 +179,7 @@ fn state_access_benchmarks(c: &mut Criterion) {

group.bench_function(desc, |b| {
b.iter_batched(
-|| backend.state_at(block_hash).expect("Creates state"),
+|| backend.state_at(block_hash, TrieCacheContext::Trusted).expect("Creates state"),
|state| {
for key in keys.iter().cycle().take(keys.len() * multiplier) {
let _ = state.storage(&key).expect("Doesn't fail").unwrap();
@@ -213,7 +217,7 @@ fn state_access_benchmarks(c: &mut Criterion) {

group.bench_function(desc, |b| {
b.iter_batched(
-|| backend.state_at(block_hash).expect("Creates state"),
+|| backend.state_at(block_hash, TrieCacheContext::Trusted).expect("Creates state"),
|state| {
for key in keys.iter().take(1).cycle().take(multiplier) {
let _ = state.storage(&key).expect("Doesn't fail").unwrap();
@@ -251,7 +255,7 @@ fn state_access_benchmarks(c: &mut Criterion) {

group.bench_function(desc, |b| {
b.iter_batched(
-|| backend.state_at(block_hash).expect("Creates state"),
+|| backend.state_at(block_hash, TrieCacheContext::Trusted).expect("Creates state"),
|state| {
for key in keys.iter().take(1).cycle().take(multiplier) {
let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap();
@@ -289,7 +293,7 @@ fn state_access_benchmarks(c: &mut Criterion) {

group.bench_function(desc, |b| {
b.iter_batched(
-|| backend.state_at(block_hash).expect("Creates state"),
+|| backend.state_at(block_hash, TrieCacheContext::Trusted).expect("Creates state"),
|state| {
let _ = state
.storage_hash(sp_core::storage::well_known_keys::CODE)
4 changes: 2 additions & 2 deletions substrate/client/db/src/bench.rs
@@ -154,7 +154,7 @@ impl<Hasher: Hash> BenchmarkingState<Hasher> {
proof_recorder: record_proof.then(Default::default),
proof_recorder_root: Cell::new(root),
// Enable the cache, but do not sync anything to the shared state.
-shared_trie_cache: SharedTrieCache::new(CacheSize::new(0)),
+shared_trie_cache: SharedTrieCache::new(CacheSize::new(0), None),
};

state.add_whitelist_to_tracker();
@@ -199,7 +199,7 @@ impl<Hasher: Hash> BenchmarkingState<Hasher> {
*self.state.borrow_mut() = Some(
DbStateBuilder::<Hasher>::new(storage_db, self.root.get())
.with_optional_recorder(self.proof_recorder.clone())
-.with_cache(self.shared_trie_cache.local_cache())
+.with_cache(self.shared_trie_cache.local_cache_untrusted())
.build(),
);
Ok(())