Merged
Changes from 3 commits
550 changes: 264 additions & 286 deletions Cargo.lock

Large diffs are not rendered by default.

5 changes: 2 additions & 3 deletions bin/node/bench/Cargo.toml
@@ -23,8 +23,8 @@ sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machi
serde = "1.0.136"
serde_json = "1.0.85"
derive_more = { version = "0.99.17", default-features = false, features = ["display"] }
-kvdb = "0.12.0"
-kvdb-rocksdb = "0.16.0"
+kvdb = "0.13.0"
+kvdb-rocksdb = "0.17.0"
sp-trie = { version = "7.0.0", path = "../../../primitives/trie" }
sp-core = { version = "7.0.0", path = "../../../primitives/core" }
sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" }
@@ -37,7 +37,6 @@ tempfile = "3.1.0"
fs_extra = "1"
rand = { version = "0.7.2", features = ["small_rng"] }
lazy_static = "1.4.0"
-parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] }
parity-db = "0.4.2"
sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" }
sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
1 change: 0 additions & 1 deletion bin/node/bench/src/tempdb.rs
@@ -29,7 +29,6 @@ pub enum DatabaseType {
pub struct TempDatabase(tempfile::TempDir);

struct ParityDbWrapper(parity_db::Db);
-parity_util_mem::malloc_size_of_is_0!(ParityDbWrapper);

impl KeyValueDB for ParityDbWrapper {
/// Get a value by key.
4 changes: 1 addition & 3 deletions bin/node/cli/Cargo.toml
@@ -101,9 +101,7 @@ try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../ut
serde_json = "1.0.85"

[target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies]
-sp-trie = { version = "7.0.0", default-features = false, path = "../../../primitives/trie", features = [
-"memory-tracker",
-] }
+sp-trie = { version = "7.0.0", default-features = false, path = "../../../primitives/trie" }

[dev-dependencies]
sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" }
17 changes: 0 additions & 17 deletions client/api/src/client.rs
@@ -197,26 +197,13 @@ impl fmt::Display for MemorySize {
}
}

-/// Memory statistics for state db.
-#[derive(Default, Clone, Debug)]
-pub struct StateDbMemoryInfo {
-/// Memory usage of the non-canonical overlay
-pub non_canonical: MemorySize,
-/// Memory usage of the pruning window.
-pub pruning: Option<MemorySize>,
-/// Memory usage of the pinned blocks.
-pub pinned: MemorySize,
-}

/// Memory statistics for client instance.
#[derive(Default, Clone, Debug)]
pub struct MemoryInfo {
/// Size of state cache.
pub state_cache: MemorySize,
/// Size of backend database cache.
pub database_cache: MemorySize,
-/// Size of the state db.
-pub state_db: StateDbMemoryInfo,
}

/// I/O statistics for client instance.
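For reference, a minimal sketch of what a backend reports through the slimmed-down struct after this change (byte values are illustrative, and it is assumed that `MemoryInfo` is re-exported from `sc_client_api` alongside `MemorySize`):

use sc_client_api::{MemoryInfo, MemorySize};

// Usage reporting now carries only the two cache sizes; the state-db
// breakdown is gone along with `StateDbMemoryInfo`.
let memory = MemoryInfo {
    state_cache: MemorySize::from_bytes(2 * 1024 * 1024),
    database_cache: MemorySize::from_bytes(16 * 1024 * 1024),
};
assert_eq!(memory.state_cache.as_bytes(), 2 * 1024 * 1024);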
@@ -264,13 +251,9 @@ impl fmt::Display for UsageInfo {
write!(
f,
"caches: ({} state, {} db overlay), \
-state db: ({} non-canonical, {} pruning, {} pinned), \
i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} trie nodes writes)",
self.memory.state_cache,
self.memory.database_cache,
-self.memory.state_db.non_canonical,
-self.memory.state_db.pruning.unwrap_or_default(),
-self.memory.state_db.pinned,
self.io.transactions,
self.io.bytes_written,
self.io.bytes_read,
8 changes: 4 additions & 4 deletions client/db/Cargo.toml
@@ -17,9 +17,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [
"derive",
] }
hash-db = "0.15.2"
-kvdb = "0.12.0"
-kvdb-memorydb = "0.12.0"
-kvdb-rocksdb = { version = "0.16.0", optional = true }
+kvdb = "0.13.0"
+kvdb-memorydb = "0.13.0"
+kvdb-rocksdb = { version = "0.17.0", optional = true }
linked-hash-map = "0.5.4"
log = "0.4.17"
parity-db = "0.4.2"
@@ -36,7 +36,7 @@ sp-trie = { version = "7.0.0", path = "../../primitives/trie" }

[dev-dependencies]
criterion = "0.3.3"
-kvdb-rocksdb = "0.16.0"
+kvdb-rocksdb = "0.17.0"
rand = "0.8.4"
tempfile = "3.1.0"
quickcheck = { version = "1.0.3", default-features = false }
3 changes: 1 addition & 2 deletions client/db/src/lib.rs
@@ -2086,10 +2086,9 @@ impl<Block: BlockT> sc_client_api::backend::Backend<Block> for Backend<Block> {
let state_cache = MemorySize::from_bytes(
self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()),
);
-let state_db = self.storage.state_db.memory_info();

Some(UsageInfo {
-memory: MemoryInfo { state_cache, database_cache, state_db },
+memory: MemoryInfo { state_cache, database_cache },
io: IoInfo {
transactions: io_stats.transactions,
bytes_read: io_stats.bytes_read,
10 changes: 3 additions & 7 deletions client/db/src/storage_cache.rs
@@ -59,15 +59,11 @@ pub struct Cache<B: BlockT> {

struct LRUMap<K, V>(LinkedHashMap<K, V>, usize, usize);

-/// Internal trait similar to `heapsize` but using
-/// a simply estimation.
+/// Internal trait similar to `heapsize` but using a simple estimation.
///
-/// This should not be made public, it is implementation
-/// detail trait. If it need to become public please
-/// consider using `malloc_size_of`.
+/// This should not be made public, it is an implementation detail trait.
trait EstimateSize {
-/// Return a size estimation of additional size needed
-/// to cache this struct (in bytes).
+/// Return a size estimation of the additional size needed to cache this struct (in bytes).
fn estimate_size(&self) -> usize;
}
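As a rough illustration of the `EstimateSize` contract described above, a sketch of possible implementors (these impls are illustrative and not necessarily the ones in `storage_cache.rs`):

// Estimate only the heap payload an entry adds to the cache; the fixed-size
// bookkeeping of the map itself is ignored.
impl EstimateSize for Vec<u8> {
    fn estimate_size(&self) -> usize {
        self.capacity()
    }
}

impl EstimateSize for Option<Vec<u8>> {
    fn estimate_size(&self) -> usize {
        // A `None` entry occupies a slot but carries no payload.
        self.as_ref().map_or(0, |v| v.capacity())
    }
}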

1 change: 0 additions & 1 deletion client/informant/Cargo.toml
@@ -17,7 +17,6 @@ ansi_term = "0.12.1"
futures = "0.3.21"
futures-timer = "3.0.1"
log = "0.4.17"
-parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] }
sc-client-api = { version = "4.0.0-dev", path = "../api" }
sc-network-common = { version = "0.10.0-dev", path = "../network/common" }
sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" }
10 changes: 2 additions & 8 deletions client/informant/src/lib.rs
@@ -22,7 +22,6 @@ use ansi_term::Colour;
use futures::prelude::*;
use futures_timer::Delay;
use log::{debug, info, trace};
-use parity_util_mem::MallocSizeOf;
use sc_client_api::{BlockchainEvents, UsageProvider};
use sc_network_common::service::NetworkStatusProvider;
use sc_transaction_pool_api::TransactionPool;
@@ -56,13 +55,13 @@ impl Default for OutputFormat {
pub async fn build<B: BlockT, C, N, P>(
client: Arc<C>,
network: N,
-pool: Arc<P>,
+_pool: Arc<P>,
format: OutputFormat,
) where
N: NetworkStatusProvider<B>,
C: UsageProvider<B> + HeaderMetadata<B> + BlockchainEvents<B>,
<C as HeaderMetadata<B>>::Error: Display,
-P: TransactionPool + MallocSizeOf,
+P: TransactionPool,
{
let mut display = display::InformantDisplay::new(format.clone());

@@ -83,11 +82,6 @@ pub async fn build<B: BlockT, C, N, P>(
"Usage statistics not displayed as backend does not provide it",
)
}
-trace!(
-target: "usage",
-"Subsystems memory [txpool: {} kB]",
-parity_util_mem::malloc_size(&*pool) / 1024,
-);
display.display(&info, net_status);
future::ready(())
});
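A sketch of what a caller looks like once the `MallocSizeOf` bound is gone — the pool is still passed for API compatibility but no longer needs memory tracking. The generic bounds mirror the `build` signature above; the wiring itself is illustrative:

use std::{fmt::Display, sync::Arc};

use sc_client_api::{BlockchainEvents, UsageProvider};
use sc_informant::OutputFormat;
use sc_network_common::service::NetworkStatusProvider;
use sc_transaction_pool_api::TransactionPool;
use sp_blockchain::HeaderMetadata;
use sp_runtime::traits::Block as BlockT;

async fn start_informant<B, C, N, P>(client: Arc<C>, network: N, pool: Arc<P>)
where
    B: BlockT,
    C: UsageProvider<B> + HeaderMetadata<B> + BlockchainEvents<B>,
    <C as HeaderMetadata<B>>::Error: Display,
    N: NetworkStatusProvider<B>,
    P: TransactionPool, // previously required `+ MallocSizeOf`
{
    // Runs until the node shuts down, printing the periodic status line.
    sc_informant::build(client, network, pool, OutputFormat::default()).await;
}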
3 changes: 0 additions & 3 deletions client/service/Cargo.toml
@@ -80,9 +80,6 @@ sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" }
sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" }
tracing = "0.1.29"
tracing-futures = { version = "0.2.4" }
-parity-util-mem = { version = "0.12.0", default-features = false, features = [
-"primitive-types",
-] }
async-trait = "0.1.57"
tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] }
tempfile = "3.1.0"
1 change: 0 additions & 1 deletion client/service/src/builder.rs
@@ -437,7 +437,6 @@ where
TBl::Header: Unpin,
TBackend: 'static + sc_client_api::backend::Backend<TBl> + Send,
TExPool: MaintainedTransactionPool<Block = TBl, Hash = <TBl as BlockT>::Hash>
- + parity_util_mem::MallocSizeOf
+ 'static,
{
let SpawnTasksParams {
20 changes: 0 additions & 20 deletions client/service/src/metrics.rs
@@ -43,7 +43,6 @@ struct PrometheusMetrics {
// I/O
database_cache: Gauge<U64>,
state_cache: Gauge<U64>,
-state_db: GaugeVec<U64>,
}

impl PrometheusMetrics {
@@ -117,13 +116,6 @@ impl PrometheusMetrics {
Gauge::new("substrate_state_cache_bytes", "State cache size in bytes")?,
registry,
)?,
-state_db: register(
-GaugeVec::new(
-Opts::new("substrate_state_db_cache_bytes", "State DB cache in bytes"),
-&["subtype"],
-)?,
-registry,
-)?,
})
}
}
@@ -254,18 +246,6 @@ impl MetricsService {
if let Some(info) = info.usage.as_ref() {
metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64);
metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64);

-metrics
-.state_db
-.with_label_values(&["non_canonical"])
-.set(info.memory.state_db.non_canonical.as_bytes() as u64);
-if let Some(pruning) = info.memory.state_db.pruning {
-metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64);
-}
-metrics
-.state_db
-.with_label_values(&["pinned"])
-.set(info.memory.state_db.pinned.as_bytes() as u64);
}
}

2 changes: 0 additions & 2 deletions client/state-db/Cargo.toml
@@ -15,8 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] }
log = "0.4.17"
-parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] }
-parity-util-mem-derive = "0.1.0"
parking_lot = "0.12.1"
sc-client-api = { version = "4.0.0-dev", path = "../api" }
sp-core = { version = "7.0.0", path = "../../primitives/core" }
32 changes: 5 additions & 27 deletions client/state-db/src/lib.rs
@@ -49,10 +49,8 @@ mod test;
use codec::Codec;
use log::trace;
use noncanonical::NonCanonicalOverlay;
-use parity_util_mem::{malloc_size, MallocSizeOf};
use parking_lot::RwLock;
use pruning::{HaveBlock, RefWindow};
-use sc_client_api::{MemorySize, StateDbMemoryInfo};
use std::{
collections::{hash_map::Entry, HashMap},
fmt,
@@ -220,8 +218,6 @@ pub struct Constraints {
/// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical
/// states.
pub max_blocks: Option<u32>,
-/// Maximum memory in the pruning overlay.
-pub max_mem: Option<usize>,
}

/// Pruning mode.
@@ -238,7 +234,7 @@ pub enum PruningMode {
impl PruningMode {
/// Create a mode that keeps given number of blocks.
pub fn blocks_pruning(n: u32) -> PruningMode {
-PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None })
+PruningMode::Constrained(Constraints { max_blocks: Some(n) })
}

/// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode?
@@ -276,7 +272,7 @@ impl Default for PruningMode {

impl Default for Constraints {
fn default() -> Self {
-Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT), max_mem: None }
+Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT) }
}
}
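A small sketch of the constrained pruning mode once `max_mem` is removed (assumes access to the crate-internal `DEFAULT_MAX_BLOCK_CONSTRAINT` constant, e.g. from within the crate or its tests):

// Keep the state of the last 256 canonicalized blocks; with the memory-based
// limit gone, the block count is the only constraint left.
let mode = PruningMode::blocks_pruning(256);
assert!(matches!(
    mode,
    PruningMode::Constrained(Constraints { max_blocks: Some(256) })
));

// The default constraint likewise only bounds the number of kept blocks.
assert_eq!(Constraints::default().max_blocks, Some(DEFAULT_MAX_BLOCK_CONSTRAINT));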

@@ -294,7 +290,7 @@ pub struct StateDbSync<BlockHash: Hash, Key: Hash, D: MetaDb> {
ref_counting: bool,
}

-impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
+impl<BlockHash: Hash, Key: Hash, D: MetaDb>
StateDbSync<BlockHash, Key, D>
{
fn new(
@@ -306,8 +302,7 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>

let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(&db)?;
let pruning: Option<RefWindow<BlockHash, Key, D>> = match mode {
-PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(),
-PruningMode::Constrained(Constraints { max_blocks, .. }) =>
+PruningMode::Constrained(Constraints { max_blocks }) =>
Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?),
PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None,
};
@@ -392,10 +387,6 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
break
}

-if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) {
-break
-}

let pinned = &self.pinned;
match pruning.next_hash() {
// the block record is temporary unavailable, break and try next time
@@ -496,14 +487,6 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
}
db.get(key.as_ref()).map_err(Error::Db)
}

-fn memory_info(&self) -> StateDbMemoryInfo {
-StateDbMemoryInfo {
-non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)),
-pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(&p))),
-pinned: MemorySize::from_bytes(malloc_size(&self.pinned)),
-}
-}
}

/// State DB maintenance. See module description.
Expand All @@ -512,7 +495,7 @@ pub struct StateDb<BlockHash: Hash, Key: Hash, D: MetaDb> {
db: RwLock<StateDbSync<BlockHash, Key, D>>,
}

-impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
+impl<BlockHash: Hash, Key: Hash, D: MetaDb>
StateDb<BlockHash, Key, D>
{
/// Create an instance of [`StateDb`].
@@ -637,11 +620,6 @@ impl<BlockHash: Hash + MallocSizeOf, Key: Hash + MallocSizeOf, D: MetaDb>
*state_db = StateDbSync::new(state_db.mode.clone(), state_db.ref_counting, db)?;
Ok(())
}

-/// Returns the current memory statistics of this instance.
-pub fn memory_info(&self) -> StateDbMemoryInfo {
-self.db.read().memory_info()
-}
}

/// The result return by `StateDb::is_pruned`
10 changes: 5 additions & 5 deletions client/state-db/src/noncanonical.rs
@@ -20,17 +20,19 @@
//! Maintains trees of block overlays and allows discarding trees/roots
//! The overlays are added in `insert` and removed in `canonicalize`.

-use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError};
+use super::{
+to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb,
+StateDbError,
+};
use codec::{Decode, Encode};
use log::trace;
-use std::collections::{hash_map::Entry, HashMap, VecDeque};
+use std::{collections::{hash_map::Entry, HashMap, VecDeque}};

const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal";
pub(crate) const LAST_CANONICAL: &[u8] = b"last_canonical";
const MAX_BLOCKS_PER_LEVEL: u64 = 32;

/// See module documentation.
-#[derive(parity_util_mem_derive::MallocSizeOf)]
pub struct NonCanonicalOverlay<BlockHash: Hash, Key: Hash> {
last_canonicalized: Option<(BlockHash, u64)>,
levels: VecDeque<OverlayLevel<BlockHash, Key>>,
Expand All @@ -41,7 +43,6 @@ pub struct NonCanonicalOverlay<BlockHash: Hash, Key: Hash> {
pinned_insertions: HashMap<BlockHash, (Vec<Key>, u32)>,
}

-#[derive(parity_util_mem_derive::MallocSizeOf)]
#[cfg_attr(test, derive(PartialEq, Debug))]
struct OverlayLevel<BlockHash: Hash, Key: Hash> {
blocks: Vec<BlockOverlay<BlockHash, Key>>,
Expand Down Expand Up @@ -81,7 +82,6 @@ fn to_journal_key(block: u64, index: u64) -> Vec<u8> {
}

#[cfg_attr(test, derive(PartialEq, Debug))]
-#[derive(parity_util_mem_derive::MallocSizeOf)]
struct BlockOverlay<BlockHash: Hash, Key: Hash> {
hash: BlockHash,
journal_index: u64,