Skip to content
This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
80 commits
Select commit Hold shift + click to select a range
611f481
DigestItem::ChangesTrieSignal
svyatonik Jun 25, 2019
8b65b04
introduce changes_trie::State
svyatonik Jun 26, 2019
0cd05df
introduce config activation block
svyatonik Jun 26, 2019
40ed2c2
ChangesTrieSignal::as_new_configuration
svyatonik Jun 26, 2019
c01ecd6
moved well_known_cache_keys to client
svyatonik Jun 27, 2019
73f1933
extracted DbChangesTrieStorage to separate file
svyatonik Jun 27, 2019
315303a
change meaning of none in blockchain cache
svyatonik Jun 28, 2019
67642d1
changes trie config (FULL) cache draft
svyatonik Jul 1, 2019
a7e240d
eliminating const ChangesTrieConfiguration
svyatonik Jul 1, 2019
5256d03
delay pruning
svyatonik Jul 2, 2019
698e2ff
continue elimination
svyatonik Jul 3, 2019
04a8fa0
do not prune CT config from cache
svyatonik Jul 3, 2019
afda822
removed redundant code
svyatonik Jul 3, 2019
e08183d
fix some TODOs
svyatonik Jul 4, 2019
368563f
Merge branch 'master' into changes_tries_update_configuration
svyatonik Jul 24, 2019
01e3792
introduce ConfigurationRange
svyatonik Jul 29, 2019
c1913fc
use Configuration range in build
svyatonik Jul 29, 2019
fb6ae9c
build skewed digest
svyatonik Jul 29, 2019
e429061
remove debug print
svyatonik Jul 29, 2019
e6acae8
extracted surface iterator
svyatonik Jul 30, 2019
68527fb
key_changes works with skewed digests
svyatonik Jul 30, 2019
5a839e5
fix client build
svyatonik Jul 30, 2019
9729912
add test for NeverPrune
svyatonik Jul 30, 2019
bca439a
fix TODO
svyatonik Jul 30, 2019
383795a
fixed some TODOs
svyatonik Jul 30, 2019
f6993f6
more tests
svyatonik Jul 31, 2019
8b70450
fixing TODOs
svyatonik Aug 1, 2019
1b3b565
fixed compilation
svyatonik Aug 1, 2019
fa7d631
Merge branch 'master' into changes_tries_update_configuration
svyatonik Aug 1, 2019
1de7f77
update runtime version
svyatonik Aug 1, 2019
ce78f66
get rid of large tuple
svyatonik Aug 1, 2019
82e74d5
too long lines
svyatonik Aug 1, 2019
cfb1cf1
config_activation_block -> zero
svyatonik Aug 1, 2019
94979f8
obsolete TODO
svyatonik Aug 1, 2019
dd5b602
removed unjustified expect
svyatonik Aug 1, 2019
7260719
update TODOs with issue number
svyatonik Aug 1, 2019
c563d30
Merge branch 'master' into changes_tries_update_configuration
svyatonik Aug 6, 2019
4552586
new CT pruning algorithm
svyatonik Aug 7, 2019
62a5aa8
Merge branch 'master' into changes_tries_update_configuration
svyatonik Aug 13, 2019
27eecae
BlockIdOrHeader isn't really required
svyatonik Aug 14, 2019
a158250
Merge branch 'master' into changes_tries_update_configuration
svyatonik Nov 8, 2019
ce92672
Merge branch 'master' into changes_tries_update_configuration
svyatonik Nov 11, 2019
9da96a5
removed debug leftovers + some docs
svyatonik Nov 12, 2019
f8a14f5
more docs
svyatonik Nov 12, 2019
68c4db0
more post-merge fixes
svyatonik Nov 12, 2019
d63d65e
more post-merge fixes
svyatonik Nov 12, 2019
9baac30
Merge branch 'master' into changes_tries_update_configuration
svyatonik Nov 12, 2019
6da8b23
reverted some unnecessary changes
svyatonik Nov 12, 2019
d302d1e
reverted unnecessary changes
svyatonik Nov 12, 2019
f4a82de
fix compilation + unnecessary changes
svyatonik Nov 12, 2019
801169c
(restart CI)
svyatonik Nov 12, 2019
eae183b
fix cache update when finalizing multiple blocks
svyatonik Nov 12, 2019
6d2b4db
fixed tests
svyatonik Nov 12, 2019
d7abf44
Merge branch 'master' into changes_tries_update_configuration
svyatonik Nov 25, 2019
6b3d12d
Merge branch 'master' into changes_tries_update_configuration
svyatonik Nov 29, 2019
d89c979
Merge remote-tracking branch 'origin/master' into changes_tries_updat…
svyatonik Dec 11, 2019
ed31869
Merge remote-tracking branch 'origin/master' into changes_tries_updat…
svyatonik Dec 17, 2019
af3c925
collect_extrinsics -> set_collect_extrinsics
svyatonik Jan 5, 2020
935fb7b
restore lost test
svyatonik Jan 5, 2020
9da28c8
do not calculate block number twice
svyatonik Jan 5, 2020
bf7fea2
Update primitives/blockchain/src/error.rs
svyatonik Jan 5, 2020
3d570f1
map_err -> unwrap_or
svyatonik Jan 5, 2020
b898191
document get_at Result
svyatonik Jan 5, 2020
7db5596
delete abandoned file
svyatonik Jan 5, 2020
9481f59
added weight for set_changes_trie_config
svyatonik Jan 5, 2020
05630d8
Merge branch 'changes_tries_update_configuration' of https://github.c…
svyatonik Jan 5, 2020
06755b0
prefer_configs -> fail_if_disabled
svyatonik Jan 5, 2020
d0b2de2
Update client/api/src/backend.rs
svyatonik Jan 5, 2020
2f795e6
Merge branch 'changes_tries_update_configuration' of https://github.c…
svyatonik Jan 5, 2020
c4c8871
Update client/db/src/changes_tries_storage.rs
svyatonik Jan 6, 2020
51fa091
CommitOperation+merge -> CommitOperations
svyatonik Jan 6, 2020
98d1958
Merge branch 'changes_tries_update_configuration' of https://github.c…
svyatonik Jan 7, 2020
46d4721
fixed test compilation
svyatonik Jan 7, 2020
39c5f64
merged two different CTRange structs
svyatonik Jan 9, 2020
ee16f37
Merge remote-tracking branch 'origin/master' into changes_tries_updat…
svyatonik Jan 9, 2020
25452a5
lost file
svyatonik Jan 9, 2020
54f3d47
Merge remote-tracking branch 'origin/master' into changes_tries_updat…
svyatonik Jan 13, 2020
233afad
upgrade db from v0 to v1 (init CT cache + add column)
svyatonik Jan 14, 2020
0997bd6
Merge branch 'master' into changes_tries_update_configuration
gavofyork Jan 16, 2020
7658040
fix after merge
svyatonik Jan 16, 2020
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
upgrade db from v0 to v1 (init CT cache + add column)
  • Loading branch information
svyatonik committed Jan 14, 2020
commit 233afad98bf82c93d6e38aaa5deb03a75beaf4c6
11 changes: 11 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions client/db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@ sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" }
substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
env_logger = "0.7.0"
quickcheck = "0.9"
kvdb-rocksdb = "0.4"
tempdir = "0.3"

[features]
default = []
Expand Down
5 changes: 4 additions & 1 deletion client/db/src/cache/list_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1243,7 +1243,10 @@ pub mod tests {
);
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }));
assert_eq!(
*tx.updated_meta(),
Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }),
);
// finalization finalizes entry
let cache = ListCache::new(
DummyStorage::new()
Expand Down
4 changes: 3 additions & 1 deletion client/db/src/cache/list_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,9 @@ mod meta {
unfinalized.push(&entry.valid_from);
},
CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => {
finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from);
if let Some(finalizing_entry) = finalizing_entry.as_ref() {
finalized = Some(&finalizing_entry.valid_from);
}
for fork_index in forks.iter().rev() {
unfinalized.remove(*fork_index);
}
Expand Down
8 changes: 4 additions & 4 deletions client/db/src/cache/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ pub struct DbCache<Block: BlockT> {
db: Arc<dyn KeyValueDB>,
key_lookup_column: u32,
header_column: u32,
authorities_column: u32,
cache_column: u32,
genesis_hash: Block::Hash,
best_finalized_block: ComplexBlockId<Block>,
}
Expand All @@ -92,7 +92,7 @@ impl<Block: BlockT> DbCache<Block> {
db: Arc<dyn KeyValueDB>,
key_lookup_column: u32,
header_column: u32,
authorities_column: u32,
cache_column: u32,
genesis_hash: Block::Hash,
best_finalized_block: ComplexBlockId<Block>,
) -> Self {
Expand All @@ -101,7 +101,7 @@ impl<Block: BlockT> DbCache<Block> {
db,
key_lookup_column,
header_column,
authorities_column,
cache_column,
genesis_hash,
best_finalized_block,
}
Expand Down Expand Up @@ -158,7 +158,7 @@ impl<Block: BlockT> DbCache<Block> {
&self.db,
self.key_lookup_column,
self.header_column,
self.authorities_column,
self.cache_column,
&self.best_finalized_block
)
}
Expand Down
8 changes: 5 additions & 3 deletions client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ mod children;
mod cache;
mod changes_tries_storage;
mod storage_cache;
#[cfg(any(feature = "kvdb-rocksdb", test))]
mod upgrade;
mod utils;
mod stats;

Expand Down Expand Up @@ -67,7 +69,7 @@ use sp_state_machine::{
DBValue, ChangesTrieTransaction, ChangesTrieCacheAction,
backend::Backend as StateBackend, UsageInfo as StateUsageInfo,
};
use crate::utils::{Meta, db_err, meta_keys, read_db, read_meta};
use crate::utils::{DatabaseType, Meta, db_err, meta_keys, read_db, read_meta};
use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction};
use sc_client::leaves::{LeafSet, FinalizationDisplaced};
use sc_state_db::StateDb;
Expand Down Expand Up @@ -355,7 +357,7 @@ pub struct BlockchainDb<Block: BlockT> {

impl<Block: BlockT> BlockchainDb<Block> {
fn new(db: Arc<dyn KeyValueDB>) -> ClientResult<Self> {
let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
let meta = read_meta::<Block>(&*db, columns::HEADER)?;
let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?;
Ok(BlockchainDb {
db,
Expand Down Expand Up @@ -752,7 +754,7 @@ impl<Block: BlockT> Backend<Block> {
///
/// The pruning window is how old a block must be before the state is pruned.
pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult<Self> {
let db = crate::utils::open_database(&config, columns::META, "full")?;
let db = crate::utils::open_database::<Block>(&config, DatabaseType::Full)?;
Self::from_kvdb(db as Arc<_>, canonicalization_delay, &config)
}

Expand Down
11 changes: 4 additions & 7 deletions client/db/src/light.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ use codec::{Decode, Encode};
use sp_runtime::generic::{DigestItem, BlockId};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HasherFor};
use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType};
use crate::utils::{self, meta_keys, Meta, db_err, read_db, block_id_to_lookup_key, read_meta};
use crate::utils::{self, meta_keys, DatabaseType, Meta, db_err, read_db, block_id_to_lookup_key, read_meta};
use crate::{DatabaseSettings, FrozenForDuration};
use log::{trace, warn, debug};

Expand Down Expand Up @@ -68,13 +68,10 @@ pub struct LightStorage<Block: BlockT> {
io_stats: FrozenForDuration<kvdb::IoStats>,
}

impl<Block> LightStorage<Block>
where
Block: BlockT,
{
impl<Block: BlockT> LightStorage<Block> {
/// Create new storage with given settings.
pub fn new(config: DatabaseSettings) -> ClientResult<Self> {
let db = crate::utils::open_database(&config, columns::META, "light")?;
let db = crate::utils::open_database::<Block>(&config, DatabaseType::Light)?;
Self::from_kvdb(db as Arc<_>)
}

Expand All @@ -89,7 +86,7 @@ impl<Block> LightStorage<Block>
}

fn from_kvdb(db: Arc<dyn KeyValueDB>) -> ClientResult<Self> {
let meta = read_meta::<Block>(&*db, columns::META, columns::HEADER)?;
let meta = read_meta::<Block>(&*db, columns::HEADER)?;
let cache = DbCache::new(
db.clone(),
columns::KEY_LOOKUP,
Expand Down
198 changes: 198 additions & 0 deletions client/db/src/upgrade.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

//! Database upgrade logic.

use std::fs;
use std::io::{Read, Write, ErrorKind};
use std::path::{Path, PathBuf};
use std::sync::Arc;

use codec::Encode;
use kvdb_rocksdb::{Database, DatabaseConfig};
use parking_lot::RwLock;
use sp_blockchain::{well_known_cache_keys, Cache};
use sp_core::ChangesTrieConfiguration;
use sp_runtime::traits::Block as BlockT;
use crate::{
cache::{ComplexBlockId, DbCache, DbCacheSync},
utils::{DatabaseType, check_database_type, db_err, read_genesis_hash},
};

/// Version file name.
const VERSION_FILE_NAME: &'static str = "db_version";

/// Current db version.
const CURRENT_VERSION: u32 = 1;

/// Number of columns in v0.
const V0_NUM_COLUMNS: u32 = 10;

/// Upgrade database to current version.
pub fn upgrade_db<Block: BlockT>(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> {
let db_version = current_version(db_path)?;
match db_version {
0 => migrate_0_to_1::<Block>(db_path, db_type)?,
1 => (),
_ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?,
}

update_version(db_path)
}

/// Migration from version 0 to version 1:
/// 1) the number of columns has changed from 10 to 11;
/// 2) changes tries configuration is now cached.
fn migrate_0_to_1<Block: BlockT>(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> {
	// First open with the v0 column count, append the new column and flush, so the
	// column exists on disk before the database is reopened with the v1 layout.
	{
		let db = open_database(db_path, db_type, V0_NUM_COLUMNS)?;
		db.add_column().map_err(db_err)?;
		db.flush().map_err(db_err)?;
	}

	// Reopen with the v1 column count (v0 + the cache column added above).
	let db = open_database(db_path, db_type, V0_NUM_COLUMNS + 1)?;

	// v0 column indices needed by the cache; full and light databases use
	// different layouts, hence the two sets of constants.
	const V0_FULL_KEY_LOOKUP_COLUMN: u32 = 3;
	const V0_FULL_HEADER_COLUMN: u32 = 4;
	const V0_FULL_CACHE_COLUMN: u32 = 10; // that's the column we have just added
	const V0_LIGHT_KEY_LOOKUP_COLUMN: u32 = 1;
	const V0_LIGHT_HEADER_COLUMN: u32 = 2;
	const V0_LIGHT_CACHE_COLUMN: u32 = 3;

	let (key_lookup_column, header_column, cache_column) = match db_type {
		DatabaseType::Full => (
			V0_FULL_KEY_LOOKUP_COLUMN,
			V0_FULL_HEADER_COLUMN,
			V0_FULL_CACHE_COLUMN,
		),
		DatabaseType::Light => (
			V0_LIGHT_KEY_LOOKUP_COLUMN,
			V0_LIGHT_HEADER_COLUMN,
			V0_LIGHT_CACHE_COLUMN,
		),
	};

	// If the database has no genesis hash yet it is effectively empty, so there is
	// nothing to seed the cache with and initialization is skipped.
	let genesis_hash: Option<Block::Hash> = read_genesis_hash(&db)?;
	if let Some(genesis_hash) = genesis_hash {
		let cache: DbCacheSync<Block> = DbCacheSync(RwLock::new(DbCache::new(
			Arc::new(db),
			key_lookup_column,
			header_column,
			cache_column,
			genesis_hash,
			// best-finalized starts at genesis for the purposes of cache initialization
			ComplexBlockId::new(genesis_hash, 0.into()),
		)));
		// v0 databases never had a changes-trie configuration, so the cache is
		// initialized with an encoded `None` valid from genesis.
		let changes_trie_config: Option<ChangesTrieConfiguration> = None;
		cache.initialize(&well_known_cache_keys::CHANGES_TRIE_CONFIG, changes_trie_config.encode())?;
	}

	Ok(())
}

/// Reads current database version from the file at given path.
///
/// Returns 0 when the version file does not exist (fresh or pre-versioning
/// database); any other read/parse failure is reported as an unknown version.
fn current_version(path: &Path) -> sp_blockchain::Result<u32> {
	let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into());

	match fs::File::open(version_file_path(path)) {
		// Missing file is not an error: treat as version 0.
		Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0),
		Err(_) => Err(unknown_version_err()),
		Ok(mut file) => {
			let mut s = String::new();
			file.read_to_string(&mut s).map_err(|_| unknown_version_err())?;
			// Trim so a trailing newline (e.g. from a hand-edited file) does not
			// make a valid version unparseable.
			s.trim().parse().map_err(|_| unknown_version_err())
		},
	}
}

/// Opens database of given type with given number of columns.
///
/// Also verifies that the on-disk database type matches `db_type`, so a light
/// database is never migrated as a full one (and vice versa).
fn open_database(db_path: &Path, db_type: DatabaseType, db_columns: u32) -> sp_blockchain::Result<Database> {
	// RocksDB takes a UTF-8 string path; reject paths that cannot be represented.
	let db_path = db_path.to_str()
		.ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?;
	let db_cfg = DatabaseConfig::with_columns(db_columns);
	let db = Database::open(&db_cfg, db_path).map_err(db_err)?;
	check_database_type(&db, db_type)?;
	Ok(db)
}

/// Writes current database version to the file.
///
/// Creates the database directory and/or the version file if they do not
/// exist yet; an existing version file is overwritten.
fn update_version(path: &Path) -> sp_blockchain::Result<()> {
	// The database directory may not exist yet on first run.
	fs::create_dir_all(path).map_err(db_err)?;
	// `fs::write` creates-or-truncates and writes in a single call.
	fs::write(version_file_path(path), CURRENT_VERSION.to_string()).map_err(db_err)?;
	Ok(())
}

/// Returns the version file path inside the given database directory.
fn version_file_path(path: &Path) -> PathBuf {
	// `Path::join` allocates the owned path with the file name appended,
	// replacing the manual to_owned + push pair.
	path.join(VERSION_FILE_NAME)
}

#[cfg(test)]
mod tests {
	use sc_state_db::PruningMode;
	use crate::{DatabaseSettings, DatabaseSettingsSrc};
	use crate::tests::Block;
	use super::*;

	// Creates an (empty) v0-layout database at `db_path`; when `version` is
	// given, also writes that number into the version file.
	fn create_db(db_path: &Path, version: Option<u32>) {
		let db_cfg = DatabaseConfig::with_columns(V0_NUM_COLUMNS);
		Database::open(&db_cfg, db_path.to_str().unwrap()).unwrap();
		if let Some(version) = version {
			fs::create_dir_all(db_path).unwrap();
			let mut file = fs::File::create(version_file_path(db_path)).unwrap();
			file.write_all(format!("{}", version).as_bytes()).unwrap();
		}
	}

	// Opens the database through the production entry point, which runs the
	// upgrade machinery as a side effect.
	fn open_database(db_path: &Path) -> sp_blockchain::Result<()> {
		crate::utils::open_database::<Block>(&DatabaseSettings {
			state_cache_size: 0,
			state_cache_child_ratio: None,
			pruning: PruningMode::ArchiveAll,
			source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: None },
		}, DatabaseType::Full).map(|_| ())
	}

	#[test]
	fn downgrade_never_happens() {
		// A database stamped with a future version must be rejected, not "migrated down".
		let db_dir = tempdir::TempDir::new("").unwrap();
		create_db(db_dir.path(), Some(CURRENT_VERSION + 1));
		assert!(open_database(db_dir.path()).is_err());
	}

	#[test]
	fn open_empty_database_works() {
		// Opening twice checks both creation and re-opening of an already-current db.
		let db_dir = tempdir::TempDir::new("").unwrap();
		open_database(db_dir.path()).unwrap();
		open_database(db_dir.path()).unwrap();
		assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION);
	}

	#[test]
	fn upgrade_from_0_to_1_works() {
		// Both a missing version file and an explicit "0" must be treated as v0.
		for version_from_file in &[None, Some(0)] {
			let db_dir = tempdir::TempDir::new("").unwrap();
			let db_path = db_dir.path();
			create_db(db_path, *version_from_file);
			open_database(db_path).unwrap();
			assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION);
		}
	}
}
Loading