Merged
Changes from 1 commit
85 commits
9248cd1
Refactor block building and import in collator service
skunert Apr 3, 2024
70afa25
Remove unused SyncOracle, light reformat
skunert Apr 3, 2024
7e01dca
Add slot-based collator
skunert Apr 3, 2024
08ec3e7
Prepare cumulus test node & runtime
skunert Apr 3, 2024
3848239
Prepare cumulus test node & runtime
skunert Apr 3, 2024
7a057ed
Add zombienet test
skunert Apr 3, 2024
3f4a260
Misc
skunert Apr 3, 2024
c049f50
Nits
skunert Apr 3, 2024
c1c96bc
Remove unused dep, add issues to todos
skunert Apr 3, 2024
7af20a6
Remove unused dep, add issues to todos
skunert Apr 3, 2024
245a11a
Enable "elastic-scaling-experimental" for cumulus test node
skunert Apr 3, 2024
560a907
Adjust parent search to allow for pending blocks with depth > 1
skunert Apr 5, 2024
734bdcb
Merge fixes
skunert Apr 9, 2024
c439d21
Modernize zombienet test:
skunert Apr 11, 2024
fb9f383
Unify core schedule fetching methods
skunert Apr 11, 2024
06f8fc7
Remove overseer_handle from parameters
skunert Apr 11, 2024
cf64247
Improve logging
skunert Apr 11, 2024
aa7fba9
Merge branch 'master' into slot-based-mvp
skunert Apr 12, 2024
44b489f
Do not return potential_parents that are not available locally
skunert Apr 12, 2024
8df558f
Use common async backing params fetching methods
skunert Apr 16, 2024
7705314
Assign cores during block building
skunert Apr 16, 2024
1553d40
Merge branch 'master' into slot-based-mvp
skunert Apr 16, 2024
f84a6f3
Add more comments, cleanup
skunert Apr 16, 2024
e1a5879
Refactor `find_potential_parent`
skunert Apr 16, 2024
576605b
Merge branch 'master' into slot-based-mvp
skunert Apr 17, 2024
1b184da
Make expected cores work when para slot duration > relay slot duration
skunert Apr 17, 2024
e95e5df
Review comments
skunert Apr 23, 2024
e4edbe4
Extract parent search into module
skunert Apr 23, 2024
53869a0
More code reuse between lookahead and slot-based
skunert Apr 23, 2024
4240cc1
Merge branch 'master' into slot-based-mvp
skunert Apr 23, 2024
c790fb8
fmt
skunert Apr 23, 2024
61be48b
Merge branch 'master' into slot-based-mvp
skunert Apr 25, 2024
a06bf79
Merge branch 'master' into slot-based-mvp
skunert Apr 29, 2024
169479a
Add slot drift for testing
skunert Apr 30, 2024
ac3e9ed
Merge branch 'master' into slot-based-mvp
skunert Apr 30, 2024
6a9955d
Remove duplicate comment
skunert May 6, 2024
9760e81
Expose slot_drift parameter
skunert May 7, 2024
42c0135
Do not assume fixed slot_duration
skunert May 7, 2024
44bc8e7
Apply suggestions from code review
skunert May 29, 2024
a23d5a9
Address comments
skunert May 29, 2024
07478dc
Remove unused parameters from zombienet
skunert May 29, 2024
15c36a9
Introduce experimental CLI option
skunert May 29, 2024
2c8cc0d
Merge branch 'master' into slot-based-mvp
skunert May 29, 2024
65cdc01
fmt
skunert May 29, 2024
5dcea6c
Adjust zombienet parameter
skunert May 29, 2024
46bd385
Merge branch 'master' into slot-based-mvp
skunert May 31, 2024
27eb643
Remove +1 on velocity
skunert May 31, 2024
99e5741
Reduce number of relay chain fetches
skunert May 31, 2024
944044d
Add comments to `RelayChainCachingFetcher`
skunert Jun 4, 2024
41dba02
More comment adjustments
skunert Jun 4, 2024
33243a2
Merge branch 'master' into slot-based-mvp
skunert Jun 4, 2024
0b061f6
Fix template build
skunert Jun 4, 2024
8be5db4
Apply suggestions from code review
skunert Jun 13, 2024
8e4320d
Merge branch 'master' into slot-based-mvp
skunert Jun 13, 2024
6c97bd1
Merge option in relay chain fetcher
skunert Jun 13, 2024
0ae68d3
Spawn futures inside collator
skunert Jun 13, 2024
767430e
Do not build if pending parent is not in db
skunert Jun 13, 2024
1021ee3
Remove slot based experimental from common cli options
skunert Jun 17, 2024
c109088
Fix lookahead collator filling the pipeline
skunert Jun 18, 2024
8c364e8
Add PoV-recovery test, make sure no inherent errors are thrown
skunert Jun 18, 2024
8e30d38
Move collator to futures again
skunert Jun 19, 2024
f3233db
Move lost comment
skunert Jun 19, 2024
da96123
Improve pov-recovery test
skunert Jun 19, 2024
05c3812
Increase velocity to 4
skunert Jun 19, 2024
938a736
Add test to pipeline
skunert Jun 19, 2024
fa3f070
Add prdoc
skunert Jun 19, 2024
edfe474
Abort block builder task if we can not send to collation task
skunert Jun 19, 2024
b1490eb
Fixes
skunert Jun 19, 2024
17fd978
Merge branch 'master' into slot-based-mvp
skunert Jun 19, 2024
9d14989
Comment adjustments
skunert Jun 19, 2024
96d885a
Fix pov-recovery test
skunert Jun 20, 2024
eb61449
Fix prdoc semver
skunert Jun 20, 2024
cb16053
Add slot based collator for default runtimes or chainspecs
skunert Jun 20, 2024
48267c2
PRDoc one more time
skunert Jun 20, 2024
d595c7b
Fix collator image
skunert Jun 21, 2024
b1edd47
Merge branch 'master' into slot-based-mvp
skunert Jun 25, 2024
764fcd5
Merge branch 'master' into slot-based-mvp
skunert Jul 4, 2024
62d5b9d
post-merge adjustments
skunert Jul 4, 2024
7bc28ff
Review comments
skunert Jul 1, 2024
d986ee7
Apply suggestions from code review
skunert Jul 4, 2024
dd36c44
Remove unnecessary pending condition in parent search
skunert Jul 4, 2024
91c6539
Adjust comments and tracing log
skunert Jul 4, 2024
8e0b80d
".git/.scripts/commands/fmt/fmt.sh"
Jul 4, 2024
09222f7
make semver happy
skunert Jul 5, 2024
938d6e6
Reviewer comment: Remove duplicate check for pending
skunert Jul 5, 2024
Remove unused SyncOracle, light reformat
skunert committed Apr 10, 2024
commit 70afa254161517352fbf0da5346b25cae21ca40e
1 change: 0 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

14 changes: 5 additions & 9 deletions cumulus/client/consensus/aura/src/collators/basic.rs
@@ -41,19 +41,18 @@ use sc_consensus::BlockImport;
use sp_api::{CallApiAt, ProvideRuntimeApi};
use sp_application_crypto::AppPublic;
use sp_blockchain::HeaderBackend;
use sp_consensus::SyncOracle;
use sp_consensus_aura::AuraApi;
use sp_consensus_aura::{AuraApi, SlotDuration};
use sp_core::crypto::Pair;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member};
use sp_state_machine::Backend as _;
use std::{convert::TryFrom, sync::Arc, time::Duration};
use std::{sync::Arc, time::Duration};

use crate::collator as collator_util;

/// Parameters for [`run`].
pub struct Params<BI, CIDP, Client, RClient, SO, Proposer, CS> {
pub struct Params<BI, CIDP, Client, RClient, Proposer, CS> {
/// Inherent data providers. Only non-consensus inherent data should be provided, i.e.
/// the timestamp, slot, and paras inherents should be omitted, as they are set by this
/// collator.
@@ -64,8 +63,6 @@ pub struct Params<BI, CIDP, Client, RClient, SO, Proposer, CS> {
pub para_client: Arc<Client>,
/// A handle to the relay-chain client.
pub relay_client: RClient,
/// A chain synchronization oracle.
pub sync_oracle: SO,
/// The underlying keystore, which should contain Aura consensus keys.
pub keystore: KeystorePtr,
/// The collator key used to sign collations before submitting to validators.
@@ -89,8 +86,8 @@ pub struct Params<BI, CIDP, Client, RClient, SO, Proposer, CS> {
}

/// Run bare Aura consensus as a relay-chain-driven collator.
pub fn run<Block, P, BI, CIDP, Client, RClient, SO, Proposer, CS>(
params: Params<BI, CIDP, Client, RClient, SO, Proposer, CS>,
pub fn run<Block, P, BI, CIDP, Client, RClient, Proposer, CS>(
params: Params<BI, CIDP, Client, RClient, Proposer, CS>,
) -> impl Future<Output = ()> + Send + 'static
where
Block: BlockT + Send,
@@ -108,7 +105,6 @@ where
CIDP: CreateInherentDataProviders<Block, ()> + Send + 'static,
CIDP::InherentDataProviders: Send,
BI: BlockImport<Block> + ParachainBlockImportMarker + Send + Sync + 'static,
SO: SyncOracle + Send + Sync + Clone + 'static,
Proposer: ProposerInterface<Block> + Send + Sync + 'static,
CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
P: Pair,
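The `basic.rs` change is mechanical: the `SO: SyncOracle` parameter was threaded through `Params` and `run` but never read, so the field, the trait bound, and one position in every turbofish disappear together. A minimal, self-contained sketch of the same refactor, using hypothetical names (`WorkerParams`, `TrimmedParams`) rather than the real Cumulus types:

```rust
// Before: `O` is carried by the struct and the function but never used.
#[allow(dead_code)]
struct WorkerParams<C, O> {
    client: C,
    sync_oracle: O, // nothing ever reads this
}

// After: dropping the field lets the type parameter and its trait bound
// go away at the definition and at every call site.
struct TrimmedParams<C> {
    client: C,
}

fn run<C: std::fmt::Display>(params: TrimmedParams<C>) {
    println!("running collator for client {}", params.client);
}

fn main() {
    // Call sites shrink too: one fewer `_` in any explicit turbofish.
    run(TrimmedParams { client: "para-client" });
}
```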
12 changes: 4 additions & 8 deletions cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -60,19 +60,18 @@ use sc_consensus_aura::standalone as aura_internal;
use sp_api::ProvideRuntimeApi;
use sp_application_crypto::AppPublic;
use sp_blockchain::HeaderBackend;
use sp_consensus::SyncOracle;
use sp_consensus_aura::{AuraApi, Slot};
use sp_core::crypto::Pair;
use sp_inherents::CreateInherentDataProviders;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member};
use sp_timestamp::Timestamp;
use std::{convert::TryFrom, sync::Arc, time::Duration};
use std::{sync::Arc, time::Duration};

use crate::collator::{self as collator_util, SlotClaim};

/// Parameters for [`run`].
pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, SO, Proposer, CS> {
pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS> {
/// Inherent data providers. Only non-consensus inherent data should be provided, i.e.
/// the timestamp, slot, and paras inherents should be omitted, as they are set by this
/// collator.
@@ -87,8 +86,6 @@ pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, SO, Proposer, CS> {
pub relay_client: RClient,
/// A validation code hash provider, used to get the current validation code hash.
pub code_hash_provider: CHP,
/// A chain synchronization oracle.
pub sync_oracle: SO,
/// The underlying keystore, which should contain Aura consensus keys.
pub keystore: KeystorePtr,
/// The collator key used to sign collations before submitting to validators.
@@ -110,8 +107,8 @@ pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, SO, Proposer, CS> {
}

/// Run async-backing-friendly Aura.
pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, SO, Proposer, CS>(
mut params: Params<BI, CIDP, Client, Backend, RClient, CHP, SO, Proposer, CS>,
pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>(
mut params: Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>,
) -> impl Future<Output = ()> + Send + 'static
where
Block: BlockT,
@@ -130,7 +127,6 @@ where
CIDP: CreateInherentDataProviders<Block, ()> + 'static,
CIDP::InherentDataProviders: Send,
BI: BlockImport<Block> + ParachainBlockImportMarker + Send + Sync + 'static,
SO: SyncOracle + Send + Sync + Clone + 'static,
Proposer: ProposerInterface<Block> + Send + Sync + 'static,
CS: CollatorServiceInterface<Block> + Send + Sync + 'static,
CHP: consensus_common::ValidationCodeHashProvider<Block::Hash> + Send + 'static,
105 changes: 53 additions & 52 deletions cumulus/client/consensus/common/src/parachain_consensus.rs
@@ -375,60 +375,61 @@ async fn handle_new_best_parachain_head<Block, P>(
target: LOG_TARGET,
block_hash = ?hash,
"Skipping set new best block, because block is already the best.",
)
} else {
// Make sure the block is already known or otherwise we skip setting new best.
match parachain.block_status(hash) {
Ok(BlockStatus::InChainWithState) => {
unset_best_header.take();
tracing::debug!(
target: LOG_TARGET,
?hash,
"Importing block as new best for parachain.",
);
import_block_as_new_best(hash, parachain_head, parachain).await;
},
Ok(BlockStatus::InChainPruned) => {
tracing::error!(
target: LOG_TARGET,
block_hash = ?hash,
"Trying to set pruned block as new best!",
);
},
Ok(BlockStatus::Unknown) => {
*unset_best_header = Some(parachain_head);
);
return;
}

tracing::debug!(
target: LOG_TARGET,
block_hash = ?hash,
"Parachain block not yet imported, waiting for import to enact as best block.",
);

if let Some(ref mut recovery_chan_tx) = recovery_chan_tx {
// Best effort channel to actively encourage block recovery.
// An error here is not fatal; the relay chain continuously re-announces
// the best block, thus we will have other opportunities to retry.
let req = RecoveryRequest { hash, kind: RecoveryKind::Full };
if let Err(err) = recovery_chan_tx.try_send(req) {
tracing::warn!(
target: LOG_TARGET,
block_hash = ?hash,
error = ?err,
"Unable to notify block recovery subsystem"
)
}
// Make sure the block is already known or otherwise we skip setting new best.
match parachain.block_status(hash) {
Ok(BlockStatus::InChainWithState) => {
unset_best_header.take();
tracing::debug!(
target: LOG_TARGET,
?hash,
"Importing block as new best for parachain.",
);
import_block_as_new_best(hash, parachain_head, parachain).await;
},
Ok(BlockStatus::InChainPruned) => {
tracing::error!(
target: LOG_TARGET,
block_hash = ?hash,
"Trying to set pruned block as new best!",
);
},
Ok(BlockStatus::Unknown) => {
*unset_best_header = Some(parachain_head);

tracing::debug!(
target: LOG_TARGET,
block_hash = ?hash,
"Parachain block not yet imported, waiting for import to enact as best block.",
);

if let Some(ref mut recovery_chan_tx) = recovery_chan_tx {
// Best effort channel to actively encourage block recovery.
// An error here is not fatal; the relay chain continuously re-announces
// the best block, thus we will have other opportunities to retry.
let req = RecoveryRequest { hash, kind: RecoveryKind::Full };
if let Err(err) = recovery_chan_tx.try_send(req) {
tracing::warn!(
target: LOG_TARGET,
block_hash = ?hash,
error = ?err,
"Unable to notify block recovery subsystem"
)
}
},
Err(e) => {
tracing::error!(
target: LOG_TARGET,
block_hash = ?hash,
error = ?e,
"Failed to get block status of block.",
);
},
_ => {},
}
}
},
Err(e) => {
tracing::error!(
target: LOG_TARGET,
block_hash = ?hash,
error = ?e,
"Failed to get block status of block.",
);
},
_ => {},
}
}

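The `parachain_consensus.rs` hunk is the "light reformat" half of the commit: the happy-path guard now ends in an early `return`, so the `match` on the block status moves out of the `else` and loses one indentation level. A reduced sketch of the before/after shape, with the parachain client and recovery channel stubbed out (hypothetical, for illustration only):

```rust
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum BlockStatus {
    InChainWithState,
    Unknown,
}

// Before-style: the interesting logic sits one level deep inside `else`.
fn handle_nested(is_already_best: bool, status: BlockStatus) {
    if is_already_best {
        println!("Skipping set new best block, because block is already the best.");
    } else {
        match status {
            BlockStatus::InChainWithState => println!("Importing block as new best."),
            BlockStatus::Unknown => println!("Waiting for import to enact as best block."),
        }
    }
}

// After-style: guard clause returns early; the match body is flat.
fn handle_flat(is_already_best: bool, status: BlockStatus) {
    if is_already_best {
        println!("Skipping set new best block, because block is already the best.");
        return;
    }
    match status {
        BlockStatus::InChainWithState => println!("Importing block as new best."),
        BlockStatus::Unknown => println!("Waiting for import to enact as best block."),
    }
}

fn main() {
    handle_nested(false, BlockStatus::Unknown);
    handle_flat(false, BlockStatus::Unknown);
}
```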
3 changes: 1 addition & 2 deletions cumulus/polkadot-parachain/Cargo.toml
@@ -66,7 +66,6 @@ sc-telemetry = { path = "../../substrate/client/telemetry" }
sc-transaction-pool = { path = "../../substrate/client/transaction-pool" }
sp-transaction-pool = { path = "../../substrate/primitives/transaction-pool" }
sc-network = { path = "../../substrate/client/network" }
sc-network-sync = { path = "../../substrate/client/network/sync" }
sc-basic-authorship = { path = "../../substrate/client/basic-authorship" }
sp-timestamp = { path = "../../substrate/primitives/timestamp" }
sp-blockchain = { path = "../../substrate/primitives/blockchain" }
@@ -112,6 +111,7 @@ cumulus-primitives-aura = { path = "../primitives/aura" }
cumulus-primitives-core = { path = "../primitives/core" }
cumulus-relay-chain-interface = { path = "../client/relay-chain-interface" }
color-print = "0.3.4"
tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] }

[build-dependencies]
substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" }
@@ -120,7 +120,6 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util
assert_cmd = "2.0"
nix = { version = "0.26.1", features = ["signal"] }
tempfile = "3.8.0"
tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] }
wait-timeout = "0.2"

[features]
20 changes: 4 additions & 16 deletions cumulus/polkadot-parachain/src/service.rs
@@ -52,7 +52,6 @@ use sc_consensus::{
};
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock};
use sc_network_sync::SyncingService;
use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi};
@@ -235,7 +234,6 @@
&TaskManager,
Arc<dyn RelayChainInterface>,
Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
Arc<SyncingService<Block>>,
KeystorePtr,
Duration,
ParaId,
@@ -369,7 +367,6 @@
&task_manager,
relay_chain_interface.clone(),
transaction_pool,
sync_service.clone(),
params.keystore_container.keystore(),
relay_chain_slot_duration,
para_id,
@@ -710,7 +707,6 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
task_manager,
relay_chain_interface,
transaction_pool,
sync_oracle,
keystore,
relay_chain_slot_duration,
para_id,
@@ -739,7 +735,6 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
block_import,
para_client: client,
relay_client: relay_chain_interface,
sync_oracle,
keystore,
collator_key,
para_id,
@@ -753,7 +748,7 @@ pub async fn start_generic_aura_node<Net: NetworkBackend<Block, Hash>>(
};

let fut =
basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _>(params);
basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _>(params);
task_manager.spawn_essential_handle().spawn("aura", None, fut);

Ok(())
@@ -828,7 +823,6 @@ where
task_manager,
relay_chain_interface,
transaction_pool,
sync_oracle,
keystore,
relay_chain_slot_duration,
para_id,
@@ -896,7 +890,6 @@ where
block_import,
para_client: client,
relay_client: relay_chain_interface2,
sync_oracle,
keystore,
collator_key,
para_id,
@@ -909,7 +902,7 @@
collation_request_receiver: Some(request_stream),
};

basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _>(params)
basic_aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _>(params)
.await
});

@@ -972,7 +965,6 @@ where
task_manager,
relay_chain_interface,
transaction_pool,
sync_oracle,
keystore,
relay_chain_slot_duration,
para_id,
@@ -1045,7 +1037,6 @@ where
code_hash_provider: move |block_hash| {
client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
},
sync_oracle,
keystore,
collator_key,
para_id,
@@ -1058,7 +1049,7 @@
* to aura */
};

aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _, _>(params)
aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params)
.await
});

@@ -1082,7 +1073,6 @@ fn start_relay_chain_consensus(
task_manager: &TaskManager,
relay_chain_interface: Arc<dyn RelayChainInterface>,
transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
_sync_oracle: Arc<SyncingService<Block>>,
_keystore: KeystorePtr,
_relay_chain_slot_duration: Duration,
para_id: ParaId,
@@ -1153,7 +1143,6 @@ fn start_lookahead_aura_consensus(
task_manager: &TaskManager,
relay_chain_interface: Arc<dyn RelayChainInterface>,
transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
sync_oracle: Arc<SyncingService<Block>>,
keystore: KeystorePtr,
relay_chain_slot_duration: Duration,
para_id: ParaId,
@@ -1186,7 +1175,6 @@ fn start_lookahead_aura_consensus(
code_hash_provider: move |block_hash| {
client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
},
sync_oracle,
keystore,
collator_key,
para_id,
@@ -1198,7 +1186,7 @@
reinitialize: false,
};

let fut = aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _, _>(params);
let fut = aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params);
task_manager.spawn_essential_handle().spawn("aura", None, fut);

Ok(())
6 changes: 5 additions & 1 deletion substrate/client/basic-authorship/src/basic_authorship.rs
Original file line number Diff line number Diff line change
@@ -205,7 +205,11 @@
) -> Proposer<Block, C, A, PR> {
let parent_hash = parent_header.hash();

info!("🙌 Starting consensus session on top of parent {:?}", parent_hash);
info!(
"🙌 Starting consensus session on top of parent {:?} (#{})",
parent_hash,
parent_header.number()
);

let proposer = Proposer::<_, _, _, PR> {
spawn_handle: self.spawn_handle.clone(),
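The `basic_authorship` tweak just enriches an existing log line with the parent block number alongside the hash. A trivial sketch of what the new format string produces, using `println!` and made-up values in place of the real logging machinery and header type:

```rust
fn main() {
    let parent_hash = "0x9a6f…c3d1"; // hypothetical truncated hash
    let parent_number: u32 = 1_337; // hypothetical block height
    println!(
        "🙌 Starting consensus session on top of parent {:?} (#{})",
        parent_hash, parent_number
    );
    // Prints: 🙌 Starting consensus session on top of parent "0x9a6f…c3d1" (#1337)
}
```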