diff --git a/Cargo.lock b/Cargo.lock
index 0423632c28f4..2f210d80b3aa 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6926,6 +6926,7 @@ dependencies = [
  "polkadot-primitives-test-helpers",
  "sc-keystore",
  "sp-application-crypto",
+ "sp-consensus",
  "sp-core",
  "sp-keyring",
  "sp-keystore",
diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml
index 128c59a02230..832e96f0f550 100644
--- a/node/core/dispute-coordinator/Cargo.toml
+++ b/node/core/dispute-coordinator/Cargo.toml
@@ -19,7 +19,7 @@
 polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
-
+sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }

 [dev-dependencies]
 kvdb-memorydb = "0.13.0"
diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs
index daab2838d7ee..04f6c146a456 100644
--- a/node/core/dispute-coordinator/src/initialized.rs
+++ b/node/core/dispute-coordinator/src/initialized.rs
@@ -93,7 +93,8 @@ impl Initialized {
         spam_slots: SpamSlots,
         scraper: ChainScraper,
     ) -> Self {
-        let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem;
+        let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics, sync_oracle: _ } =
+            subsystem;

         let (participation_sender, participation_receiver) = mpsc::channel(1);
         let participation = Participation::new(participation_sender);
@@ -1235,7 +1236,7 @@ enum MuxedMessage {
 impl MuxedMessage {
     async fn receive(
         ctx: &mut Context,
-        from_sender: &mut participation::WorkerMessageReceiver,
+        from_sender: &mut WorkerMessageReceiver,
     ) -> FatalResult<MuxedMessage> {
         // We are only fusing here to make `select` happy, in reality we will quit if the stream
         // ends.
diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs
index 1c66c6c6099c..bc314197968e 100644
--- a/node/core/dispute-coordinator/src/lib.rs
+++ b/node/core/dispute-coordinator/src/lib.rs
@@ -30,6 +30,7 @@
 use futures::FutureExt;
 use gum::CandidateHash;
 use sc_keystore::LocalKeystore;
+use sp_consensus::SyncOracle;

 use polkadot_node_primitives::{
     CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement,
@@ -117,6 +118,7 @@ pub struct DisputeCoordinatorSubsystem {
     store: Arc<dyn Database>,
     keystore: Arc<LocalKeystore>,
     metrics: Metrics,
+    sync_oracle: Box<dyn SyncOracle + Send + Sync>,
 }

 /// Configuration for the dispute coordinator subsystem.
@@ -164,8 +166,9 @@ impl DisputeCoordinatorSubsystem {
         config: Config,
         keystore: Arc<LocalKeystore>,
         metrics: Metrics,
+        sync_oracle: Box<dyn SyncOracle + Send + Sync>,
     ) -> Self {
-        Self { store, config, keystore, metrics }
+        Self { store, config, keystore, metrics, sync_oracle }
     }

     /// Initialize and afterwards run `Initialized::run`.
@@ -213,8 +216,12 @@ impl DisputeCoordinatorSubsystem {
         let db_params =
             DatabaseParams { db: self.store.clone(), db_column: self.config.col_session_data };

+        // The usage of `SyncOracle` below is not 100% correct. A better approach would be to
+        // cache the result and, once the oracle reports that syncing is complete, consider the
+        // node synced and never query the oracle again. In this case, however, that is not
+        // necessary because the oracle is queried only once, during initialisation.
         let (first_leaf, rolling_session_window) =
-            match get_rolling_session_window(ctx, db_params).await {
+            match get_rolling_session_window(ctx, db_params, &self.sync_oracle).await {
                 Ok(Some(update)) => update,
                 Ok(None) => {
                     gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
@@ -377,8 +384,9 @@ impl DisputeCoordinatorSubsystem {
 async fn get_rolling_session_window(
     ctx: &mut Context,
     db_params: DatabaseParams,
+    sync_oracle: &Box<dyn SyncOracle + Send + Sync>,
 ) -> Result<Option<(ActivatedLeaf, RollingSessionWindow)>> {
-    if let Some(leaf) = { wait_for_first_leaf(ctx) }.await? {
+    if let Some(leaf) = { wait_for_first_leaf(ctx, sync_oracle) }.await? {
         let sender = ctx.sender().clone();
         Ok(Some((
             leaf.clone(),
@@ -393,11 +401,20 @@ async fn get_rolling_session_window(

 /// Wait for `ActiveLeavesUpdate`, returns `None` if `Conclude` signal came first.
 #[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
-async fn wait_for_first_leaf(ctx: &mut Context) -> Result<Option<ActivatedLeaf>> {
+async fn wait_for_first_leaf(
+    ctx: &mut Context,
+    sync_oracle: &Box<dyn SyncOracle + Send + Sync>,
+) -> Result<Option<ActivatedLeaf>> {
     loop {
         match ctx.recv().await? {
             FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(None),
             FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
+                if sync_oracle.is_major_syncing() {
+                    // Still syncing - ignore this event, because otherwise the runtime API calls
+                    // would fail due to being executed on pruned blocks.
+                    continue
+                }
+
                 if let Some(activated) = update.activated {
                     return Ok(Some(activated))
                 }
diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs
index 7dff86693827..8d4ed76d4e41 100644
--- a/node/service/src/overseer.rs
+++ b/node/service/src/overseer.rs
@@ -300,6 +300,7 @@ where
             dispute_coordinator_config,
             keystore.clone(),
             Metrics::register(registry)?,
+            Box::new(network_service.clone()),
         ))
         .dispute_distribution(DisputeDistributionSubsystem::new(
             keystore.clone(),
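For context, the new constructor argument only needs something that implements `sp_consensus::SyncOracle`; in `overseer.rs` the network service is passed in. Below is a minimal sketch, not part of this diff, of a stub oracle that could be handed to `DisputeCoordinatorSubsystem::new` in tests. It assumes the trait's two methods, `is_major_syncing` and `is_offline`, take `&self`, which matches how the oracle is queried through a shared reference above.

```rust
// Sketch only - not part of this change set.
use sp_consensus::SyncOracle;

/// Stub oracle that always reports "sync complete", so `wait_for_first_leaf`
/// accepts the very first `ActiveLeaves` signal it receives.
struct AlwaysSynced;

impl SyncOracle for AlwaysSynced {
    fn is_major_syncing(&self) -> bool {
        false
    }

    fn is_offline(&self) -> bool {
        false
    }
}

// Hypothetical wiring, mirroring the overseer change above:
// let subsystem = DisputeCoordinatorSubsystem::new(
//     store,
//     config,
//     keystore,
//     metrics,
//     Box::new(AlwaysSynced),
// );
```

With such a stub the dispute coordinator initialises on the first `ActiveLeaves` signal, while an oracle returning `true` from `is_major_syncing` makes `wait_for_first_leaf` skip incoming leaves until syncing completes.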