This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Changes from 1 commit
Commits (30)
8da9a8a
Start removing last few instances of futures01
expenses Jan 15, 2020
1c61728
Use to_poll on wasm
expenses Jan 15, 2020
8f42407
Revert "Use to_poll on wasm"
expenses Jan 16, 2020
a80cc31
Fix fg test
expenses Jan 16, 2020
66f837a
Upgrade network test futures
expenses Jan 20, 2020
4e9e5c6
Update offchain hyper version
expenses Jan 20, 2020
7a429c6
Update service test
expenses Jan 21, 2020
738fff9
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 21, 2020
19c8b38
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 21, 2020
5d36bef
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 24, 2020
9a196df
bump tokio to 0.2.10
expenses Jan 24, 2020
29b70b8
Removed some unneeded tokios
expenses Jan 24, 2020
6aeecf7
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 27, 2020
c4fd68b
fixes
expenses Jan 27, 2020
553f9c1
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 27, 2020
8b28b97
fix run_until_all_full
expenses Jan 27, 2020
b692425
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 29, 2020
7c55e50
Make service test debuggable
expenses Jan 29, 2020
837aa0b
Update client/offchain/src/api/http.rs
expenses Jan 29, 2020
06ca442
Add service_test to test-int output
expenses Jan 29, 2020
25f1579
Merge branch 'ashley-more-futures' of github.com:paritytech/substrate…
expenses Jan 29, 2020
a0d4b37
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Jan 30, 2020
4270f7f
nitpicking
expenses Jan 30, 2020
b5e8f13
Merge remote-tracking branch 'parity/master' into HEAD
expenses Feb 6, 2020
64856fe
Finally fix test
expenses Feb 10, 2020
18fe65a
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Feb 10, 2020
6eb50f6
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Feb 27, 2020
bcc5b8b
Give up and revert client/service/test
expenses Feb 28, 2020
53c4ccf
Revert gitlab ci too
expenses Feb 28, 2020
143d62e
Merge remote-tracking branch 'parity/master' into ashley-more-futures
expenses Feb 28, 2020
Give up and revert client/service/test
expenses committed Feb 28, 2020
commit bcc5b8b5e1023c5764f175b198d8bfef47fe01d3
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

6 changes: 3 additions & 3 deletions client/service/test/Cargo.toml
@@ -10,16 +10,16 @@ repository = "https://github.com/paritytech/substrate/"

[dependencies]
tempfile = "3.1.0"
-futures = "0.3.1"
-tokio = { version = "=0.2.12", features = ["rt-core"] }
+tokio = "0.1.22"
+futures01 = { package = "futures", version = "0.1.29" }
log = "0.4.8"
env_logger = "0.7.0"
fdlimit = "0.1.1"
+futures = { version = "0.3.1", features = ["compat"] }
sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../service" }
sc-network = { version = "0.8.0-alpha.2", path = "../../network" }
sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" }
sc-client = { version = "0.8.0-alpha.2", path = "../../" }
sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" }
sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" }
sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" }
-async-std = { version = "1.4.0", features = ["unstable"] }
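
The dependency change above restores the dual-stack setup: tokio 0.1 plus futures 0.1 (pulled in under the name `futures01` via Cargo's package-rename syntax), with futures 0.3 kept only for its `compat` bridge. As a rough illustration of what the `compat` feature enables, here is a minimal sketch (not code from this PR; only the crate names and versions come from the Cargo.toml above, the rest is assumed):

use futures::future::{FutureExt as _, TryFutureExt as _};
use futures01::Future as _; // futures 0.1, renamed to `futures01` in Cargo.toml

fn main() {
	let runtime = tokio::runtime::Runtime::new().expect("failed to start tokio 0.1 runtime");

	// A plain futures 0.3 future with `Output = ()`.
	let fut03 = async {
		println!("futures 0.3 future running on a tokio 0.1 executor");
	};

	// A tokio 0.1 executor only accepts futures 0.1 `Future<Item = (), Error = ()>`:
	// `unit_error()` maps `Output = ()` to `Output = Result<(), ()>`, `boxed()`
	// pins the future so it becomes `Unpin`, and `compat()` wraps it as a 0.1
	// future. (The PR spawns `Pin<Box<dyn Future>>` values, which are already
	// `Unpin`, so it can use `fut.unit_error().compat()` directly.)
	runtime.executor().spawn(fut03.unit_error().boxed().compat());

	runtime.shutdown_on_idle().wait().expect("error shutting down runtime");
}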
189 changes: 98 additions & 91 deletions client/service/test/src/lib.rs
@@ -21,14 +21,12 @@ use std::sync::{Arc, Mutex, MutexGuard};
use std::net::Ipv4Addr;
use std::pin::Pin;
use std::time::Duration;
-use std::task::{Poll, Context};
use log::info;
+use futures01::{Future, Stream, Poll};
+use futures::{FutureExt as _, TryFutureExt as _};
use tempfile::TempDir;
-use tokio::runtime::Runtime;
-use futures::prelude::*;
-use futures::future::ready;
-use async_std::stream::interval;
-use async_std::prelude::FutureExt;
+use tokio::{runtime::Runtime, prelude::FutureExt};
+use tokio::timer::Interval;
use sc_service::{
AbstractService,
ChainSpec,
@@ -77,10 +75,12 @@ impl<T> From<T> for SyncService<T> {
}

impl<T: futures::Future<Output=Result<(), sc_service::Error>> + Unpin> Future for SyncService<T> {
Contributor comment on the `impl` line above: Not great to enforce Unpin, but since it's a test I don't really care.

-	type Output = Result<(), sc_service::Error>;
+	type Item = ();
+	type Error = sc_service::Error;

-	fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-		Pin::new(&mut *self.get()).poll(cx)
+	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
+		let mut f = self.0.lock().unwrap();
+		futures::compat::Compat::new(&mut *f).poll()
}
}
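
For context on the reviewer's remark: the reverted `poll` goes through a `MutexGuard`, i.e. a plain `&mut` reference to the inner futures 0.3 future, and a 0.3 future can only be polled through `&mut` when it is `Unpin`. A stripped-down, std-only sketch of that constraint (illustrative, not the PR's code):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// `Pin::new` is only available because `F: Unpin` guarantees the value may
// safely move between polls; delete the `Unpin` bound and this stops compiling.
fn poll_via_mut_ref<F>(f: &mut F, cx: &mut Context<'_>) -> Poll<F::Output>
where
	F: Future + Unpin,
{
	Pin::new(f).poll(cx)
}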

@@ -98,32 +98,32 @@ where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static
	{
		let full_nodes = self.full_nodes.clone();
		let light_nodes = self.light_nodes.clone();
-		let interval = interval(Duration::from_millis(100))
-			.take_while(move |_| {
-				let num_full_ready = full_nodes.iter().filter(|&(ref id, ref service, _, _)|
+		let interval = Interval::new_interval(Duration::from_millis(100))
+			.map_err(|_| ())
+			.for_each(move |_| {
+				let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)|
					full_predicate(*id, service)
-				).count();
+				);

-				let num_light_ready = light_nodes.iter().filter(|&(ref id, ref service, _)|
-					light_predicate(*id, service)
-				).count();
+				if !full_ready {
+					return Ok(());
+				}

-				info!(
-					"Full nodes ready: {}/{}; Light nodes ready: {}/{}",
-					num_full_ready, full_nodes.len(),
-					num_light_ready, light_nodes.len(),
+				let light_ready = light_nodes.iter().all(|&(ref id, ref service, _)|
+					light_predicate(*id, service)
				);

-				let full_ready = num_full_ready == full_nodes.len();
-				let light_ready = num_light_ready == light_nodes.len();
-
-				ready(!(light_ready && full_ready))
+				if !light_ready {
+					Ok(())
+				} else {
+					Err(())
+				}
			})
-			.for_each(|_| ready(()))
			.timeout(MAX_WAIT_TIME);

		match self.runtime.block_on(interval) {
-			Ok(()) => {},
+			Ok(()) => unreachable!("interval always fails; qed"),
+			Err(ref err) if err.is_inner() => (),
			Err(_) => panic!("Waited for too long"),
		}
}
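
The reverted loop leans on a futures 0.1 idiom worth spelling out: `for_each` over an endless `Interval` can only finish by returning an error, so `Err(())` becomes the "success" signal once every node passes its predicate, while `timeout(MAX_WAIT_TIME)` plus `err.is_inner()` tell that deliberate error apart from the timer elapsing. A self-contained sketch of the same pattern (the helper name `wait_until` and the 30-second limit are assumptions; tokio 0.1 and futures 0.1 as in the Cargo.toml above):

use std::time::Duration;
use futures01::Stream;
use tokio::prelude::FutureExt as _; // provides `timeout` on futures 0.1 futures
use tokio::timer::Interval;

// Poll `condition` every 100ms until it holds, panicking after 30 seconds.
fn wait_until(mut condition: impl FnMut() -> bool + Send + 'static) {
	let poller = Interval::new_interval(Duration::from_millis(100))
		.map_err(|_| ())
		.for_each(move |_| {
			if condition() {
				Err(()) // deliberate error: the only way to end `for_each` on an endless stream
			} else {
				Ok(())
			}
		})
		.timeout(Duration::from_secs(30));

	let mut runtime = tokio::runtime::Runtime::new().expect("runtime");
	match runtime.block_on(poller) {
		Ok(()) => unreachable!("the interval stream never ends; qed"),
		// `is_inner()` is true when the wrapped future itself errored
		// (the condition was met), false when the timer elapsed first.
		Err(ref err) if err.is_inner() => (),
		Err(_) => panic!("waited for too long"),
	}
}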
@@ -252,12 +252,12 @@ impl<G, E, F, L, U> TestNet<G, E, F, L, U> where
		light: impl Iterator<Item = impl FnOnce(Configuration<G, E>) -> Result<L, Error>>,
		authorities: impl Iterator<Item = (String, impl FnOnce(Configuration<G, E>) -> Result<(F, U), Error>)>
	) {
-		let handle = self.runtime.handle();
+		let executor = self.runtime.executor();

		for (key, authority) in authorities {
			let task_executor = {
-				let handle = handle.clone();
-				Arc::new(move |fut| { handle.spawn(fut); })
+				let executor = executor.clone();
+				Arc::new(move |fut: Pin<Box<dyn futures::Future<Output = ()> + Send>>| executor.spawn(fut.unit_error().compat()))
			};
			let node_config = node_config(
				self.nodes,
@@ -272,38 +272,38 @@ impl<G, E, F, L, U> TestNet<G, E, F, L, U> where
			let (service, user_data) = authority(node_config).expect("Error creating test node service");
			let service = SyncService::from(service);

-			handle.spawn(service.clone().map_err(|_| ()));
+			executor.spawn(service.clone().map_err(|_| ()));
			let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into()));
			self.authority_nodes.push((self.nodes, service, user_data, addr));
			self.nodes += 1;
		}

		for full in full {
			let task_executor = {
-				let handle = handle.clone();
-				Arc::new(move |fut| { handle.spawn(fut); })
+				let executor = executor.clone();
+				Arc::new(move |fut: Pin<Box<dyn futures::Future<Output = ()> + Send>>| executor.spawn(fut.unit_error().compat()))
			};
			let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, task_executor, None, self.base_port, &temp);
			let addr = node_config.network.listen_addresses.iter().next().unwrap().clone();
			let (service, user_data) = full(node_config).expect("Error creating test node service");
			let service = SyncService::from(service);

-			handle.spawn(service.clone().map_err(|_| ()));
+			executor.spawn(service.clone().map_err(|_| ()));
			let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into()));
			self.full_nodes.push((self.nodes, service, user_data, addr));
			self.nodes += 1;
		}

		for light in light {
			let task_executor = {
-				let handle = handle.clone();
-				Arc::new(move |fut| { handle.spawn(fut); })
+				let executor = executor.clone();
+				Arc::new(move |fut: Pin<Box<dyn futures::Future<Output = ()> + Send>>| executor.spawn(fut.unit_error().compat()))
			};
			let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, task_executor, None, self.base_port, &temp);
			let addr = node_config.network.listen_addresses.iter().next().unwrap().clone();
			let service = SyncService::from(light(node_config).expect("Error creating test node service"));

-			handle.spawn(service.clone().map_err(|_| ()));
+			executor.spawn(service.clone().map_err(|_| ()));
			let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into()));
			self.light_nodes.push((self.nodes, service, addr));
			self.nodes += 1;
@@ -334,74 +334,81 @@ pub fn connectivity<G, E, Fb, F, Lb, L>(

	{
		let temp = tempdir_with_prefix("substrate-connectivity-test");
-		let mut network = TestNet::new(
-			&temp,
-			spec.clone(),
-			(0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }),
-			(0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }),
-			// Note: this iterator is empty but we can't just use `iter::empty()`, otherwise
-			// the type of the closure cannot be inferred.
-			(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })),
-			30400,
-		);
-		info!("Checking star topology");
-		let first_address = network.full_nodes[0].3.clone();
-		for (_, service, _, _) in network.full_nodes.iter().skip(1) {
-			service.get().network().add_reserved_peer(first_address.to_string())
-				.expect("Error adding reserved peer");
-		}
-		for (_, service, _) in network.light_nodes.iter() {
-			service.get().network().add_reserved_peer(first_address.to_string())
-				.expect("Error adding reserved peer");
-		}
+		let runtime = {
+			let mut network = TestNet::new(
+				&temp,
+				spec.clone(),
+				(0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }),
+				(0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }),
+				// Note: this iterator is empty but we can't just use `iter::empty()`, otherwise
+				// the type of the closure cannot be inferred.
+				(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })),
+				30400,
+			);
+			info!("Checking star topology");
+			let first_address = network.full_nodes[0].3.clone();
+			for (_, service, _, _) in network.full_nodes.iter().skip(1) {
+				service.get().network().add_reserved_peer(first_address.to_string())
+					.expect("Error adding reserved peer");
+			}
+			for (_, service, _) in network.light_nodes.iter() {
+				service.get().network().add_reserved_peer(first_address.to_string())
+					.expect("Error adding reserved peer");
+			}

-		network.run_until_all_full(
-			move |_index, service| service.get().network().num_connected()
-				== expected_full_connections,
-			move |_index, service| service.get().network().num_connected()
-				== expected_light_connections,
-		);
+			network.run_until_all_full(
+				move |_index, service| service.get().network().num_connected()
+					== expected_full_connections,
+				move |_index, service| service.get().network().num_connected()
+					== expected_light_connections,
+			);
+			network.runtime
+		};

-		network.runtime.shutdown_timeout(Duration::from_secs(0));
+		runtime.shutdown_now().wait().expect("Error shutting down runtime");

		temp.close().expect("Error removing temp dir");
	}
	{
		let temp = tempdir_with_prefix("substrate-connectivity-test");
-		let mut network = TestNet::new(
-			&temp,
-			spec,
-			(0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }),
-			(0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }),
-			// Note: this iterator is empty but we can't just use `iter::empty()`, otherwise
-			// the type of the closure cannot be inferred.
-			(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })),
-			30400,
-		);
-		info!("Checking linked topology");
-		let mut address = network.full_nodes[0].3.clone();
-		let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES);
-		for i in 0..max_nodes {
-			if i != 0 {
-				if let Some((_, service, _, node_id)) = network.full_nodes.get(i) {
-					service.get().network().add_reserved_peer(address.to_string())
-						.expect("Error adding reserved peer");
-					address = node_id.clone();
-				}
-			}
+		{
+			let mut network = TestNet::new(
+				&temp,
+				spec,
+				(0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }),
+				(0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }),
+				// Note: this iterator is empty but we can't just use `iter::empty()`, otherwise
+				// the type of the closure cannot be inferred.
+				(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })),
+				30400,
+			);
+			info!("Checking linked topology");
+			let mut address = network.full_nodes[0].3.clone();
+			let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES);
+			for i in 0..max_nodes {
+				if i != 0 {
+					if let Some((_, service, _, node_id)) = network.full_nodes.get(i) {
+						service.get().network().add_reserved_peer(address.to_string())
+							.expect("Error adding reserved peer");
+						address = node_id.clone();
+					}
+				}

-			if let Some((_, service, node_id)) = network.light_nodes.get(i) {
-				service.get().network().add_reserved_peer(address.to_string())
-					.expect("Error adding reserved peer");
-				address = node_id.clone();
-			}
-		}
+				if let Some((_, service, node_id)) = network.light_nodes.get(i) {
+					service.get().network().add_reserved_peer(address.to_string())
+						.expect("Error adding reserved peer");
+					address = node_id.clone();
+				}
+			}

-		network.run_until_all_full(
-			move |_index, service| service.get().network().num_connected()
-				== expected_full_connections,
-			move |_index, service| service.get().network().num_connected()
-				== expected_light_connections,
-		);
+			network.run_until_all_full(
+				move |_index, service| service.get().network().num_connected()
+					== expected_full_connections,
+				move |_index, service| service.get().network().num_connected()
+					== expected_light_connections,
+			);
+		}

		temp.close().expect("Error removing temp dir");
	}
}
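
The shape of the connectivity change above is also a tokio 0.1 idiom: the star-topology block hands its runtime out of the scope so the `TestNet` drops first, and `shutdown_now().wait()` then cancels every spawned node task before the temp directory is deleted. A minimal sketch of that shutdown ordering (assumed, not the PR's code):

use futures01::Future as _;

fn run_scoped_test() {
	let runtime = {
		let runtime = tokio::runtime::Runtime::new().expect("runtime");
		// ... spawn node services on `runtime.executor()` ...
		runtime // move the runtime out; everything else in the scope drops here
	};

	// `shutdown_now` cancels all outstanding tasks; `wait` blocks until the
	// worker threads have really stopped, so nothing still writes to disk.
	runtime.shutdown_now().wait().expect("error shutting down runtime");

	// Only now is it safe to remove the nodes' temp directory.
}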