This repository was archived by the owner on Nov 15, 2023. It is now read-only.

Merged
2 changes: 2 additions & 0 deletions bin/node/cli/tests/benchmark_pallet_works.rs
@@ -16,6 +16,8 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

#![cfg(feature = "runtime-benchmarks")]

use assert_cmd::cargo::cargo_bin;
use std::process::Command;

2 changes: 2 additions & 0 deletions bin/node/cli/tests/benchmark_storage_works.rs
@@ -16,6 +16,8 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

#![cfg(feature = "runtime-benchmarks")]

use assert_cmd::cargo::cargo_bin;
use std::{
path::Path,
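Both benchmark test binaries above receive the same one-line change. For orientation, a minimal sketch of the gating pattern in a hypothetical test file (the test body is illustrative, not from this PR):

```rust
// With a crate-level `#![cfg(...)]`, the entire integration-test binary is
// compiled only when the named Cargo feature is enabled, e.g. via
// `cargo test --features runtime-benchmarks`; otherwise the file compiles to nothing.
#![cfg(feature = "runtime-benchmarks")]

#[test]
fn only_built_with_runtime_benchmarks() {
    // This test exists only when the feature is enabled.
    assert_eq!(2 + 2, 4);
}
```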
132 changes: 60 additions & 72 deletions bin/node/cli/tests/common.rs
@@ -20,54 +20,26 @@

use assert_cmd::cargo::cargo_bin;
use nix::{
sys::signal::{kill, Signal::SIGINT},
sys::signal::{kill, Signal, Signal::SIGINT},
unistd::Pid,
};
use node_primitives::{Hash, Header};
use regex::Regex;
use std::{
io::{BufRead, BufReader, Read},
ops::{Deref, DerefMut},
path::Path,
process::{self, Child, Command, ExitStatus},
path::{Path, PathBuf},
process::{self, Child, Command},
time::Duration,
};
use tokio::time::timeout;

/// Wait for the given `child` the given number of `secs`.
///
/// Returns the `Some(exit status)` or `None` if the process did not finish in the given time.
pub fn wait_for(child: &mut Child, secs: u64) -> Result<ExitStatus, ()> {
let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(5.min(secs)))
.map_err(|_| ())?;
if let Some(exit_status) = result {
Ok(exit_status)
} else {
if secs > 5 {
eprintln!("Child process taking over 5 seconds to exit gracefully");
let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(secs - 5))
.map_err(|_| ())?;
if let Some(exit_status) = result {
return Ok(exit_status)
}
}
eprintln!("Took too long to exit (> {} seconds). Killing...", secs);
let _ = child.kill();
child.wait().unwrap();
Err(())
}
}

/// Wait for at least n blocks to be finalized within a specified time.
pub async fn wait_n_finalized_blocks(
n: usize,
timeout_secs: u64,
url: &str,
) -> Result<(), tokio::time::error::Elapsed> {
timeout(Duration::from_secs(timeout_secs), wait_n_finalized_blocks_from(n, url)).await
/// Run the given `future` and panic if the `timeout` is hit.
pub async fn run_with_timeout(timeout: Duration, future: impl futures::Future<Output = ()>) {
tokio::time::timeout(timeout, future).await.expect("Hit timeout");
}

/// Wait for at least n blocks to be finalized from a specified node
pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) {
pub async fn wait_n_finalized_blocks(n: usize, url: &str) {
use substrate_rpc_client::{ws_client, ChainApi};

let mut built_blocks = std::collections::HashSet::new();
@@ -87,46 +59,54 @@ pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) {

/// Run the node for a while (3 blocks)
pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) {
let mut cmd = Command::new(cargo_bin("substrate"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(args)
.arg("-d")
.arg(base_path)
.spawn()
.unwrap();
run_with_timeout(Duration::from_secs(60 * 10), async move {
let mut cmd = Command::new(cargo_bin("substrate"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(args)
.arg("-d")
.arg(base_path)
.spawn()
.unwrap();

let stderr = cmd.stderr.take().unwrap();
let stderr = cmd.stderr.take().unwrap();

let mut child = KillChildOnDrop(cmd);
let mut child = KillChildOnDrop(cmd);

let (ws_url, _) = find_ws_url_from_output(stderr);
let ws_url = extract_info_from_output(stderr).0.ws_url;

// Let it produce some blocks.
let _ = wait_n_finalized_blocks(3, 30, &ws_url).await;
// Let it produce some blocks.
wait_n_finalized_blocks(3, &ws_url).await;

assert!(child.try_wait().unwrap().is_none(), "the process should still be running");
child.assert_still_running();

// Stop the process
kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap();
assert!(wait_for(&mut child, 40).map(|x| x.success()).unwrap());
// Stop the process
child.stop();
})
.await
}

/// Run the node asserting that it fails with an error
pub fn run_node_assert_fail(base_path: &Path, args: &[&str]) {
let mut cmd = Command::new(cargo_bin("substrate"));
pub struct KillChildOnDrop(pub Child);

let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap());
impl KillChildOnDrop {
/// Stop the child and wait until it is finished.
///
/// Asserts if the exit status isn't success.
pub fn stop(&mut self) {
self.stop_with_signal(SIGINT);
}

// Let it produce some blocks, but it should die within 10 seconds.
assert_ne!(
wait_timeout::ChildExt::wait_timeout(&mut *child, Duration::from_secs(10)).unwrap(),
None,
"the process should not be running anymore"
);
}
/// Same as [`Self::stop`] but takes the `signal` that is sent to stop the child.
pub fn stop_with_signal(&mut self, signal: Signal) {
kill(Pid::from_raw(self.id().try_into().unwrap()), signal).unwrap();
assert!(self.wait().unwrap().success());
}

pub struct KillChildOnDrop(pub Child);
/// Asserts that the child is still running.
pub fn assert_still_running(&mut self) {
assert!(self.try_wait().unwrap().is_none(), "the process should still be running");
}
}

impl Drop for KillChildOnDrop {
fn drop(&mut self) {
@@ -148,18 +128,22 @@ impl DerefMut for KillChildOnDrop {
}
}

/// Read the WS address from the output.
/// Information extracted from a running node.
pub struct NodeInfo {
pub ws_url: String,
pub db_path: PathBuf,
}

/// Extract [`NodeInfo`] from a running node by parsing its output.
///
/// This is hack to get the actual binded sockaddr because
/// substrate assigns a random port if the specified port was already binded.
pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
/// Returns the [`NodeInfo`] and all the read data.
pub fn extract_info_from_output(read: impl Read + Send) -> (NodeInfo, String) {
let mut data = String::new();

let ws_url = BufReader::new(read)
.lines()
.find_map(|line| {
let line =
line.expect("failed to obtain next line from stdout for WS address discovery");
let line = line.expect("failed to obtain next line while extracting node info");
data.push_str(&line);
data.push_str("\n");

@@ -176,5 +160,9 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
panic!("We should get a WebSocket address")
});

(ws_url, data)
// Database path is printed before the ws url!
let re = Regex::new(r"Database: .+ at (\S+)").unwrap();
let db_path = PathBuf::from(re.captures(data.as_str()).unwrap().get(1).unwrap().as_str());

(NodeInfo { ws_url, db_path }, data)
}
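For orientation, a hedged sketch of how the refactored helpers compose in a sibling test file. The refactored tests below show the real call sites; this sketch only highlights the new `db_path` field of `NodeInfo`, which those tests do not exercise directly. The test name, flags, and assertions are illustrative and not part of the PR:

```rust
use assert_cmd::cargo::cargo_bin;
use std::{
    process::{self, Command},
    time::Duration,
};
use tempfile::tempdir;

pub mod common;

#[tokio::test]
async fn node_reports_its_database_path() {
    common::run_with_timeout(Duration::from_secs(60 * 10), async move {
        let base_path = tempdir().expect("could not create a temp dir");
        let mut cmd = Command::new(cargo_bin("substrate"))
            .stdout(process::Stdio::piped())
            .stderr(process::Stdio::piped())
            .args(&["--dev", "-d"])
            .arg(base_path.path())
            .arg("--no-hardware-benchmarks")
            .spawn()
            .unwrap();

        let stderr = cmd.stderr.take().unwrap();
        let mut node = common::KillChildOnDrop(cmd);

        // `extract_info_from_output` now also parses the database path from the logs.
        let (info, _logs) = common::extract_info_from_output(stderr);

        common::wait_n_finalized_blocks(1, &info.ws_url).await;
        node.assert_still_running();

        // Assumption: the parsed directory exists once the node is writing blocks.
        assert!(info.db_path.exists());

        node.stop();
    })
    .await;
}
```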
143 changes: 65 additions & 78 deletions bin/node/cli/tests/running_the_node_and_interrupt.rs
@@ -18,94 +18,81 @@

#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use nix::{
sys::signal::{
kill,
Signal::{self, SIGINT, SIGTERM},
},
unistd::Pid,
use nix::sys::signal::Signal::{self, SIGINT, SIGTERM};
use std::{
process::{self, Child, Command},
time::Duration,
};
use std::process::{self, Child, Command};
use tempfile::tempdir;

pub mod common;

#[tokio::test]
async fn running_the_node_works_and_can_be_interrupted() {
async fn run_command_and_kill(signal: Signal) {
let base_path = tempdir().expect("could not create a temp dir");
let mut cmd = common::KillChildOnDrop(
common::run_with_timeout(Duration::from_secs(60 * 10), async move {
Review comment from @michalkucharczyk (Contributor), Mar 8, 2023, on the `run_with_timeout(Duration::from_secs(60 * 10), ...)` line:

If 10 minutes is kind of a master timeout, maybe this could be a const (or maybe some default within the function that could be overridden by env?), just to avoid spreading this hard-coded value over the code.
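A minimal sketch of what that suggestion could look like in `common.rs` (the constant name and environment variable are hypothetical, not part of this PR):

```rust
use std::time::Duration;

/// Single place for the "master" test timeout instead of repeating `60 * 10`.
const DEFAULT_TEST_TIMEOUT: Duration = Duration::from_secs(60 * 10);

/// Resolve the timeout, allowing an override in seconds via a hypothetical
/// `SUBSTRATE_TEST_TIMEOUT` environment variable.
pub fn test_timeout() -> Duration {
    std::env::var("SUBSTRATE_TEST_TIMEOUT")
        .ok()
        .and_then(|secs| secs.parse::<u64>().ok())
        .map(Duration::from_secs)
        .unwrap_or(DEFAULT_TEST_TIMEOUT)
}

// Call sites would then read:
// common::run_with_timeout(common::test_timeout(), async move { /* ... */ }).await;
```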

async fn run_command_and_kill(signal: Signal) {
let base_path = tempdir().expect("could not create a temp dir");
let mut cmd = common::KillChildOnDrop(
Command::new(cargo_bin("substrate"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "-d"])
.arg(base_path.path())
.arg("--db=paritydb")
.arg("--no-hardware-benchmarks")
.spawn()
.unwrap(),
);

let stderr = cmd.stderr.take().unwrap();

let ws_url = common::extract_info_from_output(stderr).0.ws_url;

common::wait_n_finalized_blocks(3, &ws_url).await;

cmd.assert_still_running();

cmd.stop_with_signal(signal);

// Check if the database was closed gracefully. If it was not,
// there may exist a ref cycle that prevents the Client from being dropped properly.
//
// parity-db only writes the stats file on clean shutdown.
let stats_file = base_path.path().join("chains/dev/paritydb/full/stats.txt");
assert!(std::path::Path::exists(&stats_file));
}

run_command_and_kill(SIGINT).await;
run_command_and_kill(SIGTERM).await;
})
.await;
}

#[tokio::test]
async fn running_two_nodes_with_the_same_ws_port_should_work() {
common::run_with_timeout(Duration::from_secs(60 * 10), async move {
fn start_node() -> Child {
Command::new(cargo_bin("substrate"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "-d"])
.arg(base_path.path())
.arg("--db=paritydb")
.arg("--no-hardware-benchmarks")
.args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"])
.spawn()
.unwrap(),
);

let stderr = cmd.stderr.take().unwrap();

let (ws_url, _) = common::find_ws_url_from_output(stderr);

common::wait_n_finalized_blocks(3, 30, &ws_url)
.await
.expect("Blocks are produced in time");
assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running");
kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap();
assert_eq!(
common::wait_for(&mut cmd, 30).map(|x| x.success()),
Ok(true),
"the process must exit gracefully after signal {}",
signal,
);
// Check if the database was closed gracefully. If it was not,
// there may exist a ref cycle that prevents the Client from being dropped properly.
//
// parity-db only writes the stats file on clean shutdown.
let stats_file = base_path.path().join("chains/dev/paritydb/full/stats.txt");
assert!(std::path::Path::exists(&stats_file));
}

run_command_and_kill(SIGINT).await;
run_command_and_kill(SIGTERM).await;
}
.unwrap()
}

#[tokio::test]
async fn running_two_nodes_with_the_same_ws_port_should_work() {
fn start_node() -> Child {
Command::new(cargo_bin("substrate"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"])
.spawn()
.unwrap()
}

let mut first_node = common::KillChildOnDrop(start_node());
let mut second_node = common::KillChildOnDrop(start_node());

let stderr = first_node.stderr.take().unwrap();
let (ws_url, _) = common::find_ws_url_from_output(stderr);

common::wait_n_finalized_blocks(3, 30, &ws_url).await.unwrap();

assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running");
assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running");

kill(Pid::from_raw(first_node.id().try_into().unwrap()), SIGINT).unwrap();
kill(Pid::from_raw(second_node.id().try_into().unwrap()), SIGINT).unwrap();

assert_eq!(
common::wait_for(&mut first_node, 30).map(|x| x.success()),
Ok(true),
"The first node must exit gracefully",
);
assert_eq!(
common::wait_for(&mut second_node, 30).map(|x| x.success()),
Ok(true),
"The second node must exit gracefully",
);
let mut first_node = common::KillChildOnDrop(start_node());
let mut second_node = common::KillChildOnDrop(start_node());

let stderr = first_node.stderr.take().unwrap();
let ws_url = common::extract_info_from_output(stderr).0.ws_url;

common::wait_n_finalized_blocks(3, &ws_url).await;

first_node.assert_still_running();
second_node.assert_still_running();

first_node.stop();
second_node.stop();
})
.await;
}