diff --git a/Cargo.lock b/Cargo.lock index 1b478fcc18a78..2d11e8e06f4ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2795,12 +2795,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" - [[package]] name = "hashbrown" version = "0.11.2" @@ -3132,12 +3126,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.0.1", - "hashbrown 0.9.1", + "hashbrown 0.11.2", "serde", ] @@ -4871,6 +4865,7 @@ dependencies = [ "sc-service", "sc-service-test", "sc-sync-state-rpc", + "sc-sysinfo", "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", @@ -8939,6 +8934,7 @@ dependencies = [ "sc-offchain", "sc-rpc", "sc-rpc-server", + "sc-sysinfo", "sc-telemetry", "sc-tracing", "sc-transaction-pool", @@ -9043,6 +9039,22 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-sysinfo" +version = "6.0.0-dev" +dependencies = [ + "futures 0.3.19", + "libc", + "log 0.4.14", + "rand 0.7.3", + "rand_pcg 0.2.1", + "regex", + "sc-telemetry", + "serde", + "serde_json", + "sp-core", +] + [[package]] name = "sc-telemetry" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 13657dd1234a5..576bae6b574b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,6 +54,7 @@ members = [ "client/service", "client/service/test", "client/state-db", + "client/sysinfo", "client/sync-state-rpc", "client/telemetry", "client/tracing", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 24e069d21f694..826e674bb816b 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -76,6 +76,7 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } sc-authority-discovery = { version = "0.10.0-dev", path = "../../../client/authority-discovery" } sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } +sc-sysinfo = { version = "6.0.0-dev", path = "../../../client/sysinfo" } # frame dependencies frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 77b51fa28dd1a..ebb89c07da221 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -110,7 +110,8 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { wasm_runtime_overrides: None, }; - node_cli::service::new_full_base(config, |_, _| ()).expect("creating a full node doesn't fail") + node_cli::service::new_full_base(config, false, |_, _| ()) + .expect("creating a full node doesn't fail") } fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index e89527f2333a2..267f3c4414546 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -102,7 +102,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { wasm_runtime_overrides: None, }; - node_cli::service::new_full_base(config, |_, _| ()).expect("Creates 
node") + node_cli::service::new_full_base(config, false, |_, _| ()).expect("Creates node") } fn create_accounts(num: usize) -> Vec { diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 11516f964903a..1212d3b3d07ed 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -471,7 +471,7 @@ pub(crate) mod tests { sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| { let NewFullBase { task_manager, client, network, transaction_pool, .. } = - new_full_base(config, |_, _| ())?; + new_full_base(config, false, |_, _| ())?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index a911cc26ef87c..c6b01dfd4254e 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -26,6 +26,16 @@ pub struct Cli { #[allow(missing_docs)] #[clap(flatten)] pub run: sc_cli::RunCmd, + + /// Disable automatic hardware benchmarks. + /// + /// By default these benchmarks are automatically ran at startup and measure + /// the CPU speed, the memory bandwidth and the disk speed. + /// + /// The results are then printed out in the logs, and also sent as part of + /// telemetry, if telemetry is enabled. + #[clap(long)] + pub no_hardware_benchmarks: bool, } /// Possible subcommands of the main binary. diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 3c2039fde4757..ac5734f958d4d 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -80,7 +80,8 @@ pub fn run() -> Result<()> { None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - service::new_full(config).map_err(sc_cli::Error::Service) + service::new_full(config, cli.no_hardware_benchmarks) + .map_err(sc_cli::Error::Service) }) }, Some(Subcommand::Inspect(cmd)) => { diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 038c14029cf31..649d901b5b670 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -309,11 +309,21 @@ pub struct NewFullBase { /// Creates a full service from the configuration. pub fn new_full_base( mut config: Configuration, + disable_hardware_benchmarks: bool, with_startup_data: impl FnOnce( &sc_consensus_babe::BabeBlockImport, &sc_consensus_babe::BabeLink, ), ) -> Result { + let hwbench = if !disable_hardware_benchmarks { + config.database.path().map(|database_path| { + let _ = std::fs::create_dir_all(&database_path); + sc_sysinfo::gather_hwbench(Some(database_path)) + }) + } else { + None + }; + let sc_service::PartialComponents { client, backend, @@ -383,6 +393,19 @@ pub fn new_full_base( telemetry: telemetry.as_mut(), })?; + if let Some(hwbench) = hwbench { + sc_sysinfo::print_hwbench(&hwbench); + + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } + let (block_import, grandpa_link, babe_link) = import_setup; (with_startup_data)(&block_import, &babe_link); @@ -530,8 +553,12 @@ pub fn new_full_base( } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) -> Result { - new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. 
}| task_manager) +pub fn new_full( + config: Configuration, + disable_hardware_benchmarks: bool, +) -> Result { + new_full_base(config, disable_hardware_benchmarks, |_, _| ()) + .map(|NewFullBase { task_manager, .. }| task_manager) } #[cfg(test)] @@ -598,6 +625,7 @@ mod tests { let NewFullBase { task_manager, client, network, transaction_pool, .. } = new_full_base( config, + false, |block_import: &sc_consensus_babe::BabeBlockImport, babe_link: &sc_consensus_babe::BabeLink| { setup_handles = Some((block_import.clone(), babe_link.clone())); @@ -775,7 +803,7 @@ mod tests { crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| { let NewFullBase { task_manager, client, network, transaction_pool, .. } = - new_full_base(config, |_, _| ())?; + new_full_base(config, false, |_, _| ())?; Ok(sc_service_test::TestNetComponents::new( task_manager, client, diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index ac853b201b8c6..c5447fd2311c6 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -28,7 +28,7 @@ pub mod common; async fn check_block_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]).await; + common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 2a2133bbfe4fe..48fccc8ca0293 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -188,7 +188,7 @@ async fn export_import_revert() { let exported_blocks_file = base_path.path().join("exported_blocks"); let db_path = base_path.path().join("db"); - common::run_node_for_a_while(base_path.path(), &["--dev"]).await; + common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let mut executor = ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path); diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 28ad88dd501d1..6f73cc69582a9 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -28,7 +28,7 @@ pub mod common; async fn inspect_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]).await; + common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 1a62aec287447..811762a714a9d 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -27,7 +27,7 @@ pub mod common; async fn purge_chain_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]).await; + common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["purge-chain", "--dev", "-d"]) diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 
edce2bbc6e4c5..e0ccee9d4f88a 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -41,6 +41,7 @@ async fn running_the_node_works_and_can_be_interrupted() { Command::new(cargo_bin("substrate")) .args(&["--dev", "-d"]) .arg(base_path.path()) + .arg("--no-hardware-benchmarks") .spawn() .unwrap(), ); @@ -64,7 +65,7 @@ async fn running_the_node_works_and_can_be_interrupted() { async fn running_two_nodes_with_the_same_ws_port_should_work() { fn start_node() -> Child { Command::new(cargo_bin("substrate")) - .args(&["--dev", "--tmp", "--ws-port=45789"]) + .args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"]) .spawn() .unwrap() } diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs index 212fec7a02cf6..772a69ac3292f 100644 --- a/bin/node/cli/tests/telemetry.rs +++ b/bin/node/cli/tests/telemetry.rs @@ -71,6 +71,7 @@ async fn telemetry_works() { let mut substrate = substrate .args(&["--dev", "--tmp", "--telemetry-url"]) .arg(format!("ws://{} 10", addr)) + .arg("--no-hardware-benchmarks") .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) .stdin(process::Stdio::null()) diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index a5b9b7499fc49..409375f0daf8c 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -37,7 +37,7 @@ pub mod common; async fn temp_base_path_works() { let mut cmd = Command::new(cargo_bin("substrate")); let mut child = common::KillChildOnDrop( - cmd.args(&["--dev", "--tmp"]) + cmd.args(&["--dev", "--tmp", "--no-hardware-benchmarks"]) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs index 133eb65f4acef..f5a6f3a53d598 100644 --- a/bin/node/cli/tests/version.rs +++ b/bin/node/cli/tests/version.rs @@ -17,13 +17,11 @@ // along with this program. If not, see . 
use assert_cmd::cargo::cargo_bin; -use platforms::*; use regex::Regex; use std::process::Command; fn expected_regex() -> Regex { - Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+|unknown)-(.+?)-(.+?)(?:-(.+))?$") - .unwrap() + Regex::new(r"^substrate (.+)-([a-f\d]+)$").unwrap() } #[test] @@ -37,33 +35,17 @@ fn version_is_full() { let captures = expected.captures(output.as_str()).expect("could not parse version in output"); assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); - assert_eq!(&captures[3], TARGET_ARCH.as_str()); - assert_eq!(&captures[4], TARGET_OS.as_str()); - assert_eq!(captures.get(5).map(|x| x.as_str()), TARGET_ENV.map(|x| x.as_str())); } #[test] fn test_regex_matches_properly() { let expected = expected_regex(); - let captures = expected.captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu").unwrap(); + let captures = expected.captures("substrate 2.0.0-da487d19d").unwrap(); assert_eq!(&captures[1], "2.0.0"); assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu").unwrap(); + let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - - let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux").unwrap(); - assert_eq!(&captures[1], "2.0.0-alpha.5"); - assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), None); } diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index c920c6cd52cff..849a2d2cceb1f 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -56,7 +56,7 @@ pub trait SubstrateCli: Sized { /// /// By default this will look like this: /// - /// `2.0.0-b950f731c-x86_64-linux-gnu` + /// `2.0.0-b950f731c` /// /// Where the hash is the short commit hash of the commit of in the Git repository. 
fn impl_version() -> String; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 737a29f1db0c0..b7c5eba17684e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -72,6 +72,7 @@ sc-offchain = { version = "4.0.0-dev", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } +sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.11.0", default-features = false, features = [ diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index f4ff932435755..882d7666dbe63 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -487,8 +487,13 @@ where ) .map_err(|e| Error::Application(Box::new(e)))?; + let sysinfo = sc_sysinfo::gather_sysinfo(); + sc_sysinfo::print_sysinfo(&sysinfo); + let telemetry = telemetry - .map(|telemetry| init_telemetry(&mut config, network.clone(), client.clone(), telemetry)) + .map(|telemetry| { + init_telemetry(&mut config, network.clone(), client.clone(), telemetry, Some(sysinfo)) + }) .transpose()?; info!("📦 Highest known block at #{}", chain_info.best_number); @@ -609,12 +614,16 @@ fn init_telemetry>( network: Arc::Hash>>, client: Arc, telemetry: &mut Telemetry, + sysinfo: Option, ) -> sc_telemetry::Result { let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { name: config.network.node_name.to_owned(), implementation: config.impl_name.to_owned(), version: config.impl_version.to_owned(), + target_os: sc_sysinfo::TARGET_OS.into(), + target_arch: sc_sysinfo::TARGET_ARCH.into(), + target_env: sc_sysinfo::TARGET_ENV.into(), config: String::new(), chain: config.chain_spec.name().to_owned(), genesis_hash: format!("{:?}", genesis_hash), @@ -625,6 +634,7 @@ fn init_telemetry>( .unwrap_or(0) .to_string(), network_id: network.local_peer_id().to_base58(), + sysinfo, }; telemetry.start_telemetry(connection_message)?; diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml new file mode 100644 index 0000000000000..8efe583fb9335 --- /dev/null +++ b/client/sysinfo/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "sc-sysinfo" +version = "6.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "A crate that provides basic hardware and software telemetry information." 
+documentation = "https://docs.rs/sc-sysinfo" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +futures = "0.3.19" +log = "0.4.11" +rand = "0.7.3" +rand_pcg = "0.2.1" +regex = "1" +libc = "0.2" +serde = { version = "1.0.136", features = ["derive"] } +serde_json = "1.0.79" +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } diff --git a/client/sysinfo/README.md b/client/sysinfo/README.md new file mode 100644 index 0000000000000..4a2189c5ed8db --- /dev/null +++ b/client/sysinfo/README.md @@ -0,0 +1,4 @@ +This crate contains the code necessary to gather basic hardware +and software telemetry information about the node on which we're running. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/sysinfo/build.rs b/client/sysinfo/build.rs new file mode 100644 index 0000000000000..6d288107445aa --- /dev/null +++ b/client/sysinfo/build.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +fn main() { + let out_dir = std::env::var("OUT_DIR").expect("OUT_DIR is always set in build scripts; qed"); + let out_dir = std::path::PathBuf::from(out_dir); + let target_os = std::env::var("CARGO_CFG_TARGET_OS") + .expect("CARGO_CFG_TARGET_OS is always set in build scripts; qed"); + let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH") + .expect("CARGO_CFG_TARGET_ARCH is always set in build scripts; qed"); + let target_env = std::env::var("CARGO_CFG_TARGET_ENV") + .expect("CARGO_CFG_TARGET_ENV is always set in build scripts; qed"); + std::fs::write(out_dir.join("target_os.txt"), target_os).unwrap(); + std::fs::write(out_dir.join("target_arch.txt"), target_arch).unwrap(); + std::fs::write(out_dir.join("target_env.txt"), target_env).unwrap(); +} diff --git a/client/sysinfo/src/lib.rs b/client/sysinfo/src/lib.rs new file mode 100644 index 0000000000000..be13efb82c66f --- /dev/null +++ b/client/sysinfo/src/lib.rs @@ -0,0 +1,111 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +//! This crate contains the code necessary to gather basic hardware +//! and software telemetry information about the node on which we're running. + +use futures::prelude::*; + +mod sysinfo; +#[cfg(target_os = "linux")] +mod sysinfo_linux; + +pub use sysinfo::{gather_hwbench, gather_sysinfo}; + +/// The operating system part of the current target triplet. +pub const TARGET_OS: &str = include_str!(concat!(env!("OUT_DIR"), "/target_os.txt")); + +/// The CPU ISA architecture part of the current target triplet. +pub const TARGET_ARCH: &str = include_str!(concat!(env!("OUT_DIR"), "/target_arch.txt")); + +/// The environment part of the current target triplet. +pub const TARGET_ENV: &str = include_str!(concat!(env!("OUT_DIR"), "/target_env.txt")); + +/// Hardware benchmark results for the node. +#[derive(Clone, Debug, serde::Serialize)] +pub struct HwBench { + /// The CPU speed, as measured in how many MB/s it can hash using the BLAKE2b-256 hash. + pub cpu_hashrate_score: u64, + /// Memory bandwidth in MB/s, calculated by measuring the throughput of `memcpy`. + pub memory_memcpy_score: u64, + /// Sequential disk write speed in MB/s. + pub disk_sequential_write_score: Option, + /// Random disk write speed in MB/s. + pub disk_random_write_score: Option, +} + +/// Prints out the system software/hardware information in the logs. +pub fn print_sysinfo(sysinfo: &sc_telemetry::SysInfo) { + log::info!("💻 Operating system: {}", TARGET_OS); + log::info!("💻 CPU architecture: {}", TARGET_ARCH); + if !TARGET_ENV.is_empty() { + log::info!("💻 Target environment: {}", TARGET_ENV); + } + + if let Some(ref cpu) = sysinfo.cpu { + log::info!("💻 CPU: {}", cpu); + } + if let Some(core_count) = sysinfo.core_count { + log::info!("💻 CPU cores: {}", core_count); + } + if let Some(memory) = sysinfo.memory { + log::info!("💻 Memory: {}MB", memory / (1024 * 1024)); + } + if let Some(ref linux_kernel) = sysinfo.linux_kernel { + log::info!("💻 Kernel: {}", linux_kernel); + } + if let Some(ref linux_distro) = sysinfo.linux_distro { + log::info!("💻 Linux distribution: {}", linux_distro); + } + if let Some(is_virtual_machine) = sysinfo.is_virtual_machine { + log::info!("💻 Virtual machine: {}", if is_virtual_machine { "yes" } else { "no" }); + } +} + +/// Prints out the results of the hardware benchmarks in the logs. +pub fn print_hwbench(hwbench: &HwBench) { + log::info!("🏁 CPU score: {}MB/s", hwbench.cpu_hashrate_score); + log::info!("🏁 Memory score: {}MB/s", hwbench.memory_memcpy_score); + + if let Some(score) = hwbench.disk_sequential_write_score { + log::info!("🏁 Disk score (seq. writes): {}MB/s", score); + } + if let Some(score) = hwbench.disk_random_write_score { + log::info!("🏁 Disk score (rand. writes): {}MB/s", score); + } +} + +/// Initializes the hardware benchmarks telemetry. 
+pub fn initialize_hwbench_telemetry( + telemetry_handle: sc_telemetry::TelemetryHandle, + hwbench: HwBench, +) -> impl std::future::Future { + let mut connect_stream = telemetry_handle.on_connect_stream(); + async move { + let payload = serde_json::to_value(&hwbench) + .expect("the `HwBench` can always be serialized into a JSON object; qed"); + let mut payload = match payload { + serde_json::Value::Object(map) => map, + _ => unreachable!("the `HwBench` always serializes into a JSON object; qed"), + }; + payload.insert("msg".into(), "sysinfo.hwbench".into()); + while connect_stream.next().await.is_some() { + telemetry_handle.send_telemetry(sc_telemetry::SUBSTRATE_INFO, payload.clone()); + } + } +} diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs new file mode 100644 index 0000000000000..ceb28447002cf --- /dev/null +++ b/client/sysinfo/src/sysinfo.rs @@ -0,0 +1,393 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::HwBench; +use rand::{seq::SliceRandom, Rng}; +use sc_telemetry::SysInfo; +use std::{ + fs::File, + io::{Seek, SeekFrom, Write}, + ops::{Deref, DerefMut}, + path::{Path, PathBuf}, + time::{Duration, Instant}, +}; + +#[inline(always)] +pub(crate) fn benchmark( + name: &str, + size: usize, + max_iterations: usize, + max_duration: Duration, + mut run: impl FnMut() -> Result<(), E>, +) -> Result { + // Run the benchmark once as a warmup to get the code into the L1 cache. + run()?; + + // Then run it multiple times and average the result. + let timestamp = Instant::now(); + let mut elapsed = Duration::default(); + let mut count = 0; + for _ in 0..max_iterations { + run()?; + + count += 1; + elapsed = timestamp.elapsed(); + + if elapsed >= max_duration { + break + } + } + + let score = (((size * count) as f64 / elapsed.as_secs_f64()) / (1024.0 * 1024.0)) as u64; + log::trace!( + "Calculated {} of {}MB/s in {} iterations in {}ms", + name, + score, + count, + elapsed.as_millis() + ); + Ok(score) +} + +/// Gathers information about node's hardware and software. +pub fn gather_sysinfo() -> SysInfo { + #[allow(unused_mut)] + let mut sysinfo = SysInfo { + cpu: None, + memory: None, + core_count: None, + linux_kernel: None, + linux_distro: None, + is_virtual_machine: None, + }; + + #[cfg(target_os = "linux")] + crate::sysinfo_linux::gather_linux_sysinfo(&mut sysinfo); + + sysinfo +} + +#[inline(never)] +fn clobber(slice: &mut [u8]) { + assert!(!slice.is_empty()); + + // Discourage the compiler from optimizing out our benchmarks. + // + // Volatile reads and writes are guaranteed to not be elided nor reordered, + // so we can use them to effectively clobber a piece of memory and prevent + // the compiler from optimizing out our technically unnecessary code. 
+ // + // This is not totally bulletproof in theory, but should work in practice. + // + // SAFETY: We've checked that the slice is not empty, so reading and writing + // its first element is always safe. + unsafe { + let value = std::ptr::read_volatile(slice.as_ptr()); + std::ptr::write_volatile(slice.as_mut_ptr(), value); + } +} + +// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in MB/s. +fn benchmark_cpu() -> u64 { + // In general the results of this benchmark are somewhat sensitive to how much + // data we hash at the time. The smaller this is the *less* MB/s we can hash, + // the bigger this is the *more* MB/s we can hash, up until a certain point + // where we can achieve roughly ~100% of what the hasher can do. If we'd plot + // this on a graph with the number of bytes we want to hash on the X axis + // and the speed in MB/s on the Y axis then we'd essentially see it grow + // logarithmically. + // + // In practice however we might not always have enough data to hit the maximum + // possible speed that the hasher can achieve, so the size set here should be + // picked in such a way as to still measure how fast the hasher is at hashing, + // but without hitting its theoretical maximum speed. + const SIZE: usize = 32 * 1024; + const MAX_ITERATIONS: usize = 4 * 1024; + const MAX_DURATION: Duration = Duration::from_millis(100); + + let mut buffer = Vec::new(); + buffer.resize(SIZE, 0x66); + let mut hash = Default::default(); + + let run = || -> Result<(), ()> { + clobber(&mut buffer); + hash = sp_core::hashing::blake2_256(&buffer); + clobber(&mut hash); + + Ok(()) + }; + + benchmark("CPU score", SIZE, MAX_ITERATIONS, MAX_DURATION, run) + .expect("benchmark cannot fail; qed") +} + +// This benchmarks the effective `memcpy` memory bandwidth available in MB/s. +// +// It doesn't technically measure the absolute maximum memory bandwidth available, +// but that's fine, because real code most of the time isn't optimized to take +// advantage of the full memory bandwidth either. +fn benchmark_memory() -> u64 { + // Ideally this should be at least as big as the CPU's L3 cache, + // and it should be big enough so that the `memcpy` takes enough + // time to be actually measurable. + // + // As long as it's big enough increasing it further won't change + // the benchmark's results. + const SIZE: usize = 64 * 1024 * 1024; + const MAX_ITERATIONS: usize = 32; + const MAX_DURATION: Duration = Duration::from_millis(100); + + let mut src = Vec::new(); + let mut dst = Vec::new(); + + // Prefault the pages; we want to measure the memory bandwidth, + // not how fast the kernel can supply us with fresh memory pages. + src.resize(SIZE, 0x66); + dst.resize(SIZE, 0x77); + + let run = || -> Result<(), ()> { + clobber(&mut src); + clobber(&mut dst); + + // SAFETY: Both vectors are of the same type and of the same size, + // so copying data between them is safe. + unsafe { + // We use `memcpy` directly here since `copy_from_slice` isn't actually + // guaranteed to be turned into a `memcpy`. + libc::memcpy(dst.as_mut_ptr().cast(), src.as_ptr().cast(), SIZE); + } + + clobber(&mut dst); + clobber(&mut src); + + Ok(()) + }; + + benchmark("memory score", SIZE, MAX_ITERATIONS, MAX_DURATION, run) + .expect("benchmark cannot fail; qed") +} + +struct TemporaryFile { + fp: Option, + path: PathBuf, +} + +impl Drop for TemporaryFile { + fn drop(&mut self) { + let _ = self.fp.take(); + + // Remove the file. 
+ // + // This has to be done *after* the benchmark, + // otherwise it changes the results as the data + // doesn't actually get properly flushed to the disk, + // since the file's not there anymore. + if let Err(error) = std::fs::remove_file(&self.path) { + log::warn!("Failed to remove the file used for the disk benchmark: {}", error); + } + } +} + +impl Deref for TemporaryFile { + type Target = File; + fn deref(&self) -> &Self::Target { + self.fp.as_ref().expect("`fp` is None only during `drop`") + } +} + +impl DerefMut for TemporaryFile { + fn deref_mut(&mut self) -> &mut Self::Target { + self.fp.as_mut().expect("`fp` is None only during `drop`") + } +} + +fn rng() -> rand_pcg::Pcg64 { + rand_pcg::Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96) +} + +fn random_data(size: usize) -> Vec { + let mut buffer = Vec::new(); + buffer.resize(size, 0); + rng().fill(&mut buffer[..]); + buffer +} + +pub fn benchmark_disk_sequential_writes(directory: &Path) -> Result { + const SIZE: usize = 64 * 1024 * 1024; + const MAX_ITERATIONS: usize = 32; + const MAX_DURATION: Duration = Duration::from_millis(300); + + let buffer = random_data(SIZE); + let path = directory.join(".disk_bench_seq_wr.tmp"); + + let fp = + File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?; + + let mut fp = TemporaryFile { fp: Some(fp), path }; + + fp.sync_all() + .map_err(|error| format!("failed to fsync the test file: {}", error))?; + + let run = || { + // Just dump everything to the disk in one go. + fp.write_all(&buffer) + .map_err(|error| format!("failed to write to the test file: {}", error))?; + + // And then make sure it was actually written to disk. + fp.sync_all() + .map_err(|error| format!("failed to fsync the test file: {}", error))?; + + // Rewind to the beginning for the next iteration of the benchmark. + fp.seek(SeekFrom::Start(0)) + .map_err(|error| format!("failed to seek to the start of the test file: {}", error))?; + + Ok(()) + }; + + benchmark("disk sequential write score", SIZE, MAX_ITERATIONS, MAX_DURATION, run) +} + +pub fn benchmark_disk_random_writes(directory: &Path) -> Result { + const SIZE: usize = 64 * 1024 * 1024; + const MAX_ITERATIONS: usize = 32; + const MAX_DURATION: Duration = Duration::from_millis(300); + + let buffer = random_data(SIZE); + let path = directory.join(".disk_bench_rand_wr.tmp"); + + let fp = + File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?; + + let mut fp = TemporaryFile { fp: Some(fp), path }; + + // Since we want to test random writes we need an existing file + // through which we can seek, so here we just populate it with some data. + fp.write_all(&buffer) + .map_err(|error| format!("failed to write to the test file: {}", error))?; + + fp.sync_all() + .map_err(|error| format!("failed to fsync the test file: {}", error))?; + + // Generate a list of random positions at which we'll issue writes. + let mut positions = Vec::with_capacity(SIZE / 4096); + { + let mut position = 0; + while position < SIZE { + positions.push(position); + position += 4096; + } + } + + positions.shuffle(&mut rng()); + + let run = || { + for &position in &positions { + fp.seek(SeekFrom::Start(position as u64)) + .map_err(|error| format!("failed to seek in the test file: {}", error))?; + + // Here we deliberately only write half of the chunk since we don't + // want the OS' disk scheduler to coalesce our writes into one single + // sequential write. 
+ // + // Also the chunk's size is deliberately exactly half of a modern disk's + // sector size to trigger an RMW cycle. + let chunk = &buffer[position..position + 2048]; + fp.write_all(&chunk) + .map_err(|error| format!("failed to write to the test file: {}", error))?; + } + + fp.sync_all() + .map_err(|error| format!("failed to fsync the test file: {}", error))?; + + Ok(()) + }; + + // We only wrote half of the bytes hence `SIZE / 2`. + benchmark("disk random write score", SIZE / 2, MAX_ITERATIONS, MAX_DURATION, run) +} + +/// Benchmarks the hardware and returns the results of those benchmarks. +/// +/// Optionally accepts a path to a `scratch_directory` to use to benchmark the disk. +pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { + #[allow(unused_mut)] + let mut hwbench = HwBench { + cpu_hashrate_score: benchmark_cpu(), + memory_memcpy_score: benchmark_memory(), + disk_sequential_write_score: None, + disk_random_write_score: None, + }; + + if let Some(scratch_directory) = scratch_directory { + hwbench.disk_sequential_write_score = + match benchmark_disk_sequential_writes(scratch_directory) { + Ok(score) => Some(score), + Err(error) => { + log::warn!("Failed to run the sequential write disk benchmark: {}", error); + None + }, + }; + + hwbench.disk_random_write_score = match benchmark_disk_random_writes(scratch_directory) { + Ok(score) => Some(score), + Err(error) => { + log::warn!("Failed to run the random write disk benchmark: {}", error); + None + }, + }; + } + + hwbench +} + +#[cfg(test)] +mod tests { + use super::*; + + #[cfg(target_os = "linux")] + #[test] + fn test_gather_sysinfo_linux() { + let sysinfo = gather_sysinfo(); + assert!(sysinfo.cpu.unwrap().len() > 0); + assert!(sysinfo.core_count.unwrap() > 0); + assert!(sysinfo.memory.unwrap() > 0); + assert_ne!(sysinfo.is_virtual_machine, None); + assert_ne!(sysinfo.linux_kernel, None); + assert_ne!(sysinfo.linux_distro, None); + } + + #[test] + fn test_benchmark_cpu() { + assert_ne!(benchmark_cpu(), 0); + } + + #[test] + fn test_benchmark_memory() { + assert_ne!(benchmark_memory(), 0); + } + + #[test] + fn test_benchmark_disk_sequential_writes() { + assert!(benchmark_disk_sequential_writes("./".as_ref()).unwrap() > 0); + } + + #[test] + fn test_benchmark_disk_random_writes() { + assert!(benchmark_disk_random_writes("./".as_ref()).unwrap() > 0); + } +} diff --git a/client/sysinfo/src/sysinfo_linux.rs b/client/sysinfo/src/sysinfo_linux.rs new file mode 100644 index 0000000000000..41ab6014cbef0 --- /dev/null +++ b/client/sysinfo/src/sysinfo_linux.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use regex::Regex; +use sc_telemetry::SysInfo; +use std::collections::HashSet; + +fn read_file(path: &str) -> Option { + match std::fs::read_to_string(path) { + Ok(data) => Some(data), + Err(error) => { + log::warn!("Failed to read '{}': {}", path, error); + None + }, + } +} + +fn extract(data: &str, regex: &str) -> Option +where + T: std::str::FromStr, +{ + Regex::new(regex) + .expect("regex is correct; qed") + .captures(&data)? + .get(1)? + .as_str() + .parse() + .ok() +} + +const LINUX_REGEX_CPU: &str = r#"(?m)^model name\s*:\s*([^\n]+)"#; +const LINUX_REGEX_PHYSICAL_ID: &str = r#"(?m)^physical id\s*:\s*(\d+)"#; +const LINUX_REGEX_CORE_ID: &str = r#"(?m)^core id\s*:\s*(\d+)"#; +const LINUX_REGEX_HYPERVISOR: &str = r#"(?m)^flags\s*:.+?\bhypervisor\b"#; +const LINUX_REGEX_MEMORY: &str = r#"(?m)^MemTotal:\s*(\d+) kB"#; +const LINUX_REGEX_DISTRO: &str = r#"(?m)^PRETTY_NAME\s*=\s*"?(.+?)"?$"#; + +pub fn gather_linux_sysinfo(sysinfo: &mut SysInfo) { + if let Some(data) = read_file("/proc/cpuinfo") { + sysinfo.cpu = extract(&data, LINUX_REGEX_CPU); + sysinfo.is_virtual_machine = + Some(Regex::new(LINUX_REGEX_HYPERVISOR).unwrap().is_match(&data)); + + // The /proc/cpuinfo returns a list of all of the hardware threads. + // + // Here we extract all of the unique {CPU ID, core ID} pairs to get + // the total number of cores. + let mut set: HashSet<(u32, u32)> = HashSet::new(); + for chunk in data.split("\n\n") { + let pid = extract(chunk, LINUX_REGEX_PHYSICAL_ID); + let cid = extract(chunk, LINUX_REGEX_CORE_ID); + if let (Some(pid), Some(cid)) = (pid, cid) { + set.insert((pid, cid)); + } + } + + if !set.is_empty() { + sysinfo.core_count = Some(set.len() as u32); + } + } + + if let Some(data) = read_file("/proc/meminfo") { + sysinfo.memory = extract(&data, LINUX_REGEX_MEMORY).map(|memory: u64| memory * 1024); + } + + if let Some(data) = read_file("/etc/os-release") { + sysinfo.linux_distro = extract(&data, LINUX_REGEX_DISTRO); + } + + // NOTE: We don't use the `nix` crate to call this since it doesn't + // currently check for errors. + unsafe { + // SAFETY: The `utsname` is full of byte arrays, so this is safe. + let mut uname: libc::utsname = std::mem::zeroed(); + if libc::uname(&mut uname) < 0 { + log::warn!("uname failed: {}", std::io::Error::last_os_error()); + } else { + let length = + uname.release.iter().position(|&byte| byte == 0).unwrap_or(uname.release.len()); + let release = std::slice::from_raw_parts(uname.release.as_ptr().cast(), length); + if let Ok(release) = std::str::from_utf8(release) { + sysinfo.linux_kernel = Some(release.into()); + } + } + } +} diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 68128e0207085..d570701a3d9ec 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -101,6 +101,38 @@ pub struct ConnectionMessage { pub startup_time: String, /// Node's network ID. pub network_id: String, + + /// Node's OS. + pub target_os: String, + + /// Node's ISA. + pub target_arch: String, + + /// Node's target platform ABI or libc. + pub target_env: String, + + /// Node's software and hardware information. + pub sysinfo: Option, +} + +/// Hardware and software information for the node. +/// +/// Gathering most of this information is highly OS-specific, +/// so most of the fields here are optional. +#[derive(Debug, Serialize)] +pub struct SysInfo { + /// The exact CPU model. + pub cpu: Option, + /// The total amount of memory, in bytes. + pub memory: Option, + /// The number of physical CPU cores. 
+ pub core_count: Option, + /// The Linux kernel version. + pub linux_kernel: Option, + /// The exact Linux distribution used. + pub linux_distro: Option, + /// Whether the node's running under a virtual machine. + pub is_virtual_machine: Option, } /// Telemetry worker. diff --git a/client/telemetry/src/node.rs b/client/telemetry/src/node.rs index ec857ede70fdb..f3d746abb2b18 100644 --- a/client/telemetry/src/node.rs +++ b/client/telemetry/src/node.rs @@ -179,8 +179,20 @@ where Poll::Ready(Ok(sink)) => { log::debug!(target: "telemetry", "✅ Connected to {}", self.addr); - for sender in self.telemetry_connection_notifier.iter_mut() { - let _ = sender.send(()); + { + let mut index = 0; + while index < self.telemetry_connection_notifier.len() { + let sender = &mut self.telemetry_connection_notifier[index]; + if let Err(error) = sender.try_send(()) { + if !error.is_disconnected() { + log::debug!(target: "telemetry", "Failed to send a telemetry connection notification: {}", error); + } else { + self.telemetry_connection_notifier.swap_remove(index); + continue + } + } + index += 1; + } } let buf = self diff --git a/utils/build-script-utils/src/version.rs b/utils/build-script-utils/src/version.rs index 773949e30d8d4..9432f23e9b8ee 100644 --- a/utils/build-script-utils/src/version.rs +++ b/utils/build-script-utils/src/version.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use platforms::*; use std::{borrow::Cow, process::Command}; /// Generate the `cargo:` key output @@ -42,26 +41,13 @@ pub fn generate_cargo_keys() { println!("cargo:rustc-env=SUBSTRATE_CLI_IMPL_VERSION={}", get_version(&commit)) } -fn get_platform() -> String { - let env_dash = if TARGET_ENV.is_some() { "-" } else { "" }; - - format!( - "{}-{}{}{}", - TARGET_ARCH.as_str(), - TARGET_OS.as_str(), - env_dash, - TARGET_ENV.map(|x| x.as_str()).unwrap_or(""), - ) -} - fn get_version(impl_commit: &str) -> String { let commit_dash = if impl_commit.is_empty() { "" } else { "-" }; format!( - "{}{}{}-{}", + "{}{}{}", std::env::var("CARGO_PKG_VERSION").unwrap_or_default(), commit_dash, - impl_commit, - get_platform(), + impl_commit ) }
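For downstream code that wants to use the new `sc-sysinfo` crate outside of the node-service wiring shown above, a minimal usage sketch of its public API (the `/tmp` scratch directory is an arbitrary illustration; the node itself passes its database path and, when telemetry is enabled, additionally forwards the results via `initialize_hwbench_telemetry`):

    // Minimal sketch, assuming `sc-sysinfo` is declared as a dependency and a `log`
    // backend (e.g. `env_logger`) has been initialized so the printed info is visible.
    fn main() {
        // Static hardware/software information; on non-Linux targets most fields stay `None`.
        let sysinfo = sc_sysinfo::gather_sysinfo();
        sc_sysinfo::print_sysinfo(&sysinfo);

        // The CPU and memory benchmarks always run; the disk benchmarks only run
        // when a scratch directory is provided.
        let hwbench = sc_sysinfo::gather_hwbench(Some(std::path::Path::new("/tmp")));
        sc_sysinfo::print_hwbench(&hwbench);
    }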