Create a macro which automates creation of benchmark test suites. #8104
```diff
@@ -830,6 +830,26 @@ macro_rules! impl_benchmark {
 				return Ok(results);
 			}
 		}
+
+		/// Test a particular benchmark by name.
+		///
+		/// This isn't called `test_benchmark_by_name` just in case some end-user eventually
+		/// writes a benchmark, itself called `by_name`; the function would be shadowed in
+		/// that case.
+		#[cfg(test)]
+		fn test_bench_by_name<T>(name: &[u8]) -> Result<(), &'static str>
+		where
+			T: Config + frame_system::Config, $( $where_clause )*
+		{
+			let name = sp_std::str::from_utf8(name)
+				.map_err(|_| "`name` is not a valid utf8 string!")?;
+			match name {
+				$( stringify!($name) => {
+					$crate::paste::paste! { [< test_benchmark_ $name >]::<T>() }
+				} )*
+				_ => Err("Could not find test for requested benchmark."),
+			}
+		}
 	};
 }
```
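To make the generated dispatcher concrete, here is a minimal usage sketch. It assumes `impl_benchmark!` has already been expanded in the surrounding module (so `test_bench_by_name` exists) and that the pallet's mock provides a `Test` runtime and a zero-argument `new_test_ext()` builder; the benchmark name `accumulate_dummy` is borrowed from the `pallet_example` expansion shown further down. None of this is part of the diff itself.

```rust
// Sketch only: `Test`, `new_test_ext`, and the benchmark names stand in for
// a pallet's own test scaffolding.
#[test]
fn dispatch_one_benchmark_by_name() {
	new_test_ext().execute_with(|| {
		// Names are matched against `stringify!($name)` and passed as bytes;
		// this dispatches to `test_benchmark_accumulate_dummy::<Test>()`.
		assert_eq!(test_bench_by_name::<Test>(b"accumulate_dummy"), Ok(()));
		// Unknown names return an error rather than panicking.
		assert!(test_bench_by_name::<Test>(b"not_a_benchmark").is_err());
	});
}
```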
```diff
@@ -903,6 +923,85 @@ macro_rules! impl_benchmark_test {
 	};
 }
 
+/// This creates a test suite which runs the module's benchmarks.
+///
+/// When called in [`pallet_example`] as
+///
+/// ```rust,ignore
+/// impl_benchmark_test_suite!(crate::tests::new_test_ext, crate::tests::Test);
+/// ```
+///
+/// it expands to the equivalent of:
+///
+/// ```rust,ignore
+/// #[cfg(test)]
+/// mod tests {
+/// 	use super::*;
+/// 	use crate::tests::{new_test_ext, Test};
+/// 	use frame_support::assert_ok;
+///
+/// 	#[test]
+/// 	fn test_benchmarks() {
+/// 		new_test_ext().execute_with(|| {
+/// 			assert_ok!(test_benchmark_accumulate_dummy::<Test>());
+/// 			assert_ok!(test_benchmark_set_dummy::<Test>());
+/// 			assert_ok!(test_benchmark_another_set_dummy::<Test>());
+/// 			assert_ok!(test_benchmark_sort_vector::<Test>());
+/// 		});
+/// 	}
+/// }
+/// ```
+///
+/// ## Arguments
+///
+/// The first argument, `new_test_ext`, must be the path to a function which takes no arguments
+/// and returns either an `sp_io::TestExternalities` or some other type with an identical
+/// interface.
+///
+/// The second argument, `test`, must be the path to the runtime. The item to which this must
+/// refer will generally take the form:
+///
+/// ```rust,ignore
+/// frame_support::construct_runtime!(
+/// 	pub enum Test where ...
+/// 	{ ... }
+/// );
+/// ```
+///
+// ## Notes (not for rustdoc)
+//
+// The biggest challenge for this macro is communicating the actual test functions to be run. We
+// can't just build an array of function pointers to each test function and iterate over it,
+// because the test functions are parameterized by the `Test` type. That's incompatible with
+// monomorphization: if it were legal, then even if the compiler detected and monomorphized the
+// functions into only the types of the callers, which implementation would the function pointer
+// point to? There would need to be some kind of syntax for selecting the destination of the
+// pointer according to a generic argument, and in general it would be a huge mess and not worth
+// it.
+//
+// Instead, we're going to steal a trick from `fn run_benchmark`: generate a function which is
+// itself parameterized by `Test`, which accepts a `&[u8]` parameter containing the name of the
+// benchmark, and dispatches based on that to the appropriate real test implementation. Then we
+// can just iterate over the `Benchmarking::benchmarks` list to run the actual implementations.
+#[macro_export]
+macro_rules! impl_benchmark_test_suite {
+	($new_test_ext:path, $test:path) => {
+		#[cfg(test)]
+		mod tests {
+			use super::*;
+			use $crate::frame_support::assert_ok;
+
+			#[test]
+			fn test_benchmarks() {
+				$new_test_ext().execute_with(|| {
+					use $crate::Benchmarking;
+					for benchmark_name in Module::<$test>::benchmarks(true) {
+						assert_ok!(test_bench_by_name::<$test>(benchmark_name));
+					}
+				});
+			}
+		}
+	};
+}
 
 /// show error message and debugging info for the case of an error happening
 /// during a benchmark
 pub fn show_benchmark_debug_info(
```
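The dispatch-by-name trick described in the notes above can also be shown in isolation. The following is a self-contained sketch under illustrative names (none of them come from this PR): one function, generic over the runtime, matches on the benchmark name and calls the corresponding monomorphized test, which avoids ever needing function pointers to generic functions.

```rust
// Self-contained illustration of name-based dispatch over generic test
// functions. All names here are illustrative.
trait Config {}

// Two stand-ins for macro-generated per-benchmark tests. The type parameter
// mirrors the real tests' dependence on the runtime type.
fn test_benchmark_foo<T: Config>() -> Result<(), &'static str> { Ok(()) }
fn test_benchmark_bar<T: Config>() -> Result<(), &'static str> { Ok(()) }

// A single generic entry point: monomorphizing it for a concrete `T` also
// monomorphizes every test it can dispatch to, so no function pointers to
// generic functions are needed.
fn test_bench_by_name<T: Config>(name: &[u8]) -> Result<(), &'static str> {
	match core::str::from_utf8(name).map_err(|_| "not utf8")? {
		"foo" => test_benchmark_foo::<T>(),
		"bar" => test_benchmark_bar::<T>(),
		_ => Err("unknown benchmark"),
	}
}

struct Runtime;
impl Config for Runtime {}

fn main() {
	// Iterate a list of names (as `Benchmarking::benchmarks` would supply)
	// and push each one through the single generic dispatcher.
	for name in [&b"foo"[..], &b"bar"[..]] {
		assert!(test_bench_by_name::<Runtime>(name).is_ok());
	}
}
```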
The remaining two hunks touch `add_benchmark!`; in each, the old and new `.map_err(|e| {` lines are textually identical here, so the change they record is whitespace-only.

```diff
@@ -1031,7 +1130,7 @@ macro_rules! add_benchmark {
 					*repeat,
 					whitelist,
 					*verify,
-				).map_err(|e| {
+				).map_err(|e| {
 					$crate::show_benchmark_debug_info(
 						instance_string,
 						benchmark,
```

```diff
@@ -1058,7 +1157,7 @@ macro_rules! add_benchmark {
 					*repeat,
 					whitelist,
 					*verify,
-				).map_err(|e| {
+				).map_err(|e| {
 					$crate::show_benchmark_debug_info(
 						instance_string,
 						benchmark,
```
Taking no arguments is a bad assumption for this function, unfortunately. Take a look here:
https://github.com/paritytech/substrate/blob/master/frame/babe/src/benchmarking.rs#L76

One thing that will help you figure out whether this PR satisfies the needs we have is to replace all of the existing benchmark test instances with your new macro and confirm that everything compiles. You will have to be careful of such customization instances (although for any really heavy customization we can simply say it is not supported by the macro and should be implemented manually).
Good catch! Working on it.
a6cf5d4 should resolve this.
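For context on the review exchange above: the babe benchmarking tests that the link points to build their test externalities with an argument, which the two-argument form of the macro cannot express, since it always expands to a bare `$new_test_ext()` call. The sketch below is a rough rendering of that pattern; the builder signature, its argument, and the benchmark name are assumptions based on the linked file, not copied from this PR.

```rust
// Hedged sketch of the babe-style customization referenced in the review
// comment. `new_test_ext` takes an argument (here, an assumed authority
// count), so a macro expansion that calls `$new_test_ext()` with no
// arguments cannot produce this call site.
#[cfg(test)]
mod tests {
	use super::*;
	use crate::mock::*;
	use frame_support::assert_ok;

	#[test]
	fn test_benchmarks() {
		// The argument is exactly what `impl_benchmark_test_suite!`
		// cannot currently supply.
		new_test_ext(3).execute_with(|| {
			assert_ok!(test_benchmark_check_equivocation_proof::<Test>());
		});
	}
}
```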