Skip to content

Commit d73545c

Browse files
committed
Run benchmarks once, as a test by default.
E.g. if `foo.rs` looks like #![feature(test)] extern crate test; #[bench] fn bar(b: &mut test::Bencher) { b.iter(|| { 1 }) } #[test] fn baz() {} #[bench] fn qux(b: &mut test::Bencher) { b.iter(|| { panic!() }) } Then $ rustc --test foo.rs $ ./foo running 3 tests test baz ... ok test qux ... FAILED test bar ... ok failures: ---- qux stdout ---- thread 'qux' panicked at 'explicit panic', bench.rs:17 failures: qux test result: FAILED. 2 passed; 1 failed; 0 ignored; 0 measured $ ./foo --bench ba running 2 tests test baz ... ignored test bar ... bench: 97 ns/iter (+/- 74) test result: ok. 0 passed; 0 failed; 1 ignored; 1 measured In particular, the two benchmarks are being run as tests in the default mode. This helps for the main distribution, since benchmarks are only run with `PLEASE_BENCH=1`, which is rarely set (and never set on the test bots), and helps for code-coverage tools: benchmarks are run and so don't count as dead code. Fixes #15842.
1 parent b594036 commit d73545c

File tree

2 files changed

+46
-15
lines changed

2 files changed

+46
-15
lines changed

src/compiletest/compiletest.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ pub fn test_opts(config: &Config) -> test::TestOpts {
269269
run_ignored: config.run_ignored,
270270
logfile: config.logfile.clone(),
271271
run_tests: true,
272-
run_benchmarks: true,
272+
bench_benchmarks: true,
273273
nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(),
274274
color: test::AutoColor,
275275
}

src/libtest/lib.rs

+45-14
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ impl TestDesc {
139139
}
140140

141141
/// Represents a benchmark function.
142-
pub trait TDynBenchFn {
142+
pub trait TDynBenchFn: Send {
143143
fn run(&self, harness: &mut Bencher);
144144
}
145145

@@ -285,7 +285,7 @@ pub struct TestOpts {
285285
pub filter: Option<String>,
286286
pub run_ignored: bool,
287287
pub run_tests: bool,
288-
pub run_benchmarks: bool,
288+
pub bench_benchmarks: bool,
289289
pub logfile: Option<PathBuf>,
290290
pub nocapture: bool,
291291
pub color: ColorConfig,
@@ -298,7 +298,7 @@ impl TestOpts {
298298
filter: None,
299299
run_ignored: false,
300300
run_tests: false,
301-
run_benchmarks: false,
301+
bench_benchmarks: false,
302302
logfile: None,
303303
nocapture: false,
304304
color: AutoColor,
@@ -377,8 +377,8 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
377377
let logfile = matches.opt_str("logfile");
378378
let logfile = logfile.map(|s| PathBuf::from(&s));
379379

380-
let run_benchmarks = matches.opt_present("bench");
381-
let run_tests = ! run_benchmarks ||
380+
let bench_benchmarks = matches.opt_present("bench");
381+
let run_tests = ! bench_benchmarks ||
382382
matches.opt_present("test");
383383

384384
let mut nocapture = matches.opt_present("nocapture");
@@ -400,7 +400,7 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
400400
filter: filter,
401401
run_ignored: run_ignored,
402402
run_tests: run_tests,
403-
run_benchmarks: run_benchmarks,
403+
bench_benchmarks: bench_benchmarks,
404404
logfile: logfile,
405405
nocapture: nocapture,
406406
color: color,
@@ -778,7 +778,11 @@ fn run_tests<F>(opts: &TestOpts,
778778
mut callback: F) -> io::Result<()> where
779779
F: FnMut(TestEvent) -> io::Result<()>,
780780
{
781-
let filtered_tests = filter_tests(opts, tests);
781+
let mut filtered_tests = filter_tests(opts, tests);
782+
if !opts.bench_benchmarks {
783+
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
784+
}
785+
782786
let filtered_descs = filtered_tests.iter()
783787
.map(|t| t.desc.clone())
784788
.collect();
@@ -824,13 +828,15 @@ fn run_tests<F>(opts: &TestOpts,
824828
pending -= 1;
825829
}
826830

827-
// All benchmarks run at the end, in serial.
828-
// (this includes metric fns)
829-
for b in filtered_benchs_and_metrics {
830-
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
831-
run_test(opts, !opts.run_benchmarks, b, tx.clone());
832-
let (test, result, stdout) = rx.recv().unwrap();
833-
try!(callback(TeResult(test, result, stdout)));
831+
if opts.bench_benchmarks {
832+
// All benchmarks run at the end, in serial.
833+
// (this includes metric fns)
834+
for b in filtered_benchs_and_metrics {
835+
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
836+
run_test(opts, false, b, tx.clone());
837+
let (test, result, stdout) = rx.recv().unwrap();
838+
try!(callback(TeResult(test, result, stdout)));
839+
}
834840
}
835841
Ok(())
836842
}
@@ -893,6 +899,22 @@ pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescA
893899
filtered
894900
}
895901

902+
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
903+
// convert benchmarks to tests, if we're not benchmarking them
904+
tests.into_iter().map(|x| {
905+
let testfn = match x.testfn {
906+
DynBenchFn(bench) => {
907+
DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
908+
}
909+
StaticBenchFn(benchfn) => {
910+
DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
911+
}
912+
f => f
913+
};
914+
TestDescAndFn { desc: x.desc, testfn: testfn }
915+
}).collect()
916+
}
917+
896918
pub fn run_test(opts: &TestOpts,
897919
force_ignore: bool,
898920
test: TestDescAndFn,
@@ -1159,6 +1181,15 @@ pub mod bench {
11591181
mb_s: mb_s as usize
11601182
}
11611183
}
1184+
1185+
pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1186+
let mut bs = Bencher {
1187+
iterations: 0,
1188+
dur: Duration::nanoseconds(0),
1189+
bytes: 0
1190+
};
1191+
bs.bench_n(1, f);
1192+
}
11621193
}
11631194

11641195
#[cfg(test)]

0 commit comments

Comments
 (0)