
benchmark timeout when benchtime set too long #49974

Closed · wants to merge 8 commits
15 changes: 11 additions & 4 deletions src/testing/benchmark.go
@@ -323,10 +323,17 @@ func (b *B) launch() {
// which can hide an order of magnitude in execution time.
// So multiply first, then divide.
n = goalns * prevIters / prevns
// Run more iterations than we think we'll need (1.2x).
n += n / 5
// Don't grow too fast in case we had timing errors previously.
n = min(n, 100*last)
// If goalns can't be recovered from n, the multiplication overflowed.
if n*prevns/prevIters != goalns {
// An overflow means the benchmark still needs far more iterations;
// jump ahead by a bounded factor so the loop doesn't need so many
// rounds that it times out.
n = 100 * last
} else {
// Run more iterations than we think we'll need (1.2x).
n += n / 5
// Don't grow too fast in case we had timing errors previously.
n = min(n, 100*last)
}
// Be sure to run at least one more than last time.
n = max(n, last+1)
// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
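For context, here is a standalone sketch of the round-trip overflow check introduced above. It is not part of this patch; the function name predictN and the sample numbers are illustrative assumptions. The next iteration count is predicted as goalns*prevIters/prevns, and if dividing back does not recover goalns, the multiplication overflowed and growth falls back to the bounded 100*last path.

package main

import "fmt"

// predictN mirrors the patched prediction logic (requires Go 1.21+ for min/max);
// names and numbers are illustrative, not the testing package's API.
func predictN(goalns, prevIters, prevns, last int64) int64 {
	// Multiply first, then divide, to avoid losing precision.
	n := goalns * prevIters / prevns
	if n*prevns/prevIters != goalns {
		// goalns*prevIters overflowed int64, so n is garbage.
		// Fall back to bounded growth instead of trusting the prediction.
		n = 100 * last
	} else {
		// Run more iterations than predicted (1.2x).
		n += n / 5
		// Don't grow too fast in case of earlier timing errors.
		n = min(n, 100*last)
	}
	// Always run at least one more iteration than last time.
	n = max(n, last+1)
	// Cap at 1e9, which also keeps n in int range on 32-bit platforms.
	return min(n, 1e9)
}

func main() {
	// Hypothetical round: -benchtime=150s (goalns=1.5e11) and a previous run of
	// 1e8 iterations in 1e8 ns. goalns*prevIters = 1.5e19 overflows int64, so the
	// guard fires and n is capped at 1e9 instead of becoming a bogus value.
	fmt.Println(predictN(150e9, 1e8, 1e8, 1e8)) // prints 1000000000
}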
67 changes: 43 additions & 24 deletions src/testing/benchmark_test.go
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing_test
package testing

import (
"bytes"
"runtime"
"sort"
"strings"
"sync/atomic"
"testing"
"text/template"
"time"
)
@@ -32,19 +31,19 @@ var prettyPrintTests = []struct {
{0.0000999949999, " 0.0001000 x"},
}

func TestPrettyPrint(t *testing.T) {
func TestPrettyPrint(t *T) {
for _, tt := range prettyPrintTests {
buf := new(strings.Builder)
testing.PrettyPrint(buf, tt.v, "x")
PrettyPrint(buf, tt.v, "x")
if tt.expected != buf.String() {
t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
}
}
}

func TestResultString(t *testing.T) {
func TestResultString(t *T) {
// Test fractional ns/op handling
r := testing.BenchmarkResult{
r := BenchmarkResult{
N: 100,
T: 240 * time.Nanosecond,
}
@@ -68,15 +67,15 @@ func TestResultString(t *testing.T) {
}
}

func TestRunParallel(t *testing.T) {
if testing.Short() {
func TestRunParallel(t *T) {
if Short() {
t.Skip("skipping in short mode")
}
testing.Benchmark(func(b *testing.B) {
Benchmark(func(b *B) {
procs := uint32(0)
iters := uint64(0)
b.SetParallelism(3)
b.RunParallel(func(pb *testing.PB) {
b.RunParallel(func(pb *PB) {
atomic.AddUint32(&procs, 1)
for pb.Next() {
atomic.AddUint64(&iters, 1)
@@ -91,9 +90,9 @@ func TestRunParallel(t *testing.T) {
})
}

func TestRunParallelFail(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
func TestRunParallelFail(t *T) {
Benchmark(func(b *B) {
b.RunParallel(func(pb *PB) {
// The function must be able to log/abort
// w/o crashing/deadlocking the whole benchmark.
b.Log("log")
@@ -102,9 +101,9 @@ func TestRunParallelFail(t *testing.T) {
})
}

func TestRunParallelFatal(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
func TestRunParallelFatal(t *T) {
Benchmark(func(b *B) {
b.RunParallel(func(pb *PB) {
for pb.Next() {
if b.N > 1 {
b.Fatal("error")
@@ -114,9 +113,9 @@ func TestRunParallelFatal(t *testing.T) {
})
}

func TestRunParallelSkipNow(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
func TestRunParallelSkipNow(t *T) {
Benchmark(func(b *B) {
b.RunParallel(func(pb *PB) {
for pb.Next() {
if b.N > 1 {
b.SkipNow()
@@ -128,11 +127,11 @@ func TestRunParallelSkipNow(t *testing.T) {

func ExampleB_RunParallel() {
// Parallel benchmark for text/template.Template.Execute on a single object.
testing.Benchmark(func(b *testing.B) {
Benchmark(func(b *B) {
templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
// RunParallel will create GOMAXPROCS goroutines
// and distribute work among them.
b.RunParallel(func(pb *testing.PB) {
b.RunParallel(func(pb *PB) {
// Each goroutine has its own bytes.Buffer.
var buf bytes.Buffer
for pb.Next() {
@@ -144,8 +143,8 @@ func ExampleB_RunParallel() {
})
}

func TestReportMetric(t *testing.T) {
res := testing.Benchmark(func(b *testing.B) {
func TestReportMetric(t *T) {
res := Benchmark(func(b *B) {
b.ReportMetric(12345, "ns/op")
b.ReportMetric(0.2, "frobs/op")
})
@@ -164,7 +163,7 @@ func TestReportMetric(t *testing.T) {
func ExampleB_ReportMetric() {
// This reports a custom benchmark metric relevant to a
// specific algorithm (in this case, sorting).
testing.Benchmark(func(b *testing.B) {
Benchmark(func(b *B) {
var compares int64
for i := 0; i < b.N; i++ {
s := []int{5, 4, 3, 2, 1}
@@ -178,3 +177,23 @@ func ExampleB_ReportMetric() {
b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
})
}

func TestBenchmarkLaunch(t *T) {
tmp := benchTime
t.cleanups = append(t.cleanups, func() {
t.Logf("reset benchTime")
benchTime = tmp
})
// Set a long benchtime.
benchTime = durationOrCountFlag{
d: 150 * time.Second,
}
var try int32
Benchmark(func(b *B) {
c := atomic.AddInt32(&try, 1)
t.Logf("try %d %d\n", c, b.N)
if c > 6 {
t.Fatalf("benchmark try to many times %d", c)
}
})
}
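As a rough illustration of the bound used in the test above: with a 150s benchtime and a body costing on the order of a nanosecond, the patched prediction grows n roughly 1 -> 100 -> 1e4 -> 1e6 -> 1e8 -> 1e9, about six rounds before the 1e9 cap ends the loop, which is presumably why the test tolerates at most six runs of the benchmark body. The simulation below is a standalone sketch with an assumed per-op cost, not the testing package's actual launch loop.

package main

import "fmt"

func main() {
	const goalns int64 = 150e9 // -benchtime=150s
	const perOp int64 = 1      // assumed cost of one iteration, in ns
	n := int64(1)
	for round := 1; ; round++ {
		prevns := n * perOp // stand-in for the measured duration of the last run
		fmt.Printf("round %d: n=%d (%dns)\n", round, n, prevns)
		if prevns >= goalns || n >= 1e9 {
			break
		}
		last, prevIters := n, n
		next := goalns * prevIters / prevns
		if next*prevns/prevIters != goalns {
			// Overflow: fall back to bounded growth, as in the patch.
			// Without this guard, next would be negative here and the
			// loop would crawl forward one iteration per round.
			next = 100 * last
		} else {
			next += next / 5
			next = min(next, 100*last)
		}
		next = max(next, last+1)
		n = min(next, 1e9)
	}
}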