
Commit ac6dea7

rhysh authored and gopherbot committed
runtime: lower mutex contention test expectations
As of https://go.dev/cl/586796, the runtime/metrics view of internal mutex contention is sampled at 1 per gTrackingPeriod, rather than either 1 (immediately prior to CL 586796) or the more frequent of gTrackingPeriod or the mutex profiling rate (Go 1.22). Thus, we no longer have a real lower bound on the amount of contention that runtime/metrics will report. Relax the test's expectations again.

For #64253

Change-Id: I94e1d92348a03599a819ec8ac785a0eb3c1ddd73
Reviewed-on: https://go-review.googlesource.com/c/go/+/587515
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Carlos Amedee <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
Auto-Submit: Rhys Hiltner <[email protected]>
1 parent b6fa505 commit ac6dea7
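
For readers unfamiliar with the metric in question, here is a minimal standalone sketch (not part of this commit) of reading the /sync/mutex/wait/total:seconds metric that the test asserts on, using the public runtime/metrics API:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// Sample the same metric the test checks for growth.
	const name = "/sync/mutex/wait/total:seconds"
	samples := []metrics.Sample{{Name: name}}
	metrics.Read(samples)
	if samples[0].Value.Kind() == metrics.KindFloat64 {
		fmt.Printf("%s = %fs\n", name, samples[0].Value.Float64())
	}
}

The test drives contention on runtime-internal locks and checks that this metric increases; the commit relaxes how strictly that increase is bounded.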

File tree

1 file changed, +21 -8 lines changed


src/runtime/metrics_test.go

@@ -1036,6 +1036,13 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 			if metricGrowth == 0 && strictTiming {
 				// If the critical section is very short, systems with low timer
 				// resolution may be unable to measure it via nanotime.
+				//
+				// This is sampled at 1 per gTrackingPeriod, but the explicit
+				// runtime.mutex tests create 200 contention events. Observing
+				// zero of those has a probability of (7/8)^200 = 2.5e-12 which
+				// is acceptably low (though the calculation has a tenuous
+				// dependency on cheaprandn being a good-enough source of
+				// entropy).
 				t.Errorf("no increase in /sync/mutex/wait/total:seconds metric")
 			}
 			// This comparison is possible because the time measurements in support of
@@ -1111,7 +1118,7 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 	name := t.Name()
 
 	t.Run("runtime.lock", func(t *testing.T) {
-		mus := make([]runtime.Mutex, 100)
+		mus := make([]runtime.Mutex, 200)
 		var needContention atomic.Int64
 		baseDelay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks
 		fastDelayMicros := baseDelay.Microseconds()
@@ -1207,13 +1214,19 @@ func TestRuntimeLockMetricsAndProfile(t *testing.T) {
 			needContention.Store(int64(len(mus) - 1))
 			metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t)
 
-			if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
-				// The test imposes a delay with usleep, verified with calls to
-				// nanotime. Compare against the runtime/metrics package's view
-				// (based on nanotime) rather than runtime/pprof's view (based
-				// on cputicks).
-				t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
-			}
+			t.Run("metric", func(t *testing.T) {
+				// The runtime/metrics view is sampled at 1 per gTrackingPeriod,
+				// so we don't have a hard lower bound here.
+				testenv.SkipFlaky(t, 64253)
+
+				if have, want := metricGrowth, baseDelay.Seconds()*float64(len(mus)); have < want {
+					// The test imposes a delay with usleep, verified with calls to
+					// nanotime. Compare against the runtime/metrics package's view
+					// (based on nanotime) rather than runtime/pprof's view (based
+					// on cputicks).
+					t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want)
+				}
+			})
 			if have, want := n, int64(len(mus)); have != want {
 				t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want)
 			}
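
As a quick sanity check of the probability quoted in the new comment, a small sketch of the arithmetic (assuming each of the 200 contention events is independently sampled with probability 1/8, consistent with the 7/8 per-event miss chance the comment uses):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Chance that 1-in-8 sampling skips every one of the 200 events: (7/8)^200.
	pZero := math.Pow(7.0/8.0, 200)
	fmt.Printf("P(no samples) = %.1e\n", pZero) // prints roughly 2.5e-12, matching the comment
}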
