From d96cb9b395eed4dbd64a5425fddf8154f10331fe Mon Sep 17 00:00:00 2001
From: qiulaidongfeng <2645477756@qq.com>
Date: Sat, 20 Apr 2024 17:16:20 +0800
Subject: [PATCH 1/3] all: move procPin and procUnpin to internal/runtime

For #65355

Change-Id: I42c04ade295d2900f55596ad2f223c9c93dd740f
---
 src/go/build/deps_test.go |  5 ++++-
 src/runtime/proc.go       | 36 ------------------------------------
 src/sync/atomic/value.go  | 23 ++++++++++-------------
 src/sync/export_test.go   |  6 ++++--
 src/sync/pool.go          | 13 ++++++-------
 5 files changed, 24 insertions(+), 59 deletions(-)

diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 59546698747634..5d429915d5914e 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -50,6 +50,8 @@ var depsRules = `
     unicode/utf8, unicode/utf16, unicode,
     unsafe;
 
+    unsafe < internal/runtime;
+
     # These packages depend only on internal/goarch and unsafe.
     internal/goarch, unsafe
     < internal/abi, internal/chacha8rand;
@@ -64,7 +66,8 @@ var depsRules = `
     internal/goarch,
     internal/godebugs,
     internal/goexperiment,
-    internal/goos
+    internal/goos,
+    internal/runtime
     < internal/bytealg
     < internal/itoa
     < internal/unsafeheader
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index cb5a80455df091..c284572a13563f 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -6921,42 +6921,6 @@ func procUnpin() {
     gp.m.locks--
 }
 
-//go:linkname sync_runtime_procPin sync.runtime_procPin
-//go:nosplit
-func sync_runtime_procPin() int {
-    return procPin()
-}
-
-//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
-//go:nosplit
-func sync_runtime_procUnpin() {
-    procUnpin()
-}
-
-//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
-//go:nosplit
-func sync_atomic_runtime_procPin() int {
-    return procPin()
-}
-
-//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
-//go:nosplit
-func sync_atomic_runtime_procUnpin() {
-    procUnpin()
-}
-
-//go:linkname internal_weak_runtime_procPin internal/weak.runtime_procPin
-//go:nosplit
-func internal_weak_runtime_procPin() int {
-    return procPin()
-}
-
-//go:linkname internal_weak_runtime_procUnpin internal/weak.runtime_procUnpin
-//go:nosplit
-func internal_weak_runtime_procUnpin() {
-    procUnpin()
-}
-
 // Active spinning for sync.Mutex.
 //
 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
index 0cfc5f9496c810..c90cec33f29850 100644
--- a/src/sync/atomic/value.go
+++ b/src/sync/atomic/value.go
@@ -5,6 +5,7 @@
 package atomic
 
 import (
+    rt "internal/runtime"
     "unsafe"
 )
 
@@ -56,15 +57,15 @@ func (v *Value) Store(val any) {
             // Attempt to start first store.
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion.
-            runtime_procPin()
+            rt.ProcPin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                runtime_procUnpin()
+                rt.ProcUnpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, vlp.data)
             StorePointer(&vp.typ, vlp.typ)
-            runtime_procUnpin()
+            rt.ProcUnpin()
             return
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -100,15 +101,15 @@ func (v *Value) Swap(new any) (old any) {
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion; and so that
             // GC does not see the fake type accidentally.
-            runtime_procPin()
+            rt.ProcPin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                runtime_procUnpin()
+                rt.ProcUnpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, np.data)
             StorePointer(&vp.typ, np.typ)
-            runtime_procUnpin()
+            rt.ProcUnpin()
             return nil
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -152,15 +153,15 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion; and so that
             // GC does not see the fake type accidentally.
-            runtime_procPin()
+            rt.ProcPin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                runtime_procUnpin()
+                rt.ProcUnpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, np.data)
             StorePointer(&vp.typ, np.typ)
-            runtime_procUnpin()
+            rt.ProcUnpin()
             return true
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -188,7 +189,3 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
         return CompareAndSwapPointer(&vp.data, data, np.data)
     }
 }
-
-// Disable/enable preemption, implemented in runtime.
-func runtime_procPin() int
-func runtime_procUnpin()
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index b55cecd987dd07..a3d86ea6ebda22 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -4,11 +4,13 @@
 
 package sync
 
+import "internal/runtime"
+
 // Export for testing.
 var Runtime_Semacquire = runtime_Semacquire
 var Runtime_Semrelease = runtime_Semrelease
-var Runtime_procPin = runtime_procPin
-var Runtime_procUnpin = runtime_procUnpin
+var Runtime_procPin = runtime.ProcPin
+var Runtime_procUnpin = runtime.ProcUnpin
 
 // poolDequeue testing.
 type PoolDequeue interface {
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 9214bf6e34722a..6a7c820e1af5d3 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -6,6 +6,7 @@ package sync
 import (
     "internal/race"
+    rt "internal/runtime"
     "runtime"
     "sync/atomic"
     "unsafe"
 )
@@ -112,7 +113,7 @@ func (p *Pool) Put(x any) {
     } else {
         l.shared.pushHead(x)
     }
-    runtime_procUnpin()
+    rt.ProcUnpin()
     if race.Enabled {
         race.Enable()
     }
@@ -142,7 +143,7 @@ func (p *Pool) Get() any {
             x = p.getSlow(pid)
         }
     }
-    runtime_procUnpin()
+    rt.ProcUnpin()
     if race.Enabled {
         race.Enable()
         if x != nil {
@@ -205,7 +206,7 @@ func (p *Pool) pin() (*poolLocal, int) {
         panic("nil Pool")
     }
 
-    pid := runtime_procPin()
+    pid := rt.ProcPin()
     // In pinSlow we store to local and then to localSize, here we load in opposite order.
     // Since we've disabled preemption, GC cannot happen in between.
    // Thus here we must observe local at least as large localSize.
@@ -221,10 +222,10 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
     // Retry under the mutex.
     // Can not lock the mutex while pinned.
-    runtime_procUnpin()
+    rt.ProcUnpin()
     allPoolsMu.Lock()
     defer allPoolsMu.Unlock()
-    pid := runtime_procPin()
+    pid := rt.ProcPin()
     // poolCleanup won't be called while we are pinned.
     s := p.localSize
     l := p.local
@@ -292,8 +293,6 @@ func indexLocal(l unsafe.Pointer, i int) *poolLocal {
 
 // Implemented in runtime.
 func runtime_registerPoolCleanup(cleanup func())
-func runtime_procPin() int
-func runtime_procUnpin()
 
 // The below are implemented in internal/runtime/atomic and the
 // compiler also knows to intrinsify the symbol we linkname into this

From 302dfbdcded0d1b98c8b58e64038db6301d00984 Mon Sep 17 00:00:00 2001
From: qiulaidongfeng <2645477756@qq.com>
Date: Sat, 20 Apr 2024 18:13:19 +0800
Subject: [PATCH 2/3] internal/runtime: add ProcPin and ProcUnpin

Change-Id: I3f01bb25a178f04b766ee024c80665ea0976c51b
---
 src/internal/runtime/proc.go | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 src/internal/runtime/proc.go

diff --git a/src/internal/runtime/proc.go b/src/internal/runtime/proc.go
new file mode 100644
index 00000000000000..b6b6c6e11d3bde
--- /dev/null
+++ b/src/internal/runtime/proc.go
@@ -0,0 +1,15 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe"
+
+//go:linkname ProcPin runtime.procPin
+//go:nosplit
+func ProcPin() int
+
+//go:linkname ProcUnpin runtime.procUnpin
+//go:nosplit
+func ProcUnpin()

From aecf47c9f779f9af21f3e77c848123517a862645 Mon Sep 17 00:00:00 2001
From: qiulaidongfeng <2645477756@qq.com>
Date: Sun, 21 Apr 2024 11:14:23 +0800
Subject: [PATCH 3/3] all: move ProcPin and ProcUnpin to internal/runtime/proc

Change-Id: I9821dca4cc328c6c57bba6516da795ae6ccb5bc1
---
 src/go/build/deps_test.go               |  4 ++--
 src/internal/runtime/{ => proc}/proc.go | 10 +++++-----
 src/sync/atomic/value.go                | 20 ++++++++++----------
 src/sync/export_test.go                 |  6 +++---
 src/sync/pool.go                        | 12 ++++++------
 5 files changed, 26 insertions(+), 26 deletions(-)
 rename src/internal/runtime/{ => proc}/proc.go (61%)

diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 5d429915d5914e..ceaf3d54b78a97 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -50,7 +50,7 @@ var depsRules = `
     unicode/utf8, unicode/utf16, unicode,
     unsafe;
 
-    unsafe < internal/runtime;
+    unsafe < internal/runtime/proc;
 
     # These packages depend only on internal/goarch and unsafe.
     internal/goarch, unsafe
     < internal/abi, internal/chacha8rand;
@@ -67,7 +67,7 @@ var depsRules = `
     internal/godebugs,
     internal/goexperiment,
     internal/goos,
-    internal/runtime
+    internal/runtime/proc
     < internal/bytealg
     < internal/itoa
     < internal/unsafeheader
diff --git a/src/internal/runtime/proc.go b/src/internal/runtime/proc/proc.go
similarity index 61%
rename from src/internal/runtime/proc.go
rename to src/internal/runtime/proc/proc.go
index b6b6c6e11d3bde..1611437c6dbb36 100644
--- a/src/internal/runtime/proc.go
+++ b/src/internal/runtime/proc/proc.go
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package runtime
+package proc
 
 import _ "unsafe"
 
-//go:linkname ProcPin runtime.procPin
+//go:linkname Pin runtime.procPin
 //go:nosplit
-func ProcPin() int
+func Pin() int
 
-//go:linkname ProcUnpin runtime.procUnpin
+//go:linkname Unpin runtime.procUnpin
 //go:nosplit
-func ProcUnpin()
+func Unpin()
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
index c90cec33f29850..2445a180c40751 100644
--- a/src/sync/atomic/value.go
+++ b/src/sync/atomic/value.go
@@ -5,7 +5,7 @@
 package atomic
 
 import (
-    rt "internal/runtime"
+    "internal/runtime/proc"
     "unsafe"
 )
 
@@ -57,15 +57,15 @@ func (v *Value) Store(val any) {
             // Attempt to start first store.
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion.
-            rt.ProcPin()
+            proc.Pin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                rt.ProcUnpin()
+                proc.Unpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, vlp.data)
             StorePointer(&vp.typ, vlp.typ)
-            rt.ProcUnpin()
+            proc.Unpin()
             return
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -101,15 +101,15 @@ func (v *Value) Swap(new any) (old any) {
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion; and so that
             // GC does not see the fake type accidentally.
-            rt.ProcPin()
+            proc.Pin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                rt.ProcUnpin()
+                proc.Unpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, np.data)
             StorePointer(&vp.typ, np.typ)
-            rt.ProcUnpin()
+            proc.Unpin()
             return nil
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -153,15 +153,15 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
             // Disable preemption so that other goroutines can use
             // active spin wait to wait for completion; and so that
             // GC does not see the fake type accidentally.
-            rt.ProcPin()
+            proc.Pin()
             if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-                rt.ProcUnpin()
+                proc.Unpin()
                 continue
             }
             // Complete first store.
             StorePointer(&vp.data, np.data)
             StorePointer(&vp.typ, np.typ)
-            rt.ProcUnpin()
+            proc.Unpin()
             return true
         }
         if typ == unsafe.Pointer(&firstStoreInProgress) {
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index a3d86ea6ebda22..6f5ef0fe031c4c 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -4,13 +4,13 @@
 
 package sync
 
-import "internal/runtime"
+import "internal/runtime/proc"
 
 // Export for testing.
 var Runtime_Semacquire = runtime_Semacquire
 var Runtime_Semrelease = runtime_Semrelease
-var Runtime_procPin = runtime.ProcPin
-var Runtime_procUnpin = runtime.ProcUnpin
+var Runtime_procPin = proc.Pin
+var Runtime_procUnpin = proc.Unpin
 
 // poolDequeue testing.
 type PoolDequeue interface {
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 6a7c820e1af5d3..1fd16bf13682d9 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -6,7 +6,7 @@ package sync
 import (
     "internal/race"
-    rt "internal/runtime"
+    "internal/runtime/proc"
     "runtime"
     "sync/atomic"
     "unsafe"
 )
@@ -113,7 +113,7 @@ func (p *Pool) Put(x any) {
     } else {
         l.shared.pushHead(x)
     }
-    rt.ProcUnpin()
+    proc.Unpin()
     if race.Enabled {
         race.Enable()
     }
@@ -143,7 +143,7 @@ func (p *Pool) Get() any {
             x = p.getSlow(pid)
         }
     }
-    rt.ProcUnpin()
+    proc.Unpin()
     if race.Enabled {
         race.Enable()
         if x != nil {
@@ -206,7 +206,7 @@ func (p *Pool) pin() (*poolLocal, int) {
         panic("nil Pool")
     }
 
-    pid := rt.ProcPin()
+    pid := proc.Pin()
     // In pinSlow we store to local and then to localSize, here we load in opposite order.
     // Since we've disabled preemption, GC cannot happen in between.
     // Thus here we must observe local at least as large localSize.
@@ -222,10 +222,10 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
     // Retry under the mutex.
     // Can not lock the mutex while pinned.
-    rt.ProcUnpin()
+    proc.Unpin()
     allPoolsMu.Lock()
     defer allPoolsMu.Unlock()
-    pid := rt.ProcPin()
+    pid := proc.Pin()
     // poolCleanup won't be called while we are pinned.
     s := p.localSize
     l := p.local
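
Note on the new API: the wrappers deleted from src/runtime/proc.go pushed the symbols out of the runtime with //go:linkname; the new internal/runtime/proc package instead pulls them in, declaring Pin and Unpin without bodies and binding them to the unexported runtime.procPin and runtime.procUnpin, so packages that sit below runtime in the dependency graph can disable preemption and learn the current P's id. The following is a minimal sketch of how a standard-library caller might use the final API from PATCH 3/3; the package name, the fixed-size perP table, and bump are hypothetical and only illustrate the pin/update/unpin pattern that sync.Pool and sync/atomic.Value rely on, not code from this series.

package counters // hypothetical std-internal package, for illustration only

import "internal/runtime/proc" // importable only from within the standard library

// perP holds one slot per P. A fixed size is a simplification; real
// callers such as sync.Pool size their per-P state dynamically.
var perP [256]uint64

// bump increments the slot owned by the current P. proc.Pin disables
// preemption and returns the current P's id, so no other goroutine can
// run on this P and touch the slot until proc.Unpin is called.
func bump() {
    pid := proc.Pin()
    if pid < len(perP) {
        perP[pid]++
    }
    proc.Unpin()
}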