diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 59546698747634..ceaf3d54b78a97 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -50,6 +50,8 @@ var depsRules = `
 	unicode/utf8, unicode/utf16, unicode,
 	unsafe;
 
+	unsafe < internal/runtime/proc;
+
 	# These packages depend only on internal/goarch and unsafe.
 	internal/goarch, unsafe
 	< internal/abi, internal/chacha8rand;
@@ -64,7 +66,8 @@ var depsRules = `
 	internal/goarch,
 	internal/godebugs,
 	internal/goexperiment,
-	internal/goos
+	internal/goos,
+	internal/runtime/proc
 	< internal/bytealg
 	< internal/itoa
 	< internal/unsafeheader
diff --git a/src/internal/runtime/proc/proc.go b/src/internal/runtime/proc/proc.go
new file mode 100644
index 00000000000000..1611437c6dbb36
--- /dev/null
+++ b/src/internal/runtime/proc/proc.go
@@ -0,0 +1,15 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proc
+
+import _ "unsafe"
+
+//go:linkname Pin runtime.procPin
+//go:nosplit
+func Pin() int
+
+//go:linkname Unpin runtime.procUnpin
+//go:nosplit
+func Unpin()
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index cb5a80455df091..c284572a13563f 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -6921,42 +6921,6 @@ func procUnpin() {
 	gp.m.locks--
 }
 
-//go:linkname sync_runtime_procPin sync.runtime_procPin
-//go:nosplit
-func sync_runtime_procPin() int {
-	return procPin()
-}
-
-//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
-//go:nosplit
-func sync_runtime_procUnpin() {
-	procUnpin()
-}
-
-//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
-//go:nosplit
-func sync_atomic_runtime_procPin() int {
-	return procPin()
-}
-
-//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
-//go:nosplit
-func sync_atomic_runtime_procUnpin() {
-	procUnpin()
-}
-
-//go:linkname internal_weak_runtime_procPin internal/weak.runtime_procPin
-//go:nosplit
-func internal_weak_runtime_procPin() int {
-	return procPin()
-}
-
-//go:linkname internal_weak_runtime_procUnpin internal/weak.runtime_procUnpin
-//go:nosplit
-func internal_weak_runtime_procUnpin() {
-	procUnpin()
-}
-
 // Active spinning for sync.Mutex.
 //
 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go
index 0cfc5f9496c810..2445a180c40751 100644
--- a/src/sync/atomic/value.go
+++ b/src/sync/atomic/value.go
@@ -5,6 +5,7 @@
 package atomic
 
 import (
+	"internal/runtime/proc"
 	"unsafe"
 )
 
@@ -56,15 +57,15 @@ func (v *Value) Store(val any) {
 			// Attempt to start first store.
 			// Disable preemption so that other goroutines can use
 			// active spin wait to wait for completion.
-			runtime_procPin()
+			proc.Pin()
 			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-				runtime_procUnpin()
+				proc.Unpin()
 				continue
 			}
 			// Complete first store.
 			StorePointer(&vp.data, vlp.data)
 			StorePointer(&vp.typ, vlp.typ)
-			runtime_procUnpin()
+			proc.Unpin()
 			return
 		}
 		if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -100,15 +101,15 @@ func (v *Value) Swap(new any) (old any) {
 			// Disable preemption so that other goroutines can use
 			// active spin wait to wait for completion; and so that
 			// GC does not see the fake type accidentally.
-			runtime_procPin()
+			proc.Pin()
 			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-				runtime_procUnpin()
+				proc.Unpin()
 				continue
 			}
 			// Complete first store.
 			StorePointer(&vp.data, np.data)
 			StorePointer(&vp.typ, np.typ)
-			runtime_procUnpin()
+			proc.Unpin()
 			return nil
 		}
 		if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -152,15 +153,15 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
 			// Disable preemption so that other goroutines can use
 			// active spin wait to wait for completion; and so that
 			// GC does not see the fake type accidentally.
-			runtime_procPin()
+			proc.Pin()
 			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
-				runtime_procUnpin()
+				proc.Unpin()
 				continue
 			}
 			// Complete first store.
 			StorePointer(&vp.data, np.data)
 			StorePointer(&vp.typ, np.typ)
-			runtime_procUnpin()
+			proc.Unpin()
 			return true
 		}
 		if typ == unsafe.Pointer(&firstStoreInProgress) {
@@ -188,7 +189,3 @@ func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
 		return CompareAndSwapPointer(&vp.data, data, np.data)
 	}
 }
-
-// Disable/enable preemption, implemented in runtime.
-func runtime_procPin() int
-func runtime_procUnpin()
diff --git a/src/sync/export_test.go b/src/sync/export_test.go
index b55cecd987dd07..6f5ef0fe031c4c 100644
--- a/src/sync/export_test.go
+++ b/src/sync/export_test.go
@@ -4,11 +4,13 @@
 
 package sync
 
+import "internal/runtime/proc"
+
 // Export for testing.
 var Runtime_Semacquire = runtime_Semacquire
 var Runtime_Semrelease = runtime_Semrelease
-var Runtime_procPin = runtime_procPin
-var Runtime_procUnpin = runtime_procUnpin
+var Runtime_procPin = proc.Pin
+var Runtime_procUnpin = proc.Unpin
 
 // poolDequeue testing.
 type PoolDequeue interface {
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 9214bf6e34722a..1fd16bf13682d9 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -6,6 +6,7 @@ package sync
 
 import (
 	"internal/race"
+	"internal/runtime/proc"
 	"runtime"
 	"sync/atomic"
 	"unsafe"
@@ -112,7 +113,7 @@ func (p *Pool) Put(x any) {
 	} else {
 		l.shared.pushHead(x)
 	}
-	runtime_procUnpin()
+	proc.Unpin()
 	if race.Enabled {
 		race.Enable()
 	}
@@ -142,7 +143,7 @@ func (p *Pool) Get() any {
 			x = p.getSlow(pid)
 		}
 	}
-	runtime_procUnpin()
+	proc.Unpin()
 	if race.Enabled {
 		race.Enable()
 		if x != nil {
@@ -205,7 +206,7 @@ func (p *Pool) pin() (*poolLocal, int) {
 		panic("nil Pool")
 	}
 
-	pid := runtime_procPin()
+	pid := proc.Pin()
 	// In pinSlow we store to local and then to localSize, here we load in opposite order.
 	// Since we've disabled preemption, GC cannot happen in between.
 	// Thus here we must observe local at least as large localSize.
@@ -221,10 +222,10 @@ func (p *Pool) pinSlow() (*poolLocal, int) {
 	// Retry under the mutex.
 	// Can not lock the mutex while pinned.
-	runtime_procUnpin()
+	proc.Unpin()
 	allPoolsMu.Lock()
 	defer allPoolsMu.Unlock()
-	pid := runtime_procPin()
+	pid := proc.Pin()
 	// poolCleanup won't be called while we are pinned.
 	s := p.localSize
 	l := p.local
@@ -292,8 +293,6 @@ func indexLocal(l unsafe.Pointer, i int) *poolLocal {
 
 // Implemented in runtime.
 func runtime_registerPoolCleanup(cleanup func())
-func runtime_procPin() int
-func runtime_procUnpin()
 
 // The below are implemented in internal/runtime/atomic and the
 // compiler also knows to intrinsify the symbol we linkname into this