Skip to content

Commit 6d7cb59

Browse files
mknyszek authored and gopherbot committed
weak: accept linker-allocated objects to Make
Currently Make panics when passed a linker-allocated object. This is inconsistent with both runtime.AddCleanup and runtime.SetFinalizer. Not panicking in this case is important so that all pointers can be treated equally by these APIs. Libraries should not have to worry where a pointer came from to still make weak pointers.

Supporting this behavior is a bit complex for weak pointers versus finalizers and cleanups. For the latter two, it means a function is never called, so we can just drop everything on the floor. For weak pointers, we still need to produce pointers that compare as per the API.

To do this, copy the tiny lock-free trace map implementation and use it to store weak handles for "immortal" objects. These paths in the runtime should be rare, so it's OK if it's not incredibly fast, but we should keep the memory footprint relatively low (at least not have it be any worse than specials), so this change tweaks the map implementation a little bit to ensure that's the case.

Fixes #71726.

Change-Id: I0c87c9d90656d81659ac8d70f511773d0093ce27
Reviewed-on: https://go-review.googlesource.com/c/go/+/649460
Reviewed-by: Cherry Mui <[email protected]>
Auto-Submit: Michael Knyszek <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
1 parent 51f3ca3 commit 6d7cb59

File tree

2 files changed

+130
-1
lines changed

2 files changed

+130
-1
lines changed

src/runtime/mheap.go

Lines changed: 91 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
package runtime
1010

1111
import (
12+
"internal/abi"
1213
"internal/cpu"
1314
"internal/goarch"
1415
"internal/runtime/atomic"
@@ -246,6 +247,10 @@ type mheap struct {
246247
// the lock.
247248
cleanupID uint64
248249

250+
_ cpu.CacheLinePad
251+
252+
immortalWeakHandles immortalWeakHandleMap
253+
249254
unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
250255
}
251256

@@ -2138,7 +2143,15 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
21382143
// even if it's just some random span.
21392144
span := spanOfHeap(p)
21402145
if span == nil {
2141-
// The span probably got swept and released.
2146+
// If it's immortal, then just return the pointer.
2147+
//
2148+
// Stay non-preemptible so the GC can't see us convert this potentially
2149+
// completely bogus value to an unsafe.Pointer.
2150+
if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
2151+
releasem(mp)
2152+
return unsafe.Pointer(p)
2153+
}
2154+
// It's heap-allocated, so the span probably just got swept and released.
21422155
releasem(mp)
21432156
return nil
21442157
}
@@ -2275,6 +2288,9 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
22752288
func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
22762289
span := spanOfHeap(uintptr(p))
22772290
if span == nil {
2291+
if isGoPointerWithoutSpan(p) {
2292+
return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
2293+
}
22782294
throw("getWeakHandle on invalid pointer")
22792295
}
22802296

@@ -2303,6 +2319,80 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
23032319
return handle
23042320
}
23052321

2322+
type immortalWeakHandleMap struct {
2323+
root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
2324+
}
2325+
2326+
// immortalWeakHandle is a lock-free append-only hash-trie.
2327+
//
2328+
// Key features:
2329+
// - 2-ary trie. Child nodes are indexed by the highest bit (remaining) of the hash of the address.
2330+
// - New nodes are placed at the first empty level encountered.
2331+
// - When the first child is added to a node, the existing value is not moved into a child.
2332+
// This means that we must check the value at each level, not just at the leaf.
2333+
// - No deletion or rebalancing.
2334+
// - Intentionally devolves into a linked list on hash collisions (the hash bits will all
2335+
// get shifted out during iteration, and new nodes will just be appended to the 0th child).
2336+
type immortalWeakHandle struct {
2337+
_ sys.NotInHeap
2338+
2339+
children [2]atomic.UnsafePointer // *immortalObjectMapNode (can't use generics because it's notinheap)
2340+
ptr uintptr // &ptr is the weak handle
2341+
}
2342+
2343+
// handle returns a canonical weak handle.
2344+
func (h *immortalWeakHandle) handle() *atomic.Uintptr {
2345+
// N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
2346+
// reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
2347+
// in getOrAdd.
2348+
return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
2349+
}
2350+
2351+
// getOrAdd introduces p, which must be a pointer to immortal memory (for example, a linker-allocated
2352+
// object) and returns a weak handle. The weak handle will never become nil.
2353+
func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
2354+
var newNode *immortalWeakHandle
2355+
m := &tab.root
2356+
hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
2357+
hashIter := hash
2358+
for {
2359+
n := (*immortalWeakHandle)(m.Load())
2360+
if n == nil {
2361+
// Try to insert a new map node. We may end up discarding
2362+
// this node if we fail to insert because it turns out the
2363+
// value is already in the map.
2364+
//
2365+
// The discard will only happen if two threads race on inserting
2366+
// the same value. Both might create nodes, but only one will
2367+
// succeed on insertion. If two threads race to insert two
2368+
// different values, then both nodes will *always* get inserted,
2369+
// because the equality checking below will always fail.
2370+
//
2371+
// Performance note: contention on insertion is likely to be
2372+
// higher for small maps, but since this data structure is
2373+
// append-only, either the map stays small because there isn't
2374+
// much activity, or the map gets big and races to insert on
2375+
// the same node are much less likely.
2376+
if newNode == nil {
2377+
newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
2378+
newNode.ptr = p
2379+
}
2380+
if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
2381+
return newNode.handle()
2382+
}
2383+
// Reload n. Because pointers are only stored once,
2384+
// we must have lost the race, and therefore n is not nil
2385+
// anymore.
2386+
n = (*immortalWeakHandle)(m.Load())
2387+
}
2388+
if n.ptr == p {
2389+
return n.handle()
2390+
}
2391+
m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
2392+
hashIter <<= 1
2393+
}
2394+
}
2395+
23062396
// The described object is being heap profiled.
23072397
type specialprofile struct {
23082398
_ sys.NotInHeap

src/weak/pointer_test.go

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ type T struct {
2020
// in a tiny block making the tests in this package flaky.
2121
t *T
2222
a int
23+
b int
2324
}
2425

2526
func TestPointer(t *testing.T) {
@@ -252,3 +253,41 @@ func TestIssue70739(t *testing.T) {
252253
t.Fatal("failed to look up special and made duplicate weak handle; see issue #70739")
253254
}
254255
}
256+
257+
var immortal T
258+
259+
func TestImmortalPointer(t *testing.T) {
260+
w0 := weak.Make(&immortal)
261+
if weak.Make(&immortal) != w0 {
262+
t.Error("immortal weak pointers to the same pointer not equal")
263+
}
264+
w0a := weak.Make(&immortal.a)
265+
w0b := weak.Make(&immortal.b)
266+
if w0a == w0b {
267+
t.Error("separate immortal pointers (same object) have the same pointer")
268+
}
269+
if got, want := w0.Value(), &immortal; got != want {
270+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
271+
}
272+
if got, want := w0a.Value(), &immortal.a; got != want {
273+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
274+
}
275+
if got, want := w0b.Value(), &immortal.b; got != want {
276+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
277+
}
278+
279+
// Run a couple of cycles.
280+
runtime.GC()
281+
runtime.GC()
282+
283+
// All immortal weak pointers should never get cleared.
284+
if got, want := w0.Value(), &immortal; got != want {
285+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
286+
}
287+
if got, want := w0a.Value(), &immortal.a; got != want {
288+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
289+
}
290+
if got, want := w0b.Value(), &immortal.b; got != want {
291+
t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got)
292+
}
293+
}

0 commit comments

Comments (0)