@@ -9,6 +9,7 @@
 package runtime

 import (
+	"internal/abi"
 	"internal/cpu"
 	"internal/goarch"
 	"internal/runtime/atomic"
@@ -246,6 +247,10 @@ type mheap struct {
 	// the lock.
 	cleanupID uint64

+	_ cpu.CacheLinePad
+
+	immortalWeakHandles immortalWeakHandleMap
+
 	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
 }

@@ -2138,7 +2143,15 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
 	// even if it's just some random span.
 	span := spanOfHeap(p)
 	if span == nil {
-		// The span probably got swept and released.
+		// If it's immortal, then just return the pointer.
+		//
+		// Stay non-preemptible so the GC can't see us convert this potentially
+		// completely bogus value to an unsafe.Pointer.
+		if isGoPointerWithoutSpan(unsafe.Pointer(p)) {
+			releasem(mp)
+			return unsafe.Pointer(p)
+		}
+		// It's heap-allocated, so the span probably just got swept and released.
 		releasem(mp)
 		return nil
 	}
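Illustrative user-level sketch (not part of the diff): as I read it, this branch, together with the getWeakHandle fallback in the next hunk, lets a weak pointer whose target lives in linker-allocated memory, such as a package-level variable, be created and resolved, and it never observes the target as collected. The sketch assumes the standard library weak package (weak.Make, Pointer.Value) routes through these runtime entry points; the variable name is made up.

	package main

	import (
		"fmt"
		"weak"
	)

	// A package-level variable is linker-allocated (immortal), so
	// spanOfHeap never finds a span for &global.
	var global = 42

	func main() {
		// With the immortal-handle map, the runtime can hand out a
		// weak handle for &global even though it is not heap-allocated.
		wp := weak.Make(&global)

		// Immortal memory is never freed, so the weak pointer never
		// goes nil; makeStrongFromWeak returns the pointer directly.
		if p := wp.Value(); p != nil {
			fmt.Println(*p) // 42
		}
	}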
@@ -2275,6 +2288,9 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 	span := spanOfHeap(uintptr(p))
 	if span == nil {
+		if isGoPointerWithoutSpan(p) {
+			return mheap_.immortalWeakHandles.getOrAdd(uintptr(p))
+		}
 		throw("getWeakHandle on invalid pointer")
 	}

@@ -2303,6 +2319,80 @@ func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
 	return handle
 }

+type immortalWeakHandleMap struct {
+	root atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+}
+
+// immortalWeakHandle is a node in a lock-free append-only hash-trie.
+//
+// Key features:
+//   - 2-ary trie. Child nodes are indexed by the highest bit (remaining) of the hash of the address.
+//   - New nodes are placed at the first empty level encountered.
+//   - When the first child is added to a node, the existing value is not moved into a child.
+//     This means that we must check the value at each level, not just at the leaf.
+//   - No deletion or rebalancing.
+//   - Intentionally devolves into a linked list on hash collisions (the hash bits will all
+//     get shifted out during iteration, and new nodes will just be appended to the 0th child).
+type immortalWeakHandle struct {
+	_ sys.NotInHeap
+
+	children [2]atomic.UnsafePointer // *immortalWeakHandle (can't use generics because it's notinheap)
+	ptr      uintptr                 // &ptr is the weak handle
+}
+
+// handle returns a canonical weak handle.
+func (h *immortalWeakHandle) handle() *atomic.Uintptr {
+	// N.B. Since we just need an *atomic.Uintptr that never changes, we can trivially
+	// reference ptr to save on some memory in immortalWeakHandle and avoid extra atomics
+	// in getOrAdd.
+	return (*atomic.Uintptr)(unsafe.Pointer(&h.ptr))
+}
+
+// getOrAdd introduces p, which must be a pointer to immortal memory (for example, a linker-allocated
+// object), and returns a weak handle. The weak handle will never become nil.
+func (tab *immortalWeakHandleMap) getOrAdd(p uintptr) *atomic.Uintptr {
+	var newNode *immortalWeakHandle
+	m := &tab.root
+	hash := memhash(abi.NoEscape(unsafe.Pointer(&p)), 0, goarch.PtrSize)
+	hashIter := hash
+	for {
+		n := (*immortalWeakHandle)(m.Load())
+		if n == nil {
+			// Try to insert a new map node. We may end up discarding
+			// this node if we fail to insert because it turns out the
+			// value is already in the map.
+			//
+			// The discard will only happen if two threads race on inserting
+			// the same value. Both might create nodes, but only one will
+			// succeed on insertion. If two threads race to insert two
+			// different values, then both nodes will *always* get inserted,
+			// because the equality checking below will always fail.
+			//
+			// Performance note: contention on insertion is likely to be
+			// higher for small maps, but since this data structure is
+			// append-only, either the map stays small because there isn't
+			// much activity, or the map gets big and races to insert on
+			// the same node are much less likely.
+			if newNode == nil {
+				newNode = (*immortalWeakHandle)(persistentalloc(unsafe.Sizeof(immortalWeakHandle{}), goarch.PtrSize, &memstats.gcMiscSys))
+				newNode.ptr = p
+			}
+			if m.CompareAndSwapNoWB(nil, unsafe.Pointer(newNode)) {
+				return newNode.handle()
+			}
+			// Reload n. Because pointers are only stored once,
+			// we must have lost the race, and therefore n is not nil
+			// anymore.
+			n = (*immortalWeakHandle)(m.Load())
+		}
+		if n.ptr == p {
+			return n.handle()
+		}
+		m = &n.children[hashIter>>(8*goarch.PtrSize-1)]
+		hashIter <<= 1
+	}
+}
+
 // The described object is being heap profiled.
 type specialprofile struct {
 	_ sys.NotInHeap
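Aside (not part of the diff): the trie shape described in the comments above can be hard to visualize. Below is a simplified, heap-allocated sketch of the same append-only 2-ary hash-trie, written with sync/atomic and hash/maphash instead of the runtime's notinheap nodes, persistentalloc, memhash, and no-write-barrier CAS. The names (trie, node, getOrAdd, key) are invented for illustration; only the traversal and insertion logic mirrors the code above.

	package main

	import (
		"fmt"
		"hash/maphash"
		"sync/atomic"
	)

	var seed = maphash.MakeSeed()

	// node plays the role of immortalWeakHandle: an append-only trie node
	// whose children are indexed by the top remaining bit of the key's hash.
	type node struct {
		children [2]atomic.Pointer[node]
		key      uintptr
	}

	type trie struct {
		root atomic.Pointer[node]
	}

	// getOrAdd returns the node for key, inserting one if needed. As in the
	// runtime version, the value is checked at every level (values are never
	// pushed down into children), and a new node lands in the first empty
	// slot found on the path.
	func (t *trie) getOrAdd(key uintptr) *node {
		var fresh *node
		m := &t.root
		hashIter := maphash.Comparable(seed, key)
		for {
			n := m.Load()
			if n == nil {
				if fresh == nil {
					fresh = &node{key: key}
				}
				if m.CompareAndSwap(nil, fresh) {
					return fresh
				}
				// Lost the race: someone else stored a node here. Reload it.
				n = m.Load()
			}
			if n.key == key {
				return n
			}
			// Descend by the highest remaining hash bit, then shift it out.
			m = &n.children[hashIter>>63]
			hashIter <<= 1
		}
	}

	func main() {
		var t trie
		a := t.getOrAdd(0x1000)
		b := t.getOrAdd(0x1000)
		c := t.getOrAdd(0x2000)
		fmt.Println(a == b, a == c) // true false
	}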