Skip to content

Commit 287d1ec

Browse files
committed
runtime: ensure allocToCache updates searchAddr in a valid way
Currently allocToCache assumes it can move the search address past the block it allocated the cache from, which violates the property that searchAddr should always point to mapped memory (i.e. memory represented by pageAlloc.inUse). This bug was already fixed once for pageAlloc.alloc in the Go 1.14 release via CL 216697, but that change failed to take into account allocToCache. Fixes #38605. Change-Id: Id08180aa10d19dc0f9f551a1d9e327a295560dff Reviewed-on: https://go-review.googlesource.com/c/go/+/229577 Run-TryBot: Michael Knyszek <[email protected]> TryBot-Result: Gobot Gobot <[email protected]> Reviewed-by: David Chase <[email protected]>
1 parent 9a3f22b commit 287d1ec

File tree

2 files changed

+40
-6
lines changed

2 files changed

+40
-6
lines changed

src/runtime/mpagecache.go

Lines changed: 9 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -148,9 +148,14 @@ func (s *pageAlloc) allocToCache() pageCache {
148148
// Update as an allocation, but note that it's not contiguous.
149149
s.update(c.base, pageCachePages, false, true)
150150

151-
// We're always searching for the first free page, and we always know the
152-
// up to pageCache size bits will be allocated, so we can always move the
153-
// searchAddr past the cache.
154-
s.searchAddr = c.base + pageSize*pageCachePages
151+
// Set the search address to the last page represented by the cache.
152+
// Since all of the pages in this block are going to the cache, and we
153+
// searched for the first free page, we can confidently start at the
154+
// next page.
155+
//
156+
// However, s.searchAddr is not allowed to point into unmapped heap memory
157+
// unless it is maxSearchAddr, so make it the last page as opposed to
158+
// the page after.
159+
s.searchAddr = c.base + pageSize*(pageCachePages-1)
155160
return c
156161
}

src/runtime/mpagecache_test.go

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -260,12 +260,13 @@ func TestPageAllocAllocToCache(t *testing.T) {
260260
if GOOS == "openbsd" && testing.Short() {
261261
t.Skip("skipping because virtual memory is limited; see #36210")
262262
}
263-
tests := map[string]struct {
263+
type test struct {
264264
before map[ChunkIdx][]BitRange
265265
scav map[ChunkIdx][]BitRange
266266
hits []PageCache // expected base addresses and patterns
267267
after map[ChunkIdx][]BitRange
268-
}{
268+
}
269+
tests := map[string]test{
269270
"AllFree": {
270271
before: map[ChunkIdx][]BitRange{
271272
BaseChunkIdx: {},
@@ -349,6 +350,34 @@ func TestPageAllocAllocToCache(t *testing.T) {
349350
},
350351
},
351352
}
353+
if PageAlloc64Bit != 0 {
354+
const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)
355+
356+
// This test is similar to the one with the same name for
357+
// pageAlloc.alloc and serves the same purpose.
358+
// See mpagealloc_test.go for details.
359+
sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
360+
baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
361+
tests["DiscontiguousMappedSumBoundary"] = test{
362+
before: map[ChunkIdx][]BitRange{
363+
baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages - 1}},
364+
baseChunkIdx + chunkIdxBigJump: {{1, PallocChunkPages - 1}},
365+
},
366+
scav: map[ChunkIdx][]BitRange{
367+
baseChunkIdx + sumsPerPhysPage - 1: {},
368+
baseChunkIdx + chunkIdxBigJump: {},
369+
},
370+
hits: []PageCache{
371+
NewPageCache(PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-64), 1<<63, 0),
372+
NewPageCache(PageBase(baseChunkIdx+chunkIdxBigJump, 0), 1, 0),
373+
NewPageCache(0, 0, 0),
374+
},
375+
after: map[ChunkIdx][]BitRange{
376+
baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
377+
baseChunkIdx + chunkIdxBigJump: {{0, PallocChunkPages}},
378+
},
379+
}
380+
}
352381
for name, v := range tests {
353382
v := v
354383
t.Run(name, func(t *testing.T) {

0 commit comments

Comments
 (0)