Commit 78bb91c

runtime: remove npreleased in favor of boolean

This change removes npreleased from mspan since spans may now either be
scavenged or not scavenged; how many of their pages were actually
scavenged doesn't matter. It also saves some space in mspan overhead,
as the boolean fits into what would otherwise be struct padding.

For #14045.

Change-Id: I63f25a4d98658f5fe21c6a466fc38c59bfc5d0f5
Reviewed-on: https://go-review.googlesource.com/c/139737
Run-TryBot: Michael Knyszek <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
1 parent b46bf02 commit 78bb91c

File tree

1 file changed: +47 -30 lines

src/runtime/mheap.go (+47 -30)

@@ -329,9 +329,9 @@ type mspan struct {
 	needzero    uint8    // needs to be zeroed before allocation
 	divShift    uint8    // for divide by elemsize - divMagic.shift
 	divShift2   uint8    // for divide by elemsize - divMagic.shift2
+	scavenged   bool     // whether this span has had its pages released to the OS
 	elemsize    uintptr  // computed from sizeclass or from npages
 	unusedsince int64    // first time spotted by gc in mspanfree state
-	npreleased  uintptr  // number of pages released to the os
 	limit       uintptr  // end of data in span
 	speciallock mutex    // guards specials list
 	specials    *special // linked list of special records sorted by offset.
@@ -350,34 +350,45 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
-func (s *mspan) scavenge() uintptr {
+// physPageBounds returns the start and end of the span
+// rounded in to the physical page size.
+func (s *mspan) physPageBounds() (uintptr, uintptr) {
 	start := s.base()
 	end := start + s.npages<<_PageShift
 	if physPageSize > _PageSize {
-		// We can only release pages in
-		// physPageSize blocks, so round start
-		// and end in. (Otherwise, madvise
-		// will round them *out* and release
-		// more memory than we want.)
+		// Round start and end in.
 		start = (start + physPageSize - 1) &^ (physPageSize - 1)
 		end &^= physPageSize - 1
-		if end <= start {
-			// start and end don't span a
-			// whole physical page.
-			return 0
-		}
 	}
-	len := end - start
-	released := len - (s.npreleased << _PageShift)
-	if physPageSize > _PageSize && released == 0 {
+	return start, end
+}
+
+func (s *mspan) scavenge() uintptr {
+	// start and end must be rounded in, otherwise madvise
+	// will round them *out* and release more memory
+	// than we want.
+	start, end := s.physPageBounds()
+	if end <= start {
+		// start and end don't span a whole physical page.
 		return 0
 	}
+	released := end - start
 	memstats.heap_released += uint64(released)
-	s.npreleased = len >> _PageShift
-	sysUnused(unsafe.Pointer(start), len)
+	s.scavenged = true
+	sysUnused(unsafe.Pointer(start), released)
 	return released
 }
 
+// released returns the number of bytes in this span
+// which were returned back to the OS.
+func (s *mspan) released() uintptr {
+	if !s.scavenged {
+		return 0
+	}
+	start, end := s.physPageBounds()
+	return end - start
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
@@ -873,10 +884,18 @@ HaveSpan:
 	if s.npages < npage {
 		throw("MHeap_AllocLocked - bad npages")
 	}
-	if s.npreleased > 0 {
+	if s.scavenged {
+		// sysUsed all the pages that are actually available
+		// in the span, but only drop heap_released by the
+		// actual amount of pages released. This helps ensure
+		// that heap_released only increments and decrements
+		// by the same amounts. It's also fine, because any
+		// of the pages outside start and end wouldn't have been
+		// sysUnused in the first place.
 		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
-		memstats.heap_released -= uint64(s.npreleased << _PageShift)
-		s.npreleased = 0
+		start, end := s.physPageBounds()
+		memstats.heap_released -= uint64(end - start)
+		s.scavenged = false
 	}
 
 	if s.npages > npage {
@@ -1019,8 +1038,8 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 
 	// We scavenge s at the end after coalescing if s or anything
 	// it merged with is marked scavenged.
-	needsScavenge := s.npreleased != 0
-	prescavenged := s.npreleased * pageSize // number of bytes already scavenged.
+	needsScavenge := s.scavenged
+	prescavenged := s.released() // number of bytes already scavenged.
 
 	// Coalesce with earlier, later spans.
 	if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
@@ -1029,15 +1048,14 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += before.npages
 		s.needzero |= before.needzero
 		h.setSpan(before.base(), s)
-		s.npreleased += before.npreleased // absorb released pages
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
-		if before.npreleased == 0 {
+		if !before.scavenged {
 			h.free.removeSpan(before)
 		} else {
 			h.scav.removeSpan(before)
 			needsScavenge = true
-			prescavenged += before.npreleased * pageSize
+			prescavenged += before.released()
 		}
 		before.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(before))
@@ -1048,14 +1066,13 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += after.npages
 		s.needzero |= after.needzero
 		h.setSpan(s.base()+s.npages*pageSize-1, s)
-		if after.npreleased == 0 {
+		if !after.scavenged {
 			h.free.removeSpan(after)
 		} else {
 			h.scav.removeSpan(after)
 			needsScavenge = true
-			prescavenged += after.npreleased * pageSize
+			prescavenged += after.released()
 		}
-		s.npreleased += after.npreleased
 		after.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(after))
 	}
@@ -1076,7 +1093,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	}
 
 	// Insert s into the appropriate treap.
-	if s.npreleased != 0 {
+	if s.scavenged {
 		h.scav.insert(s)
 	} else {
 		h.free.insert(s)
@@ -1157,7 +1174,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.elemsize = 0
 	span.state = mSpanDead
 	span.unusedsince = 0
-	span.npreleased = 0
+	span.scavenged = false
	span.speciallock.key = 0
 	span.specials = nil
 	span.needzero = 0
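
Below is a minimal standalone sketch of the rounding-in arithmetic that the new physPageBounds helper performs. It is not part of the commit: the fakeSpan type and the 8 KiB runtime page / 16 KiB physical page constants are illustrative assumptions, chosen only to show the case where a small, misaligned span has nothing releasable (end <= start) and the case where the whole span is releasable. Because scavenge and the allocation path both derive the same end - start value from physPageBounds, heap_released is incremented and later decremented by identical amounts, which is the invariant the new comment in the HaveSpan hunk calls out.

// physpagebounds_sketch.go
//
// Standalone illustration only; not runtime code. The real runtime reads
// the physical page size from the OS and its own page size is platform
// dependent, so the constants below are assumptions for the example.
package main

import "fmt"

const (
	pageShift    = 13              // pretend runtime page = 8 KiB
	pageSize     = 1 << pageShift
	physPageSize = 16 << 10        // pretend OS physical page = 16 KiB
)

type fakeSpan struct {
	base   uintptr // start address of the span
	npages uintptr // length in runtime pages
}

// physPageBounds mirrors the rounding in the diff: round start up and end
// down to physical-page boundaries, so releasing memory never touches
// anything outside the span.
func (s fakeSpan) physPageBounds() (uintptr, uintptr) {
	start := s.base
	end := start + s.npages<<pageShift
	if physPageSize > pageSize {
		start = (start + physPageSize - 1) &^ (physPageSize - 1)
		end &^= physPageSize - 1
	}
	return start, end
}

func main() {
	// A one-page span that begins halfway through a physical page: after
	// rounding in, end <= start, so scavenge would release nothing and
	// released() would report 0 bytes.
	small := fakeSpan{base: 0x10000 + pageSize, npages: 1}
	s, e := small.physPageBounds()
	fmt.Printf("small span: start=%#x end=%#x releasable=%d\n", s, e, int64(e)-int64(s))

	// A four-page span aligned to a physical page: all 32 KiB is
	// releasable, and this same end-start value is what heap_released is
	// incremented by in scavenge and decremented by when the span is
	// reused, keeping the accounting symmetric.
	big := fakeSpan{base: 0x20000, npages: 4}
	s, e = big.physPageBounds()
	fmt.Printf("big span:   start=%#x end=%#x releasable=%d\n", s, e, e-s)
}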
