@@ -351,6 +351,34 @@ func (s *mspan) layout() (size, n, total uintptr) {
 	return
 }
 
+func (s *mspan) scavenge() uintptr {
+	start := s.base()
+	end := start + s.npages<<_PageShift
+	if physPageSize > _PageSize {
+		// We can only release pages in
+		// physPageSize blocks, so round start
+		// and end in. (Otherwise, madvise
+		// will round them *out* and release
+		// more memory than we want.)
+		start = (start + physPageSize - 1) &^ (physPageSize - 1)
+		end &^= physPageSize - 1
+		if end <= start {
+			// start and end don't span a
+			// whole physical page.
+			return 0
+		}
+	}
+	len := end - start
+	released := len - (s.npreleased << _PageShift)
+	if physPageSize > _PageSize && released == 0 {
+		return 0
+	}
+	memstats.heap_released += uint64(released)
+	s.npreleased = len >> _PageShift
+	sysUnused(unsafe.Pointer(start), len)
+	return released
+}
+
 // recordspan adds a newly allocated span to h.allspans.
 //
 // This only happens the first time a span is allocated from
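The new mspan.scavenge method only hands whole physical pages back to the OS: when the physical page size exceeds the runtime's page size, start is rounded up and end is rounded down to a physPageSize boundary, so madvise cannot round them outward and release memory the span does not own. A minimal standalone sketch of that round-in arithmetic (not runtime code; the 16 KiB page size and the addresses are made-up values for illustration, and physPageSize must be a power of two for the mask trick to work):

package main

import "fmt"

func main() {
	const physPageSize = 16 << 10 // hypothetical 16 KiB physical page (power of two)

	// A span covering [start, end), aligned to 4 KiB but not to physPageSize.
	start := uintptr(0x1235000)
	end := uintptr(0x1240000)

	// Round start *up* and end *down* to physPageSize boundaries ("round in").
	alignedStart := (start + physPageSize - 1) &^ (physPageSize - 1)
	alignedEnd := end &^ (physPageSize - 1)

	if alignedEnd <= alignedStart {
		// The span does not cover even one whole physical page.
		fmt.Println("nothing to release")
		return
	}
	fmt.Printf("release [%#x, %#x): %d bytes\n", alignedStart, alignedEnd, alignedEnd-alignedStart)
	// Output: release [0x1238000, 0x1240000): 32768 bytes
}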
@@ -1087,35 +1115,12 @@ func (h *mheap) busyList(npages uintptr) *mSpanList {
 
 func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
 	s := t.spanKey
-	var sumreleased uintptr
 	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				return sumreleased
-			}
+		if released := s.scavenge(); released != 0 {
+			return released
 		}
-		len := end - start
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			return sumreleased
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
 	}
-	return sumreleased
+	return 0
 }
 
 func scavengelist(list *mSpanList, now, limit uint64) uintptr {
func scavengelist (list * mSpanList , now , limit uint64 ) uintptr {
@@ -1128,32 +1133,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
 		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
 			continue
 		}
-		start := s.base()
-		end := start + s.npages<<_PageShift
-		if physPageSize > _PageSize {
-			// We can only release pages in
-			// physPageSize blocks, so round start
-			// and end in. (Otherwise, madvise
-			// will round them *out* and release
-			// more memory than we want.)
-			start = (start + physPageSize - 1) &^ (physPageSize - 1)
-			end &^= physPageSize - 1
-			if end <= start {
-				// start and end don't span a
-				// whole physical page.
-				continue
-			}
-		}
-		len := end - start
-
-		released := len - (s.npreleased << _PageShift)
-		if physPageSize > _PageSize && released == 0 {
-			continue
-		}
-		memstats.heap_released += uint64(released)
-		sumreleased += released
-		s.npreleased = len >> _PageShift
-		sysUnused(unsafe.Pointer(start), len)
+		sumreleased += s.scavenge()
 	}
 	return sumreleased
 }
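Both call sites now delegate to s.scavenge(), and the released count it returns is computed against s.npreleased, so pages already handed back to the OS by an earlier pass are not added to memstats.heap_released twice. A rough worked example of that bookkeeping, using made-up values (the 8 KiB runtime page, i.e. a page shift of 13, is an assumption for illustration, not something fixed by this diff):

package main

import "fmt"

func main() {
	const pageShift = 13 // assumed 8 KiB runtime pages

	npages := uintptr(8)     // span length in runtime pages
	npreleased := uintptr(3) // pages already handed back to the OS

	length := npages << pageShift
	released := length - (npreleased << pageShift) // only the newly released bytes

	fmt.Printf("span: %d bytes, newly released: %d bytes\n", length, released)

	// After sysUnused, npreleased covers the whole span, so a second
	// scavenging pass over the same idle span reports 0 new bytes.
	npreleased = length >> pageShift
	fmt.Println("npreleased now:", npreleased) // 8
}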