Skip to content

Commit 096b848

Browse files
committed
Merge branch 'mimalloc-v2.0.9'
This topic vendors in mimalloc v2.0.9, a fast allocator that allows Git for Windows to perform efficiently. It switches Git for Windows to using mimalloc instead of nedmalloc.
2 parents eb749d2 + e86d22c commit 096b848

28 files changed

+13667
-8
lines changed

Makefile

+34
Original file line numberDiff line numberDiff line change
@@ -1328,6 +1328,7 @@ BUILTIN_OBJS += builtin/write-tree.o
13281328
# upstream unnecessarily (making merging in future changes easier).
THIRD_PARTY_SOURCES += compat/inet_ntop.c
THIRD_PARTY_SOURCES += compat/inet_pton.c
# vendored mimalloc allocator sources (see compat/mimalloc/)
THIRD_PARTY_SOURCES += compat/mimalloc/%
THIRD_PARTY_SOURCES += compat/nedmalloc/%
THIRD_PARTY_SOURCES += compat/obstack.%
THIRD_PARTY_SOURCES += compat/poll/%
@@ -2051,6 +2052,39 @@ ifdef USE_NED_ALLOCATOR
20512052
OVERRIDE_STRDUP = YesPlease
20522053
endif
20532054

2055+
# Build the vendored mimalloc allocator (compat/mimalloc/) when USE_MIMALLOC
# is set, compiling its objects into COMPAT_OBJS.
ifdef USE_MIMALLOC
	MIMALLOC_OBJS = \
		compat/mimalloc/alloc-aligned.o \
		compat/mimalloc/alloc.o \
		compat/mimalloc/arena.o \
		compat/mimalloc/bitmap.o \
		compat/mimalloc/heap.o \
		compat/mimalloc/init.o \
		compat/mimalloc/options.o \
		compat/mimalloc/os.o \
		compat/mimalloc/page.o \
		compat/mimalloc/random.o \
		compat/mimalloc/segment.o \
		compat/mimalloc/segment-cache.o \
		compat/mimalloc/stats.o

	# mimalloc requires C11 (gnu11); MI_DEBUG=0 compiles out its internal
	# debug assertions for release-quality builds
	COMPAT_CFLAGS += -Icompat/mimalloc -DMI_DEBUG=0 -DUSE_MIMALLOC --std=gnu11
	COMPAT_OBJS += $(MIMALLOC_OBJS)

	# pre-define BANNED_H for these third-party objects only — presumably to
	# neutralize banned.h's function bans for vendored code (verify against
	# banned.h's include guard)
	$(MIMALLOC_OBJS): COMPAT_CFLAGS += -DBANNED_H

ifdef DEVELOPER
	# suppress DEVELOPER-mode warnings we do not want to fix in vendored
	# sources (keeping the upstream code unmodified eases future merges)
	$(MIMALLOC_OBJS): COMPAT_CFLAGS += \
		-Wno-attributes \
		-Wno-pedantic \
		-Wno-unknown-pragmas \
		-Wno-declaration-after-statement \
		-Wno-old-style-definition \
		-Wno-missing-prototypes \
		-Wno-array-bounds
endif
endif
2087+
20542088
ifdef OVERRIDE_STRDUP
20552089
COMPAT_CFLAGS += -DOVERRIDE_STRDUP
20562090
COMPAT_OBJS += compat/strdup.o

compat/mimalloc/LICENSE

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
MIT License
2+
3+
Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen
4+
5+
Permission is hereby granted, free of charge, to any person obtaining a copy
6+
of this software and associated documentation files (the "Software"), to deal
7+
in the Software without restriction, including without limitation the rights
8+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9+
copies of the Software, and to permit persons to whom the Software is
10+
furnished to do so, subject to the following conditions:
11+
12+
The above copyright notice and this permission notice shall be included in all
13+
copies or substantial portions of the Software.
14+
15+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21+
SOFTWARE.

compat/mimalloc/alloc-aligned.c

+306
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,306 @@
1+
/* ----------------------------------------------------------------------------
2+
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
3+
This is free software; you can redistribute it and/or modify it under the
4+
terms of the MIT license. A copy of the license can be found in the file
5+
"LICENSE" at the root of this distribution.
6+
-----------------------------------------------------------------------------*/
7+
8+
#include "mimalloc.h"
9+
#include "mimalloc-internal.h"
10+
11+
#include <string.h> // memset
12+
13+
// ------------------------------------------------------
14+
// Aligned Allocation
15+
// ------------------------------------------------------
16+
17+
// Fallback primitive aligned allocation -- split out for better codegen
// Slow path: satisfy an aligned (optionally zeroed) allocation that the fast
// path could not, either by a dedicated huge-page segment (very large
// alignments) or by over-allocating and aligning within the block.
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  mi_assert_internal(size <= PTRDIFF_MAX);
  mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));

  const uintptr_t align_mask = alignment - 1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;

  // use regular allocation if it is guaranteed to fit the alignment constraints
  if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
    void* p = _mi_heap_malloc_zero(heap, size, zero);
    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
    return p;
  }

  void* p;
  size_t oversize;
  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
    // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
    // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
    // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
    if mi_unlikely(offset != 0) {
      // todo: cannot support offset alignment for very large alignments yet
      #if MI_DEBUG > 0
      _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
      #endif
      return NULL;
    }
    oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
    p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment);  // the page block size should be large enough to align in the single huge page block
    // zero afterwards as only the area from the aligned_p may be committed!
    if (p == NULL) return NULL;
  }
  else {
    // otherwise over-allocate
    oversize = size + alignment - 1;
    p = _mi_heap_malloc_zero(heap, oversize, zero);
    if (p == NULL) return NULL;
  }

  // .. and align within the allocation
  const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
  const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
  mi_assert_internal(adjust < alignment);
  void* aligned_p = (void*)((uintptr_t)p + adjust);
  if (aligned_p != p) {
    // mark the page as holding aligned blocks: aligned_p is an interior pointer
    mi_page_set_has_aligned(_mi_ptr_page(p), true);
  }

  mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
  mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);

  // now zero the block if needed
  if (zero && alignment > MI_ALIGNMENT_MAX) {
    const ptrdiff_t diff = (uint8_t*)aligned_p - (uint8_t*)p;
    const ptrdiff_t zsize = mi_page_usable_block_size(_mi_ptr_page(p)) - diff - MI_PADDING_SIZE;
    if (zsize > 0) { _mi_memzero(aligned_p, zsize); }
  }

#if MI_TRACK_ENABLED
  // keep the allocation tracker (e.g. valgrind integration) consistent with
  // the pointer actually handed out
  if (p != aligned_p) {
    mi_track_free_size(p, oversize);
    mi_track_malloc(aligned_p, size, zero);
  }
  else {
    mi_track_resize(aligned_p, oversize, size);
  }
#endif
  return aligned_p;
}
90+
91+
// Primitive aligned allocation
// Validates alignment/size, tries a fast path (an available small block whose
// free pointer already has the requested alignment), otherwise defers to the
// fallback. Returns NULL on invalid alignment or size > PTRDIFF_MAX.
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
  mi_assert(alignment > 0);
  if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  /*
  if mi_unlikely(alignment > MI_ALIGNMENT_MAX) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
    #endif
    return NULL;
  }
  */
  if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check

  // try first if there happens to be a small block available with just the right alignment
  if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
    if mi_likely(page->free != NULL && is_aligned)
    {
      #if MI_STAT>1
      mi_heap_stat_increase(heap, malloc, size);
      #endif
      void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
      mi_assert_internal(p != NULL);
      mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
      mi_track_malloc(p,size,zero);
      return p;
    }
  }
  // fallback
  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
}
138+
139+
140+
// ------------------------------------------------------
141+
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
142+
// ------------------------------------------------------
143+
144+
// Allocate `size` bytes in `heap` such that the address at `offset` is aligned
// to `alignment` (no zeroing).
mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}
147+
148+
// Allocate `size` bytes in `heap` aligned to `alignment`, with a fast path
// for sizes/alignments that small allocations satisfy naturally.
mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  #if !MI_PADDING
  // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
  if (!_mi_is_power_of_two(alignment)) return NULL;
  if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
  #else
  // with padding, we can only guarantee this for fixed alignments
  if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
               && size <= MI_SMALL_SIZE_MAX)
  #endif
  {
    // fast path for common alignment and size
    return mi_heap_malloc_small(heap, size);
  }
  else {
    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
  }
}
166+
167+
// ------------------------------------------------------
168+
// Aligned Allocation
169+
// ------------------------------------------------------
170+
171+
// Zero-initialized variant of mi_heap_malloc_aligned_at.
mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}
174+
175+
// Zero-initialized aligned allocation at offset 0.
mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}
178+
179+
// calloc-style aligned allocation: checks `count * size` for overflow before
// delegating to the zero-initializing aligned allocator.
mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;  // reject count*size overflow
  return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
184+
185+
// calloc-style aligned allocation at offset 0.
mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}
188+
189+
// Default-heap wrapper for mi_heap_malloc_aligned_at.
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
192+
193+
// Default-heap wrapper for mi_heap_malloc_aligned.
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}
196+
197+
// Default-heap wrapper for mi_heap_zalloc_aligned_at.
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}
200+
201+
// Default-heap wrapper for mi_heap_zalloc_aligned.
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}
204+
205+
// Default-heap wrapper for mi_heap_calloc_aligned_at.
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}
208+
209+
// Default-heap wrapper for mi_heap_calloc_aligned.
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}
212+
213+
214+
// ------------------------------------------------------
215+
// Aligned re-allocation
216+
// ------------------------------------------------------
217+
218+
// Aligned reallocation core: reuse `p` in place when the new size still fits
// (within 50% waste) and the alignment/offset still holds; otherwise allocate
// a new aligned block, optionally zero the grown tail, copy, and free `p`.
// `p` is freed only if the new allocation succeeded.
static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  // trivial alignment: plain realloc is sufficient
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  // realloc(NULL, ...) acts as a fresh aligned allocation
  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
  size_t size = mi_usable_size(p);
  if (newsize <= size && newsize >= (size - (size / 2))
      && (((uintptr_t)p + offset) % alignment) == 0) {
    return p;  // reallocation still fits, is aligned and not more than 50% waste
  }
  else {
    void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
    if (newp != NULL) {
      if (zero && newsize > size) {
        const mi_page_t* page = _mi_ptr_page(newp);
        if (page->is_zero) {
          // already zero initialized
          mi_assert_expensive(mi_mem_is_zero(newp,newsize));
        }
        else {
          // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
          size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
          memset((uint8_t*)newp + start, 0, newsize - start);
        }
      }
      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
      mi_free(p); // only free if successful
    }
    return newp;
  }
}
248+
249+
// Aligned realloc without an explicit offset: preserves the alignment offset
// the previous allocation happened to have.
static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL)
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}
255+
256+
// Aligned realloc at `offset` (no zeroing of grown memory).
mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}
259+
260+
// Aligned realloc (no zeroing of grown memory).
mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
263+
264+
// Aligned realloc at `offset` that zero-initializes any grown memory.
mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
267+
268+
// Aligned realloc that zero-initializes any grown memory.
mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
271+
272+
// recalloc-style aligned realloc at `offset`: overflow-checks newcount*size,
// zero-initializes grown memory.
mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;  // reject newcount*size overflow
  return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
277+
278+
// recalloc-style aligned realloc: overflow-checks newcount*size,
// zero-initializes grown memory.
mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;  // reject newcount*size overflow
  return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
283+
284+
// Default-heap wrapper for mi_heap_realloc_aligned_at.
mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
287+
288+
// Default-heap wrapper for mi_heap_realloc_aligned.
mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
291+
292+
// Default-heap wrapper for mi_heap_rezalloc_aligned_at.
mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}
295+
296+
// Default-heap wrapper for mi_heap_rezalloc_aligned.
mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}
299+
300+
// Default-heap wrapper for mi_heap_recalloc_aligned_at.
mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}
303+
304+
// Default-heap wrapper for mi_heap_recalloc_aligned.
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}

0 commit comments

Comments
 (0)