
Commit ab6dda7

dscho authored and Git for Windows Build Agent committed
Import the source code of mimalloc
This commit imports mimalloc's source code as per v2.0.6, fetched from the tag at https://github.com/microsoft/mimalloc. The .c files are from the src/ subdirectory, and the .h files from the include/ subdirectory.

We will subsequently modify the source code to accommodate building within Git's context.

Since we plan on using the `mi_*()` family of functions, we skip the C++-specific source code, some POSIX-compliant functions to interact with mimalloc, and the code that wants to support auto-magic overriding of the `malloc()` function (mimalloc-new-delete.h, alloc-posix.c, mimalloc-override.h, alloc-override.c, alloc-override-osx.c, alloc-override-win.c and static.c).

To appease the `check-whitespace` job of Git's Continuous Integration, this commit was washed one time via `git rebase --whitespace=fix`.

Signed-off-by: Johannes Schindelin <[email protected]>
1 parent e4b0d25 commit ab6dda7

22 files changed: +12712 -0 lines changed
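
Since the commit message stresses that Git will call the `mi_*()` family directly rather than rely on `malloc()` interposition, a minimal sketch of that usage style may help. This example is illustrative only and is not part of the commit; it assumes only public declarations from mimalloc.h (mi_malloc(), mi_zalloc(), mi_malloc_aligned(), mi_free()):

    #include <stdio.h>
    #include "mimalloc.h"

    int main(void)
    {
        /* explicit calls into mimalloc's public API, no malloc() overriding */
        char *buf = mi_malloc(64);                  /* counterpart of malloc(64) */
        void *zeroed = mi_zalloc(128);              /* counterpart of calloc(1, 128) */
        void *aligned = mi_malloc_aligned(100, 64); /* 64-byte-aligned block (see alloc-aligned.c below) */

        if (!buf || !zeroed || !aligned)
            return 1;

        snprintf(buf, 64, "hello from mi_malloc");
        printf("%s\n", buf);

        /* blocks obtained from mi_*() functions must be released with mi_free() */
        mi_free(buf);
        mi_free(zeroed);
        mi_free(aligned);
        return 0;
    }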

Makefile

Lines changed: 1 addition & 0 deletions
@@ -1328,6 +1328,7 @@ BUILTIN_OBJS += builtin/write-tree.o
 # upstream unnecessarily (making merging in future changes easier).
 THIRD_PARTY_SOURCES += compat/inet_ntop.c
 THIRD_PARTY_SOURCES += compat/inet_pton.c
+THIRD_PARTY_SOURCES += compat/mimalloc/%
 THIRD_PARTY_SOURCES += compat/nedmalloc/%
 THIRD_PARTY_SOURCES += compat/obstack.%
 THIRD_PARTY_SOURCES += compat/poll/%

compat/mimalloc/LICENSE

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

compat/mimalloc/alloc-aligned.c

Lines changed: 260 additions & 0 deletions
@@ -0,0 +1,260 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/

#include "mimalloc.h"
#include "mimalloc-internal.h"

#include <string.h>  // memset

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

// Fallback primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  mi_assert_internal(size <= PTRDIFF_MAX);
  mi_assert_internal(alignment!=0 && _mi_is_power_of_two(alignment) && alignment <= MI_ALIGNMENT_MAX);

  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;

  // use regular allocation if it is guaranteed to fit the alignment constraints
  if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
    void* p = _mi_heap_malloc_zero(heap, size, zero);
    mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
    return p;
  }

  // otherwise over-allocate
  void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
  if (p == NULL) return NULL;

  // .. and align within the allocation
  uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
  mi_assert_internal(adjust <= alignment);
  void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
  if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
  mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
  mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
  return aligned_p;
}

// Primitive aligned allocation
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
  // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
  mi_assert(alignment > 0);
  if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  if (mi_unlikely(alignment > MI_ALIGNMENT_MAX)) { // we cannot align at a boundary larger than this (or otherwise we cannot find segment headers)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation has a maximum alignment of %zu (size %zu, alignment %zu)\n", MI_ALIGNMENT_MAX, size, alignment);
    #endif
    return NULL;
  }
  if (mi_unlikely(size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
    #endif
    return NULL;
  }
  const uintptr_t align_mask = alignment-1;  // for any x, `(x & align_mask) == (x % alignment)`
  const size_t padsize = size + MI_PADDING_SIZE;  // note: cannot overflow due to earlier size > PTRDIFF_MAX check

  // try first if there happens to be a small block available with just the right alignment
  if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
    mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
    const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
    if (mi_likely(page->free != NULL && is_aligned))
    {
      #if MI_STAT>1
      mi_heap_stat_increase(heap, malloc, size);
      #endif
      void* p = _mi_page_malloc(heap, page, padsize); // TODO: inline _mi_page_malloc
      mi_assert_internal(p != NULL);
      mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
      if (zero) { _mi_block_zero_init(page, p, size); }
      return p;
    }
  }
  // fallback
  return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
}


// ------------------------------------------------------
// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
// ------------------------------------------------------

mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}

mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  #if !MI_PADDING
  // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
  if (!_mi_is_power_of_two(alignment)) return NULL;
  if (mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX))
  #else
  // with padding, we can only guarantee this for fixed alignments
  if (mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
                && size <= MI_SMALL_SIZE_MAX))
  #endif
  {
    // fast path for common alignment and size
    return mi_heap_malloc_small(heap, size);
  }
  else {
    return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
  }
}

// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------

mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}

mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}

mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;
  return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}

mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}

mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}

mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
}

mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
}

mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
}

mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
}

mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
}


// ------------------------------------------------------
// Aligned re-allocation
// ------------------------------------------------------

static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero);
  size_t size = mi_usable_size(p);
  if (newsize <= size && newsize >= (size - (size / 2))
      && (((uintptr_t)p + offset) % alignment) == 0) {
    return p;  // reallocation still fits, is aligned and not more than 50% waste
  }
  else {
    void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
    if (newp != NULL) {
      if (zero && newsize > size) {
        const mi_page_t* page = _mi_ptr_page(newp);
        if (page->is_zero) {
          // already zero initialized
          mi_assert_expensive(mi_mem_is_zero(newp,newsize));
        }
        else {
          // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
          size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
          memset((uint8_t*)newp + start, 0, newsize - start);
        }
      }
      _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
      mi_free(p);  // only free if successful
    }
    return newp;
  }
}

static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept {
  mi_assert(alignment > 0);
  if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
  size_t offset = ((uintptr_t)p % alignment);  // use offset of previous allocation (p can be NULL)
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}

void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}

void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}

void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}

void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}

void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}

void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  size_t total;
  if (mi_count_size_overflow(newcount, size, &total)) return NULL;
  return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}

void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}

void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}

void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
}

void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
  return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
}

void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
  return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
}

void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
  return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
}
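
The over-allocation fallback above hinges on two facts: for a power-of-two alignment, `(x & (alignment-1)) == (x % alignment)`, and allocating `size + alignment - 1` bytes guarantees an aligned address exists inside the block. The following standalone sketch of that pointer arithmetic is illustrative only and is not part of the imported file; it uses plain malloc()/free(), whereas mimalloc instead recovers the original pointer from its page metadata (via mi_page_set_has_aligned() and _mi_page_ptr_unalign()):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const size_t size = 100, alignment = 64;     /* alignment must be a power of two */
        const uintptr_t align_mask = alignment - 1;  /* (x & align_mask) == (x % alignment) */

        /* over-allocate by alignment-1 so an aligned address must fall within the block */
        void *p = malloc(size + alignment - 1);
        if (!p)
            return 1;

        /* distance to the next aligned address; adjust == alignment means p was already aligned */
        uintptr_t adjust = alignment - ((uintptr_t)p & align_mask);
        void *aligned_p = (adjust == alignment) ? p : (void *)((uintptr_t)p + adjust);

        assert(((uintptr_t)aligned_p % alignment) == 0);
        printf("p=%p aligned_p=%p adjust=%zu\n", p, aligned_p, (size_t)adjust);

        free(p);  /* must free the original pointer, not the aligned one */
        return 0;
    }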
