
Commit 936b75c

Justin Wilder authored and facebook-github-bot committed
Fix Arena allocator aligned size calculation.
Summary: D62702993 added support for alignment greater than 8, but its size calculation was wrong. It aligned the memory pointer after allocating the 8-byte `Block` header, but did not align the earlier size calculation for the `Block`. This could cause heap corruption when the arena's minBlockSize is already aligned, because the allocated buffer would be 8 bytes short.

In particular, it broke in the `PosePool` after I changed the `PoseState` structure to make it 8 bytes smaller. Constructing that structure would write past the end of the buffer and trigger a heap-corruption exception in debug builds.

Reviewed By: yfeldblum

Differential Revision: D71167717

fbshipit-source-id: c7c4af9aeff0d79d772167ef2caaf6342e57e97c
1 parent 48712c1 commit 936b75c
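To see the off-by-eight concretely, here is a minimal standalone sketch of the old versus fixed arithmetic, assuming a 16-byte max alignment and an 8-byte Block header (the roundUpTo helper and all constants below are illustrative stand-ins, not folly's API):

#include <cstddef>
#include <cstdio>

// Round n up to the next multiple of align (align must be a power of two).
constexpr std::size_t roundUpTo(std::size_t n, std::size_t align) {
  return (n + align - 1) & ~(align - 1);
}

int main() {
  constexpr std::size_t kBlockHeader = 8;  // stand-in for sizeof(Block)
  constexpr std::size_t maxAlign = 16;     // arena's configured alignment
  constexpr std::size_t request = 128;     // == minBlockSize, already 16-aligned

  // Old: the header was budgeted at its raw size...
  constexpr std::size_t oldAllocSize = request + kBlockHeader;  // 136
  // ...but the returned pointer is aligned *past* the header, so the
  // payload really starts at offset roundUpTo(kBlockHeader, maxAlign) == 16.
  constexpr std::size_t usable = oldAllocSize - roundUpTo(kBlockHeader, maxAlign);

  std::printf("requested %zu, usable %zu, short by %zu bytes\n",
              request, usable, request - usable);  // short by 8: heap corruption

  // Fixed: budget the header at its aligned size, as the commit does.
  constexpr std::size_t newAllocSize = request + roundUpTo(kBlockHeader, maxAlign);
  std::printf("fixed allocSize %zu, usable %zu\n",
              newAllocSize, newAllocSize - roundUpTo(kBlockHeader, maxAlign));
}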

File tree

3 files changed (+44, -25 lines)

folly/memory/Arena-inl.h (4 additions, 3 deletions)

@@ -29,7 +29,8 @@ void* Arena<Alloc>::allocateSlow(size_t size) {
   char* start;
 
   size_t allocSize;
-  if (!checked_add(&allocSize, std::max(size, minBlockSize()), sizeof(Block))) {
+  if (!checked_add(
+          &allocSize, std::max(size, minBlockSize()), roundUp(sizeof(Block)))) {
     throw_exception<std::bad_alloc>();
   }
   if (sizeLimit_ != kNoSizeLimit &&
@@ -40,7 +41,7 @@ void* Arena<Alloc>::allocateSlow(size_t size) {
   if (size > minBlockSize()) {
     // Allocate a large block for this chunk only; don't change ptr_ and end_,
     // let them point into a normal block (or none, if they're null)
-    allocSize = sizeof(LargeBlock) + size;
+    allocSize = roundUp(sizeof(LargeBlock)) + size;
     void* mem = AllocTraits::allocate(alloc(), allocSize);
     auto blk = new (mem) LargeBlock(allocSize);
     start = align(blk->start());
@@ -57,7 +58,7 @@ void* Arena<Alloc>::allocateSlow(size_t size) {
     blocks_.push_back(*blk);
     currentBlock_ = blocks_.last();
     ptr_ = start + size;
-    end_ = start + allocSize - sizeof(Block);
+    end_ = static_cast<char*>(mem) + allocSize;
     assert(ptr_ <= end_);
   }
 
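Note the end_ change in the last hunk: the usable bound now comes straight from the allocation base rather than from the aligned start pointer. A rough trace of why the old expression overshot, again assuming a 16-byte max alignment and an 8-byte Block header (numbers illustrative):

mem      = allocation base, allocSize bytes long
start    = mem + 16              // header rounded up to maxAlign
old end_ = start + allocSize - 8 // == mem + allocSize + 8, past the buffer end
new end_ = mem + allocSize       // exact end of the buffer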

folly/memory/Arena.h (1 addition, 1 deletion)

@@ -163,7 +163,7 @@ class Arena {
 
   constexpr size_t blockGoodAllocSize() {
     return ArenaAllocatorTraits<Alloc>::goodSize(
-        alloc(), sizeof(Block) + minBlockSize());
+        alloc(), roundUp(sizeof(Block)) + minBlockSize());
   }
 
   struct alignas(max_align_v) LargeBlock {
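This keeps blockGoodAllocSize() consistent with allocateSlow(): both now budget the Block header at its aligned size, so the goodSize-based estimate cannot promise more payload than a freshly aligned block actually delivers.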

folly/memory/test/ArenaTest.cpp (39 additions, 21 deletions)

@@ -33,6 +33,13 @@ using namespace folly;
 
 static_assert(AllocatorHasTrivialDeallocate<SysArena>::value, "");
 
+void* alloc(SysArena& arena, size_t size) {
+  void* const mem = arena.allocate(size);
+  // Fill with garbage to detect heap corruption.
+  memset(mem, 0xff, size);
+  return mem;
+}
+
 TEST(Arena, SizeSanity) {
   std::set<size_t*> allocatedItems;
 
@@ -42,7 +49,7 @@ TEST(Arena, SizeSanity) {
   EXPECT_EQ(arena.totalSize(), minimum_size);
 
   // Insert a single small element to get a new block
-  size_t* ptr = static_cast<size_t*>(arena.allocate(sizeof(long)));
+  size_t* ptr = static_cast<size_t*>(alloc(arena, sizeof(long)));
   allocatedItems.insert(ptr);
   minimum_size += requestedBlockSize;
   maximum_size += goodMallocSize(requestedBlockSize + SysArena::kBlockOverhead);
@@ -52,7 +59,7 @@ TEST(Arena, SizeSanity) {
       << maximum_size;
 
   // Insert a larger element, size should be the same
-  ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize / 2));
+  ptr = static_cast<size_t*>(alloc(arena, requestedBlockSize / 2));
   allocatedItems.insert(ptr);
   EXPECT_GE(arena.totalSize(), minimum_size);
   EXPECT_LE(arena.totalSize(), maximum_size);
@@ -61,7 +68,7 @@ TEST(Arena, SizeSanity) {
 
   // Insert 10 full block sizes to get 10 new blocks
   for (int i = 0; i < 10; i++) {
-    ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize));
+    ptr = static_cast<size_t*>(alloc(arena, requestedBlockSize));
     allocatedItems.insert(ptr);
   }
   minimum_size += 10 * requestedBlockSize;
@@ -73,7 +80,7 @@ TEST(Arena, SizeSanity) {
       << maximum_size;
 
   // Insert something huge
-  ptr = static_cast<size_t*>(arena.allocate(10 * requestedBlockSize));
+  ptr = static_cast<size_t*>(alloc(arena, 10 * requestedBlockSize));
   allocatedItems.insert(ptr);
   minimum_size += 10 * requestedBlockSize;
   maximum_size +=
@@ -109,30 +116,30 @@ TEST(Arena, BytesUsedSanity) {
   EXPECT_EQ(arena.bytesUsed(), bytesUsed);
 
   // Insert 2 small chunks
-  arena.allocate(smallChunkSize);
-  arena.allocate(smallChunkSize);
+  alloc(arena, smallChunkSize);
+  alloc(arena, smallChunkSize);
   bytesUsed += 2 * smallChunkSize;
   EXPECT_EQ(arena.bytesUsed(), bytesUsed);
   EXPECT_GE(arena.totalSize(), blockSize);
   EXPECT_LE(arena.totalSize(), 2 * blockSize);
 
   // Insert big chunk, should still fit in one block
-  arena.allocate(bigChunkSize);
+  alloc(arena, bigChunkSize);
   bytesUsed += bigChunkSize;
   EXPECT_EQ(arena.bytesUsed(), bytesUsed);
   EXPECT_GE(arena.totalSize(), blockSize);
   EXPECT_LE(arena.totalSize(), 2 * blockSize);
 
   // Insert big chunk once more, should trigger new block allocation
-  arena.allocate(bigChunkSize);
+  alloc(arena, bigChunkSize);
   bytesUsed += bigChunkSize;
   EXPECT_EQ(arena.bytesUsed(), bytesUsed);
   EXPECT_GE(arena.totalSize(), 2 * blockSize);
   EXPECT_LE(arena.totalSize(), 3 * blockSize);
 
   // Test that bytesUsed() accounts for alignment
   static const size_t tinyChunkSize = 7;
-  arena.allocate(tinyChunkSize);
+  alloc(arena, tinyChunkSize);
   EXPECT_GE(arena.bytesUsed(), bytesUsed + tinyChunkSize);
   size_t delta = arena.bytesUsed() - bytesUsed;
   EXPECT_EQ(delta & (delta - 1), 0);
@@ -195,7 +202,7 @@ TEST(Arena, FallbackSysArenaDoesFallbackToHeap) {
   SysArena arena0;
   FallBackIntAlloc f_no_init;
   FallBackIntAlloc f_do_init(arena0);
-  arena0.allocate(1); // First allocation to prime the arena
+  alloc(arena0, 1); // First allocation to prime the arena
 
   std::vector<int, FallBackIntAlloc> vec_arg_empty__fallback;
   std::vector<int, FallBackIntAlloc> vec_arg_noinit_fallback(f_no_init);
@@ -239,19 +246,19 @@ TEST(Arena, SizeLimit) {
 
   SysArena arena(requestedBlockSize, maxSize);
 
-  void* a = arena.allocate(sizeof(size_t));
+  void* a = alloc(arena, sizeof(size_t));
   EXPECT_TRUE(a != nullptr);
-  EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc);
+  EXPECT_THROW(alloc(arena, maxSize + 1), std::bad_alloc);
 }
 
 TEST(Arena, ExtremeSize) {
   static const size_t requestedBlockSize = sizeof(size_t);
 
   SysArena arena(requestedBlockSize);
 
-  void* a = arena.allocate(sizeof(size_t));
+  void* a = alloc(arena, sizeof(size_t));
   EXPECT_TRUE(a != nullptr);
-  EXPECT_THROW(arena.allocate(SIZE_MAX - 2), std::bad_alloc);
+  EXPECT_THROW(alloc(arena, SIZE_MAX - 2), std::bad_alloc);
 }
 
 TEST(Arena, MaxAlign) {
@@ -263,19 +270,30 @@ TEST(Arena, MaxAlign) {
     SysArena arena(blockSize, SysArena::kNoSizeLimit, maxAlign);
 
     for (int i = 0; i < 100; i++) {
-      void* ptr = arena.allocate(Random::rand32(100));
+      void* ptr = alloc(arena, Random::rand32(100));
       EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) & (maxAlign - 1), 0);
     }
 
     // Reusing blocks also respects alignment
    arena.clear();
     for (int i = 0; i < 100; i++) {
-      void* ptr = arena.allocate(Random::rand32(100));
+      void* ptr = alloc(arena, Random::rand32(100));
       EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) & (maxAlign - 1), 0);
     }
   }
 }
 
+// This used to cause heap corruption due to incorrect allocation size.
+TEST(Arena, AllocFullBlock) {
+  static const size_t blockSize = 128;
+
+  for (const size_t maxAlign : {4, 8, 16, 32, 64}) {
+    SCOPED_TRACE(maxAlign);
+    SysArena arena(blockSize, SysArena::kNoSizeLimit, maxAlign);
+    alloc(arena, blockSize);
+  }
+}
+
 TEST(Arena, Clear) {
   static const size_t blockSize = 1024;
   SysArena arena(blockSize);
@@ -288,7 +306,7 @@ TEST(Arena, Clear) {
 
   std::vector<void*> addresses;
   for (auto s : sizes) {
-    addresses.push_back(arena.allocate(s));
+    addresses.push_back(alloc(arena, s));
   }
 
   const size_t totalSize = arena.totalSize();
@@ -298,7 +316,7 @@ TEST(Arena, Clear) {
 
   int j = 0;
   for (auto s : sizes) {
-    auto addr = arena.allocate(s);
+    auto addr = alloc(arena, s);
     if (s <= blockSize) {
       EXPECT_EQ(addr, addresses[j]);
     }
@@ -317,7 +335,7 @@ TEST(Arena, ClearAfterLarge) {
   constexpr size_t mult = 10;
   SysArena arena(blockSize);
   EXPECT_EQ(0, arena.bytesUsed());
-  arena.allocate(blockSize * mult);
+  alloc(arena, blockSize * mult);
   EXPECT_EQ(blockSize * mult, arena.bytesUsed());
   arena.clear();
   EXPECT_EQ(0, arena.bytesUsed());
@@ -330,8 +348,8 @@ TEST(Arena, Merge) {
   SysArena arena1(blockSize);
   SysArena arena2(blockSize);
 
-  arena1.allocate(16);
-  arena2.allocate(32);
+  alloc(arena1, 16);
+  alloc(arena2, 32);
 
   EXPECT_EQ(blockAllocSize + sizeof(SysArena), arena1.totalSize());
   EXPECT_EQ(blockAllocSize + sizeof(SysArena), arena2.totalSize());
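The new alloc helper in the test relies on a simple but effective trick: scribbling over the entire requested range right away turns an undersized buffer into an immediate, localized failure under a debug heap or AddressSanitizer, instead of silent corruption of a neighboring allocation. A standalone sketch of the same idea against a generic allocator (the checkedAlloc name and the std::pmr usage are illustrative, not part of folly's test):

#include <cstring>
#include <memory_resource>

// Allocate, then immediately fill the whole range with a garbage byte.
// If the allocator under-sized the buffer, the overrun happens here, at
// the allocation site, where tools like ASan can pinpoint it.
void* checkedAlloc(std::pmr::memory_resource& mr, std::size_t size) {
  void* const mem = mr.allocate(size);
  std::memset(mem, 0xff, size);
  return mem;
}

In the test above the same pattern wraps every arena.allocate call, which is how the undersized full-block allocation in AllocFullBlock is caught.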
