
Commit cd11016

ramosian-glider authored and torvalds committed
mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
Implement the stack depot and provide CONFIG_STACKDEPOT. The stack depot will allow KASAN to store allocation/deallocation stack traces for memory chunks. The stack traces are stored in a hash table and referenced by handles which reside in the kasan_alloc_meta and kasan_free_meta structures in the allocated memory chunks. IRQ stack traces are cut below the IRQ entry point to avoid unnecessary duplication.

Right now stackdepot support is only enabled in the SLAB allocator. Once KASAN features in SLAB are on par with those in SLUB we can switch SLUB to stackdepot as well, thus removing the dependency on SLUB stack bookkeeping, which wastes a lot of memory.

This patch is based on the "mm: kasan: stack depots" patch originally prepared by Dmitry Chernenkov. Joonsoo has said that he plans to reuse the stackdepot code for the mm/page_owner.c debugging facility.

[[email protected]: s/depot_stack_handle/depot_stack_handle_t]
[[email protected]: comment style fixes]

Signed-off-by: Alexander Potapenko <[email protected]>
Signed-off-by: Andrey Ryabinin <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Konstantin Serebryany <[email protected]>
Cc: Dmitry Chernenkov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Parent: be7635e
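To make the memory win of the commit message concrete: instead of keeping full stack traces inline in every object, per-object metadata only needs a 32-bit handle per saved trace. A simplified stand-in sketch (the real kasan_alloc_meta/kasan_free_meta definitions are not part of this diff):

/*
 * Sketch only: a simplified stand-in for KASAN's per-object metadata,
 * showing that each saved trace costs 4 bytes in the object, versus the
 * ~256 bytes per object that SLUB_DEBUG spends on inline bookkeeping
 * (per the comment in lib/stackdepot.c below).
 */
#include <linux/stackdepot.h>

struct object_meta_sketch {
	depot_stack_handle_t alloc_stack;	/* handle into the depot */
	depot_stack_handle_t free_stack;	/* 0 if never freed */
};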

File tree

9 files changed: +391, -12 lines


arch/x86/kernel/Makefile

1 addition & 0 deletions

@@ -19,6 +19,7 @@ endif
 KASAN_SANITIZE_head$(BITS).o := n
 KASAN_SANITIZE_dumpstack.o := n
 KASAN_SANITIZE_dumpstack_$(BITS).o := n
+KASAN_SANITIZE_stacktrace.o := n

 OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y

include/linux/stackdepot.h

32 additions & 0 deletions (new file)

@@ -0,0 +1,32 @@
+/*
+ * A generic stack depot implementation
+ *
+ * Author: Alexander Potapenko <[email protected]>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_STACKDEPOT_H
+#define _LINUX_STACKDEPOT_H
+
+typedef u32 depot_stack_handle_t;
+
+struct stack_trace;
+
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+
+#endif
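The header above is the entire public API. A minimal caller sketch, assuming the struct stack_trace interface of this kernel generation (save_stack_trace()/print_stack_trace()); the helpers record_current_stack() and print_recorded_stack() are hypothetical, not part of the patch:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Hypothetical helper: capture the current stack and deposit it. */
static depot_stack_handle_t record_current_stack(gfp_t flags)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);
	return depot_save_stack(&trace, flags);	/* 0 means "not stored" */
}

/* Hypothetical helper: resolve a handle back into a printable trace. */
static void print_recorded_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	if (!handle)
		return;
	depot_fetch_stack(handle, &trace);	/* points into depot storage */
	print_stack_trace(&trace, 0);
}

Note that depot_fetch_stack() hands back pointers into the depot's own storage, so a fetched trace must not be modified or freed by the caller.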

lib/Kconfig

4 additions & 0 deletions

@@ -536,4 +536,8 @@ config ARCH_HAS_PMEM_API
 config ARCH_HAS_MMIO_FLUSH
 	bool

+config STACKDEPOT
+	bool
+	select STACKTRACE
+
 endmenu

lib/Kconfig.kasan

1 addition & 0 deletions

@@ -7,6 +7,7 @@ config KASAN
 	bool "KASan: runtime memory debugger"
 	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
+	select STACKDEPOT if SLAB
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.

lib/Makefile

3 additions & 0 deletions

@@ -181,6 +181,9 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 obj-$(CONFIG_IRQ_POLL) += irq_poll.o

+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+KASAN_SANITIZE_stackdepot.o := n
+
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o
 $(foreach file, $(libfdt_files), \

lib/stackdepot.c

284 additions & 0 deletions (new file)

@@ -0,0 +1,284 @@
+/*
+ * Generic stack depot for storing stack traces.
+ *
+ * Some debugging tools need to save stack traces of certain events which can
+ * be later presented to the user. For example, KASAN needs to save alloc and
+ * free stacks for each object, but storing two stack traces per object
+ * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
+ * that).
+ *
+ * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
+ * and free stacks repeat a lot, we save about 100x space.
+ * Stacks are never removed from depot, so we store them contiguously one after
+ * another in a contiguous memory allocation.
+ *
+ * Author: Alexander Potapenko <[email protected]>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
+#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
+#define STACK_ALLOC_ALIGN 4
+#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
+					STACK_ALLOC_ALIGN)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_MAX_SLABS \
+	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
+	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+
+/* The compact structure to store the reference to stacks. */
+union handle_parts {
+	depot_stack_handle_t handle;
+	struct {
+		u32 slabindex : STACK_ALLOC_INDEX_BITS;
+		u32 offset : STACK_ALLOC_OFFSET_BITS;
+	};
+};
+
+struct stack_record {
+	struct stack_record *next;	/* Link in the hashtable */
+	u32 hash;			/* Hash in the hashtable */
+	u32 size;			/* Number of frames in the stack */
+	union handle_parts handle;
+	unsigned long entries[1];	/* Variable-sized array of entries. */
+};
+
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+
+static int depot_index;
+static int next_slab_inited;
+static size_t depot_offset;
+static DEFINE_SPINLOCK(depot_lock);
+
+static bool init_stack_slab(void **prealloc)
+{
+	if (!*prealloc)
+		return false;
+	/*
+	 * This smp_load_acquire() pairs with smp_store_release() to
+	 * |next_slab_inited| below and in depot_alloc_stack().
+	 */
+	if (smp_load_acquire(&next_slab_inited))
+		return true;
+	if (stack_slabs[depot_index] == NULL) {
+		stack_slabs[depot_index] = *prealloc;
+	} else {
+		stack_slabs[depot_index + 1] = *prealloc;
+		/*
+		 * This smp_store_release pairs with smp_load_acquire() from
+		 * |next_slab_inited| above and in depot_save_stack().
+		 */
+		smp_store_release(&next_slab_inited, 1);
+	}
+	*prealloc = NULL;
+	return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
+		u32 hash, void **prealloc, gfp_t alloc_flags)
+{
+	int required_size = offsetof(struct stack_record, entries) +
+		sizeof(unsigned long) * size;
+	struct stack_record *stack;
+
+	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+			WARN_ONCE(1, "Stack depot reached limit capacity");
+			return NULL;
+		}
+		depot_index++;
+		depot_offset = 0;
+		/*
+		 * smp_store_release() here pairs with smp_load_acquire() from
+		 * |next_slab_inited| in depot_save_stack() and
+		 * init_stack_slab().
+		 */
+		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+			smp_store_release(&next_slab_inited, 0);
+	}
+	init_stack_slab(prealloc);
+	if (stack_slabs[depot_index] == NULL)
+		return NULL;
+
+	stack = stack_slabs[depot_index] + depot_offset;
+
+	stack->hash = hash;
+	stack->size = size;
+	stack->handle.slabindex = depot_index;
+	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+	memcpy(stack->entries, entries, size * sizeof(unsigned long));
+	depot_offset += required_size;
+
+	return stack;
+}
+
+#define STACK_HASH_ORDER 20
+#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+#define STACK_HASH_SEED 0x9747b28c
+
+static struct stack_record *stack_table[STACK_HASH_SIZE] = {
+	[0 ... STACK_HASH_SIZE - 1] = NULL
+};
+
+/* Calculate hash for a stack */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+	return jhash2((u32 *)entries,
+		      size * sizeof(unsigned long) / sizeof(u32),
+		      STACK_HASH_SEED);
+}
+
+/* Find a stack that is equal to the one stored in entries in the hash */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+					     unsigned long *entries, int size,
+					     u32 hash)
+{
+	struct stack_record *found;
+
+	for (found = bucket; found; found = found->next) {
+		if (found->hash == hash &&
+		    found->size == size &&
+		    !memcmp(entries, found->entries,
+			    size * sizeof(unsigned long))) {
+			return found;
+		}
+	}
+	return NULL;
+}
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+{
+	union handle_parts parts = { .handle = handle };
+	void *slab = stack_slabs[parts.slabindex];
+	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+	struct stack_record *stack = slab + offset;
+
+	trace->nr_entries = trace->max_entries = stack->size;
+	trace->entries = stack->entries;
+	trace->skip = 0;
+}
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace: the stacktrace to save.
+ * @alloc_flags: flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+				    gfp_t alloc_flags)
+{
+	u32 hash;
+	depot_stack_handle_t retval = 0;
+	struct stack_record *found = NULL, **bucket;
+	unsigned long flags;
+	struct page *page = NULL;
+	void *prealloc = NULL;
+
+	if (unlikely(trace->nr_entries == 0))
+		goto fast_exit;
+
+	hash = hash_stack(trace->entries, trace->nr_entries);
+	/* Bad luck, we won't store this stack. */
+	if (hash == 0)
+		goto exit;
+
+	bucket = &stack_table[hash & STACK_HASH_MASK];
+
+	/*
+	 * Fast path: look the stack trace up without locking.
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |bucket| below.
+	 */
+	found = find_stack(smp_load_acquire(bucket), trace->entries,
+			   trace->nr_entries, hash);
+	if (found)
+		goto exit;
+
+	/*
+	 * Check if the current or the next stack slab needs to be
+	 * initialized. If so, allocate the memory - we won't be able to do
+	 * that under the lock.
+	 *
+	 * The smp_load_acquire() here pairs with smp_store_release() to
+	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+	 */
+	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+		/*
+		 * Zero out zone modifiers, as we don't have specific zone
+		 * requirements. Keep the flags related to allocation in atomic
+		 * contexts and I/O.
+		 */
+		alloc_flags &= ~GFP_ZONEMASK;
+		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+		if (page)
+			prealloc = page_address(page);
+	}
+
+	spin_lock_irqsave(&depot_lock, flags);
+
+	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+	if (!found) {
+		struct stack_record *new =
+			depot_alloc_stack(trace->entries, trace->nr_entries,
+					  hash, &prealloc, alloc_flags);
+		if (new) {
+			new->next = *bucket;
+			/*
+			 * This smp_store_release() pairs with
+			 * smp_load_acquire() from |bucket| above.
+			 */
+			smp_store_release(bucket, new);
+			found = new;
+		}
+	} else if (prealloc) {
+		/*
+		 * We didn't need to store this stack trace, but let's keep
+		 * the preallocated memory for the future.
+		 */
+		WARN_ON(!init_stack_slab(&prealloc));
+	}
+
+	spin_unlock_irqrestore(&depot_lock, flags);
+exit:
+	if (prealloc) {
+		/* Nobody used this memory, ok to free it. */
+		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+	}
+	if (found)
+		retval = found->handle.handle;
+fast_exit:
+	return retval;
+}
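The bit packing in union handle_parts works out as follows: a worked example, assuming 4 KB pages (PAGE_SHIFT == 12) and a 64-bit kernel; this standalone program is an illustration, not part of the patch:

#include <stdio.h>

/* Mirrors the depot constants above, under the stated assumptions. */
#define PAGE_SHIFT		12	/* assumption: 4 KB pages */
#define DEPOT_STACK_BITS	32	/* sizeof(depot_stack_handle_t) * 8 */
#define STACK_ALLOC_ORDER	2	/* 4 pages, i.e. 16 KB, per depot slab */
#define STACK_ALLOC_ALIGN	4	/* records aligned to 1 << 4 = 16 bytes */
#define STACK_ALLOC_OFFSET_BITS	(STACK_ALLOC_ORDER + PAGE_SHIFT - STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS	(DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_HASH_ORDER	20

int main(void)
{
	/* 16 KB slab / 16-byte alignment = 1024 positions: 10 bits. */
	printf("offset bits: %d\n", STACK_ALLOC_OFFSET_BITS);	/* 10 */
	/* The remaining 22 bits index stack_slabs[], capped at 1024 slabs. */
	printf("index bits:  %d\n", STACK_ALLOC_INDEX_BITS);	/* 22 */
	/* Total depot capacity: 1024 slabs * 16 KB = 16 MB. */
	printf("max storage: %ld MB\n",
	       (1024L << (PAGE_SHIFT + STACK_ALLOC_ORDER)) >> 20);
	/* Static cost of stack_table[]: 2^20 pointers = 8 MB on 64-bit. */
	printf("hash table:  %lu MB\n",
	       ((1UL << STACK_HASH_ORDER) * sizeof(void *)) >> 20);
	return 0;
}

Ten offset bits address 1024 records at 16-byte granularity, exactly one 16 KB slab, so the depot_offset >> STACK_ALLOC_ALIGN stored in the handle round-trips losslessly through depot_fetch_stack().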
