Skip to content

Commit d949a81

Browse files
pcc authored and akpm00 committed
mm: make minimum slab alignment a runtime property
When CONFIG_KASAN_HW_TAGS is enabled we currently increase the minimum slab alignment to 16. This happens even if MTE is not supported in hardware or disabled via kasan=off, which creates an unnecessary memory overhead in those cases. Eliminate this overhead by making the minimum slab alignment a runtime property and only aligning to 16 if KASAN is enabled at runtime. On a DragonBoard 845c (non-MTE hardware) with a kernel built with CONFIG_KASAN_HW_TAGS, waiting for quiescence after a full Android boot I see the following Slab measurements in /proc/meminfo (median of 3 reboots): Before: 169020 kB After: 167304 kB [[email protected]: make slab alignment type `unsigned int' to avoid casting] Link: https://linux-review.googlesource.com/id/I752e725179b43b144153f4b6f584ceb646473ead Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Peter Collingbourne <[email protected]> Reviewed-by: Andrey Konovalov <[email protected]> Reviewed-by: Hyeonggon Yoo <[email protected]> Tested-by: Hyeonggon Yoo <[email protected]> Acked-by: David Rientjes <[email protected]> Reviewed-by: Catalin Marinas <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: Roman Gushchin <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: Herbert Xu <[email protected]> Cc: Andrey Ryabinin <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Eric W. Biederman <[email protected]> Cc: Kees Cook <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 534aa1d commit d949a81

File tree

5 files changed

+39
-16
lines changed

5 files changed

+39
-16
lines changed

arch/arm64/include/asm/cache.h

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#define __ASM_CACHE_H
77

88
#include <asm/cputype.h>
9+
#include <asm/mte-def.h>
910

1011
#define CTR_L1IP_SHIFT 14
1112
#define CTR_L1IP_MASK 3
@@ -49,16 +50,22 @@
4950
*/
5051
#define ARCH_DMA_MINALIGN (128)
5152

53+
#ifndef __ASSEMBLY__
54+
55+
#include <linux/bitops.h>
56+
#include <linux/kasan-enabled.h>
57+
5258
#ifdef CONFIG_KASAN_SW_TAGS
5359
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
5460
#elif defined(CONFIG_KASAN_HW_TAGS)
55-
#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
61+
static inline unsigned int arch_slab_minalign(void)
62+
{
63+
return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
64+
__alignof__(unsigned long long);
65+
}
66+
#define arch_slab_minalign() arch_slab_minalign()
5667
#endif
5768

58-
#ifndef __ASSEMBLY__
59-
60-
#include <linux/bitops.h>
61-
6269
#define ICACHEF_ALIASING 0
6370
#define ICACHEF_VPIPT 1
6471
extern unsigned long __icache_flags;

include/linux/slab.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -209,6 +209,18 @@ void kmem_dump_obj(void *object);
209209
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
210210
#endif
211211

212+
/*
213+
* Arches can define this function if they want to decide the minimum slab
214+
* alignment at runtime. The value returned by the function must be a power
215+
* of two and >= ARCH_SLAB_MINALIGN.
216+
*/
217+
#ifndef arch_slab_minalign
218+
static inline unsigned int arch_slab_minalign(void)
219+
{
220+
return ARCH_SLAB_MINALIGN;
221+
}
222+
#endif
223+
212224
/*
213225
* kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
214226
* pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN

mm/slab.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3009,10 +3009,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
30093009
objp += obj_offset(cachep);
30103010
if (cachep->ctor && cachep->flags & SLAB_POISON)
30113011
cachep->ctor(objp);
3012-
if (ARCH_SLAB_MINALIGN &&
3013-
((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3014-
pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3015-
objp, (int)ARCH_SLAB_MINALIGN);
3012+
if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
3013+
pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp,
3014+
arch_slab_minalign());
30163015
}
30173016
return objp;
30183017
}

mm/slab_common.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -154,8 +154,7 @@ static unsigned int calculate_alignment(slab_flags_t flags,
154154
align = max(align, ralign);
155155
}
156156

157-
if (align < ARCH_SLAB_MINALIGN)
158-
align = ARCH_SLAB_MINALIGN;
157+
align = max(align, arch_slab_minalign());
159158

160159
return ALIGN(align, sizeof(void *));
161160
}

mm/slob.c

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -478,9 +478,11 @@ static __always_inline void *
478478
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
479479
{
480480
unsigned int *m;
481-
int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
481+
unsigned int minalign;
482482
void *ret;
483483

484+
minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
485+
arch_slab_minalign());
484486
gfp &= gfp_allowed_mask;
485487

486488
might_alloc(gfp);
@@ -493,7 +495,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
493495
* kmalloc()'d objects.
494496
*/
495497
if (is_power_of_2(size))
496-
align = max(minalign, (int) size);
498+
align = max_t(unsigned int, minalign, size);
497499

498500
if (!size)
499501
return ZERO_SIZE_PTR;
@@ -555,8 +557,11 @@ void kfree(const void *block)
555557

556558
sp = virt_to_folio(block);
557559
if (folio_test_slab(sp)) {
558-
int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
560+
unsigned int align = max_t(unsigned int,
561+
ARCH_KMALLOC_MINALIGN,
562+
arch_slab_minalign());
559563
unsigned int *m = (unsigned int *)(block - align);
564+
560565
slob_free(m, *m + align);
561566
} else {
562567
unsigned int order = folio_order(sp);
@@ -573,7 +578,7 @@ EXPORT_SYMBOL(kfree);
573578
size_t __ksize(const void *block)
574579
{
575580
struct folio *folio;
576-
int align;
581+
unsigned int align;
577582
unsigned int *m;
578583

579584
BUG_ON(!block);
@@ -584,7 +589,8 @@ size_t __ksize(const void *block)
584589
if (unlikely(!folio_test_slab(folio)))
585590
return folio_size(folio);
586591

587-
align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
592+
align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
593+
arch_slab_minalign());
588594
m = (unsigned int *)(block - align);
589595
return SLOB_UNITS(*m) * SLOB_UNIT;
590596
}

0 commit comments

Comments (0)