forked from mirrors/linux
		
	mm: make minimum slab alignment a runtime property
When CONFIG_KASAN_HW_TAGS is enabled we currently increase the minimum slab alignment to 16. This happens even if MTE is not supported in hardware or disabled via kasan=off, which creates an unnecessary memory overhead in those cases. Eliminate this overhead by making the minimum slab alignment a runtime property and only aligning to 16 if KASAN is enabled at runtime. On a DragonBoard 845c (non-MTE hardware) with a kernel built with CONFIG_KASAN_HW_TAGS, waiting for quiescence after a full Android boot I see the following Slab measurements in /proc/meminfo (median of 3 reboots): Before: 169020 kB After: 167304 kB [akpm@linux-foundation.org: make slab alignment type `unsigned int' to avoid casting] Link: https://linux-review.googlesource.com/id/I752e725179b43b144153f4b6f584ceb646473ead Link: https://lkml.kernel.org/r/20220427195820.1716975-2-pcc@google.com Signed-off-by: Peter Collingbourne <pcc@google.com> Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: David Rientjes <rientjes@google.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Pekka Enberg <penberg@kernel.org> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: Alexander Potapenko <glider@google.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Kees Cook <keescook@chromium.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									534aa1dc97
								
							
						
					
					
						commit
						d949a8155d
					
				
					 5 changed files with 40 additions and 17 deletions
				
			
		|  | @ -6,6 +6,7 @@ | |||
| #define __ASM_CACHE_H | ||||
| 
 | ||||
| #include <asm/cputype.h> | ||||
| #include <asm/mte-def.h> | ||||
| 
 | ||||
| #define CTR_L1IP_SHIFT		14 | ||||
| #define CTR_L1IP_MASK		3 | ||||
|  | @ -49,15 +50,21 @@ | |||
|  */ | ||||
| #define ARCH_DMA_MINALIGN	(128) | ||||
| 
 | ||||
| #ifdef CONFIG_KASAN_SW_TAGS | ||||
| #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT) | ||||
| #elif defined(CONFIG_KASAN_HW_TAGS) | ||||
| #define ARCH_SLAB_MINALIGN	MTE_GRANULE_SIZE | ||||
| #endif | ||||
| 
 | ||||
| #ifndef __ASSEMBLY__ | ||||
| 
 | ||||
| #include <linux/bitops.h> | ||||
| #include <linux/kasan-enabled.h> | ||||
| 
 | ||||
| #ifdef CONFIG_KASAN_SW_TAGS | ||||
| /* SW tags: slab objects must be aligned to the KASAN shadow granule. */ | ||||
| #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT) | ||||
| #elif defined(CONFIG_KASAN_HW_TAGS) | ||||
| /*
|  * HW tags: MTE_GRANULE_SIZE (16-byte) alignment is only required when MTE
|  * tagging is actually enabled at runtime (it may be absent in hardware or
|  * disabled via kasan=off); otherwise fall back to the default alignment
|  * to avoid the memory overhead of over-aligned slab objects.
|  */ | ||||
| static inline unsigned int arch_slab_minalign(void) | ||||
| { | ||||
| 	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE : | ||||
| 					 __alignof__(unsigned long long); | ||||
| } | ||||
| /* Self-referential define so <linux/slab.h> can detect the arch hook. */ | ||||
| #define arch_slab_minalign() arch_slab_minalign() | ||||
| #endif | ||||
| 
 | ||||
| #define ICACHEF_ALIASING	0 | ||||
| #define ICACHEF_VPIPT		1 | ||||
|  |  | |||
|  | @ -209,6 +209,18 @@ void kmem_dump_obj(void *object); | |||
| #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * Arches can define this function if they want to decide the minimum slab | ||||
|  * alignment at runtime. The value returned by the function must be a power | ||||
|  * of two and >= ARCH_SLAB_MINALIGN. | ||||
|  */ | ||||
| #ifndef arch_slab_minalign | ||||
| /* Fallback: the compile-time ARCH_SLAB_MINALIGN is also the runtime minimum. */ | ||||
| static inline unsigned int arch_slab_minalign(void) | ||||
| { | ||||
| 	return ARCH_SLAB_MINALIGN; | ||||
| } | ||||
| #endif | ||||
| 
 | ||||
| /*
 | ||||
|  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned | ||||
|  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN | ||||
|  |  | |||
|  | @ -3009,10 +3009,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
| 	objp += obj_offset(cachep); | ||||
| 	if (cachep->ctor && cachep->flags & SLAB_POISON) | ||||
| 		cachep->ctor(objp); | ||||
| 	if (ARCH_SLAB_MINALIGN && | ||||
| 	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { | ||||
| 		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n", | ||||
| 		       objp, (int)ARCH_SLAB_MINALIGN); | ||||
| 	if ((unsigned long)objp & (arch_slab_minalign() - 1)) { | ||||
| 		pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp, | ||||
| 		       arch_slab_minalign()); | ||||
| 	} | ||||
| 	return objp; | ||||
| } | ||||
|  |  | |||
|  | @ -154,8 +154,7 @@ static unsigned int calculate_alignment(slab_flags_t flags, | |||
| 		align = max(align, ralign); | ||||
| 	} | ||||
| 
 | ||||
| 	if (align < ARCH_SLAB_MINALIGN) | ||||
| 		align = ARCH_SLAB_MINALIGN; | ||||
| 	align = max(align, arch_slab_minalign()); | ||||
| 
 | ||||
| 	return ALIGN(align, sizeof(void *)); | ||||
| } | ||||
|  |  | |||
							
								
								
									
										16
									
								
								mm/slob.c
									
									
									
									
									
								
							
							
						
						
									
										16
									
								
								mm/slob.c
									
									
									
									
									
								
							|  | @ -478,9 +478,11 @@ static __always_inline void * | |||
| __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | ||||
| { | ||||
| 	unsigned int *m; | ||||
| 	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | ||||
| 	unsigned int minalign; | ||||
| 	void *ret; | ||||
| 
 | ||||
| 	minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN, | ||||
| 			 arch_slab_minalign()); | ||||
| 	gfp &= gfp_allowed_mask; | ||||
| 
 | ||||
| 	might_alloc(gfp); | ||||
|  | @ -493,7 +495,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | |||
| 		 * kmalloc()'d objects. | ||||
| 		 */ | ||||
| 		if (is_power_of_2(size)) | ||||
| 			align = max(minalign, (int) size); | ||||
| 			align = max_t(unsigned int, minalign, size); | ||||
| 
 | ||||
| 		if (!size) | ||||
| 			return ZERO_SIZE_PTR; | ||||
|  | @ -555,8 +557,11 @@ void kfree(const void *block) | |||
| 
 | ||||
| 	sp = virt_to_folio(block); | ||||
| 	if (folio_test_slab(sp)) { | ||||
| 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | ||||
| 		unsigned int align = max_t(unsigned int, | ||||
| 					   ARCH_KMALLOC_MINALIGN, | ||||
| 					   arch_slab_minalign()); | ||||
| 		unsigned int *m = (unsigned int *)(block - align); | ||||
| 
 | ||||
| 		slob_free(m, *m + align); | ||||
| 	} else { | ||||
| 		unsigned int order = folio_order(sp); | ||||
|  | @ -573,7 +578,7 @@ EXPORT_SYMBOL(kfree); | |||
| size_t __ksize(const void *block) | ||||
| { | ||||
| 	struct folio *folio; | ||||
| 	int align; | ||||
| 	unsigned int align; | ||||
| 	unsigned int *m; | ||||
| 
 | ||||
| 	BUG_ON(!block); | ||||
|  | @ -584,7 +589,8 @@ size_t __ksize(const void *block) | |||
| 	if (unlikely(!folio_test_slab(folio))) | ||||
| 		return folio_size(folio); | ||||
| 
 | ||||
| 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); | ||||
| 	align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN, | ||||
| 		      arch_slab_minalign()); | ||||
| 	m = (unsigned int *)(block - align); | ||||
| 	return SLOB_UNITS(*m) * SLOB_UNIT; | ||||
| } | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Peter Collingbourne
						Peter Collingbourne