forked from mirrors/linux
		
	lib: stackdepot: add support to disable stack depot
Add a kernel parameter stack_depot_disable to disable stack depot, so that the stack hash table doesn't consume any memory when stack depot is disabled. The use case is CONFIG_PAGE_OWNER without page_owner=on. Without this patch, stackdepot will consume the memory for the hashtable. By default, it's 8M, which is not trivial. With this option, on a CONFIG_PAGE_OWNER-configured system with page_owner=off and stack_depot_disable on the kernel command line, we can save the memory otherwise wasted on the hashtable. [akpm@linux-foundation.org: fix CONFIG_STACKDEPOT=n build] Link: https://lkml.kernel.org/r/1611749198-24316-2-git-send-email-vjitta@codeaurora.org Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org> Signed-off-by: Vijayanand Jitta <vjitta@codeaurora.org> Cc: Alexander Potapenko <glider@google.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Yogesh Lal <ylal@codeaurora.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									d262093656
								
							
						
					
					
						commit
						e1fdc40334
					
				
					 4 changed files with 45 additions and 4 deletions
				
			
		|  | @ -5182,6 +5182,12 @@ | |||
| 			growing up) the main stack are reserved for no other | ||||
| 			mapping. Default value is 256 pages. | ||||
| 
 | ||||
| 	stack_depot_disable= [KNL] | ||||
| 			Setting this to true through kernel command line will | ||||
| 			disable the stack depot thereby saving the static memory | ||||
| 			consumed by the stack hash table. By default this is set | ||||
| 			to false. | ||||
| 
 | ||||
| 	stacktrace	[FTRACE] | ||||
| 			Enabled the stack tracer on boot up. | ||||
| 
 | ||||
|  |  | |||
|  | @ -21,4 +21,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, | |||
| 
 | ||||
| unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); | ||||
| 
 | ||||
#ifdef CONFIG_STACKDEPOT
/*
 * Allocate the stack depot hash table (called early in boot, from mm_init(),
 * while memblock is still the active allocator).  Returns 0; allocation is
 * skipped entirely when stack_depot_disable= was given on the command line.
 */
int stack_depot_init(void);
#else
/* Stack depot compiled out: initialization is a no-op reporting success. */
static inline int stack_depot_init(void)
{
	return 0;
}
#endif	/* CONFIG_STACKDEPOT */
| 
 | ||||
| #endif | ||||
|  |  | |||
|  | @ -97,6 +97,7 @@ | |||
| #include <linux/mem_encrypt.h> | ||||
| #include <linux/kcsan.h> | ||||
| #include <linux/init_syscalls.h> | ||||
| #include <linux/stackdepot.h> | ||||
| 
 | ||||
| #include <asm/io.h> | ||||
| #include <asm/bugs.h> | ||||
|  | @ -827,6 +828,7 @@ static void __init mm_init(void) | |||
| 	init_mem_debugging_and_hardening(); | ||||
| 	kfence_alloc_pool(); | ||||
| 	report_meminit(); | ||||
| 	stack_depot_init(); | ||||
| 	mem_init(); | ||||
| 	/* page_owner must be initialized after buddy is ready */ | ||||
| 	page_ext_init_flatmem_late(); | ||||
|  |  | |||
|  | @ -31,6 +31,7 @@ | |||
| #include <linux/stackdepot.h> | ||||
| #include <linux/string.h> | ||||
| #include <linux/types.h> | ||||
| #include <linux/memblock.h> | ||||
| 
 | ||||
| #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) | ||||
| 
 | ||||
|  | @ -145,9 +146,32 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, | |||
| #define STACK_HASH_MASK (STACK_HASH_SIZE - 1) | ||||
| #define STACK_HASH_SEED 0x9747b28c | ||||
| 
 | ||||
| static struct stack_record *stack_table[STACK_HASH_SIZE] = { | ||||
| 	[0 ...	STACK_HASH_SIZE - 1] = NULL | ||||
| }; | ||||
| static bool stack_depot_disable; | ||||
| static struct stack_record **stack_table; | ||||
| 
 | ||||
| static int __init is_stack_depot_disabled(char *str) | ||||
| { | ||||
| 	kstrtobool(str, &stack_depot_disable); | ||||
| 	if (stack_depot_disable) { | ||||
| 		pr_info("Stack Depot is disabled\n"); | ||||
| 		stack_table = NULL; | ||||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
| early_param("stack_depot_disable", is_stack_depot_disabled); | ||||
| 
 | ||||
| int __init stack_depot_init(void) | ||||
| { | ||||
| 	if (!stack_depot_disable) { | ||||
| 		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); | ||||
| 		int i; | ||||
| 
 | ||||
| 		stack_table = memblock_alloc(size, size); | ||||
| 		for (i = 0; i < STACK_HASH_SIZE;  i++) | ||||
| 			stack_table[i] = NULL; | ||||
| 	} | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
| /* Calculate hash for a stack */ | ||||
| static inline u32 hash_stack(unsigned long *entries, unsigned int size) | ||||
|  | @ -241,7 +265,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, | |||
| 	unsigned long flags; | ||||
| 	u32 hash; | ||||
| 
 | ||||
| 	if (unlikely(nr_entries == 0)) | ||||
| 	if (unlikely(nr_entries == 0) || stack_depot_disable) | ||||
| 		goto fast_exit; | ||||
| 
 | ||||
| 	hash = hash_stack(entries, nr_entries); | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Vijayanand Jitta
						Vijayanand Jitta