mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	s390/kasan: dynamic shadow mem allocation for modules
Move the shadow memory for the modules area from a single upfront
preallocation to dynamic allocation performed on each module load.
This behavior has been introduced for x86 with bebf56a1b: "This patch
also forces module_alloc() to return 8*PAGE_SIZE aligned address making
shadow memory handling ( kasan_module_alloc()/kasan_module_free() )
more simple. Such alignment guarantees that each shadow page backing
modules address space correspond to only one module_alloc() allocation"
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
			
			
This commit is contained in:
		
							parent
							
								
									0dac8f6bc3
								
							
						
					
					
						commit
						793213a82d
					
				
					 2 changed files with 14 additions and 12 deletions
				
			
		| 
						 | 
					@ -16,6 +16,7 @@
 | 
				
			||||||
#include <linux/fs.h>
 | 
					#include <linux/fs.h>
 | 
				
			||||||
#include <linux/string.h>
 | 
					#include <linux/string.h>
 | 
				
			||||||
#include <linux/kernel.h>
 | 
					#include <linux/kernel.h>
 | 
				
			||||||
 | 
					#include <linux/kasan.h>
 | 
				
			||||||
#include <linux/moduleloader.h>
 | 
					#include <linux/moduleloader.h>
 | 
				
			||||||
#include <linux/bug.h>
 | 
					#include <linux/bug.h>
 | 
				
			||||||
#include <asm/alternative.h>
 | 
					#include <asm/alternative.h>
 | 
				
			||||||
| 
						 | 
					@ -32,12 +33,18 @@
 | 
				
			||||||
 | 
					
 | 
				
			||||||
void *module_alloc(unsigned long size)
 | 
					void *module_alloc(unsigned long size)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
						void *p;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (PAGE_ALIGN(size) > MODULES_LEN)
 | 
						if (PAGE_ALIGN(size) > MODULES_LEN)
 | 
				
			||||||
		return NULL;
 | 
							return NULL;
 | 
				
			||||||
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 | 
						p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
 | 
				
			||||||
				    GFP_KERNEL, PAGE_KERNEL_EXEC,
 | 
									 GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 | 
				
			||||||
				    0, NUMA_NO_NODE,
 | 
									 __builtin_return_address(0));
 | 
				
			||||||
				    __builtin_return_address(0));
 | 
						if (p && (kasan_module_alloc(p, size) < 0)) {
 | 
				
			||||||
 | 
							vfree(p);
 | 
				
			||||||
 | 
							return NULL;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						return p;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
void module_arch_freeing_init(struct module *mod)
 | 
					void module_arch_freeing_init(struct module *mod)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -214,8 +214,6 @@ void __init kasan_early_init(void)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	memsize = min(max_physmem_end, KASAN_SHADOW_START);
 | 
						memsize = min(max_physmem_end, KASAN_SHADOW_START);
 | 
				
			||||||
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
 | 
						shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
 | 
				
			||||||
	if (IS_ENABLED(CONFIG_MODULES))
 | 
					 | 
				
			||||||
		shadow_alloc_size += MODULES_LEN >> KASAN_SHADOW_SCALE_SHIFT;
 | 
					 | 
				
			||||||
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
 | 
						pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
 | 
				
			||||||
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
 | 
						if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
 | 
				
			||||||
		initrd_end =
 | 
							initrd_end =
 | 
				
			||||||
| 
						 | 
					@ -239,18 +237,15 @@ void __init kasan_early_init(void)
 | 
				
			||||||
	 * +- shadow end    -+	 |	mapping	  |
 | 
						 * +- shadow end    -+	 |	mapping	  |
 | 
				
			||||||
	 * | ... gap ...     |\  |    (untracked) |
 | 
						 * | ... gap ...     |\  |    (untracked) |
 | 
				
			||||||
	 * +- modules vaddr -+ \ +----------------+
 | 
						 * +- modules vaddr -+ \ +----------------+
 | 
				
			||||||
	 * | 2Gb	     |	\| 256Mb	  |
 | 
						 * | 2Gb	     |	\|	unmapped  | allocated per module
 | 
				
			||||||
	 * +-----------------+	 +- shadow end ---+
 | 
						 * +-----------------+	 +- shadow end ---+
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	/* populate identity mapping */
 | 
						/* populate identity mapping */
 | 
				
			||||||
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
 | 
						kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
 | 
				
			||||||
	/* populate kasan shadow (for identity mapping / modules / zero page) */
 | 
						/* populate kasan shadow (for identity mapping and zero page mapping) */
 | 
				
			||||||
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
 | 
						kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
 | 
				
			||||||
	if (IS_ENABLED(CONFIG_MODULES)) {
 | 
						if (IS_ENABLED(CONFIG_MODULES))
 | 
				
			||||||
		untracked_mem_end = vmax - MODULES_LEN;
 | 
							untracked_mem_end = vmax - MODULES_LEN;
 | 
				
			||||||
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
 | 
					 | 
				
			||||||
					     __sha(vmax), POPULATE_MAP);
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
	kasan_early_vmemmap_populate(__sha(memsize), __sha(untracked_mem_end),
 | 
						kasan_early_vmemmap_populate(__sha(memsize), __sha(untracked_mem_end),
 | 
				
			||||||
				     POPULATE_ZERO_SHADOW);
 | 
									     POPULATE_ZERO_SHADOW);
 | 
				
			||||||
	kasan_set_pgd(early_pg_dir, asce_type);
 | 
						kasan_set_pgd(early_pg_dir, asce_type);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue