mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	mm: unify module_alloc code for vmalloc
Four architectures (arm, mips, sparc, x86) use __vmalloc_area() for
module_init().  Much of the code is duplicated and can be generalized in
a globally accessible function, __vmalloc_node_range().

__vmalloc_node() now calls into __vmalloc_node_range() with a range of
[VMALLOC_START, VMALLOC_END) for functionally equivalent behavior.  Each
architecture may then use __vmalloc_node_range() directly to remove the
duplication of code.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ec3f64fc9c
commit d0a21265df

6 changed files with 53 additions and 75 deletions
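
The consolidation below follows a single pattern: each architecture's
hand-rolled PAGE_ALIGN/__get_vm_area()/__vmalloc_area() sequence collapses
into one call to the new helper. As a minimal sketch of the shape every
module_alloc() converges on (the constants shown are the arm variant from
the first hunk; mips, sparc, and x86 differ only in their range macros,
gfp flags, and page protections):

	void *module_alloc(unsigned long size)
	{
		/* Alignment and the zero-size check now happen inside
		 * __vmalloc_node_range(); node == -1 means any node. */
		return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
					GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
					__builtin_return_address(0));
	}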
				
			
arch/arm/kernel/module.c
@@ -38,17 +38,9 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)
arch/mips/kernel/module.c
@@ -46,17 +46,9 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
 #ifdef MODULE_START
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 #else
 	if (size == 0)
 		return NULL;
arch/sparc/kernel/module.c
@@ -23,17 +23,11 @@
 
 static void *module_map(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size || size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 }
 
 static char *dot2underscore(char *name)
arch/x86/kernel/module.c
@@ -37,20 +37,11 @@
 
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	if (!size)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	size = PAGE_ALIGN(size);
-	if (size > MODULES_LEN)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
-					PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				-1, __builtin_return_address(0));
 }
 
 /* Free memory returned from module_alloc */
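
A note on the sparc and x86 hunks above: the explicit !size test is gone
because the zero-size case now falls through to __vmalloc_node_range(),
which page-aligns and rejects a zero size itself (see the mm/vmalloc.c
hunk below). A small sketch of the equivalence, assuming the usual
PAGE_ALIGN definition (module_size_ok is a hypothetical name, used here
only for illustration):

	static inline bool module_size_ok(unsigned long size)
	{
		/* Old guard: !size || PAGE_ALIGN(size) > MODULES_LEN.
		 * New guard: only the range check is done here, since
		 * PAGE_ALIGN(0) == 0 passes it and the !size rejection
		 * happens inside __vmalloc_node_range() after alignment.
		 * The combined behavior is unchanged. */
		return PAGE_ALIGN(size) <= MODULES_LEN;
	}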
include/linux/vmalloc.h
@@ -59,8 +59,9 @@ extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
-				pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
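
With this declaration in place, any in-kernel caller that previously
paired __get_vm_area() with __vmalloc_area() can request a mapping in an
arbitrary address range directly. A hypothetical usage sketch
(MY_AREA_START and MY_AREA_END are illustrative placeholders, not kernel
macros):

	#include <linux/vmalloc.h>

	static void *my_range_alloc(unsigned long size)
	{
		/* Page-backed, virtually contiguous memory mapped somewhere
		 * in [MY_AREA_START, MY_AREA_END), on any node (-1). */
		return __vmalloc_node_range(size, 1, MY_AREA_START, MY_AREA_END,
					GFP_KERNEL, PAGE_KERNEL, -1,
					__builtin_return_address(0));
	}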
							
								
								
									
mm/vmalloc.c (64 lines changed)
@@ -1530,17 +1530,47 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+/**
+ *	__vmalloc_node_range  -  allocate virtually contiguous memory
+ *	@size:		allocation size
+ *	@align:		desired alignment
+ *	@start:		vm area range start
+ *	@end:		vm area range end
+ *	@gfp_mask:	flags for the page level allocator
+ *	@prot:		protection mask for the allocated pages
+ *	@node:		node to use for allocation or -1
+ *	@caller:	caller's return address
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator with @gfp_mask flags.  Map them into contiguous
+ *	kernel virtual space, using a pagetable protection of @prot.
+ */
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
+	struct vm_struct *area;
+	void *addr;
+	unsigned long real_size = size;
+
+	size = PAGE_ALIGN(size);
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+		return NULL;
+
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
+
+	if (!area)
+		return NULL;
+
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 
 	/*
 	 * A ref_count = 3 is needed because the vm_struct and vmap_area
 	 * structures allocated in the __get_vm_area_node() function contain
 	 * references to the virtual address of the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 3, gfp_mask);
 
 	return addr;
 }
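
One behavioral detail in the hunk above: the size reported to kmemleak
changes from area->size - PAGE_SIZE to real_size. The vm area includes a
one-page guard, so the old expression recovered the page-aligned size;
real_size is the exact byte count the caller requested, matching what
__vmalloc_node() already reported. A comment-level sketch of the
arithmetic, assuming the single guard page added by __get_vm_area_node():

	/* For a request of `size` bytes (real_size == size):
	 *   area->size              == PAGE_ALIGN(size) + PAGE_SIZE
	 *   area->size - PAGE_SIZE  == PAGE_ALIGN(size)   (old report)
	 *   real_size               == size               (new report)
	 * The two differ only by the alignment padding.
	 */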
@@ -1562,30 +1592,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller)
 {
-	struct vm_struct *area;
-	void *addr;
-	unsigned long real_size = size;
-
-	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
-		return NULL;
-
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
-
-	if (!area)
-		return NULL;
-
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
-
-	return addr;
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
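
After this hunk the public entry points are untouched: vmalloc() and its
variants still funnel through __vmalloc_node(), which now simply forwards
to the generic helper over the default range. A sketch of the resulting
call chain for plain vmalloc() in this kernel (shape only, not literal
source):

	/* vmalloc(size)
	 *   -> __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM,
	 *                     PAGE_KERNEL, -1, __builtin_return_address(0))
	 *   -> __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, ...)
	 * so existing callers see functionally equivalent behavior, as the
	 * commit message states.
	 */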