	[PATCH] mm: introduce remap_vmalloc_range()
Add remap_vmalloc_range(), vmalloc_user(), and vmalloc_32_user() so that
drivers can have a nice interface for remapping vmalloc memory.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 762834e8bf
commit 833423143c

2 changed files with 128 additions and 2 deletions
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,13 @@
 #include <linux/spinlock.h>
 #include <asm/page.h>		/* pgprot_t */
 
+struct vm_area_struct;
+
 /* bits in vm_struct->flags */
 #define VM_IOREMAP	0x00000001	/* ioremap() and friends */
 #define VM_ALLOC	0x00000002	/* vmalloc() */
 #define VM_MAP		0x00000004	/* vmap()ed pages */
+#define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -32,9 +35,11 @@ struct vm_struct {
  *	Highlevel APIs for driver use
  */
 extern void *vmalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
@@ -45,6 +50,9 @@ extern void vfree(void *addr);
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(void *addr);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+							unsigned long pgoff);
 
 /*
  *	Lowlevel-APIs (not for driver use!)
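Before the implementation, it may help to see how a driver is expected to
consume this interface: allocate the buffer with vmalloc_user() (zeroed pages,
area flagged VM_USERMAP), then satisfy mmap() by handing the vma to
remap_vmalloc_range(). The skeleton below is a hypothetical illustration, not
part of the patch; the mydev_* names and buffer size are made up, while
vmalloc_user(), remap_vmalloc_range() and the pre-existing vfree() are the
real interfaces.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

/* Hypothetical example driver state; names and size are illustrative only. */
#define MYDEV_BUF_SIZE	(64 * 1024)
static void *mydev_buf;

static int mydev_open(struct inode *inode, struct file *file)
{
	/* Zeroed pages, area flagged VM_USERMAP: safe to expose to userspace. */
	mydev_buf = vmalloc_user(MYDEV_BUF_SIZE);
	if (!mydev_buf)
		return -ENOMEM;
	return 0;
}

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Cover the whole vma with the buffer, starting vm_pgoff pages in. */
	return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}

static int mydev_release(struct inode *inode, struct file *file)
{
	vfree(mydev_buf);
	mydev_buf = NULL;
	return 0;
}

static struct file_operations mydev_fops = {
	.owner		= THIS_MODULE,
	.open		= mydev_open,
	.mmap		= mydev_mmap,
	.release	= mydev_release,
};

Because remap_vmalloc_range() rejects any area without VM_USERMAP, a buffer
obtained from plain vmalloc() cannot be mapped this way; that check is the
reason the _user allocation variants exist.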
							
								
								
									
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -256,6 +256,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
 }
 
+/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		 if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
 /* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
@@ -498,10 +511,32 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
+/**
+ *	vmalloc_user  -  allocate virtually contiguous memory which has
+ *			   been zeroed so it can be mapped to userspace without
+ *			   leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
 /**
  *	vmalloc_node  -  allocate memory on a specific node
  *
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ *	vmalloc_32_user  -  allocate virtually contiguous memory (32bit
+ *			      addressable) which is zeroed so it can be
+ *			      mapped to userspace without leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -630,3 +687,64 @@ long vwrite(char *buf, char *addr, unsigned long count)
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ *	remap_vmalloc_range  -  map vmalloc pages to userspace
+ *
+ *	@vma:		vma to cover (map full range of vma)
+ *	@addr:		vmalloc memory
+ *	@pgoff:		number of pages into addr before first page to map
+ *	@returns:	0 for success, -Exxx on failure
+ *
+ *	This function checks that addr is a valid vmalloc'ed area, and
+ *	that it is big enough to cover the vma. Will return failure if
+ *	that criteria isn't met.
+ *
+ *	Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
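One subtlety in the bounds check above: the vmalloc allocator always pads an
area with one guard page, and area->size includes that padding, which is why
the test compares against area->size - PAGE_SIZE rather than area->size. A
worked instance of the arithmetic, assuming PAGE_SIZE is 4096:

/*
 * vmalloc_user(4 * PAGE_SIZE)  =>  area->size == 5 * PAGE_SIZE
 *                                  (4 usable pages + 1 guard page)
 *
 * vma of 3 pages, pgoff = 1:
 *   usize + (pgoff << PAGE_SHIFT) == 4 * PAGE_SIZE
 *   area->size - PAGE_SIZE        == 4 * PAGE_SIZE   -> in bounds, allowed
 *
 * vma of 3 pages, pgoff = 2:
 *   usize + (pgoff << PAGE_SHIFT) == 5 * PAGE_SIZE > 4 * PAGE_SIZE
 *                                 -> -EINVAL; the guard page stays unmapped
 */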
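For completeness, the other end of the mapping is an ordinary mmap() of the
device node; nothing userspace-visible changes with this patch. A hedged
sketch against the hypothetical /dev/mydev driver from the earlier example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 64 * 1024;	/* must fit within the driver's buffer */
	int fd = open("/dev/mydev", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The byte offset is converted to pages by the kernel and shows up
	 * as vma->vm_pgoff in the driver's mmap hook. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memset(p, 0xab, len);	/* writes land in the driver's vmalloc pages */

	munmap(p, len);
	close(fd);
	return 0;
}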