mm: add a vmap_pfn function
Add a proper helper to remap PFNs into kernel virtual space so that
drivers don't have to abuse alloc_vm_area() and open-coded PTE
manipulation for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Link: https://lkml.kernel.org/r/20201002122204.1534411-4-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b944afc9d6
commit 3e9a9e256b

3 changed files with 49 additions and 0 deletions
include/linux/vmalloc.h

@@ -122,6 +122,7 @@ extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
 extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
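The prototype above is the entire driver-facing API. As a minimal sketch of the pattern the commit message describes (one call replacing alloc_vm_area() plus hand-rolled PTE manipulation), a caller might look like the following. This is illustrative only, not part of the commit: every identifier other than vmap_pfn(), vunmap() and the standard kernel helpers is invented for the example, and it assumes <linux/vmalloc.h>, <linux/slab.h> and <linux/pfn.h> are included.

/* Hypothetical driver-side sketch; not part of this commit. */
static void *example_map_device_pages(phys_addr_t base, unsigned int npages)
{
	unsigned long *pfns;
	void *vaddr;
	unsigned int i;

	pfns = kmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return NULL;

	/*
	 * base must point at memory with no struct pages behind it
	 * (e.g. a device BAR), or vmap_pfn() will WARN and fail.
	 */
	for (i = 0; i < npages; i++)
		pfns[i] = PHYS_PFN(base) + i;

	/* Replaces alloc_vm_area() + open-coded PTE set-up. */
	vaddr = vmap_pfn(pfns, npages, pgprot_noncached(PAGE_KERNEL));

	kfree(pfns);	/* the array is only needed while mapping */
	return vaddr;	/* tear down later with vunmap(vaddr) */
}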
mm/Kconfig

@@ -816,6 +816,9 @@ config DEVICE_PRIVATE
 	  memory; i.e., memory that is only accessible from the device (or
 	  group of devices). You likely also want to select HMM_MIRROR.
 
+config VMAP_PFN
+	bool
+
 config FRAME_VECTOR
 	bool
 
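VMAP_PFN is a hidden (non-user-selectable) symbol, so vmap_pfn() is only built when some user selects it. A sketch of how a driver's Kconfig entry would opt in, with DRM_EXAMPLE being a made-up symbol for illustration:

config DRM_EXAMPLE
	tristate "Example driver that maps raw PFNs"
	select VMAP_PFN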
							
								
								
									
mm/vmalloc.c (45 additions)

@@ -2413,6 +2413,51 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
+#ifdef CONFIG_VMAP_PFN
+struct vmap_pfn_data {
+	unsigned long	*pfns;
+	pgprot_t	prot;
+	unsigned int	idx;
+};
+
+static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
+{
+	struct vmap_pfn_data *data = private;
+
+	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
+		return -EINVAL;
+	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
+	return 0;
+}
+
+/**
+ * vmap_pfn - map an array of PFNs into virtually contiguous space
+ * @pfns: array of PFNs
+ * @count: number of pages to map
+ * @prot: page protection for the mapping
+ *
+ * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
+ * the start address of the mapping.
+ */
+void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
+{
+	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
+	struct vm_struct *area;
+
+	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
+			__builtin_return_address(0));
+	if (!area)
+		return NULL;
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
+		free_vm_area(area);
+		return NULL;
+	}
+	return area->addr;
+}
+EXPORT_SYMBOL_GPL(vmap_pfn);
+#endif /* CONFIG_VMAP_PFN */
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node)
 {
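Two details of the hunk above are worth noting. First, vmap_pfn_apply() WARNs and fails on any PFN for which pfn_valid() is true, so the helper is strictly for memory with no struct page behind it; page-backed memory should keep using vmap(). The PTEs are installed with pte_mkspecial() to record exactly that absence of a backing page. Second, vmap_pfn() applies pgprot_nx() to the caller-supplied protection bits, so the resulting mapping can never be executable. Callers tear the mapping down with vunmap() on the returned address, as with vmap().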