Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 02:30:34 +02:00
Patch series "small ioremap cleanups".

The first patch moves a little code around the vmalloc/ioremap boundary, following a bigger move by Nick earlier. The second enforces non-executable mappings on ioremap, just like we do for vmap. No driver currently uses executable mappings anyway, nor should they.

This patch (of 2): This keeps it together with the implementation and allows removing the vmap_range wrapper.

Link: https://lkml.kernel.org/r/20210824091259.1324527-1-hch@lst.de
Link: https://lkml.kernel.org/r/20210824091259.1324527-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
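The non-executable enforcement described above amounts to masking the execute permission out of the page protection before the range is mapped. The actual follow-up patch is not shown on this page; the fragment below is only a sketch of that idea against ioremap_prot() in the file further down, assuming pgprot_nx() is used (the generic definition in <linux/pgtable.h> falls back to a no-op on architectures that do not provide an NX-clearing variant).

/*
 * Sketch only, not the patch itself: strip the execute permission from
 * the caller-supplied protection bits before mapping the range, the
 * same way vmap does. pgprot_nx() is a no-op on architectures that do
 * not define their own variant.
 */
	if (ioremap_page_range(vaddr, vaddr + size, addr,
			       pgprot_nx(__pgprot(prot)))) {
		free_vm_area(area);
		return NULL;
	}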
49 lines | 1.2 KiB | C
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
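For reference, a typical caller maps a device's MMIO window once and then accesses it only through the I/O accessors rather than dereferencing the pointer directly. The fragment below is a minimal, hypothetical driver sketch (the base address, size, and register offset are made up for illustration) showing ioremap()/iounmap() around the code above.

/*
 * Usage sketch (hypothetical driver, made-up addresses): map a device
 * MMIO region, write one register via the I/O accessors, then unmap.
 */
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_MMIO_BASE	0xfe000000UL	/* hypothetical physical base */
#define DEMO_MMIO_SIZE	0x1000		/* hypothetical window size */
#define DEMO_REG_CTRL	0x10		/* hypothetical register offset */

static int demo_probe(void)
{
	void __iomem *regs;

	regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Never dereference regs directly; use readl()/writel(). */
	writel(0x1, regs + DEMO_REG_CTRL);

	iounmap(regs);
	return 0;
}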