Add special hooks for the architecture to verify the addr, size or prot
when doing ioremap() or iounmap(), which will make the generic ioremap
more useful.
  ioremap_allowed() returns a bool:
    - true means continue to remap
    - false means skip the remap and return directly
  iounmap_allowed() returns a bool:
    - true means continue to vunmap
    - false means skip the vunmap and return directly
Meanwhile, only vunmap the address when it is in the vmalloc area,
as the generic ioremap only returns vmalloc addresses. An example
arch-side implementation of these hooks is sketched after the tags
below.
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Baoquan He <bhe@redhat.com>
Link: https://lore.kernel.org/r/20220607125027.44946-5-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
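For illustration, an architecture that needs extra checks can supply its own hook implementations. The sketch below is loosely modeled on how arm64 uses ioremap_allowed() with this series; PHYS_MASK, pfn_is_map_memory() and __phys_to_pfn() are arm64-specific helpers assumed for the example rather than part of the generic interface, and the trivial iounmap_allowed() simply keeps the generic behaviour.

/*
 * Sketch of arch-side hooks, loosely modeled on arm64 (assumed helpers:
 * PHYS_MASK, pfn_is_map_memory(), __phys_to_pfn()).
 */
bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
{
	unsigned long last_addr = phys_addr + size - 1;

	/* Don't allow mappings outside of the physical address space. */
	if (last_addr & ~PHYS_MASK)
		return false;

	/* Don't allow normal RAM to be remapped. */
	if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
		return false;

	return true;
}

bool iounmap_allowed(void *addr)
{
	/* No arch-specific restriction: always fall through to vunmap(). */
	return true;
}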
61 lines | 1.4 KiB | C
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Page-align mappings */
	offset = phys_addr & (~PAGE_MASK);
	phys_addr -= offset;
	size = PAGE_ALIGN(size + offset);

	if (!ioremap_allowed(phys_addr, size, prot))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	if (!iounmap_allowed(vaddr))
		return;

	if (is_vmalloc_addr(vaddr))
		vunmap(vaddr);
}
EXPORT_SYMBOL(iounmap);
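For architectures that do not need any restriction, the expectation is that the generic header supplies fallback hooks which simply allow every remap and unmap. A minimal sketch of such defaults (an assumption about the fallbacks, which here just return true and can be overridden by an architecture providing its own definitions behind the same macro guard):

#ifndef ioremap_allowed
#define ioremap_allowed ioremap_allowed
/* Default: no restriction, every ioremap() proceeds to the vmalloc path. */
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	return true;
}
#endif

#ifndef iounmap_allowed
#define iounmap_allowed iounmap_allowed
/* Default: no restriction, every iounmap() proceeds to vunmap(). */
static inline bool iounmap_allowed(void *addr)
{
	return true;
}
#endif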