	mm: change ioremap to set up huge I/O mappings
ioremap_pud_range() and ioremap_pmd_range() are changed to create huge I/O mappings when their capability is enabled and a request meets the required conditions: both the virtual and physical addresses are aligned to the huge page size, and the requested range covers at least one full huge page. When pud_set_huge() or pmd_set_huge() returns zero, i.e. no operation was performed, the code simply falls back to the next level.

The changes are only enabled when CONFIG_HAVE_ARCH_HUGE_VMAP is defined by the architecture.

Signed-off-by: Toshi Kani <toshi.kani@hp.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Robert Elliott <Elliott@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0ddab1d2ed
commit e61ce6ade4
2 changed files with 31 additions and 0 deletions
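The decision described in the commit message reduces to a simple gating check before each mapping step: use a huge entry only when the virtual and physical addresses are aligned to the huge page size and the remaining range covers a full huge page, otherwise fall back to the next level. The following stand-alone user-space sketch illustrates that check, assuming a 2 MB (PMD-sized) huge page; the names can_use_huge and HUGE_SIZE are illustrative only and are not kernel API.

/*
 * Minimal sketch (not kernel code) of the gating check described in
 * the commit message: a huge mapping is usable only when the virtual
 * and physical addresses are aligned to the huge page size and the
 * remaining range spans at least one full huge page.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define HUGE_SIZE	(2UL << 20)	/* assume a 2 MB PMD-sized page */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Decide whether one step of the mapping loop may use a huge entry. */
static bool can_use_huge(uint64_t vaddr, uint64_t paddr, uint64_t len)
{
	return IS_ALIGNED(vaddr, HUGE_SIZE) &&
	       IS_ALIGNED(paddr, HUGE_SIZE) &&
	       len >= HUGE_SIZE;
}

int main(void)
{
	/* Aligned addresses and a 2 MB range: a huge mapping is possible. */
	printf("%d\n", can_use_huge(0xffffc90000000000ULL, 0x100000000ULL, HUGE_SIZE));
	/* Physical address misaligned by 4 KB: fall back to small pages. */
	printf("%d\n", can_use_huge(0xffffc90000000000ULL, 0x100001000ULL, HUGE_SIZE));
	return 0;
}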
				
			
include/asm-generic/pgtable.h

@@ -6,6 +6,7 @@
 
 #include <linux/mm_types.h>
 #include <linux/bug.h>
+#include <linux/errno.h>
 
 #if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
 	CONFIG_PGTABLE_LEVELS

@@ -696,6 +697,20 @@ static inline int pmd_protnone(pmd_t pmd)
 
 #endif /* CONFIG_MMU */
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+	return 0;
+}
+#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
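The header hunk above relies on a common stub pattern: when CONFIG_HAVE_ARCH_HUGE_VMAP is not defined, pud_set_huge()/pmd_set_huge() become static inline stubs that return 0, so callers fall back to the next level with no #ifdef at the call site. Below is a minimal stand-alone sketch of the same pattern, with purely illustrative names (HAVE_HUGE_FEATURE, set_huge_entry, map_block) that are not part of the kernel.

/*
 * Sketch of the compile-time stub pattern: when the feature is
 * compiled out, the "set huge" helper is an inline stub returning 0
 * ("nothing done"), and the caller transparently falls back.
 */
#include <stdio.h>

/* #define HAVE_HUGE_FEATURE 1 */	/* flip this to enable the fast path */

#ifdef HAVE_HUGE_FEATURE
static int set_huge_entry(unsigned long addr)
{
	printf("mapped %#lx with one huge entry\n", addr);
	return 1;			/* non-zero: the mapping was installed */
}
#else
static inline int set_huge_entry(unsigned long addr)
{
	(void)addr;
	return 0;			/* stub: nothing done, caller must fall back */
}
#endif

static void map_block(unsigned long addr)
{
	if (set_huge_entry(addr))
		return;			/* huge mapping installed, done */
	printf("falling back to small pages for %#lx\n", addr);
}

int main(void)
{
	map_block(0x200000UL);
	return 0;
}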
lib/ioremap.c

@@ -80,6 +80,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pmd_addr_end(addr, end);
+
+		if (ioremap_pmd_enabled() &&
+		    ((next - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+			if (pmd_set_huge(pmd, phys_addr + addr, prot))
+				continue;
+		}
+
 		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
 			return -ENOMEM;
 	} while (pmd++, addr = next, addr != end);

@@ -98,6 +106,14 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
+
+		if (ioremap_pud_enabled() &&
+		    ((next - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+			if (pud_set_huge(pud, phys_addr + addr, prot))
+				continue;
+		}
+
 		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
 			return -ENOMEM;
 	} while (pud++, addr = next, addr != end);
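In the hunks above, pmd_addr_end()/pud_addr_end() clamp next to the next table boundary or to end, so (next - addr) equals PMD_SIZE/PUD_SIZE exactly when the current step spans a whole, boundary-aligned block. The user-space sketch below mimics that loop shape for a 2 MB block size; addr_end(), BLOCK_SIZE and the addresses used are illustrative stand-ins, not kernel code.

/*
 * Sketch of the loop shape used by ioremap_pmd_range(): addr_end()
 * returns the next block boundary clamped to 'end', so a step spans a
 * whole block exactly when (next - addr) == BLOCK_SIZE, which is the
 * size check the patch adds before trying a huge entry.
 */
#include <stdio.h>

#define BLOCK_SIZE	(2UL << 20)		/* stand-in for PMD_SIZE (2 MB) */
#define BLOCK_MASK	(~(BLOCK_SIZE - 1))

static unsigned long addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + BLOCK_SIZE) & BLOCK_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	/* 5 MB range starting 1 MB into a block: 1 MB + 2 MB + 2 MB steps. */
	unsigned long addr = 0x100000UL, end = addr + (5UL << 20), next;

	do {
		next = addr_end(addr, end);
		if ((next - addr) == BLOCK_SIZE)
			printf("[%#lx, %#lx) full block -> could map huge\n", addr, next);
		else
			printf("[%#lx, %#lx) partial -> map with small pages\n", addr, next);
	} while (addr = next, addr != end);

	return 0;
}

Running it on that 5 MB range prints one partial 1 MB step followed by two full 2 MB steps; only the full, aligned steps are the case where the patch installs a huge entry instead of descending to the next level.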