mm: stack based kmap_atomic()

Keep the current interface but ignore the KM_type and use a stack based
approach.

The advantage is that we get rid of crappy code like:

	#define __KM_PTE			\
		(in_nmi() ? KM_NMI_PTE : 	\
		 in_irq() ? KM_IRQ_PTE :	\
		 KM_PTE0)

and in general can stop worrying about what context we're in and what kmap
slots might be appropriate for that.

The downside is that FRV kmap_atomic() gets more expensive.

For now we use a CPP trick suggested by Andrew:

	#define kmap_atomic(page, args...) __kmap_atomic(page)

to avoid having to touch all kmap_atomic() users in a single patch.

[ not compiled on:
  - mn10300: the arch doesn't actually build with highmem to begin with ]

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix up drivers/gpu/drm/i915/intel_overlay.c]
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
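As an illustration of the CPP trick above (this sketch is not part of the patch itself): with the variadic forwarding macro in place, callers that still pass a KM_type and callers that have already been converted expand to the same call, and unmaps simply happen in reverse (stack) order. copy_one_page() is a made-up example function; the kmap_atomic()/kunmap_atomic() names are the real interface being preserved.

	#define kmap_atomic(page, args...) __kmap_atomic(page)

	static void copy_one_page(struct page *dst_page, struct page *src_page)
	{
		char *dst = kmap_atomic(dst_page, KM_USER0);	/* old-style caller; extra arg is ignored */
		char *src = kmap_atomic(src_page);		/* new-style caller */

		memcpy(dst, src, PAGE_SIZE);

		kunmap_atomic(src);	/* unmap in reverse order of mapping */
		kunmap_atomic(dst);
	}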
This commit is contained in:

parent 61ecdb801e
commit 3e4d3af501

28 changed files with 371 additions and 376 deletions
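The per-architecture changes below all follow the same pattern. Roughly, as a simplified sketch (the kmap_atomic_idx_push()/kmap_atomic_idx_pop() helpers are the ones added by the generic highmem hunk at the end of this diff; the arch_* names here are placeholders for the real per-architecture __kmap_atomic()/__kunmap_atomic() implementations shown in the hunks):

	static void *arch___kmap_atomic(struct page *page)	/* illustrative name */
	{
		int type, idx;
		unsigned long vaddr;

		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);

		type  = kmap_atomic_idx_push();			/* grab the next free per-CPU slot */
		idx   = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		/* arch-specific: install the pte for 'page' at 'vaddr' */
		return (void *)vaddr;
	}

	static void arch___kunmap_atomic(void *kvaddr)		/* illustrative name */
	{
		if ((unsigned long)kvaddr >= FIXADDR_START) {
			int type = kmap_atomic_idx_pop();	/* release the most recently pushed slot */
			/* arch-specific: clear the pte and flush the TLB entry */
			(void)type;
		}
		pagefault_enable();
	}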
@@ -35,9 +35,9 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif

@@ -36,18 +36,17 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);

void *kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the

@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM

@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type)

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(__kmap_atomic);

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM

@@ -103,15 +107,16 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);

void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned int idx;
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM

@@ -112,12 +112,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
	(void *) damlr;										  \
})

static inline void *kmap_atomic(struct page *page, enum km_type type)
static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
{
	unsigned long paddr;

	pagefault_disable();
	debug_kmap_atomic(type);
	paddr = page_to_phys(page);

	switch (type) {

@@ -125,14 +124,6 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
        case 1:		return __kmap_atomic_primary(1, paddr, 3);
        case 2:		return __kmap_atomic_primary(2, paddr, 4);
        case 3:		return __kmap_atomic_primary(3, paddr, 5);
        case 4:		return __kmap_atomic_primary(4, paddr, 6);
        case 5:		return __kmap_atomic_primary(5, paddr, 7);
        case 6:		return __kmap_atomic_primary(6, paddr, 8);
        case 7:		return __kmap_atomic_primary(7, paddr, 9);
        case 8:		return __kmap_atomic_primary(8, paddr, 10);

	case 9 ... 9 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 9, paddr);

	default:
		BUG();

@@ -152,22 +143,13 @@ do {									\
	asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");	\
} while(0)

static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
{
	switch (type) {
        case 0:		__kunmap_atomic_primary(0, 2);	break;
        case 1:		__kunmap_atomic_primary(1, 3);	break;
        case 2:		__kunmap_atomic_primary(2, 4);	break;
        case 3:		__kunmap_atomic_primary(3, 5);	break;
        case 4:		__kunmap_atomic_primary(4, 6);	break;
        case 5:		__kunmap_atomic_primary(5, 7);	break;
        case 6:		__kunmap_atomic_primary(6, 8);	break;
        case 7:		__kunmap_atomic_primary(7, 9);	break;
        case 8:		__kunmap_atomic_primary(8, 10);	break;

	case 9 ... 9 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 9, kvaddr);
		break;

	default:
		BUG();

@@ -175,6 +157,9 @@ static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	pagefault_enable();
}

void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

@@ -61,14 +61,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	dampr2 = __get_DAMPR(2);

	for (i = 0; i < nents; i++) {
		vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
		vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);

		frv_dcache_writeback((unsigned long) vaddr,
				     (unsigned long) vaddr + PAGE_SIZE);

	}

	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);

@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)

	dampr2 = __get_DAMPR(2);

	vaddr = kmap_atomic(page, __KM_CACHE);
	vaddr = kmap_atomic_primary(page, __KM_CACHE);

	frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);

	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);

	if (dampr2) {
		__set_DAMPR(2, dampr2);

@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,

	dampr2 = __get_DAMPR(2);

	vaddr = kmap_atomic(page, __KM_CACHE);
	vaddr = kmap_atomic_primary(page, __KM_CACHE);

	start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
	frv_cache_wback_inv(start, start + len);

	kunmap_atomic(vaddr, __KM_CACHE);
	kunmap_atomic_primary(vaddr, __KM_CACHE);

	if (dampr2) {
		__set_DAMPR(2, dampr2);

@@ -36,3 +36,53 @@ struct page *kmap_atomic_to_page(void *ptr)
{
	return virt_to_page(ptr);
}

void *__kmap_atomic(struct page *page)
{
	unsigned long paddr;
	int type;

	pagefault_disable();
	type = kmap_atomic_idx_push();
	paddr = page_to_phys(page);

	switch (type) {
	/*
	 * The first 4 primary maps are reserved for architecture code
	 */
	case 0:		return __kmap_atomic_primary(4, paddr, 6);
	case 1:		return __kmap_atomic_primary(5, paddr, 7);
	case 2:		return __kmap_atomic_primary(6, paddr, 8);
	case 3:		return __kmap_atomic_primary(7, paddr, 9);
	case 4:		return __kmap_atomic_primary(8, paddr, 10);

	case 5 ... 5 + NR_TLB_LINES - 1:
		return __kmap_atomic_secondary(type - 5, paddr);

	default:
		BUG();
		return NULL;
	}
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	int type = kmap_atomic_idx_pop();
	switch (type) {
	case 0:		__kunmap_atomic_primary(4, 6);	break;
	case 1:		__kunmap_atomic_primary(5, 7);	break;
	case 2:		__kunmap_atomic_primary(6, 8);	break;
	case 3:		__kunmap_atomic_primary(7, 9);	break;
	case 4:		__kunmap_atomic_primary(8, 10);	break;

	case 5 ... 5 + NR_TLB_LINES - 1:
		__kunmap_atomic_secondary(type - 5, kvaddr);
		break;

	default:
		BUG();
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

@@ -45,18 +45,12 @@ extern pte_t *pkmap_page_table;
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern void *__kmap(struct page *page);
extern void __kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page, enum km_type type);
extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *__kmap_atomic_to_page(void *ptr);

#define kmap			__kmap
#define kunmap			__kunmap
#define kmap_atomic		__kmap_atomic
#define kunmap_atomic_notypecheck		__kunmap_atomic_notypecheck
#define kmap_atomic_to_page	__kmap_atomic_to_page
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);

#define flush_cache_kmaps()	flush_cache_all()

@@ -9,7 +9,7 @@ static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *__kmap(struct page *page)
void *kmap(struct page *page)
{
	void *addr;

@@ -21,16 +21,16 @@ void *__kmap(struct page *page)

	return addr;
}
EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(kmap);

void __kunmap(struct page *page)
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because

@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM

@@ -64,17 +64,21 @@ void *__kmap_atomic(struct page *page, enum km_type type)
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx_pop();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*

@@ -83,24 +87,24 @@ void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));

@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
	return (void*) vaddr;
}

struct page *__kmap_atomic_to_page(void *ptr)
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

@@ -70,15 +70,16 @@ static inline void kunmap(struct page *page)
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
static inline unsigned long __kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();
	if (page < highmem_start_page)
		return page_address(page);

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG

@@ -91,13 +92,21 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
	return vaddr;
}

static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
static inline void __kunmap_atomic(unsigned long vaddr)
{
#if HIGHMEM_DEBUG
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
	int type;

	if (vaddr < FIXADDR_START) /* FIXME */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx_pop();

#if HIGHMEM_DEBUG
	{
		unsigned int idx;
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
			BUG();

@@ -108,9 +117,10 @@ static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type t
		 */
		pte_clear(kmap_pte - idx);
		__flush_tlb_one(vaddr);
	}
#endif
	pagefault_enable();
}

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */

@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table;

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, enum km_type type,
			      pgprot_t prot);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
extern void __kunmap_atomic(void *kvaddr);

static inline void *kmap(struct page *page)
{

@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page)
	kunmap_high(page);
}

static inline void *kmap_atomic(struct page *page, enum km_type type)
static inline void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, type, kmap_prot);
	return kmap_atomic_prot(page, kmap_prot);
}

static inline struct page *kmap_atomic_to_page(void *ptr)

@@ -29,17 +29,17 @@
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM

@@ -52,17 +52,23 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
	int type;

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx_pop();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned int idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*

@@ -71,7 +77,8 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_page(NULL, vaddr);
	}
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);

@@ -70,8 +70,8 @@ static inline void kunmap(struct page *page)
	kunmap_high(page);
}

extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
extern void *__kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern struct page *kmap_atomic_to_page(void *vaddr);

#define flush_cache_kmaps()	flush_cache_all()

@@ -29,17 +29,17 @@
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
	unsigned long idx;
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

@@ -63,22 +63,28 @@ void *kmap_atomic(struct page *page, enum km_type type)

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(__kmap_atomic);

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx_pop();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

/* XXX Fix - Anton */
		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else

@@ -90,17 +96,17 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
/* XXX Fix - Anton */
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)

@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);

#define flush_cache_kmaps()	do { } while (0)

@@ -56,50 +56,6 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);

static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ &&
			    /* type != KM_BIO_DST_IRQ && */
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.

@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
 * When holding an atomic kmap is is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;
	pte_t *pte;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */

@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);

@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
	/* PAGE_NONE is a magic value that tells us to check immutability. */
	return kmap_atomic_prot(page, type, PAGE_NONE);
}
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(__kmap_atomic);

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte without
	 * first remapping it.  Keeping stale mappings around is a bad idea.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		int idx, type;

		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR*smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this pte
		 * without first remapping it.  Keeping stale mappings around
		 * is a bad idea.
		 */
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);

@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn), type);
	return kmap_atomic(pfn_to_page(pfn));
}
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
	return kmap_atomic_prot(pfn_to_page(pfn), prot);
}

struct page *kmap_atomic_to_page(void *ptr)

@@ -59,11 +59,12 @@ extern void kunmap_high(struct page *page);

void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);

void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void *__kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);

#define flush_cache_kmaps()	do { } while (0)

@@ -27,10 +27,10 @@
#include <asm/tlbflush.h>

void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);

void
iounmap_atomic(void __iomem *kvaddr, enum km_type type);
iounmap_atomic(void __iomem *kvaddr);

int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);

@@ -61,7 +61,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);

@@ -9,6 +9,7 @@ void *kmap(struct page *page)
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{

@@ -18,6 +19,7 @@ void kunmap(struct page *page)
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because

@@ -27,10 +29,10 @@ void kunmap(struct page *page)
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));

@@ -47,44 +48,56 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page, enum km_type type)
void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remap it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	pagefault_enable();
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{

@@ -98,12 +111,6 @@ struct page *kmap_atomic_to_page(void *ptr)
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic_notypecheck);
EXPORT_SYMBOL(kmap_atomic_prot);
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)

@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

void
iomap_free(resource_size_t base, unsigned long size)
void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	debug_kmap_atomic(type);
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));

@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
}

/*
 * Map 'pfn' using fixed map 'type' and protections 'prot'
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.

@@ -86,24 +85,33 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
		prot = PAGE_KERNEL_UC_MINUS;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void __iomem *kvaddr, enum km_type type)
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx_pop();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remap it.  Keeping stale mappings around is a bad idea
	 * also, in case the page changes cacheability attributes or becomes
	 * a protected page in a hypervisor.
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	}

	pagefault_enable();
}

@@ -155,11 +155,11 @@ fast_shmem_read(struct page **pages,
	char __iomem *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);
	kunmap_atomic(vaddr);

	if (unwritten)
		return -EFAULT;

@@ -509,10 +509,10 @@ fast_user_write(struct io_mapping *mapping,
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;

@@ -551,11 +551,11 @@ fast_shmem_write(struct page **pages,
	char __iomem *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);
	kunmap_atomic(vaddr);

	if (unwritten)
		return -EFAULT;

@@ -3346,8 +3346,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)),
						      KM_USER0);
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

@@ -3358,7 +3357,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
			  readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page, KM_USER0);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.

@@ -4772,11 +4771,11 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = kmap_atomic(obj_priv->pages[i]);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
		kunmap_atomic(dst);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

@@ -4833,11 +4832,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = kmap_atomic(obj_priv->pages[i]);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(src);
	}

	i915_gem_object_put_pages(obj);

@@ -456,10 +456,9 @@ i915_error_object_create(struct drm_device *dev,

		local_irq_save(flags);
		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
					     reloc_offset,
					     KM_IRQ0);
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s, KM_IRQ0);
		io_mapping_unmap_atomic(s);
		local_irq_restore(flags);

		dst->pages[page] = d;

@@ -187,8 +187,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over

	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						overlay->reg_bo->gtt_offset,
						KM_USER0);
						overlay->reg_bo->gtt_offset);

		if (!regs) {
			DRM_ERROR("failed to map overlay regs in GTT\n");

@@ -203,7 +202,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
	if (OVERLAY_NONPHYSICAL(overlay->dev))
		io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
		io_mapping_unmap_atomic(overlay->virt_addr);

	overlay->virt_addr = NULL;

@@ -2167,11 +2167,11 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,

	if (off < pci_resource_len(dev->pdev, 1)) {
		uint8_t __iomem *p =
			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
			io_mapping_map_atomic_wc(fb, off & PAGE_MASK);

		val = ioread32(p + (off & ~PAGE_MASK));

		io_mapping_unmap_atomic(p, KM_USER0);
		io_mapping_unmap_atomic(p);
	}

	return val;

@@ -2183,12 +2183,12 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
{
	if (off < pci_resource_len(dev->pdev, 1)) {
		uint8_t __iomem *p =
			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
			io_mapping_map_atomic_wc(fb, off & PAGE_MASK);

		iowrite32(val, p + (off & ~PAGE_MASK));
		wmb();

		io_mapping_unmap_atomic(p, KM_USER0);
		io_mapping_unmap_atomic(p);
	}
}

@@ -170,7 +170,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

 #ifdef CONFIG_X86
-	dst = kmap_atomic_prot(d, KM_USER0, prot);
+	dst = kmap_atomic_prot(d, prot);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		dst = vmap(&d, 1, 0, prot);
@@ -183,7 +183,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 	memcpy_fromio(dst, src, PAGE_SIZE);

 #ifdef CONFIG_X86
-	kunmap_atomic(dst, KM_USER0);
+	kunmap_atomic(dst);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		vunmap(dst);
@@ -206,7 +206,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 #ifdef CONFIG_X86
-	src = kmap_atomic_prot(s, KM_USER0, prot);
+	src = kmap_atomic_prot(s, prot);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		src = vmap(&s, 1, 0, prot);
@@ -219,7 +219,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 	memcpy_toio(dst, src, PAGE_SIZE);

 #ifdef CONFIG_X86
-	kunmap_atomic(src, KM_USER0);
+	kunmap_atomic(src);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		vunmap(src);
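The four TTM hunks only drop the KM_USER0 argument from kmap_atomic_prot(); the surrounding #ifdef CONFIG_X86 / vmap() fallback is untouched. A sketch of that pattern wrapped as a helper, purely for illustration (the helper name is invented, and the PAGE_KERNEL fallback is assumed to be plain kmap(), as the non-x86 branch of the TTM code handles it):

	/* Illustrative only: map one page with a non-default protection,
	 * mirroring the open-coded pattern in the TTM hunks above. */
	static void *map_page_prot(struct page *p, pgprot_t prot)
	{
	#ifdef CONFIG_X86
		return kmap_atomic_prot(p, prot);	/* slot argument gone */
	#else
		if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
			return vmap(&p, 1, 0, prot);
		return kmap(p);
	#endif
	}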
@@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)

 #include <asm/kmap_types.h>

-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>

@@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages;

 void kmap_flush_unused(void);

+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx > KM_TYPE_NR);
+#endif
+	return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+	int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(idx < 0);
+#endif
+	return idx;
+}
+
 #else /* CONFIG_HIGHMEM */

 static inline unsigned int nr_free_highpages(void) { return 0; }
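kmap_atomic_idx_push()/kmap_atomic_idx_pop() are the heart of the change: each CPU keeps a small stack of fixmap slots, so callers no longer name one. For orientation, a stripped-down sketch of how an architecture's __kmap_atomic() consumes the index, modelled on the x86-style implementation; the fixmap symbols are arch-specific, and cache flushing, debug checks and the matching __kunmap_atomic() teardown (pop the index, clear the PTE, pagefault_enable()) are omitted:

	/* Illustrative sketch only -- not the per-arch code in this patch. */
	void *__kmap_atomic(struct page *page)
	{
		unsigned long vaddr;
		int idx, type;

		pagefault_disable();
		if (!PageHighMem(page))
			return page_address(page);

		type = kmap_atomic_idx_push();		/* next free slot on this CPU */
		idx = type + KM_TYPE_NR * smp_processor_id();
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

		return (void *)vaddr;
	}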
@@ -66,19 +75,19 @@ static inline void kunmap(struct page *page)
 {
 }

-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
 	pagefault_disable();
 	return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
 	pagefault_enable();
 }

-#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

 #define kmap_flush_unused()	do {} while(0)
@@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)

 #endif /* CONFIG_HIGHMEM */

-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
-		BUILD_BUG_ON(__same_type((addr), struct page *));	\
-		kunmap_atomic_notypecheck((addr), (idx)); \
-	} while (0)
+/*
+ * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...)				\
+do {								\
+	BUILD_BUG_ON(__same_type((addr), struct page *));	\
+	__kunmap_atomic(addr);					\
+} while (0)

 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
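The args... trick is what allows the tree to be converted incrementally: an unmodified caller that still passes a KM_* slot compiles unchanged, the extra argument is simply swallowed. A small illustration, not from the patch (KM_USER0 still exists at this point in the series, it is just ignored):

	/* Illustrative only: old-style and new-style call sites coexist. */
	static void demo(struct page *page)
	{
		void *a = kmap_atomic(page);			/* new form */
		void *b = kmap_atomic(page, KM_USER0);		/* legacy form; slot ignored */

		/* with the stack-based scheme, release in reverse order */
		kunmap_atomic(b, KM_USER0);
		kunmap_atomic(a);
	}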
@@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-			 unsigned long offset,
-			 int slot)
+			 unsigned long offset)
 {
 	resource_size_t phys_addr;
 	unsigned long pfn;
@@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
 	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-	return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+	return iomap_atomic_prot_pfn(pfn, mapping->prot);
 }

 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-	iounmap_atomic(vaddr, slot);
+	iounmap_atomic(vaddr);
 }

 static inline void __iomem *
@@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping)
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-			 unsigned long offset,
-			 int slot)
+			 unsigned long offset)
 {
 	return ((char __force __iomem *) mapping) + offset;
 }

 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 }

mm/highmem.c (62 lines changed: 4 additions, 58 deletions)
@@ -42,6 +42,10 @@
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);

+
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
 unsigned int nr_free_highpages (void)
 {
 	pg_data_t *pgdat;
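__kmap_atomic_idx is the per-CPU stack pointer that the push/pop helpers in highmem.h operate on; it is what lets nested atomic mappings coexist without per-context KM_* bookkeeping. A sketch of the kind of nesting that now needs no slot selection at all (illustrative, not part of the patch):

	/* Illustrative only: two simultaneous atomic mappings on one CPU;
	 * each kmap_atomic() takes the next slot and kunmap_atomic()
	 * releases them innermost-first. */
	static void copy_highpage_sketch(struct page *dst, struct page *src)
	{
		char *vto = kmap_atomic(dst);
		char *vfrom = kmap_atomic(src);

		memcpy(vto, vfrom, PAGE_SIZE);

		kunmap_atomic(vfrom);
		kunmap_atomic(vto);
	}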
@@ -422,61 +426,3 @@ void __init page_address_init(void)
 }

 #endif	/* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
-	static int warn_count = 10;
-
-	if (unlikely(warn_count < 0))
-		return;
-
-	if (unlikely(in_interrupt())) {
-		if (in_nmi()) {
-			if (type != KM_NMI && type != KM_NMI_PTE) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (in_irq()) {
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-			    type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		} else if (!irqs_disabled()) {	/* softirq */
-			if (type != KM_IRQ0 && type != KM_IRQ1 &&
-			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-			    type != KM_SKB_SUNRPC_DATA &&
-			    type != KM_SKB_DATA_SOFTIRQ &&
-			    type != KM_BOUNCE_READ) {
-				WARN_ON(1);
-				warn_count--;
-			}
-		}
-	}
-
-	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
-			type == KM_IRQ_PTE || type == KM_NMI ||
-			type == KM_NMI_PTE ) {
-		if (!irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-		if (irq_count() == 0 && !irqs_disabled()) {
-			WARN_ON(1);
-			warn_count--;
-		}
-	}
-#ifdef CONFIG_KGDB_KDB
-	if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
-		WARN_ON(1);
-		warn_count--;
-	}
-#endif /* CONFIG_KGDB_KDB */
-}
-
-#endif