	WARNING: CPU: 12 PID: 4322 at /arch/powerpc/mm/pgtable-book3s64.c:76 set_pmd_at+0x4c/0x2b0
 Modules linked in:
 CPU: 12 PID: 4322 Comm: qemu-system-ppc Tainted: G        W         4.19.0-rc3-00758-g8f0c636b0542 #36
 NIP:  c0000000000872fc LR: c000000000484eec CTR: 0000000000000000
 REGS: c000003fba876fe0 TRAP: 0700   Tainted: G        W          (4.19.0-rc3-00758-g8f0c636b0542)
 MSR:  900000010282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE,TM[E]>  CR: 24282884  XER: 00000000
 CFAR: c000000000484ee8 IRQMASK: 0
 GPR00: c000000000484eec c000003fba877268 c000000001f0ec00 c000003fbd229f80
 GPR04: 00007c8fe8e00000 c000003f864c5a38 860300853e0000c0 0000000000000080
 GPR08: 0000000080000000 0000000000000001 0401000000000080 0000000000000001
 GPR12: 0000000000002000 c000003fffff5400 c000003fce292000 00007c9024570000
 GPR16: 0000000000000000 0000000000ffffff 0000000000000001 c000000001885950
 GPR20: 0000000000000000 001ffffc0004807c 0000000000000008 c000000001f49d05
 GPR24: 00007c8fe8e00000 c0000000020f2468 ffffffffffffffff c000003fcd33b090
 GPR28: 00007c8fe8e00000 c000003fbd229f80 c000003f864c5a38 860300853e0000c0
 NIP [c0000000000872fc] set_pmd_at+0x4c/0x2b0
 LR [c000000000484eec] do_huge_pmd_numa_page+0xb1c/0xc20
 Call Trace:
 [c000003fba877268] [c00000000045931c] mpol_misplaced+0x1bc/0x230 (unreliable)
 [c000003fba8772c8] [c000000000484eec] do_huge_pmd_numa_page+0xb1c/0xc20
 [c000003fba877398] [c00000000040d344] __handle_mm_fault+0x5e4/0x2300
 [c000003fba8774d8] [c00000000040f400] handle_mm_fault+0x3a0/0x420
 [c000003fba877528] [c0000000003ff6f4] __get_user_pages+0x2e4/0x560
 [c000003fba877628] [c000000000400314] get_user_pages_unlocked+0x104/0x2a0
 [c000003fba8776c8] [c000000000118f44] __gfn_to_pfn_memslot+0x284/0x6a0
 [c000003fba877748] [c0000000001463a0] kvmppc_book3s_radix_page_fault+0x360/0x12d0
 [c000003fba877838] [c000000000142228] kvmppc_book3s_hv_page_fault+0x48/0x1300
 [c000003fba877988] [c00000000013dc08] kvmppc_vcpu_run_hv+0x1808/0x1b50
 [c000003fba877af8] [c000000000126b44] kvmppc_vcpu_run+0x34/0x50
 [c000003fba877b18] [c000000000123268] kvm_arch_vcpu_ioctl_run+0x288/0x2d0
 [c000003fba877b98] [c00000000011253c] kvm_vcpu_ioctl+0x1fc/0x8c0
 [c000003fba877d08] [c0000000004e9b24] do_vfs_ioctl+0xa44/0xae0
 [c000003fba877db8] [c0000000004e9c44] ksys_ioctl+0x84/0xf0
 [c000003fba877e08] [c0000000004e9cd8] sys_ioctl+0x28/0x80
We removed the pte_protnone check earlier on the understanding that we
always mark the pte invalid before calling set_pte/set_pmd. However, the
huge pmd autonuma path still calls set_pmd_at directly. This is OK,
because a protnone pte will not have a translation cached in the TLB.
Fixes: da7ad366b4 ("powerpc/mm/book3s: Update pmd_present to look at _PAGE_PRESENT bit")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
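
For context, the warning above fires from a debug check in set_pmd_at().
The sketch below is a minimal illustration of the pattern the fix relies
on, modelled on the VM_WARN_ON() used by set_pte_at() in the file shown
below; pmd_pte(), pte_hw_valid(), pte_protnone() and pmdp_ptep() are
existing book3s64 helpers, but this body is an approximation, not the
verbatim patch:

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	/*
	 * Sketch only: the old pmd must normally be invalid before we
	 * overwrite it, since no TLB flush is done here. A protnone
	 * (NUMA hinting) entry is exempt because it never has a
	 * translation cached in the TLB, which is exactly the case the
	 * huge pmd autonuma path hits.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));

	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}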
/*
 * This file contains common routines for dealing with the freeing of
 * page tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec
 * support falls into the same category.
 */

static pte_t set_pte_filter(pte_t pte)
{
	if (radix_enabled())
		return pte;

	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Clean the page and set PG_arch_1 */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

 bail:
	return pte_mkexec(pte);
}

#endif /* CONFIG_PPC_BOOK3S */

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure the hardware valid bit is not set. We don't do a
	 * tlb flush for this update, so the old pte must be invalid.
	 * A protnone pte is exempt: it is software-present but never
	 * has a translation cached in the TLB.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Add the pte bit when trying to set a pte */
	pte = pte_mkpte(pte);

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;
	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non-book3s64 platforms. But 8xx
		 * can possibly use tsize derived from hstate.
		 */
		psize = 0;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages into a hugepage, it first
	 * sets the pmd to none to force page fault/gup to take mmap_sem.
	 * Once the pmd is set to none we do a pte_clear, which performs
	 * this assertion, so if we find the pmd none here, just return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
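
A usage note (an illustrative sketch, not part of the file above):
vmalloc_to_phys() resolves an address inside a live vmalloc mapping to
its physical address, and the BUG_ON(!pfn) makes passing anything else
fatal. A caller might do something like this, with buf as a hypothetical
buffer:

	/* Needs <linux/vmalloc.h> for vmalloc(). */
	void *buf = vmalloc(PAGE_SIZE);	/* hypothetical vmalloc'd buffer */
	unsigned long pa;

	if (buf)
		pa = vmalloc_to_phys(buf);	/* physical address of buf */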