	Patch series "mm/thp: fix THP splitting unmap BUGs and related", v10.
Here is the v2 batch of long-standing THP bug fixes that I had not got
around to sending before, but which is prompted now by Wang Yugui's report
https://lore.kernel.org/linux-mm/20210412180659.B9E3.409509F4@e16-tech.com/
Wang Yugui has tested a rollup of these fixes applied to 5.10.39, and
they have done no harm, but have *not* fixed that issue: something more
is needed, and I have no idea what.
This patch (of 7):
Stressing huge tmpfs page migration racing hole punch often crashed on
the VM_BUG_ON(!pmd_present) in pmdp_huge_clear_flush(), with DEBUG_VM=y
kernel; or shortly afterwards, on a bad dereference in
__split_huge_pmd_locked() when DEBUG_VM=n.  The code forgot to allow
for pmd migration entries in the non-anonymous case.
Full disclosure: those particular experiments were on a kernel with more
relaxed mmap_lock and i_mmap_rwsem locking, and were not repeated on the
vanilla kernel: it is conceivable that stricter locking happens to avoid
those cases, or makes them less likely; but __split_huge_pmd_locked()
already allowed for pmd migration entries when handling anonymous THPs,
so this commit brings the shmem and file THP handling into line.
And while there: use old_pmd rather than _pmd, as in the following
blocks; and make it clearer to the eye that the !vma_is_anonymous()
block is self-contained, making an early return after accounting for
unmapping.
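
For illustration, the reworked !vma_is_anonymous() block in
__split_huge_pmd_locked() ends up shaped roughly like the sketch below
(paraphrased from the fix rather than quoted from the diff): old_pmd is
used in place of _pmd, a migration entry is unwrapped with
pmd_to_swp_entry() instead of being dereferenced as if it mapped a
page, and the block returns early after adjusting the file rmap
accounting:

	if (!vma_is_anonymous(vma)) {
		old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_special_huge(vma))
			return;
		if (unlikely(is_pmd_migration_entry(old_pmd))) {
			swp_entry_t entry = pmd_to_swp_entry(old_pmd);

			page = migration_entry_to_page(entry);
		} else {
			page = pmd_page(old_pmd);
			if (!PageDirty(page) && pmd_dirty(old_pmd))
				set_page_dirty(page);
			page_remove_rmap(page, true);
			put_page(page);
		}
		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
		return;
	}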
Link: https://lkml.kernel.org/r/af88612-1473-2eaa-903-8d1a448b26@google.com
Link: https://lkml.kernel.org/r/dd221a99-efb3-cd1d-6256-7e646af29314@google.com
Fixes: e71769ae52 ("mm: enable thp migration for shmem thp")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jue Wang <juew@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

mm/pgtable-generic.c (222 lines, 5.6 KiB, C)

// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif
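
/*
 * For illustration: the p?d_clear_bad() helpers above are normally
 * reached via the p?d_none_or_clear_bad() macros during a page table
 * walk, in the usual walker pattern (a simplified sketch of the
 * common mm walkers, not a verbatim quote):
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;	// empty or corrupt entry: skip
 *		// ... descend to the p4d/pud/pmd/pte levels ...
 *	} while (pgd++, addr = next, addr != end);
 */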

/*
 * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
 * above. pmd folding is special and typically pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
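
/*
 * For illustration, the typical caller pattern (a simplified sketch of
 * the generic fault path, not a verbatim quote of it):
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write))
 *		update_mmu_cache(vma, address, ptep);
 */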

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * Only a present entry must be trans-huge or devmap: a pmd
	 * migration entry is not present, and is tolerated here.
	 */
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
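
/*
 * For illustration, the deposit/withdraw pairing (a simplified sketch
 * of how mm/huge_memory.c uses it, not a verbatim quote): a page table
 * is preallocated and deposited when a huge pmd is installed, so that
 * a later split cannot fail for lack of memory:
 *
 *	pgtable = pte_alloc_one(mm);
 *	...
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, entry);
 *
 * and it is taken back when the huge pmd is zapped or split:
 *
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	pte_free(mm, pgtable);
 */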

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
/*
 * Invalidate the pmd (make it non-present to the hardware) while a
 * huge mapping is being split, and return the old value so that the
 * caller can transfer its dirty and access bits.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
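
/*
 * For illustration (a simplified sketch of __split_huge_pmd_locked(),
 * not a verbatim quote): the huge pmd is invalidated before the
 * deposited page table is reinstalled, so no CPU can keep using a
 * stale huge mapping mid-split:
 *
 *	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	pmd_populate(mm, &_pmd, pgtable);
 *	// ... fill the per-page ptes from old_pmd ...
 */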

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are the same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
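
/*
 * For illustration (a simplified sketch of khugepaged's collapse path,
 * not a verbatim quote): the pte page table is flushed out from under
 * the pmd before the collapsed huge page is installed:
 *
 *	_pmd = pmdp_collapse_flush(vma, address, pmd);
 *	spin_unlock(pmd_ptl);
 *	// ... copy the small pages into the huge page, then
 *	// set_pmd_at() the new huge mapping ...
 */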
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */