mm/gup: replace FOLL_NUMA by gup_can_follow_protnone()
Patch series "mm: minor cleanups around NUMA hinting".

Working on some GUP cleanups (e.g., getting rid of some FOLL_ flags) and
preparing for other GUP changes (getting rid of FOLL_FORCE|FOLL_WRITE for
taking a R/O longterm pin), this is something I can easily send out
independently. Get rid of FOLL_NUMA, allow FOLL_FORCE access to PROT_NONE
mapped pages in GUP-fast, and fixup some documentation around NUMA hinting.

This patch (of 3):

No need for a special flag that is not even properly documented to be
internal-only. Let's just factor this check out and get rid of this flag.
The separate function has the nice benefit that we can centralize comments.

Link: https://lkml.kernel.org/r/20220825164659.89824-2-david@redhat.com
Link: https://lkml.kernel.org/r/20220825164659.89824-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
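The refactor is behaviour-preserving for the call sites it touches:
previously __get_user_pages() set FOLL_NUMA whenever FOLL_FORCE was absent,
and call sites skipped PROT_NONE-mapped pages when FOLL_NUMA was set; after
the patch, call sites test !gup_can_follow_protnone(flags), which holds
exactly when FOLL_FORCE is absent. The minimal user-space C sketch below
checks that equivalence; it is not kernel code, and the flag values are
illustrative stand-ins for the definitions in include/linux/mm.h.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FOLL_FORCE	0x10	/* illustrative stand-in */
#define FOLL_NUMA	0x200	/* the flag this patch removes */

/*
 * Old scheme: __get_user_pages() set FOLL_NUMA unless FOLL_FORCE was given,
 * and a call site skipped a PROT_NONE page when FOLL_NUMA was set.
 */
static bool old_skips_protnone(unsigned int gup_flags)
{
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;
	return gup_flags & FOLL_NUMA;
}

/* New scheme: call sites consult the centralized helper directly. */
static bool gup_can_follow_protnone(unsigned int flags)
{
	return flags & FOLL_FORCE;
}

int main(void)
{
	const unsigned int cases[] = { 0, FOLL_FORCE };

	/* Old and new formulations agree with and without FOLL_FORCE. */
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		assert(old_skips_protnone(cases[i]) ==
		       !gup_can_follow_protnone(cases[i]));

	puts("FOLL_NUMA logic and !gup_can_follow_protnone() agree");
	return 0;
}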
parent f7091ed64e
commit 474098edac

3 changed files with 18 additions and 12 deletions
include/linux/mm.h
@@ -2933,7 +2933,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 				 * and return without waiting upon it */
 #define FOLL_NOFAULT	0x80	/* do not fault in pages */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
-#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
@@ -3054,6 +3053,21 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
 	return !PageAnonExclusive(page);
 }
 
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(unsigned int flags)
+{
+	/*
+	 * FOLL_FORCE has to be able to make progress even if the VMA is
+	 * inaccessible. Further, FOLL_FORCE access usually does not represent
+	 * application behaviour and we should avoid triggering NUMA hinting
+	 * faults.
+	 */
+	return flags & FOLL_FORCE;
+}
+
 typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
 			       unsigned long size, pte_fn_t fn, void *data);
mm/gup.c
@@ -561,7 +561,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto retry;
 	}
-	if ((flags & FOLL_NUMA) && pte_protnone(pte))
+	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
 		goto no_page;
 
 	page = vm_normal_page(vma, address, pte);
@@ -714,7 +714,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (likely(!pmd_trans_huge(pmdval)))
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
-	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
+	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
 		return no_page_table(vma, flags);
 
 retry_locked:
@@ -1160,14 +1160,6 @@ static long __get_user_pages(struct mm_struct *mm,
 
 	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
 
-	/*
-	 * If FOLL_FORCE is set then do not force a full fault as the hinting
-	 * fault information is unrelated to the reference behaviour of a task
-	 * using the address space
-	 */
-	if (!(gup_flags & FOLL_FORCE))
-		gup_flags |= FOLL_NUMA;
-
 	do {
 		struct page *page;
 		unsigned int foll_flags = gup_flags;

mm/huge_memory.c
@@ -1447,7 +1447,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		return ERR_PTR(-EFAULT);
 
 	/* Full NUMA hinting faults to serialise migration in fault paths */
-	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
+	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
 		return NULL;
 
 	if (!pmd_write(*pmd) && gup_must_unshare(flags, page))