mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00
	mm: numa: Support NUMA hinting page faults from gup/gup_fast
Introduce FOLL_NUMA to tell follow_page to check pte/pmd_numa. get_user_pages
must use FOLL_NUMA, and it's safe to do so because it always invokes
handle_mm_fault and retries the follow_page later.

KVM secondary MMU page faults will trigger the NUMA hinting page faults
through gup_fast -> get_user_pages -> follow_page -> handle_mm_fault.

Other follow_page callers like KSM should not use FOLL_NUMA, or they would
fail to get the pages if they use follow_page instead of get_user_pages.

[ This patch was picked up from the AutoNUMA tree. ]

Originally-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
[ ported to this tree. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
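The message above describes a lookup/fault/retry cycle. As a rough,
self-contained illustration (plain userspace C, not kernel code:
fake_pte, fake_follow_page, fake_hinting_fault and the PTE_* bits are
all invented for this sketch), the toy below models how a
get_user_pages-style caller makes progress: the lookup refuses a
NUMA-marked entry, the "fault" clears the mark, and the retry succeeds.

#include <stdio.h>
#include <stdbool.h>

#define PTE_PRESENT 0x1
#define PTE_NUMA    0x2   /* stand-in for _PAGE_NUMA */

#define FOLL_NUMA   0x200 /* same bit value the patch defines */

struct fake_pte { unsigned long flags; unsigned long pfn; };

/* follow_page()-like lookup: refuse NUMA-marked entries when asked to. */
static bool fake_follow_page(struct fake_pte *pte, unsigned int flags,
			     unsigned long *pfn)
{
	if (!(pte->flags & PTE_PRESENT))
		return false;
	if ((flags & FOLL_NUMA) && (pte->flags & PTE_NUMA))
		return false;            /* force the caller to fault */
	*pfn = pte->pfn;
	return true;
}

/* handle_mm_fault()-like step: record the access and clear the mark. */
static void fake_hinting_fault(struct fake_pte *pte)
{
	printf("NUMA hinting fault taken, access recorded\n");
	pte->flags &= ~PTE_NUMA;
}

int main(void)
{
	struct fake_pte pte = { PTE_PRESENT | PTE_NUMA, 42 };
	unsigned long pfn;

	/* get_user_pages()-style loop: lookup, fault, retry. */
	while (!fake_follow_page(&pte, FOLL_NUMA, &pfn))
		fake_hinting_fault(&pte);

	printf("got pfn %lu\n", pfn);
	return 0;
}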
commit 0b9d705297
parent be3a728427

2 changed files with 18 additions and 0 deletions
include/linux/mm.h (1 addition)

@@ -1572,6 +1572,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_MLOCK	0x40	/* mark page as mlocked */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
+#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
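FOLL_NUMA simply takes the next free bit after FOLL_HWPOISON, so it
composes with the existing gup flags. A trivial standalone check, with
the values copied from the hunk above (nothing here is kernel API):

#include <assert.h>

#define FOLL_MLOCK	0x40
#define FOLL_SPLIT	0x80
#define FOLL_HWPOISON	0x100
#define FOLL_NUMA	0x200	/* new: next power of two, no overlap */

int main(void)
{
	unsigned int flags = FOLL_SPLIT | FOLL_NUMA;  /* flags combine with | */

	assert(flags & FOLL_NUMA);                    /* ...and test with & */
	assert(!(flags & FOLL_HWPOISON));             /* other bits unaffected */
	assert((FOLL_NUMA & (FOLL_MLOCK | FOLL_SPLIT | FOLL_HWPOISON)) == 0);
	return 0;
}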
				
mm/memory.c (17 additions)
@@ -1517,6 +1517,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
 		goto out;
 	}
+	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+		goto no_page_table;
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
 			split_huge_page_pmd(mm, pmd);
@@ -1546,6 +1548,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
+	if ((flags & FOLL_NUMA) && pte_numa(pte))
+		goto no_page;
 	if ((flags & FOLL_WRITE) && !pte_write(pte))
 		goto unlock;
 
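Both hunks above bounce NUMA-marked entries back through the no-page
path, which is only safe for callers that respond by faulting, as
get_user_pages does; a follow_page user like KSM must not pass FOLL_NUMA
or the lookup would simply fail. A standalone toy contrast
(fake_follow_page, fake_page and the PTE_* bits are invented names, not
kernel API):

#include <assert.h>
#include <stddef.h>

#define PTE_PRESENT 0x1
#define PTE_NUMA    0x2	/* stand-in for _PAGE_NUMA */
#define FOLL_NUMA   0x200

struct fake_page { int id; };
static struct fake_page the_page = { 1 };

static struct fake_page *fake_follow_page(unsigned long pte_flags,
					  unsigned int foll_flags)
{
	if (!(pte_flags & PTE_PRESENT))
		return NULL;
	/* mirrors: if ((flags & FOLL_NUMA) && pte_numa(pte)) goto no_page; */
	if ((foll_flags & FOLL_NUMA) && (pte_flags & PTE_NUMA))
		return NULL;
	return &the_page;
}

int main(void)
{
	unsigned long pte = PTE_PRESENT | PTE_NUMA;

	/* gup-style caller: gets NULL, so it would fault and retry */
	assert(fake_follow_page(pte, FOLL_NUMA) == NULL);

	/* KSM-style caller without FOLL_NUMA: still gets the page */
	assert(fake_follow_page(pte, 0) == &the_page);
	return 0;
}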
@@ -1697,6 +1701,19 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= (gup_flags & FOLL_FORCE) ?
 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+	/*
+	 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+	 * would be called on PROT_NONE ranges. We must never invoke
+	 * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+	 * page faults would unprotect the PROT_NONE ranges if
+	 * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+	 * bitflag. So to avoid that, don't set FOLL_NUMA if
+	 * FOLL_FORCE is set.
+	 */
+	if (!(gup_flags & FOLL_FORCE))
+		gup_flags |= FOLL_NUMA;
+
 	i = 0;
 
 	do {
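The comment in the hunk explains why FOLL_FORCE and FOLL_NUMA must stay
mutually exclusive when _PAGE_NUMA aliases _PAGE_PROTNONE. A minimal
userspace restatement of that rule (FOLL_FORCE's 0x10 value is taken
from the same era's include/linux/mm.h; apply_numa_rule is a
hypothetical helper, not a kernel function):

#include <assert.h>

#define FOLL_FORCE	0x10
#define FOLL_NUMA	0x200

static unsigned int apply_numa_rule(unsigned int gup_flags)
{
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;
	return gup_flags;
}

int main(void)
{
	/* normal gup: hinting faults enabled */
	assert(apply_numa_rule(0) & FOLL_NUMA);

	/* forced gup (e.g. ptrace): FOLL_NUMA must stay clear, or a
	 * hinting fault could unprotect PROT_NONE ranges when
	 * _PAGE_NUMA and _PAGE_PROTNONE share a bit */
	assert(!(apply_numa_rule(FOLL_FORCE) & FOLL_NUMA));
	return 0;
}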