	mm: convert do_set_pte() to set_pte_range()
set_pte_range() allows the caller to set up page table entries for a specific range. It takes advantage of batched rmap updates for large folios. It now takes care of calling update_mmu_cache_range().

Link: https://lkml.kernel.org/r/20230802151406.3735276-37-willy@infradead.org
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
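In caller terms, the conversion shown in the hunks below amounts to the following (a minimal sketch distilled from the diff itself; vma, folio, page, nr and addr name the same objects as in the fault-handling code):

	/* Before: one PTE at a time, plus a separate MMU-cache update. */
	do_set_pte(vmf, page, addr);
	update_mmu_cache(vma, addr, vmf->pte);

	/* After: one call installs nr consecutive PTEs backed by one folio,
	 * batches the rmap and counter updates, and performs the
	 * update_mmu_cache_range() call itself. */
	set_pte_range(vmf, folio, page, nr, addr);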
This commit is contained in:
parent 86f35f69db
commit 3bd786f76d
4 changed files with 28 additions and 17 deletions
Documentation/filesystems/locking.rst
@@ -661,7 +661,7 @@ locked. The VM will unlock the page.
 Filesystem should find and map pages associated with offsets from "start_pgoff"
 till "end_pgoff". ->map_pages() is called with the RCU lock held and must
 not block.  If it's not possible to reach a page without blocking,
-filesystem should skip it. Filesystem should use do_set_pte() to setup
+filesystem should skip it. Filesystem should use set_pte_range() to setup
 page table entry. Pointer to entry associated with the page is passed in
 "pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
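The relative-pointer contract described above, in code form (a sketch, not part of this patch; pgoff stands for the hypothetical offset being mapped):

	/* vmf->pte is the page table entry for the faulting offset
	 * vmf->pgoff; the entry for another offset in the
	 * [start_pgoff, end_pgoff] window lies at a fixed distance: */
	pte_t *pte = vmf->pte + (pgoff - vmf->pgoff);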
include/linux/mm.h
@@ -1322,7 +1322,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr);
 
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
mm/filemap.c
@@ -3501,8 +3501,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			ret = VM_FAULT_NOPAGE;
 
 		ref_count++;
-		do_set_pte(vmf, page, addr);
-		update_mmu_cache(vma, addr, vmf->pte);
+		set_pte_range(vmf, folio, page, 1, addr);
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
 
 	/* Restore the vmf->pte */
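Note the loop above still passes nr = 1 per page; the new interface is what makes batching possible. A caller that has already validated nr consecutive pages of one folio could (hypothetically, beyond what this patch does) set them in a single call:

	/* Hypothetical batched call: install nr PTEs for nr consecutive
	 * pages of the folio starting at index "start", then step past them. */
	set_pte_range(vmf, folio, folio_page(folio, start), nr, addr);
	vmf->pte += nr;
	addr += nr * PAGE_SIZE;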
mm/memory.c
@@ -4330,15 +4330,24 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+/**
+ * set_pte_range - Set a range of PTEs to point to pages in a folio.
+ * @vmf: Fault description.
+ * @folio: The folio that contains @page.
+ * @page: The first page to create a PTE for.
+ * @nr: The number of PTEs to create.
+ * @addr: The first address to create a PTE for.
+ */
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool prefault = vmf->address != addr;
+	bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
 
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
 
 	if (prefault && arch_wants_old_prefaulted_pte())
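The negated in_range() above is the intended sense: prefault must be true when the entry being installed is for an address other than the one that faulted, so it may be mapped old on architectures that prefer that. For nr == 1 it reduces to the old test:

	/* Both addresses are page-aligned, so with nr == 1:
	 *	!in_range(vmf->address, addr, PAGE_SIZE)
	 * is equivalent to the old
	 *	vmf->address != addr
	 */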
@@ -4352,14 +4361,18 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		VM_BUG_ON_FOLIO(nr != 1, folio);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+		folio_add_file_rmap_range(folio, page, nr, vma, false);
 	}
-	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4427,11 +4440,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
-		do_set_pte(vmf, page, vmf->address);
-
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
+		struct folio *folio = page_folio(page);
 
+		set_pte_range(vmf, folio, page, 1, vmf->address);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
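Under the hood, set_ptes() (added earlier in this series) writes the entries. A simplified sketch of the generic fallback in include/linux/pgtable.h; the in-tree helper also runs page_table_check and the arch lazy-MMU hooks, and PFN_PTE_SHIFT is arch-defined:

	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		for (;;) {
			set_pte(ptep, pte);	/* write one entry */
			if (--nr == 0)
				break;
			ptep++;
			/* advance the PFN encoded in the PTE by one page */
			pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
		}
	}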