	mm/memory: page_add_file_rmap() -> folio_add_file_rmap_[pte|pmd]()
Let's convert insert_page_into_pte_locked() and do_set_pmd().  While at
it, perform some folio conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-9-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit ef37b2ea08 (parent 68f0320824)
1 changed file with 8 additions and 6 deletions
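For reference, here is the conversion pattern this patch applies; the
folio_add_file_rmap_[pte|pmd]() helpers were introduced by the parent
commit in this series, and the sketch below only restates the
before/after visible in the diff:

	/* Before: page-based reference and rmap accounting */
	get_page(page);
	page_add_file_rmap(page, vma, false);	/* false == PTE-mapped */

	/* After: operate on the folio that contains @page */
	struct folio *folio = page_folio(page);
	folio_get(folio);
	folio_add_file_rmap_pte(folio, page, vma);

The PMD-mapped case in do_set_pmd() is analogous:
page_add_file_rmap(page, vma, true) becomes
folio_add_file_rmap_pmd(folio, page, vma).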
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1859,12 +1859,14 @@ static int validate_page_before_insert(struct page *page)
 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
+	struct folio *folio = page_folio(page);
+
 	if (!pte_none(ptep_get(pte)))
 		return -EBUSY;
 	/* Ok, finally just insert the thing.. */
-	get_page(page);
+	folio_get(folio);
 	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-	page_add_file_rmap(page, vma, false);
+	folio_add_file_rmap_pte(folio, page, vma);
 	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
 	return 0;
 }
@@ -4410,6 +4412,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = vmf->vma;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
@@ -4419,8 +4422,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
 		return ret;
 
-	page = compound_head(page);
-	if (compound_order(page) != HPAGE_PMD_ORDER)
+	if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
 		return ret;
 
 	/*
@@ -4429,7 +4431,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	 * check.  This kind of THP just can be PTE mapped.  Access to
 	 * the corrupted subpage should trigger SIGBUS as expected.
 	 */
-	if (unlikely(PageHasHWPoisoned(page)))
+	if (unlikely(folio_test_has_hwpoisoned(folio)))
 		return ret;
 
 	/*
@@ -4453,7 +4455,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
-	page_add_file_rmap(page, vma, true);
+	folio_add_file_rmap_pmd(folio, page, vma);
 
 	/*
 	 * deposit and withdraw with pmd lock held
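One detail worth noting in the do_set_pmd() hunk: the old code
normalized its argument with compound_head() before checking the order,
whereas the folio version requires the caller to pass the head page and
bails out otherwise:

	/* Old: silently normalize to the head page */
	page = compound_head(page);
	if (compound_order(page) != HPAGE_PMD_ORDER)
		return ret;

	/* New: a tail page of a PMD-order folio no longer qualifies */
	if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
		return ret;

Whether a tail page can actually reach this path depends on the
callers; the new check is conservative either way.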