	mm/memory: page_remove_rmap() -> folio_remove_rmap_pte()
Let's convert zap_pte_range() and closely-related tlb_flush_rmap_batch().
While at it, perform some more folio conversion in zap_pte_range().

Link: https://lkml.kernel.org/r/20231220224504.646757-29-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 18e8612e56
commit c46265030b

2 changed files with 14 additions and 11 deletions:

 mm/memory.c     | 23 +++++++++++++----------
 mm/mmu_gather.c |  2 +-
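The conversion follows one pattern throughout: resolve the folio from the page once, then call the folio-based PTE helper, passing both the folio and the still-relevant subpage. Below is a minimal userspace sketch of that call-shape change; the demo_* types and helpers are illustrative stand-ins, not the kernel's struct page / struct folio API.

/*
 * Minimal userspace sketch of the conversion pattern, NOT kernel code:
 * demo_page/demo_folio stand in for struct page / struct folio.
 */
#include <stdio.h>

struct demo_folio { int mapcount; };              /* stands in for struct folio */
struct demo_page  { struct demo_folio *folio; };  /* stands in for struct page  */

/* stands in for page_folio(): resolve a (sub)page to its folio */
static struct demo_folio *demo_page_folio(struct demo_page *page)
{
	return page->folio;
}

/* old shape: page_remove_rmap(page, vma, false) derived the folio itself */
static void demo_page_remove_rmap(struct demo_page *page)
{
	demo_page_folio(page)->mapcount--;
}

/* new shape: folio_remove_rmap_pte(folio, page, vma) takes the folio the
 * caller already resolved, so it is not re-derived on every call */
static void demo_folio_remove_rmap_pte(struct demo_folio *folio,
				       struct demo_page *page)
{
	(void)page;	/* the PTE-mapped subpage is still passed along */
	folio->mapcount--;
}

int main(void)
{
	struct demo_folio folio = { .mapcount = 2 };
	struct demo_page page = { .folio = &folio };

	demo_page_remove_rmap(&page);                              /* old call shape */
	demo_folio_remove_rmap_pte(demo_page_folio(&page), &page); /* new call shape */
	printf("mapcount after both unmaps: %d\n", folio.mapcount);
	return 0;
}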
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1434,6 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = ptep_get(pte);
+		struct folio *folio;
 		struct page *page;
 
 		if (pte_none(ptent))
@@ -1459,21 +1460,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				continue;
 			}
 
+			folio = page_folio(page);
 			delay_rmap = 0;
-			if (!PageAnon(page)) {
+			if (!folio_test_anon(folio)) {
 				if (pte_dirty(ptent)) {
-					set_page_dirty(page);
+					folio_set_dirty(folio);
 					if (tlb_delay_rmap(tlb)) {
 						delay_rmap = 1;
 						force_flush = 1;
 					}
 				}
 				if (pte_young(ptent) && likely(vma_has_recency(vma)))
-					mark_page_accessed(page);
+					folio_mark_accessed(folio);
 			}
 			rss[mm_counter(page)]--;
 			if (!delay_rmap) {
-				page_remove_rmap(page, vma, false);
+				folio_remove_rmap_pte(folio, page, vma);
 				if (unlikely(page_mapcount(page) < 0))
 					print_bad_pte(vma, addr, ptent, page);
 			}
@@ -1489,6 +1491,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		if (is_device_private_entry(entry) ||
 		    is_device_exclusive_entry(entry)) {
 			page = pfn_swap_entry_to_page(entry);
+			folio = page_folio(page);
 			if (unlikely(!should_zap_page(details, page)))
 				continue;
 			/*
@@ -1500,8 +1503,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			WARN_ON_ONCE(!vma_is_anonymous(vma));
 			rss[mm_counter(page)]--;
 			if (is_device_private_entry(entry))
-				page_remove_rmap(page, vma, false);
-			put_page(page);
+				folio_remove_rmap_pte(folio, page, vma);
+			folio_put(folio);
 		} else if (!non_swap_entry(entry)) {
 			/* Genuine swap entry, hence a private anon page */
 			if (!should_zap_cows(details))
@@ -3220,10 +3223,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * threads.
 			 *
 			 * The critical issue is to order this
-			 * page_remove_rmap with the ptp_clear_flush above.
-			 * Those stores are ordered by (if nothing else,)
+			 * folio_remove_rmap_pte() with the ptp_clear_flush
+			 * above. Those stores are ordered by (if nothing else,)
 			 * the barrier present in the atomic_add_negative
-			 * in page_remove_rmap.
+			 * in folio_remove_rmap_pte();
 			 *
 			 * Then the TLB flush in ptep_clear_flush ensures that
 			 * no process can access the old page before the
@@ -3232,7 +3235,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * mapcount is visible. So transitively, TLBs to
 			 * old page will be flushed before it can be reused.
 			 */
-			page_remove_rmap(vmf->page, vma, false);
+			folio_remove_rmap_pte(old_folio, vmf->page, vma);
 		}
 
 		/* Free the old page.. */
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -55,7 +55,7 @@ static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_
 
 		if (encoded_page_flags(enc)) {
 			struct page *page = encoded_page_ptr(enc);
-			page_remove_rmap(page, vma, false);
+			folio_remove_rmap_pte(page_folio(page), page, vma);
 		}
 	}
 }
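tlb_flush_rmap_batch() consumes encoded pages, where a flag travels in the low bits of an aligned page pointer and encoded_page_ptr()/encoded_page_flags() split the two apart again. A small sketch of that low-bit tagging idea; the demo_* helpers below are hypothetical stand-ins, not the kernel's encoded-page API:

/*
 * Minimal sketch of low-bit pointer tagging, NOT kernel code: aligned
 * pointers have zero bottom bits, so a flag can ride there.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FLAG_DELAY_RMAP 0x1UL

struct demo_page { int refs; };

/* pack a flag into the free low bit of an aligned pointer */
static void *demo_encode(struct demo_page *page, unsigned long flags)
{
	return (void *)((uintptr_t)page | flags);
}

/* mirrors encoded_page_ptr(): strip the flag bit to recover the pointer */
static struct demo_page *demo_ptr(void *enc)
{
	return (struct demo_page *)((uintptr_t)enc &
				    ~(uintptr_t)DEMO_FLAG_DELAY_RMAP);
}

/* mirrors encoded_page_flags(): extract just the flag bit */
static unsigned long demo_flags(void *enc)
{
	return (uintptr_t)enc & DEMO_FLAG_DELAY_RMAP;
}

int main(void)
{
	static struct demo_page page;	/* aligned, so the low bit is free */
	void *enc = demo_encode(&page, DEMO_FLAG_DELAY_RMAP);

	if (demo_flags(enc))
		printf("delayed rmap for page %p\n", (void *)demo_ptr(enc));
	return 0;
}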