Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	mm,hugetlb: rename anon_rmap to new_anon_folio and make it boolean
anon_rmap is used to determine whether the newly allocated folio is
anonymous.  Rename it to something more meaningful like new_anon_folio
and make it boolean, as that is how we use it.

While we are at it, drop 'new_pagecache_folio', as 'new_anon_folio' is
enough to check whether we need to restore the consumed reservation.

Link: https://lkml.kernel.org/r/20250627102904.107202-4-osalvador@suse.de
Link: https://lkml.kernel.org/r/20250630144212.156938-4-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Gavin Guo <gavinguo@igalia.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
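For context on the reservation check in the diff below: on the backout path, a newly allocated folio that was never inserted into the page cache can only be anonymous, which is why the single boolean suffices. The small userspace sketch below is not part of the patch; it merely enumerates the reachable states, using a hypothetical 'shared' flag in place of vma->vm_flags & VM_MAYSHARE, to show that the old 'new_folio && !new_pagecache_folio' test and the new 'new_anon_folio' test agree.

	/* Standalone illustration only; not kernel code. */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		for (int new_folio = 0; new_folio <= 1; new_folio++) {
			for (int shared = 0; shared <= 1; shared++) {
				/*
				 * Invariant taken from hugetlb_no_page(): a new
				 * folio for a shared mapping only reaches the
				 * backout labels after it was added to the page
				 * cache; a new folio for a private mapping is
				 * mapped anonymously instead.
				 */
				bool new_pagecache_folio = new_folio && shared;
				bool new_anon_folio = new_folio && !shared;

				/* Old and new restore conditions agree. */
				assert((new_folio && !new_pagecache_folio) ==
				       new_anon_folio);
			}
		}
		printf("old and new reservation-restore checks are equivalent\n");
		return 0;
	}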
parent 9293fb4765
commit d531fd2ccf

1 changed file with 10 additions and 11 deletions

 mm/hugetlb.c | 21 ++++++++++-----------
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6406,17 +6406,16 @@ static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned
 static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 			struct vm_fault *vmf)
 {
+	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
+	bool new_folio, new_anon_folio = false;
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	struct hstate *h = hstate_vma(vma);
 	vm_fault_t ret = VM_FAULT_SIGBUS;
-	int anon_rmap = 0;
-	unsigned long size;
-	struct folio *folio;
-	pte_t new_pte;
-	bool new_folio, new_pagecache_folio = false;
-	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
 	bool folio_locked = true;
+	struct folio *folio;
+	unsigned long size;
+	pte_t new_pte;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -6515,10 +6514,9 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 				ret = VM_FAULT_SIGBUS;
 				goto out;
 			}
-			new_pagecache_folio = true;
 		} else {
+			new_anon_folio = true;
 			folio_lock(folio);
-			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -6567,7 +6565,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
 		goto backout;
 
-	if (anon_rmap)
+	if (new_anon_folio)
 		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
 	else
 		hugetlb_add_file_rmap(folio);
@@ -6586,7 +6584,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 		 * No need to keep file folios locked. See comment in
 		 * hugetlb_fault().
 		 */
-		if (!anon_rmap) {
+		if (!new_anon_folio) {
 			folio_locked = false;
 			folio_unlock(folio);
 		}
@@ -6622,7 +6620,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 backout:
 	spin_unlock(vmf->ptl);
 backout_unlocked:
-	if (new_folio && !new_pagecache_folio)
+	/* We only need to restore reservations for private mappings */
+	if (new_anon_folio)
 		restore_reserve_on_error(h, vma, vmf->address, folio);
 
 	folio_unlock(folio);