	mm/smaps: simplify shmem handling of pte holes
Firstly, the check_shmem_swap variable is not actually necessary, because it is only ever set together with the pte_hole hook; checking walk->ops->pte_hole directly works just as well. Meanwhile, the check within smaps_pte_entry() is not easy to follow. E.g., the pte_none() check is not needed, as "!pte_present && !is_swap_pte" already implies it. While at it, use the pte_hole() helper rather than duplicating the page cache lookup.

Still keep the CONFIG_SHMEM part so the code can be optimized to a nop for !SHMEM.

There will be a very slight functional change in smaps_pte_entry(): for !SHMEM we will now return early for pte_none (before checking page == NULL), but that is even nicer.

Link: https://lkml.kernel.org/r/20210917164756.8586-4-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
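The equivalence the first paragraph relies on is visible in smap_gather_stats() (the last two hunks below). As a condensed sketch of the pre-patch setup, simplified rather than quoted verbatim:

	/* smap_gather_stats(), before this patch (condensed sketch) */
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss->swap += shmem_swapped;	/* fast path, no walk */
		} else {
			mss->check_shmem_swap = true;	/* the flag and the
							 * ->pte_hole hook were
							 * always set together */
			ops = &smaps_shmem_walk_ops;
		}
	}

check_shmem_swap was only ever true when smaps_shmem_walk_ops, the sole walk_ops here carrying a ->pte_hole hook, was installed, so testing walk->ops->pte_hole during the walk conveys the same information and the flag can be dropped.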
This commit is contained in:

parent 02399c8802
commit 2301003215

1 changed file with 12 additions and 10 deletions
fs/proc/task_mmu.c

@@ -397,7 +397,6 @@ struct mem_size_stats {
 	u64 pss_shmem;
 	u64 pss_locked;
 	u64 swap_pss;
-	bool check_shmem_swap;
 };
 
 static void smaps_page_accumulate(struct mem_size_stats *mss,
@@ -490,6 +489,16 @@ static int smaps_pte_hole(unsigned long addr, unsigned long end,
 #define smaps_pte_hole		NULL
 #endif /* CONFIG_SHMEM */
 
+static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
+{
+#ifdef CONFIG_SHMEM
+	if (walk->ops->pte_hole) {
+		/* depth is not used */
+		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
+	}
+#endif
+}
+
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 		struct mm_walk *walk)
 {
@@ -518,12 +527,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 			}
 		} else if (is_pfn_swap_entry(swpent))
 			page = pfn_swap_entry_to_page(swpent);
-	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
-							&& pte_none(*pte))) {
-		page = xa_load(&vma->vm_file->f_mapping->i_pages,
-						linear_page_index(vma, addr));
-		if (xa_is_value(page))
-			mss->swap += PAGE_SIZE;
+	} else {
+		smaps_pte_hole_lookup(addr, walk);
 		return;
 	}
 
@@ -737,8 +742,6 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 		return;
 
 #ifdef CONFIG_SHMEM
-	/* In case of smaps_rollup, reset the value from previous vma */
-	mss->check_shmem_swap = false;
 	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
 		/*
 		 * For shared or readonly shmem mappings we know that all
@@ -756,7 +759,6 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 					!(vma->vm_flags & VM_WRITE))) {
 			mss->swap += shmem_swapped;
 		} else {
-			mss->check_shmem_swap = true;
 			ops = &smaps_shmem_walk_ops;
 		}
 	}
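The accounting this commit preserves can be observed from userspace. Below is a minimal demonstration sketch, not part of the commit (the name "smaps-demo" and the sizes are arbitrary): it creates a shmem-backed mapping with memfd_create(), populates it, and prints the Rss:/Swap: lines from /proc/self/smaps. Whether Swap: for a shmem VMA comes from the shmem_swap_usage() fast path or from the pte walk simplified here depends on the mapping type, as the comment in smap_gather_stats() explains.

	/* shmem_smaps.c; build with: gcc -O2 -o shmem_smaps shmem_smaps.c */
	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		const size_t len = 16 * 4096;
		char line[256];
		FILE *f;

		/* memfd_create() gives a shmem-backed file descriptor */
		int fd = memfd_create("smaps-demo", 0);
		if (fd < 0 || ftruncate(fd, len) < 0)
			return 1;

		/*
		 * A shared mapping takes the fast path in smap_gather_stats();
		 * a private writable one is what forces the pte-by-pte walk
		 * (smaps_shmem_walk_ops) that this commit touches.
		 */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		memset(p, 0x5a, len);	/* populate the shmem pages */

		/*
		 * Crude filter: print our VMA's header line plus every
		 * Rss:/Swap: line; a real tool would track which VMA each
		 * field belongs to.
		 */
		f = fopen("/proc/self/smaps", "r");
		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "memfd:smaps-demo") ||
			    !strncmp(line, "Rss:", 4) ||
			    !strncmp(line, "Swap:", 5))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}

Swap: stays at 0 until the shmem pages are actually swapped out (e.g. under memory pressure with a swap device configured); Rss: reflects the populated pages immediately.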