mirror of https://github.com/torvalds/linux.git

ksm: add ksm zero pages for each process
As the number of KSM zero pages is not included in ksm_merging_pages per process when use_zero_pages is enabled, it is unclear how many actual pages are merged by KSM. To let users accurately estimate their memory demands when unsharing KSM zero pages, it is necessary to show the number of KSM zero pages per process. In addition, it helps users to know the actual KSM profit, because KSM-placed zero pages also benefit from KSM.

Since unsharing zero pages placed by KSM is now done accurately, tracking the merging and unmerging of empty pages is no longer difficult.

Since we already have /proc/<pid>/ksm_stat, just add the 'ksm_zero_pages' information to it.

Link: https://lkml.kernel.org/r/20230613030938.185993-1-yang.yang29@zte.com.cn
Signed-off-by: xu xin <xu.xin16@zte.com.cn>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Xiaokai Ran <ran.xiaokai@zte.com.cn>
Reviewed-by: Yang Yang <yang.yang29@zte.com.cn>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
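For illustration only, not part of this commit: a minimal userspace sketch that reads the new 'ksm_zero_pages' field from /proc/<pid>/ksm_stat and, as suggested above, estimates the memory a process would demand if its KSM-placed zero pages were unshared (ksm_zero_pages * page size). The file path and field name come from the fs/proc/base.c hunk below; the program itself is an assumption and requires a kernel with CONFIG_KSM and this change applied.

/* Hypothetical example reader, not kernel code. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *pid = argc > 1 ? argv[1] : "self";
	char path[64], key[64];
	long val, zero_pages = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/ksm_stat", pid);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* Each ksm_stat line is "<name> <value>"; pick out ksm_zero_pages. */
	while (fscanf(f, "%63s %ld", key, &val) == 2) {
		if (!strcmp(key, "ksm_zero_pages"))
			zero_pages = val;
	}
	fclose(f);

	printf("ksm_zero_pages: %ld (about %ld bytes if unshared)\n",
	       zero_pages, zero_pages * sysconf(_SC_PAGESIZE));
	return 0;
}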
This commit is contained in:
parent e2942062e0
commit 6080d19f07

6 changed files with 17 additions and 8 deletions
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
 	mm = get_task_mm(task);
 	if (mm) {
 		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
 		mmput(mm);
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -35,10 +35,12 @@ void __ksm_exit(struct mm_struct *mm);
 
 extern unsigned long ksm_zero_pages;
 
-static inline void ksm_might_unmap_zero_page(pte_t pte)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
-	if (is_ksm_zero_pte(pte))
+	if (is_ksm_zero_pte(pte)) {
 		ksm_zero_pages--;
+		mm->ksm_zero_pages--;
+	}
 }
 
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -109,7 +111,7 @@ static inline void ksm_exit(struct mm_struct *mm)
 {
 }
 
-static inline void ksm_might_unmap_zero_page(pte_t pte)
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -812,7 +812,7 @@ struct mm_struct {
 #ifdef CONFIG_KSM
 		/*
 		 * Represent how many pages of this process are involved in KSM
-		 * merging.
+		 * merging (not including ksm_zero_pages).
 		 */
 		unsigned long ksm_merging_pages;
 		/*
@@ -820,7 +820,12 @@ struct mm_struct {
 		 * including merged and not merged.
 		 */
 		unsigned long ksm_rmap_items;
-#endif
+		/*
+		 * Represent how many empty pages are merged with kernel zero
+		 * pages when enabling KSM use_zero_pages.
+		 */
+		unsigned long ksm_zero_pages;
+#endif /* CONFIG_KSM */
 #ifdef CONFIG_LRU_GEN
 		struct {
 			/* this mm_struct is on lru_gen_mm_list */
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -710,7 +710,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 				spin_lock(ptl);
 				ptep_clear(vma->vm_mm, address, _pte);
 				spin_unlock(ptl);
-				ksm_might_unmap_zero_page(pteval);
+				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
 			}
 		} else {
 			src_page = pte_page(pteval);
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1233,6 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 		 */
 		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
 		ksm_zero_pages++;
+		mm->ksm_zero_pages++;
 		/*
 		 * We're replacing an anonymous page with a zero page, which is
 		 * not anonymous. We need to do proper accounting otherwise we
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1434,7 +1434,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
 						      ptent);
 			if (unlikely(!page)) {
-				ksm_might_unmap_zero_page(ptent);
+				ksm_might_unmap_zero_page(mm, ptent);
 				continue;
 			}
 
@@ -3130,7 +3130,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else {
-			ksm_might_unmap_zero_page(vmf->orig_pte);
+			ksm_might_unmap_zero_page(mm, vmf->orig_pte);
 			inc_mm_counter(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));