forked from mirrors/linux
		
	mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters
This helps to display the fragmentation situation of the swapfile, knowing the proportion of how much we haven't split large folios. So far, we only support non-split swapout for anon memory, with the possibility of expanding to shmem in the future. So, we add the "anon" prefix to the counter names. Link: https://lkml.kernel.org/r/20240412114858.407208-3-21cnbao@gmail.com Signed-off-by: Barry Song <v-songbaohua@oppo.com> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: Chris Li <chrisl@kernel.org> Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com> Cc: Kairui Song <kasong@tencent.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Peter Xu <peterx@redhat.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Yosry Ahmed <yosryahmed@google.com> Cc: Yu Zhao <yuzhao@google.com> Cc: Jonathan Corbet <corbet@lwn.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:

parent: ec33687c67
commit: d0f048ac39

4 changed files with 10 additions and 0 deletions
			
@@ -268,6 +268,8 @@ enum mthp_stat_item {
 	MTHP_STAT_ANON_FAULT_ALLOC,
 	MTHP_STAT_ANON_FAULT_FALLBACK,
 	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+	MTHP_STAT_ANON_SWPOUT,
+	MTHP_STAT_ANON_SWPOUT_FALLBACK,
 	__MTHP_STAT_COUNT
 };
| 
 | 
 | ||||||
|  |  | ||||||
@@ -555,11 +555,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);

 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
 	&anon_fault_fallback_attr.attr,
 	&anon_fault_fallback_charge_attr.attr,
+	&anon_swpout_attr.attr,
+	&anon_swpout_fallback_attr.attr,
 	NULL,
 };
| 
 | 
 | ||||||
|  |  | ||||||
@@ -217,6 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
 		count_memcg_folio_events(folio, THP_SWPOUT, 1);
 		count_vm_event(THP_SWPOUT);
 	}
+	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
 #endif
 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
|  |  | ||||||
@@ -1214,6 +1214,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 						goto activate_locked;
 				}
 				if (!add_to_swap(folio)) {
+					int __maybe_unused order = folio_order(folio);
+
 					if (!folio_test_large(folio))
 						goto activate_locked_split;
 					/* Fallback to swap normal pages */
@@ -1225,6 +1227,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 							THP_SWPOUT_FALLBACK, 1);
 						count_vm_event(THP_SWPOUT_FALLBACK);
 					}
+					count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 #endif
 					if (!add_to_swap(folio))
 						goto activate_locked_split;
|  |  | ||||||
Author: Barry Song