mm: free folios in a batch in shrink_folio_list()

Use free_unref_page_batch() to free the folios.  This may increase the
number of IPIs from calling try_to_unmap_flush() more often, but that's
going to be very workload-dependent.  It may even reduce the number of
IPIs as we now batch-free large folios instead of freeing them one at a
time.

Link: https://lkml.kernel.org/r/20240227174254.710559-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
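The diff below replaces an unbounded free list with the kernel's fixed-size
folio_batch: folio_batch_add() returns the number of slots still free, so a
return of 0 means the batch is full and must be drained before continuing.
As a minimal, self-contained userspace sketch of the same fill-and-flush
idiom (struct batch, batch_add() and batch_flush() are hypothetical
stand-ins for illustration, not kernel API):

#include <stdio.h>

#define BATCH_SIZE 31			/* arbitrary small capacity; folio_batch is similarly small */

struct batch {				/* hypothetical stand-in for struct folio_batch */
	unsigned int nr;
	int items[BATCH_SIZE];
};

/* Mirrors folio_batch_add()'s contract: returns slots still free; 0 == full. */
static unsigned int batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

static void batch_flush(struct batch *b)
{
	/*
	 * One expensive operation amortized over the whole batch, standing in
	 * for mem_cgroup_uncharge_folios() + try_to_unmap_flush() +
	 * free_unref_folios() in the real code.
	 */
	printf("flushing %u items\n", b->nr);
	b->nr = 0;
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int i = 0; i < 100; i++)
		if (batch_add(&b, i) == 0)
			batch_flush(&b);
	if (b.nr)			/* drain the final, partially full batch */
		batch_flush(&b);
	return 0;
}

This is also why the diff keeps a drain after the loop: the last batch is
usually only partially full.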
parent f77171d241
commit bc2ff4cbc3

1 changed file with 9 additions and 11 deletions

 mm/vmscan.c | 20 +++++++++-----------
@@ -1006,14 +1006,15 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		struct pglist_data *pgdat, struct scan_control *sc,
 		struct reclaim_stat *stat, bool ignore_references)
 {
+	struct folio_batch free_folios;
 	LIST_HEAD(ret_folios);
-	LIST_HEAD(free_folios);
 	LIST_HEAD(demote_folios);
 	unsigned int nr_reclaimed = 0;
 	unsigned int pgactivate = 0;
 	bool do_demote_pass;
 	struct swap_iocb *plug = NULL;
 
+	folio_batch_init(&free_folios);
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 	do_demote_pass = can_demote(pgdat->node_id, sc);
@@ -1412,14 +1413,11 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		 */
 		nr_reclaimed += nr_pages;
 
-		/*
-		 * Is there need to periodically free_folio_list? It would
-		 * appear not as the counts should be low
-		 */
-		if (unlikely(folio_test_large(folio)))
-			destroy_large_folio(folio);
-		else
-			list_add(&folio->lru, &free_folios);
+		if (folio_batch_add(&free_folios, folio) == 0) {
+			mem_cgroup_uncharge_folios(&free_folios);
+			try_to_unmap_flush();
+			free_unref_folios(&free_folios);
+		}
 		continue;
 
 activate_locked_split:
@@ -1483,9 +1481,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 
 	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
 
-	mem_cgroup_uncharge_list(&free_folios);
+	mem_cgroup_uncharge_folios(&free_folios);
 	try_to_unmap_flush();
-	free_unref_page_list(&free_folios);
+	free_unref_folios(&free_folios);
 
 	list_splice(&ret_folios, folio_list);
 	count_vm_events(PGACTIVATE, pgactivate);