Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)

vmscan: abort reclaim/compaction if compaction can proceed
If compaction can proceed, shrink_zones() stops doing any work, but its
callers still call shrink_slab(), which raises the priority and
potentially sleeps. This is unnecessary and wasteful, so this patch
aborts direct reclaim/compaction entirely if compaction can proceed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Josh Boyer <jwboyer@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
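For intuition, the caller-side effect of the change can be modeled outside the kernel. The following is a minimal, self-contained sketch, not kernel code: DEF_PRIORITY matches the kernel constant, but compaction_can_proceed(), shrink_zones_model() and the reclaim counts are invented stand-ins for the per-zone compaction_suitable()/compaction_deferred() checks the real shrink_zones() performs.

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12	/* same value as the kernel's reclaim priority ceiling */

/* Invented stand-in: in the kernel this is decided per zone inside
 * shrink_zones() via compaction_suitable()/compaction_deferred(). */
static bool compaction_can_proceed(int priority)
{
	return priority <= 9;	/* pretend a few passes free enough memory */
}

/* Simplified model of shrink_zones(): reclaim a little, then report
 * whether the whole reclaim/compaction pass should be aborted. */
static bool shrink_zones_model(int priority, unsigned long *nr_reclaimed)
{
	*nr_reclaimed += 1;
	return compaction_can_proceed(priority);
}

static unsigned long do_try_to_free_pages_model(void)
{
	unsigned long nr_reclaimed = 0;
	int priority;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		/*
		 * After the patch: break out as soon as shrink_zones()
		 * says compaction can proceed. Before it, the loop fell
		 * through to shrink_slab(), raised the priority and
		 * potentially slept, doing work nobody needed.
		 */
		if (shrink_zones_model(priority, &nr_reclaimed))
			break;

		/* shrink_slab() and writeback throttling would run here. */
	}
	return nr_reclaimed;
}

int main(void)
{
	printf("reclaimed (model units): %lu\n", do_try_to_free_pages_model());
	return 0;
}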
parent e0887c19b2
commit e0c23279c9

1 changed file with 21 additions and 11 deletions
		
							
								
								
									
mm/vmscan.c | 32 +++++++++++++++++++++-----------

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2103,14 +2103,19 @@ static void shrink_zone(int priority, struct zone *zone,
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is either ready to begin or deferred.
+ * This indicates to the caller that it should retry the allocation or fail.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
+	bool should_abort_reclaim = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2127,19 +2132,20 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 				continue;	/* Let kswapd poll it */
 			if (COMPACTION_BUILD) {
 				/*
-				 * If we already have plenty of memory
-				 * free for compaction, don't free any
-				 * more.  Even though compaction is
-				 * invoked for any non-zero order,
-				 * only frequent costly order
-				 * reclamation is disruptive enough to
-				 * become a noticable problem, like
-				 * transparent huge page allocations.
+				 * If we already have plenty of memory free for
+				 * compaction in this zone, don't free any more.
+				 * Even though compaction is invoked for any
+				 * non-zero order, only frequent costly order
+				 * reclamation is disruptive enough to become a
+				 * noticable problem, like transparent huge page
+				 * allocations.
 				 */
 				if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
 					(compaction_suitable(zone, sc->order) ||
-					 compaction_deferred(zone)))
+					 compaction_deferred(zone))) {
+					should_abort_reclaim = true;
 					continue;
+				}
 			}
 			/*
 			 * This steals pages from memory cgroups over softlimit
@@ -2158,6 +2164,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
 		shrink_zone(priority, zone, sc);
 	}
+
+	return should_abort_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2222,7 +2230,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token(sc->mem_cgroup);
-		shrink_zones(priority, zonelist, sc);
+		if (shrink_zones(priority, zonelist, sc))
+			break;
+
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
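A note on the condition in the second hunk: the abort is only triggered for costly high-order requests. Below is a minimal sketch of that predicate with invented stubs; the real compaction_suitable() and compaction_deferred() take a struct zone (and, for the former, an order) and consult per-zone compaction state.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3	/* kernel constant: order > 3 is "costly" */

/* Hypothetical stand-ins for the kernel predicates used in the hunk above. */
static bool compaction_suitable_stub(int order)
{
	return order >= 4;	/* pretend enough free pages exist to compact */
}

static bool compaction_deferred_stub(void)
{
	return false;		/* pretend compaction was not recently deferred */
}

/* Mirrors the test added to shrink_zones(): skip the zone and flag the
 * abort only when the request is costly AND compaction is either ready
 * to begin or has been deferred. */
static bool zone_triggers_abort(int order)
{
	return order > PAGE_ALLOC_COSTLY_ORDER &&
	       (compaction_suitable_stub(order) || compaction_deferred_stub());
}

int main(void)
{
	printf("order 1: %d\n", zone_triggers_abort(1));	/* 0: low orders keep reclaiming */
	printf("order 9: %d\n", zone_triggers_abort(9));	/* 1: THP-sized request can abort */
	return 0;
}

Order 9 corresponds to a 2MB transparent huge page with 4KB base pages, the disruptive case the comment in the hunk singles out; order-0 and other small requests are never affected.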