mm: use alloc_flags to record if kswapd can wake
This is a preparation patch that copies the GFP flag __GFP_KSWAPD_RECLAIM
into alloc_flags, so that a future patch does not have to pass gfp_mask
through a long callchain.

Note that the setting in the fast path happens in alloc_flags_nofragment()
and it may be claimed that this has nothing to do with ALLOC_NOFRAGMENT.
That is true in this patch, but it will not be true later in the series,
so the flag is recorded here now to make the eventual placement easier to
review.

No functional change.

[mgorman@techsingularity.net: ALLOC_KSWAPD flag needs to be applied in the !CONFIG_ZONE_DMA32 case]
Link: http://lkml.kernel.org/r/20181126143503.GO23260@techsingularity.net
Link: http://lkml.kernel.org/r/20181123114528.28802-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0a79cdad5e
parent a921444382

2 changed files with 19 additions and 15 deletions
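The mechanism is small enough to show in isolation before reading the diff. What follows is a self-contained userspace sketch of the pattern, not kernel code: the caller-visible GFP bit is folded into the allocator-internal alloc_flags once, and deeper layers test only alloc_flags. The flag values and helper names (record_kswapd_flag, slowpath) are simplified stand-ins invented for this sketch.

/*
 * Sketch of the flag-recording pattern this patch applies. All values
 * here are stand-ins, not the kernel's real definitions.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_KSWAPD_RECLAIM	0x01u	/* stand-in: caller allows waking kswapd */
#define ALLOC_KSWAPD		0x200u	/* internal: kswapd may be woken */

/* Translate the GFP bit into alloc_flags once, near the allocator entry. */
static unsigned int record_kswapd_flag(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;

	return alloc_flags;
}

/* Deeper layers test only alloc_flags; gfp_mask need not travel this far. */
static void slowpath(unsigned int alloc_flags)
{
	if (alloc_flags & ALLOC_KSWAPD)
		printf("would wake kswapd\n");
	else
		printf("kswapd left alone\n");
}

int main(void)
{
	slowpath(record_kswapd_flag(__GFP_KSWAPD_RECLAIM));	/* wakes */
	slowpath(record_kswapd_flag(0));			/* does not */
	return 0;
}

Per the changelog, the payoff is that a later patch can stop threading gfp_mask through a long callchain just to answer "may kswapd be woken?".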
				
			
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -499,6 +499,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT	  0x0
 #endif
+#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3268,7 +3268,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif	/* CONFIG_NUMA */
 
-#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3278,10 +3277,16 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
+alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 {
+	unsigned int alloc_flags = 0;
+
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
+#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		return 0;
+		goto out;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3290,17 +3295,12 @@ alloc_flags_nofragment(struct zone *zone)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		return 0;
+		goto out;
 
-	return ALLOC_NOFRAGMENT;
+out:
+#endif /* CONFIG_ZONE_DMA32 */
+	return alloc_flags;
 }
-#else
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	return 0;
-}
-#endif
 
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
@@ -3939,6 +3939,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -4170,7 +4173,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4228,7 +4231,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4451,7 +4454,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
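A subtlety worth noting: the bracketed fixup in the changelog shows up in the alloc_flags_nofragment() hunks above, where the #ifdef CONFIG_ZONE_DMA32 guard now begins after the ALLOC_KSWAPD translation rather than wrapping the whole function, so builds without ZONE_DMA32 still record the flag. Below is a structural sketch of that control flow under the same stand-in values as the earlier example; HAVE_ZONE_DMA32 is a hypothetical toggle standing in for the config option, and the elided zone checks are the ones in the hunk above.

#include <stdio.h>

#define HAVE_ZONE_DMA32		1	/* stand-in for CONFIG_ZONE_DMA32; set to 0 to compare */

typedef unsigned int gfp_t;

#define __GFP_KSWAPD_RECLAIM	0x01u	/* stand-in value */
#define ALLOC_KSWAPD		0x200u

/*
 * Mirrors the patched control flow: the flag is recorded before the
 * conditional block, so it is applied in both configurations.
 */
static unsigned int nofragment_sketch(int zone_is_normal, gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;

#if HAVE_ZONE_DMA32
	if (!zone_is_normal)
		goto out;

	/* ... ZONE_DMA32 suitability checks from the hunk above ... */

out:
#endif
	return alloc_flags;
}

int main(void)
{
	/* Prints 0x200 whether or not HAVE_ZONE_DMA32 is enabled. */
	printf("%#x\n", nofragment_sketch(1, __GFP_KSWAPD_RECLAIM));
	return 0;
}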