mm/cma: remove ALLOC_CMA

Now that all pages reserved for CMA regions belong to ZONE_MOVABLE, they
serve only requests with GFP_HIGHMEM && GFP_MOVABLE.  Therefore we no
longer need to maintain ALLOC_CMA at all.

Link: http://lkml.kernel.org/r/1512114786-5085-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
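To illustrate the accounting this commit drops, below is a minimal userspace sketch (not kernel code) contrasting the old and new watermark checks. The struct and function names (zone_stats, watermark_ok_old, watermark_ok_new) and the numbers are made up for illustration; the real logic lives in __zone_watermark_ok() and zone_watermark_fast() in mm/page_alloc.c.

/*
 * Hypothetical standalone sketch of the watermark accounting simplified
 * by this commit.  Not kernel code; names and values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA 0x80	/* the per-request flag this commit removes */

struct zone_stats {
	long free_pages;	/* NR_FREE_PAGES */
	long free_cma_pages;	/* NR_FREE_CMA_PAGES */
	long watermark;
};

/* Old behaviour: free CMA pages count only if the caller passed ALLOC_CMA. */
static bool watermark_ok_old(const struct zone_stats *z, unsigned int alloc_flags)
{
	long free_pages = z->free_pages;

	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= z->free_cma_pages;
	return free_pages > z->watermark;
}

/*
 * New behaviour: CMA pageblocks sit in ZONE_MOVABLE, so every request that
 * can reach this zone (GFP_HIGHMEM && GFP_MOVABLE) may also use CMA pages,
 * and no per-request flag is needed.
 */
static bool watermark_ok_new(const struct zone_stats *z)
{
	return z->free_pages > z->watermark;
}

int main(void)
{
	struct zone_stats z = {
		.free_pages = 1000,
		.free_cma_pages = 400,
		.watermark = 700,
	};

	printf("old, movable request (ALLOC_CMA set): %d\n", watermark_ok_old(&z, ALLOC_CMA));
	printf("old, request without ALLOC_CMA:       %d\n", watermark_ok_old(&z, 0));
	printf("new, any request in this zone:        %d\n", watermark_ok_new(&z));
	return 0;
}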
commit 1d47a3ec09 (parent bad8c6c0b1)
3 changed files with 4 additions and 29 deletions
mm/compaction.c
@@ -1450,14 +1450,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	 * if compaction succeeds.
 	 * For costly orders, we require low watermark instead of min for
 	 * compaction to proceed to increase its chances.
-	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
-	 * suitable migration targets
 	 */
 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
 				low_wmark_pages(zone) : min_wmark_pages(zone);
 	watermark += compact_gap(order);
 	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-						ALLOC_CMA, wmark_target))
+						0, wmark_target))
 		return COMPACT_SKIPPED;
 
 	return COMPACT_CONTINUE;
mm/internal.h
@@ -498,7 +498,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER		0x10 /* try to alloc harder */
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
mm/page_alloc.c
@@ -2893,7 +2893,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		 * exists.
 		 */
 		watermark = min_wmark_pages(zone) + (1UL << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 			return 0;
 
 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -3169,12 +3169,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 	}
 
 
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
 	/*
 	 * Check watermarks for an order-0 allocation request. If these
 	 * are not met, then a high-order request also cannot go ahead
@@ -3201,10 +3195,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		}
 
 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
 			return true;
-		}
 #endif
 		if (alloc_harder &&
 			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
@@ -3224,13 +3216,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
-	long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
 	/*
 	 * Fast check for order-0 only. If this fails then the reserves
@@ -3239,7 +3224,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 	 * the caller is !atomic then it'll uselessly search the free
 	 * list. That corner case is then slower but it is harmless.
 	 */
-	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+	if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
 		return true;
 
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
@@ -3875,10 +3860,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
 	return alloc_flags;
 }
 
@@ -4345,9 +4326,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
-
 	return true;
 }
 