forked from mirrors/linux
		
	mm: page_alloc: avoid merging non-fallbackable pageblocks with others
This is done in addition to MIGRATE_ISOLATE pageblock merge avoidance. It prepares for the upcoming removal of the MAX_ORDER-1 alignment requirement for CMA and alloc_contig_range(). MIGRATE_HIGHATOMIC should not merge with other migratetypes like MIGRATE_ISOLATE and MIGRATE_CMA[1], so this commit prevents that too. Remove MIGRATE_CMA and MIGRATE_ISOLATE from fallbacks list, since they are never used. [1] https://lore.kernel.org/linux-mm/20211130100853.GP3366@techsingularity.net/ Link: https://lkml.kernel.org/r/20220124175957.1261961-1-zi.yan@sent.com Signed-off-by: Zi Yan <ziy@nvidia.com> Acked-by: Mel Gorman <mgorman@techsingularity.net> Acked-by: David Hildenbrand <david@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Mike Rapoport <rppt@linux.ibm.com> Reviewed-by: Oscar Salvador <osalvador@suse.de> Cc: Mike Rapoport <rppt@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									ff11a7ce1f
								
							
						
					
					
						commit
						1dd214b8f2
					
				
					 2 changed files with 32 additions and 23 deletions
				
			
		|  | @ -83,6 +83,17 @@ static inline bool is_migrate_movable(int mt) | |||
| 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * Check whether a migratetype can be merged with another migratetype. | ||||
|  * | ||||
|  * It is only mergeable when it can fall back to other migratetypes for | ||||
|  * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. | ||||
|  */ | ||||
| static inline bool migratetype_is_mergeable(int mt) | ||||
| { | ||||
| 	return mt < MIGRATE_PCPTYPES; | ||||
| } | ||||
| 
 | ||||
| #define for_each_migratetype_order(order, type) \ | ||||
| 	for (order = 0; order < MAX_ORDER; order++) \ | ||||
| 		for (type = 0; type < MIGRATE_TYPES; type++) | ||||
|  |  | |||
|  | @ -1117,25 +1117,24 @@ static inline void __free_one_page(struct page *page, | |||
| 	} | ||||
| 	if (order < MAX_ORDER - 1) { | ||||
| 		/* If we are here, it means order is >= pageblock_order.
 | ||||
| 		 * We want to prevent merge between freepages on isolate | ||||
| 		 * pageblock and normal pageblock. Without this, pageblock | ||||
| 		 * isolation could cause incorrect freepage or CMA accounting. | ||||
| 		 * We want to prevent merge between freepages on pageblock | ||||
| 		 * without fallbacks and normal pageblock. Without this, | ||||
| 		 * pageblock isolation could cause incorrect freepage or CMA | ||||
| 		 * accounting or HIGHATOMIC accounting. | ||||
| 		 * | ||||
| 		 * We don't want to hit this code for the more frequent | ||||
| 		 * low-order merging. | ||||
| 		 */ | ||||
| 		if (unlikely(has_isolate_pageblock(zone))) { | ||||
| 			int buddy_mt; | ||||
| 		int buddy_mt; | ||||
| 
 | ||||
| 			buddy_pfn = __find_buddy_pfn(pfn, order); | ||||
| 			buddy = page + (buddy_pfn - pfn); | ||||
| 			buddy_mt = get_pageblock_migratetype(buddy); | ||||
| 		buddy_pfn = __find_buddy_pfn(pfn, order); | ||||
| 		buddy = page + (buddy_pfn - pfn); | ||||
| 		buddy_mt = get_pageblock_migratetype(buddy); | ||||
| 
 | ||||
| 			if (migratetype != buddy_mt | ||||
| 					&& (is_migrate_isolate(migratetype) || | ||||
| 						is_migrate_isolate(buddy_mt))) | ||||
| 				goto done_merging; | ||||
| 		} | ||||
| 		if (migratetype != buddy_mt | ||||
| 				&& (!migratetype_is_mergeable(migratetype) || | ||||
| 					!migratetype_is_mergeable(buddy_mt))) | ||||
| 			goto done_merging; | ||||
| 		max_order = order + 1; | ||||
| 		goto continue_merging; | ||||
| 	} | ||||
|  | @ -2479,17 +2478,13 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, | |||
| /*
 | ||||
|  * This array describes the order lists are fallen back to when | ||||
|  * the free lists for the desirable migrate type are depleted | ||||
|  * | ||||
|  * The other migratetypes do not have fallbacks. | ||||
|  */ | ||||
| static int fallbacks[MIGRATE_TYPES][3] = { | ||||
| 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES }, | ||||
| 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, | ||||
| 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES }, | ||||
| #ifdef CONFIG_CMA | ||||
| 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */ | ||||
| #endif | ||||
| #ifdef CONFIG_MEMORY_ISOLATION | ||||
| 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */ | ||||
| #endif | ||||
| }; | ||||
| 
 | ||||
| #ifdef CONFIG_CMA | ||||
|  | @ -2795,8 +2790,8 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, | |||
| 
 | ||||
| 	/* Yoink! */ | ||||
| 	mt = get_pageblock_migratetype(page); | ||||
| 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) | ||||
| 	    && !is_migrate_cma(mt)) { | ||||
| 	/* Only reserve normal pageblocks (i.e., they can merge with others) */ | ||||
| 	if (migratetype_is_mergeable(mt)) { | ||||
| 		zone->nr_reserved_highatomic += pageblock_nr_pages; | ||||
| 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); | ||||
| 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); | ||||
|  | @ -3545,8 +3540,11 @@ int __isolate_free_page(struct page *page, unsigned int order) | |||
| 		struct page *endpage = page + (1 << order) - 1; | ||||
| 		for (; page < endpage; page += pageblock_nr_pages) { | ||||
| 			int mt = get_pageblock_migratetype(page); | ||||
| 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) | ||||
| 			    && !is_migrate_highatomic(mt)) | ||||
| 			/*
 | ||||
| 			 * Only change normal pageblocks (i.e., they can merge | ||||
| 			 * with others) | ||||
| 			 */ | ||||
| 			if (migratetype_is_mergeable(mt)) | ||||
| 				set_pageblock_migratetype(page, | ||||
| 							  MIGRATE_MOVABLE); | ||||
| 		} | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Zi Yan
						Zi Yan