Joonsoo has noticed that "mm: drop migrate type checks from
has_unmovable_pages" would break the CMA allocator because it relies on
has_unmovable_pages returning false even for CMA pageblocks, which in
fact don't have to be movable:
 alloc_contig_range
   start_isolate_page_range
     set_migratetype_isolate
       has_unmovable_pages
This is a result of the code sharing between CMA and memory hotplug
while each one has a different idea of what has_unmovable_pages should
return.  This is unfortunate but fixing it properly would require a lot
of code duplication.
Fix the issue by introducing the requested migrate type argument and
special-casing MIGRATE_CMA so that CMA page blocks are handled
properly.  This will work for memory hotplug because it requires
MIGRATE_MOVABLE.
Link: http://lkml.kernel.org/r/20171019122118.y6cndierwl2vnguj@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
Tested-by: Ran Wang <ran.wang_1@nxp.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
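For illustration only, a minimal sketch of the CMA special case that the new
migratetype argument enables inside has_unmovable_pages().  The surrounding
per-page movability checks are elided and the exact placement of the check is
an assumption here, not the verbatim patch; is_migrate_cma() and
get_pageblock_migratetype() are existing kernel helpers:

/*
 * Sketch: when the caller is isolating for MIGRATE_CMA (alloc_contig_range
 * on a CMA region), a CMA pageblock is acceptable even though its pages are
 * not necessarily movable, so report "no unmovable pages" for it.  Memory
 * hotplug passes MIGRATE_MOVABLE and therefore never takes this shortcut.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype, bool skip_hwpoisoned_pages)
{
	if (is_migrate_cma(migratetype) &&
	    is_migrate_cma(get_pageblock_migratetype(page)))
		return false;

	/* ... the usual page-by-page checks for unmovable pages follow ... */
}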
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif

bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype, bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable);

/*
 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 * If the specified range includes migrate types other than MOVABLE or CMA,
 * this will fail with -EBUSY.
 *
 * To finally isolate all pages in the range, the caller has to free
 * all pages in the range.  test_pages_isolated() can be used to test it.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);

/*
 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 * The target range is [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);

struct page *alloc_migrate_target(struct page *page, unsigned long private,
				int **resultp);

#endif
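As a rough usage illustration of the interface declared above (simplified and
hedged: the migratetype constants and skip_hwpoisoned_pages values mirror how
the CMA and memory-hotplug paths are described in the changelog, not verbatim
kernel caller code):

	/*
	 * CMA path: alloc_contig_range() isolates the target range as
	 * MIGRATE_CMA, so CMA pageblocks do not count as unmovable.
	 */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA, false);

	/*
	 * Memory hotplug path: offlining isolates as MIGRATE_MOVABLE and
	 * must not find any unmovable pages at all.
	 */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true);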