forked from mirrors/linux
		
	 b2c9e2fbba
			
		
	
	
		b2c9e2fbba
		
	
	
	
	
		
			
			alloc_contig_range() worked at MAX_ORDER_NR_PAGES granularity to avoid merging pageblocks with different migratetypes. It might unnecessarily convert extra pageblocks at the beginning and at the end of the range. Change alloc_contig_range() to work at pageblock granularity. Special handling is needed for free pages and in-use pages across the boundaries of the range specified by alloc_contig_range(), because these partially isolated pages cause free page accounting issues. The free pages will be split and freed into separate migratetype lists; the in-use pages will be migrated, then the freed pages will be handled in the aforementioned way. [ziy@nvidia.com: fix deadlock/crash] Link: https://lkml.kernel.org/r/23A7297E-6C84-4138-A9FE-3598234004E6@nvidia.com Link: https://lkml.kernel.org/r/20220425143118.2850746-4-zi.yan@sent.com Signed-off-by: Zi Yan <ziy@nvidia.com> Reported-by: kernel test robot <lkp@intel.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: David Hildenbrand <david@redhat.com> Cc: Eric Ren <renzhengeek@gmail.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Oscar Salvador <osalvador@suse.de> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
		
			
				
	
	
		
			63 lines
		
	
	
	
		
			1.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			63 lines
		
	
	
	
		
			1.5 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| /* SPDX-License-Identifier: GPL-2.0 */
 | |
| #ifndef __LINUX_PAGEISOLATION_H
 | |
| #define __LINUX_PAGEISOLATION_H
 | |
| 
 | |
| #ifdef CONFIG_MEMORY_ISOLATION
 | |
| static inline bool has_isolate_pageblock(struct zone *zone)
 | |
| {
 | |
| 	return zone->nr_isolate_pageblock;
 | |
| }
 | |
| static inline bool is_migrate_isolate_page(struct page *page)
 | |
| {
 | |
| 	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
 | |
| }
 | |
| static inline bool is_migrate_isolate(int migratetype)
 | |
| {
 | |
| 	return migratetype == MIGRATE_ISOLATE;
 | |
| }
 | |
| #else
 | |
/* !CONFIG_MEMORY_ISOLATION stub: no pageblock can ever be isolated. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
 | |
/* !CONFIG_MEMORY_ISOLATION stub: no page is ever in an isolated pageblock. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
 | |
/* !CONFIG_MEMORY_ISOLATION stub: the isolate migratetype does not exist. */
static inline bool is_migrate_isolate(int migratetype)
{
	(void)migratetype;	/* unused in this configuration */
	return false;
}
 | |
| #endif
 | |
| 
 | |
/*
 * Flags for start_isolate_page_range() / test_pages_isolated().
 * MEMORY_OFFLINE: NOTE(review) -- inferred from the name to mean the
 * isolation is on behalf of memory offlining; confirm against callers.
 * REPORT_FAILURE: NOTE(review) -- presumably asks for isolation failures
 * to be reported rather than ignored; confirm in the definitions.
 */
#define MEMORY_OFFLINE	0x1
#define REPORT_FAILURE	0x2

/* Set the migratetype of the pageblock containing @page. */
void set_pageblock_migratetype(struct page *page, int migratetype);
/*
 * Move the free pages of the pageblock containing @page to the
 * @migratetype free lists.  NOTE(review): @num_movable presumably
 * returns a count of movable pages encountered -- confirm in mm/.
 */
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable);

/*
 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 * @gfp_flags: allocation flags for any allocation the isolation needs
 * (added with the pageblock-granularity alloc_contig_range() work, which
 * splits/migrates pages straddling the range boundaries).
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 int migratetype, int flags, gfp_t gfp_flags);

/*
 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 * target range is [start_pfn, end_pfn)
 * @migratetype: the migratetype the pageblocks are restored to.
 */
void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			int migratetype);

/*
 * Test all pages in [start_pfn, end_pfn) are isolated or not.
 * @isol_flags: MEMORY_OFFLINE / REPORT_FAILURE bits above.
 * Returns an int status; see the definition in mm/page_isolation.c for
 * the exact success/failure convention.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags);

/* Migration callback: allocate a destination page for migrating @page. */
struct page *alloc_migrate_target(struct page *page, unsigned long private);
 | |
| 
 | |
| #endif
 |