	mm, hugetlb: don't require CMA for runtime gigantic pages
Commit 944d9fec8d ("hugetlb: add support for gigantic page allocation
at runtime") has added the runtime gigantic page allocation via
alloc_contig_range(), making this support available only when CONFIG_CMA
is enabled.  Because it doesn't depend on MIGRATE_CMA pageblocks and the
associated infrastructure, it is possible, with a few simple adjustments, to
require only CONFIG_MEMORY_ISOLATION instead of full CONFIG_CMA.

After this patch, alloc_contig_range() and related functions are
available and used for gigantic pages with just CONFIG_MEMORY_ISOLATION
enabled.  Note CONFIG_CMA selects CONFIG_MEMORY_ISOLATION.  This allows
supporting runtime gigantic pages without the CMA-specific checks in
page allocator fastpaths.
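
For context, the runtime path in question boils down to handing a PFN range
to alloc_contig_range().  The sketch below is modeled loosely on the
alloc_gigantic_page() helpers in mm/hugetlb.c of this era; the function name
and the simplifications are illustrative, not part of the patch.  Note that
the guards added by the diff actually require CONFIG_MEMORY_ISOLATION
together with CONFIG_COMPACTION (or CONFIG_CMA), since compaction provides
the page migration machinery used to empty the range.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: the real code also scans zones for a
 * suitably aligned candidate range and skips memory holes and
 * reserved pages before attempting the allocation.
 */
static int example_alloc_gigantic(unsigned long start_pfn,
				  unsigned long nr_pages)
{
	/*
	 * alloc_contig_range() isolates the pageblocks covering
	 * [start_pfn, start_pfn + nr_pages), migrates movable pages
	 * out of them, and returns the emptied range as one
	 * physically contiguous allocation.  Plain MIGRATE_MOVABLE
	 * suffices; no MIGRATE_CMA pageblocks are involved, which is
	 * why memory isolation plus compaction can stand in for CMA.
	 */
	return alloc_contig_range(start_pfn, start_pfn + nr_pages,
				  MIGRATE_MOVABLE);
}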
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
parent b4330afbed
commit 080fe2068e

4 changed files with 7 additions and 7 deletions
arch/x86/mm/hugetlbpage.c
@@ -173,10 +173,10 @@ static __init int setup_hugepagesz(char *opt)
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static __init int gigantic_pages_init(void)
 {
-	/* With CMA we can allocate gigantic pages at runtime */
+	/* With compaction or CMA we can allocate gigantic pages at runtime */
 	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
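For reference, on x86-64 PUD_SHIFT is 30 and PAGE_SHIFT is 12, so
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT) registers an order-18 hstate:
2^18 contiguous 4 KiB pages, i.e. the 1 GB gigantic page size whose
hardware support cpu_has_gbpages reports.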
include/linux/gfp.h
@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
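With the declarations regrouped as above, alloc_contig_range() and
free_contig_range() become visible to any code built with memory isolation
and compaction (or CMA), while init_cma_reserved_pageblock() stays CMA-only.
A hypothetical caller, purely for illustration (example_claim_range() is not
from the patch; per the comment in gfp.h, the range must lie within a single
zone):

#include <linux/gfp.h>

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static int example_claim_range(unsigned long pfn, unsigned long nr)
{
	int ret;

	/* Try to empty [pfn, pfn + nr) and take ownership of it. */
	ret = alloc_contig_range(pfn, pfn + nr, MIGRATE_MOVABLE);
	if (ret)
		return ret;	/* busy or unmovable pages in the range */

	/* ... use the nr physically contiguous pages starting at pfn ... */

	free_contig_range(pfn, nr);
	return 0;
}
#endif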
mm/hugetlb.c
@@ -1001,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
 		nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
 					unsigned int order)
 {
mm/page_alloc.c
@@ -6620,7 +6620,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
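All four hunks open-code the same preprocessor condition.  Purely as an
illustrative follow-up (not part of this patch, which deliberately keeps the
test at each site), the repetition could be factored into one helper symbol;
later kernels eventually simplified exactly this condition into a dedicated
Kconfig option, CONFIG_CONTIG_ALLOC.

/*
 * Hypothetical consolidation of the guard used in the hunks above;
 * HAVE_RUNTIME_GIGANTIC_PAGES is an invented name for illustration.
 */
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
#define HAVE_RUNTIME_GIGANTIC_PAGES 1
#endif

#ifdef HAVE_RUNTIME_GIGANTIC_PAGES
/* alloc_contig_range()-based gigantic page code goes here. */
#endif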