	mm/debug-pagealloc: make debug-pagealloc boottime configurable
We are now prepared to avoid using debug-pagealloc at boot time, so introduce a new kernel parameter that lets debug-pagealloc stay disabled at boot, and make the related functions no-ops in that case. The only non-intuitive part is the change to the guard page functions: because guard pages are effective only when debug-pagealloc is enabled, turning them off together with debug-pagealloc is the reasonable thing to do.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e30825f186
commit 031bc5743f

9 changed files with 57 additions and 7 deletions
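A small, self-contained C sketch of the pattern this patch introduces, condensed from the hunks below that add _debug_pagealloc_enabled, debug_pagealloc_enabled() and the kernel_map_pages() wrapper. All *_demo names are invented for this illustration and do not exist in the kernel; the sketch only mirrors the shape of the change: a boot-time flag, an "on" parser standing in for the early_param() handler, and a wrapper that becomes a no-op when the feature is left off.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool debug_pagealloc_enabled_demo;

/* stands in for the early_param("debug_pagealloc", ...) handler */
static int parse_debug_pagealloc_demo(const char *buf)
{
	if (!buf)
		return -1;

	if (strcmp(buf, "on") == 0)
		debug_pagealloc_enabled_demo = true;

	return 0;
}

/* stands in for the real (expensive) __kernel_map_pages() implementation */
static void kernel_map_pages_real_demo(int numpages, int enable)
{
	printf("%smapping %d page(s)\n", enable ? "" : "un", numpages);
}

/* stands in for the new static inline kernel_map_pages() wrapper */
static void kernel_map_pages_demo(int numpages, int enable)
{
	if (!debug_pagealloc_enabled_demo)
		return;			/* feature left off at boot: no-op */

	kernel_map_pages_real_demo(numpages, enable);
}

int main(void)
{
	kernel_map_pages_demo(4, 0);		/* silent: flag is off */
	parse_debug_pagealloc_demo("on");	/* as if debug_pagealloc=on was given */
	kernel_map_pages_demo(4, 0);		/* now performs the unmap */
	return 0;
}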
				
			
@@ -829,6 +829,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			CONFIG_DEBUG_PAGEALLOC, hence this option will not help
 			tracking down these problems.
 
+	debug_pagealloc=
+			[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+			parameter enables the feature at boot time. In
+			default, it is disabled. We can avoid allocating huge
+			chunk of memory for debug pagealloc if we don't enable
+			it at boot time and the system will work mostly same
+			with the kernel built without CONFIG_DEBUG_PAGEALLOC.
+			on: enable the feature
+
 	debugpat	[X86] Enable PAT debugging
 
 	decnet.addr=	[HW,NET]
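With CONFIG_DEBUG_PAGEALLOC built in, the checks now stay off unless the parameter above is passed at boot. A hypothetical boot entry enabling the feature might look like the line below; the kernel image path and root device are placeholders, and only the debug_pagealloc=on token comes from the documentation above.

	linux /boot/vmlinuz root=/dev/sda1 ro debug_pagealloc=on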

@@ -1514,7 +1514,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 			     mmu_kernel_ssize, 0);
 }
 
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;

@@ -429,7 +429,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 }
 
 
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
 		return;

@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 	}
 }
 
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long address;
 	int nr, i, j;

@@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
 	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

@@ -1817,7 +1817,7 @@ static int __set_pages_np(struct page *page, int numpages)
 	return __change_page_attr_set_clr(&cpa, 0);
 }
 
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
 		return;

@@ -2061,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
+extern bool _debug_pagealloc_enabled;
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline bool debug_pagealloc_enabled(void)
+{
+	return _debug_pagealloc_enabled;
+}
+
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (!debug_pagealloc_enabled())
+		return;
+
+	__kernel_map_pages(page, numpages, enable);
+}
 #ifdef CONFIG_HIBERNATION
 extern bool kernel_page_present(struct page *page);
 #endif /* CONFIG_HIBERNATION */
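A hedged sketch of how a caller is expected to use the new interface; the function below is hypothetical and not part of this commit. Callers keep calling kernel_map_pages() unconditionally and rely on the static inline wrapper above to turn it into a no-op unless debug_pagealloc=on was given at boot; code that wants to skip other debug-only work can test debug_pagealloc_enabled() directly.

#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical caller, for illustration only (not in this commit). */
static void debug_unmap_freed_pages(struct page *page, unsigned int order)
{
	/*
	 * No-op unless the kernel was booted with debug_pagealloc=on;
	 * otherwise the pages are unmapped so stray accesses fault early.
	 */
	kernel_map_pages(page, 1 << order, 0);

	if (debug_pagealloc_enabled())
		pr_debug("debug-pagealloc: unmapped %u page(s)\n", 1U << order);
}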

@@ -10,11 +10,17 @@ static bool page_poisoning_enabled __read_mostly;
 
 static bool need_page_poisoning(void)
 {
+	if (!debug_pagealloc_enabled())
+		return false;
+
 	return true;
 }
 
 static void init_page_poisoning(void)
 {
+	if (!debug_pagealloc_enabled())
+		return;
+
 	page_poisoning_enabled = true;
 }
 

@@ -119,7 +125,7 @@ static void unpoison_pages(struct page *page, int n)
 		unpoison_page(page + i);
 }
 
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!page_poisoning_enabled)
 		return;

@@ -425,15 +425,35 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
+bool _debug_pagealloc_enabled __read_mostly;
 bool _debug_guardpage_enabled __read_mostly;
 
+static int __init early_debug_pagealloc(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (strcmp(buf, "on") == 0)
+		_debug_pagealloc_enabled = true;
+
+	return 0;
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
 static bool need_debug_guardpage(void)
 {
+	/* If we don't use debug_pagealloc, we don't need guard page */
+	if (!debug_pagealloc_enabled())
+		return false;
+
 	return true;
 }
 
 static void init_debug_guardpage(void)
 {
+	if (!debug_pagealloc_enabled())
+		return;
+
 	_debug_guardpage_enabled = true;
 }
 