mm/hugetlb: introduce minimum hugepage order

Currently the initial value of order in dissolve_free_huge_pages() is 64 or
32, which leads to the following warning from the static checker:

  mm/hugetlb.c:1203 dissolve_free_huge_pages()
  warn: potential right shift more than type allows '9,18,64'

This risks an infinite loop, because 1 << order (== 0) is used as the step
in a for-loop like this:

  for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
      ...
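
As an illustration, a minimal standalone userspace sketch of the hazard
(not kernel code; the names merely mirror the kernel variables):

  #include <stdio.h>

  int main(void)
  {
  	/* 8 * sizeof(void *) is 64 on LP64 targets, 32 on ILP32 */
  	unsigned int order = 8 * sizeof(void *);

  	/* shift count >= bit width of int: undefined behavior in C */
  	unsigned long step = 1 << order;

  	printf("order=%u step=%lu\n", order, step);
  	if (step == 0)
  		printf("pfn += step would make no progress\n");
  	return 0;
  }

If the (undefined) shift happens to evaluate to 0, pfn never advances and
the scan loop never terminates.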

So this patch fixes it by using a global minimum_order calculated at boot
time.
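For example, on x86_64 with 2MB and 1GB hugepage sizes configured,
huge_page_order() returns 9 and 18 (the '9,18' in the checker output
above), so minimum_order becomes 9 and the scan steps 1 << 9 = 512 pfns
per iteration.
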
    text    data     bss     dec     hex filename
   28313     469   84236  113018   1b97a mm/hugetlb.o
   28256     473   84236  112965   1b945 mm/hugetlb.o (patched)
Fixes: c8721bbbdd ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
commit 641844f561
parent 414e2fb8ce
 mm/hugetlb.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
 	struct hstate *h;
 
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)