mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	hugetlb: perform vmemmap optimization on a list of pages
When adding hugetlb pages to the pool, we first create a list of the allocated pages before adding to the pool. Pass this list of pages to a new routine hugetlb_vmemmap_optimize_folios() for vmemmap optimization. Due to significant differences in vmemmap initialization for bootmem allocated hugetlb pages, a new routine prep_and_add_bootmem_folios is created. We also modify the routine vmemmap_should_optimize() to check for pages that are already optimized. There are code paths that might request vmemmap optimization twice and we want to make sure this is not attempted. Link: https://lkml.kernel.org/r/20231019023113.345257-4-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Reviewed-by: Muchun Song <songmuchun@bytedance.com> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: James Houghton <jthoughton@google.com> Cc: Joao Martins <joao.m.martins@oracle.com> Cc: Konrad Dybcio <konradybcio@kernel.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Cc: Sergey Senozhatsky <senozhatsky@chromium.org> Cc: Usama Arif <usama.arif@bytedance.com> Cc: Xiongchun Duan <duanxiongchun@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									d67e32f267
								
							
						
					
					
						commit
						79359d6d24
					
				
					 3 changed files with 51 additions and 8 deletions
				
			
		
							
								
								
									
										43
									
								
								mm/hugetlb.c
									
									
									
									
									
								
							
							
						
						
									
										43
									
								
								mm/hugetlb.c
									
									
									
									
									
								
							| 
						 | 
					@ -2282,6 +2282,9 @@ static void prep_and_add_allocated_folios(struct hstate *h,
 | 
				
			||||||
	unsigned long flags;
 | 
						unsigned long flags;
 | 
				
			||||||
	struct folio *folio, *tmp_f;
 | 
						struct folio *folio, *tmp_f;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Send list for bulk vmemmap optimization processing */
 | 
				
			||||||
 | 
						hugetlb_vmemmap_optimize_folios(h, folio_list);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Add all new pool pages to free lists in one lock cycle */
 | 
						/* Add all new pool pages to free lists in one lock cycle */
 | 
				
			||||||
	spin_lock_irqsave(&hugetlb_lock, flags);
 | 
						spin_lock_irqsave(&hugetlb_lock, flags);
 | 
				
			||||||
	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
 | 
						list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
 | 
				
			||||||
| 
						 | 
					@ -3344,6 +3347,35 @@ static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
 | 
				
			||||||
	prep_compound_head((struct page *)folio, huge_page_order(h));
 | 
						prep_compound_head((struct page *)folio, huge_page_order(h));
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static void __init prep_and_add_bootmem_folios(struct hstate *h,
 | 
				
			||||||
 | 
										struct list_head *folio_list)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						unsigned long flags;
 | 
				
			||||||
 | 
						struct folio *folio, *tmp_f;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Send list for bulk vmemmap optimization processing */
 | 
				
			||||||
 | 
						hugetlb_vmemmap_optimize_folios(h, folio_list);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Add all new pool pages to free lists in one lock cycle */
 | 
				
			||||||
 | 
						spin_lock_irqsave(&hugetlb_lock, flags);
 | 
				
			||||||
 | 
						list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
 | 
				
			||||||
 | 
							if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
 | 
				
			||||||
 | 
								/*
 | 
				
			||||||
 | 
								 * If HVO fails, initialize all tail struct pages
 | 
				
			||||||
 | 
								 * We do not worry about potential long lock hold
 | 
				
			||||||
 | 
								 * time as this is early in boot and there should
 | 
				
			||||||
 | 
								 * be no contention.
 | 
				
			||||||
 | 
								 */
 | 
				
			||||||
 | 
								hugetlb_folio_init_tail_vmemmap(folio,
 | 
				
			||||||
 | 
										HUGETLB_VMEMMAP_RESERVE_PAGES,
 | 
				
			||||||
 | 
										pages_per_huge_page(h));
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							__prep_account_new_huge_page(h, folio_nid(folio));
 | 
				
			||||||
 | 
							enqueue_hugetlb_folio(h, folio);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						spin_unlock_irqrestore(&hugetlb_lock, flags);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
 * Put bootmem huge pages into the standard lists after mem_map is up.
 | 
					 * Put bootmem huge pages into the standard lists after mem_map is up.
 | 
				
			||||||
 * Note: This only applies to gigantic (order > MAX_ORDER) pages.
 | 
					 * Note: This only applies to gigantic (order > MAX_ORDER) pages.
 | 
				
			||||||
| 
						 | 
					@ -3364,7 +3396,7 @@ static void __init gather_bootmem_prealloc(void)
 | 
				
			||||||
		 * in this list.  If so, process each size separately.
 | 
							 * in this list.  If so, process each size separately.
 | 
				
			||||||
		 */
 | 
							 */
 | 
				
			||||||
		if (h != prev_h && prev_h != NULL)
 | 
							if (h != prev_h && prev_h != NULL)
 | 
				
			||||||
			prep_and_add_allocated_folios(prev_h, &folio_list);
 | 
								prep_and_add_bootmem_folios(prev_h, &folio_list);
 | 
				
			||||||
		prev_h = h;
 | 
							prev_h = h;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		VM_BUG_ON(!hstate_is_gigantic(h));
 | 
							VM_BUG_ON(!hstate_is_gigantic(h));
 | 
				
			||||||
| 
						 | 
					@ -3372,12 +3404,7 @@ static void __init gather_bootmem_prealloc(void)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		hugetlb_folio_init_vmemmap(folio, h,
 | 
							hugetlb_folio_init_vmemmap(folio, h,
 | 
				
			||||||
					   HUGETLB_VMEMMAP_RESERVE_PAGES);
 | 
										   HUGETLB_VMEMMAP_RESERVE_PAGES);
 | 
				
			||||||
		__prep_new_hugetlb_folio(h, folio);
 | 
							init_new_hugetlb_folio(h, folio);
 | 
				
			||||||
		/* If HVO fails, initialize all tail struct pages */
 | 
					 | 
				
			||||||
		if (!HPageVmemmapOptimized(&folio->page))
 | 
					 | 
				
			||||||
			hugetlb_folio_init_tail_vmemmap(folio,
 | 
					 | 
				
			||||||
						HUGETLB_VMEMMAP_RESERVE_PAGES,
 | 
					 | 
				
			||||||
						pages_per_huge_page(h));
 | 
					 | 
				
			||||||
		list_add(&folio->lru, &folio_list);
 | 
							list_add(&folio->lru, &folio_list);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
| 
						 | 
					@ -3389,7 +3416,7 @@ static void __init gather_bootmem_prealloc(void)
 | 
				
			||||||
		cond_resched();
 | 
							cond_resched();
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	prep_and_add_allocated_folios(h, &folio_list);
 | 
						prep_and_add_bootmem_folios(h, &folio_list);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 | 
					static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -483,6 +483,9 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 | 
				
			||||||
/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
 | 
					/* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
 | 
				
			||||||
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 | 
					static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
						if (HPageVmemmapOptimized((struct page *)head))
 | 
				
			||||||
 | 
							return false;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!READ_ONCE(vmemmap_optimize_enabled))
 | 
						if (!READ_ONCE(vmemmap_optimize_enabled))
 | 
				
			||||||
		return false;
 | 
							return false;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -572,6 +575,14 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 | 
				
			||||||
		SetHPageVmemmapOptimized(head);
 | 
							SetHPageVmemmapOptimized(head);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						struct folio *folio;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						list_for_each_entry(folio, folio_list, lru)
 | 
				
			||||||
 | 
							hugetlb_vmemmap_optimize(h, &folio->page);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
 | 
					static struct ctl_table hugetlb_vmemmap_sysctls[] = {
 | 
				
			||||||
	{
 | 
						{
 | 
				
			||||||
		.procname	= "hugetlb_optimize_vmemmap",
 | 
							.procname	= "hugetlb_optimize_vmemmap",
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -20,6 +20,7 @@
 | 
				
			||||||
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 | 
					#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 | 
				
			||||||
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
 | 
					int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
 | 
				
			||||||
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 | 
					void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 | 
				
			||||||
 | 
					void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
 | 
					static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
| 
						 | 
					@ -48,6 +49,10 @@ static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
 | 
					static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue