	mm/hugetlb: convert hugetlb prep functions to folios
Convert prep_new_huge_page() and __prep_compound_gigantic_page() to folios.

Link: https://lkml.kernel.org/r/20221129225039.82257-10-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
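In short, callers now resolve the head page's struct folio once with page_folio() and pass that to the prep helpers, which operate on the folio directly. The following is a condensed, illustrative before/after sketch of the caller side, not a literal excerpt; it is modeled on the alloc_fresh_huge_page() hunks below, with the return-value check on the gigantic prep call and the surrounding allocation and error handling left out:

	/* before this patch: page-based helpers, NUMA node read off the page */
	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	/* after this patch: convert once, then stay in folio terms */
	folio = page_folio(page);
	if (hstate_is_gigantic(h))
		prep_compound_gigantic_folio(folio, huge_page_order(h));
	prep_new_hugetlb_folio(h, folio, folio_nid(folio));

The same substitution shows up throughout the patch: page_count(), PageReserved() and page_to_nid() on the head page become folio_ref_count(), folio_test_reserved() and folio_nid(), and nth_page(page, i) becomes folio_page(folio, i).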
parent 7f325a8d25
commit d1c6095572

1 changed file with 33 additions and 36 deletions
 mm/hugetlb.c | 69 +++++++++++++++++++++++++++++++++------------------------------------

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1789,29 +1789,27 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
 {
-	struct folio *folio = page_folio(page);
-
 	__prep_new_hugetlb_folio(h, folio);
 	spin_lock_irq(&hugetlb_lock);
 	__prep_account_new_huge_page(h, nid);
 	spin_unlock_irq(&hugetlb_lock);
 }
 
-static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
-								bool demote)
+static bool __prep_compound_gigantic_folio(struct folio *folio,
+					unsigned int order, bool demote)
 {
 	int i, j;
 	int nr_pages = 1 << order;
 	struct page *p;
 
-	/* we rely on prep_new_huge_page to set the destructor */
-	set_compound_order(page, order);
-	__ClearPageReserved(page);
-	__SetPageHead(page);
+	/* we rely on prep_new_hugetlb_folio to set the destructor */
+	folio_set_compound_order(folio, order);
+	__folio_clear_reserved(folio);
+	__folio_set_head(folio);
 	for (i = 0; i < nr_pages; i++) {
-		p = nth_page(page, i);
+		p = folio_page(folio, i);
 
 		/*
 		 * For gigantic hugepages allocated through bootmem at
@@ -1853,43 +1851,41 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 			VM_BUG_ON_PAGE(page_count(p), p);
 		}
 		if (i != 0)
-			set_compound_head(p, page);
+			set_compound_head(p, &folio->page);
 	}
-	atomic_set(compound_mapcount_ptr(page), -1);
-	atomic_set(subpages_mapcount_ptr(page), 0);
-	atomic_set(compound_pincount_ptr(page), 0);
+	atomic_set(folio_mapcount_ptr(folio), -1);
+	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+	atomic_set(folio_pincount_ptr(folio), 0);
 	return true;
 
 out_error:
 	/* undo page modifications made above */
 	for (j = 0; j < i; j++) {
-		p = nth_page(page, j);
+		p = folio_page(folio, j);
 		if (j != 0)
 			clear_compound_head(p);
 		set_page_refcounted(p);
 	}
 	/* need to clear PG_reserved on remaining tail pages  */
 	for (; j < nr_pages; j++) {
-		p = nth_page(page, j);
+		p = folio_page(folio, j);
 		__ClearPageReserved(p);
 	}
-	set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
-	page[1].compound_nr = 0;
-#endif
-	__ClearPageHead(page);
+	folio_set_compound_order(folio, 0);
+	__folio_clear_head(folio);
 	return false;
 }
 
-static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
+static bool prep_compound_gigantic_folio(struct folio *folio,
+							unsigned int order)
 {
-	return __prep_compound_gigantic_page(page, order, false);
+	return __prep_compound_gigantic_folio(folio, order, false);
 }
 
-static bool prep_compound_gigantic_page_for_demote(struct page *page,
+static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 							unsigned int order)
 {
-	return __prep_compound_gigantic_page(page, order, true);
+	return __prep_compound_gigantic_folio(folio, order, true);
 }
 
 /*
@@ -2041,7 +2037,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 		return NULL;
 	folio = page_folio(page);
 	if (hstate_is_gigantic(h)) {
-		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
+		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
 			/*
 			 * Rare failure to convert pages to compound page.
 			 * Free pages and try again - ONCE!
@@ -2054,7 +2050,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 			return NULL;
 		}
 	}
-	prep_new_huge_page(h, page, page_to_nid(page));
+	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
 
 	return page;
 }
@@ -3058,10 +3054,10 @@ static void __init gather_bootmem_prealloc(void)
 		struct hstate *h = m->hstate;
 
 		VM_BUG_ON(!hstate_is_gigantic(h));
-		WARN_ON(page_count(page) != 1);
-		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
-			WARN_ON(PageReserved(page));
-			prep_new_huge_page(h, page, page_to_nid(page));
+		WARN_ON(folio_ref_count(folio) != 1);
+		if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
+			WARN_ON(folio_test_reserved(folio));
+			prep_new_hugetlb_folio(h, folio, folio_nid(folio));
 			free_huge_page(page); /* add to the hugepage allocator */
 		} else {
 			/* VERY unlikely inflated ref count on a tail page */
@@ -3480,13 +3476,14 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	for (i = 0; i < pages_per_huge_page(h);
 				i += pages_per_huge_page(target_hstate)) {
 		subpage = nth_page(page, i);
+		folio = page_folio(subpage);
 		if (hstate_is_gigantic(target_hstate))
-			prep_compound_gigantic_page_for_demote(subpage,
+			prep_compound_gigantic_folio_for_demote(folio,
 							target_hstate->order);
 		else
 			prep_compound_page(subpage, target_hstate->order);
 		set_page_private(subpage, 0);
-		prep_new_huge_page(target_hstate, subpage, nid);
+		prep_new_hugetlb_folio(target_hstate, folio, nid);
 		free_huge_page(subpage);
 	}
 	mutex_unlock(&target_hstate->resize_lock);
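One detail worth noting in the last hunk: during demotion each sub-range of the old huge page becomes its own smaller huge page, so a folio is resolved per sub-range rather than reusing the original page's folio. Below is a minimal, commented sketch of that loop shape after the patch (illustrative only; the declarations, locking and accounting of demote_free_huge_page() are omitted):

	for (i = 0; i < pages_per_huge_page(h);
			i += pages_per_huge_page(target_hstate)) {
		/* first page of this sub-range; it becomes the new head page */
		subpage = nth_page(page, i);
		/* folio view of that page, for the folio-based prep helpers */
		folio = page_folio(subpage);
		if (hstate_is_gigantic(target_hstate))
			prep_compound_gigantic_folio_for_demote(folio,
						target_hstate->order);
		else
			prep_compound_page(subpage, target_hstate->order);
		set_page_private(subpage, 0);
		prep_new_hugetlb_folio(target_hstate, folio, nid);
		free_huge_page(subpage);	/* add the smaller huge page to the allocator */
	}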