	hugetlb: convert alloc_buddy_hugetlb_folio to use a folio
While this function returned a folio, it was still using __alloc_pages() and
__free_pages().  Use __folio_alloc() and folio_put() instead.  This actually
removes a call to compound_head(), but more importantly, it prepares us for
the move to memdescs.

Link: https://lkml.kernel.org/r/20240402200656.913841-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit f6a8dd98a2
parent 4c773a4425
1 changed file with 16 additions and 17 deletions

 mm/hugetlb.c | 33 ++++++++++++++++-----------------
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2177,13 +2177,13 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 		nodemask_t *node_alloc_noretry)
 {
 	int order = huge_page_order(h);
-	struct page *page;
+	struct folio *folio;
 	bool alloc_try_hard = true;
 	bool retry = true;
 
 	/*
-	 * By default we always try hard to allocate the page with
-	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
+	 * By default we always try hard to allocate the folio with
+	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating folios in
 	 * a loop (to adjust global huge page counts) and previous allocation
 	 * failed, do not continue to try hard on the same node.  Use the
 	 * node_alloc_noretry bitmap to manage this state information.
@@ -2196,43 +2196,42 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
 retry:
-	page = __alloc_pages(gfp_mask, order, nid, nmask);
+	folio = __folio_alloc(gfp_mask, order, nid, nmask);
 
-	/* Freeze head page */
-	if (page && !page_ref_freeze(page, 1)) {
-		__free_pages(page, order);
+	if (folio && !folio_ref_freeze(folio, 1)) {
+		folio_put(folio);
 		if (retry) {	/* retry once */
 			retry = false;
 			goto retry;
 		}
 		/* WOW!  twice in a row. */
-		pr_warn("HugeTLB head page unexpected inflated ref count\n");
-		page = NULL;
+		pr_warn("HugeTLB unexpected inflated folio ref count\n");
+		folio = NULL;
 	}
 
 	/*
-	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
-	 * indicates an overall state change.  Clear bit so that we resume
-	 * normal 'try hard' allocations.
+	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
+	 * folio this indicates an overall state change.  Clear bit so
+	 * that we resume normal 'try hard' allocations.
 	 */
-	if (node_alloc_noretry && page && !alloc_try_hard)
+	if (node_alloc_noretry && folio && !alloc_try_hard)
 		node_clear(nid, *node_alloc_noretry);
 
 	/*
-	 * If we tried hard to get a page but failed, set bit so that
+	 * If we tried hard to get a folio but failed, set bit so that
 	 * subsequent attempts will not try as hard until there is an
 	 * overall state change.
 	 */
-	if (node_alloc_noretry && !page && alloc_try_hard)
+	if (node_alloc_noretry && !folio && alloc_try_hard)
 		node_set(nid, *node_alloc_noretry);
 
-	if (!page) {
+	if (!folio) {
 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
 		return NULL;
 	}
 
 	__count_vm_event(HTLB_BUDDY_PGALLOC);
-	return page_folio(page);
+	return folio;
 }
 
 static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
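For readers skimming the change, here is a condensed sketch of the allocation pattern before and after the conversion. The helper names alloc_frozen_old() and alloc_frozen_new() are hypothetical and exist only for illustration; the retry-once loop, vmstat counters, and node_alloc_noretry bookkeeping of the real function are omitted.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/page_ref.h>

/*
 * Before: page-based.  Freezing and freeing operate on the struct page,
 * and page_folio() at the end has to go through compound_head().
 */
static struct folio *alloc_frozen_old(gfp_t gfp_mask, unsigned int order,
				      int nid, nodemask_t *nmask)
{
	struct page *page = __alloc_pages(gfp_mask, order, nid, nmask);

	if (page && !page_ref_freeze(page, 1)) {
		/* Someone else took a reference; give the pages back. */
		__free_pages(page, order);
		return NULL;
	}
	return page ? page_folio(page) : NULL;
}

/*
 * After: folio-based.  The folio already refers to the head page, so no
 * compound_head() call is needed and the free side no longer takes the
 * order argument.
 */
static struct folio *alloc_frozen_new(gfp_t gfp_mask, unsigned int order,
				      int nid, nodemask_t *nmask)
{
	struct folio *folio = __folio_alloc(gfp_mask, order, nid, nmask);

	if (folio && !folio_ref_freeze(folio, 1)) {
		/* Someone else took a reference; drop ours. */
		folio_put(folio);
		return NULL;
	}
	return folio;
}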