mm/hugetlb: retry to allocate for early boot hugepage allocation
In cloud environments with massive hugepage reservations (95%+ of system
RAM), single-attempt allocation during early boot often fails due to
memory pressure.
Commit 91f386bf07 ("hugetlb: batch freeing of vmemmap pages")
intensified this by deferring page frees, increasing peak memory usage
during allocation.
Introduce a retry mechanism that leverages the memory reclaimed by the
vmemmap optimization (~1.6% of hugepage memory: a 2 MiB hugepage's 512
struct pages, at 64 bytes each, occupy 32 KiB of vmemmap, most of which
the optimization frees) when available. Upon initial allocation
failure, the system retries until successful or no further progress is
made, ensuring reliable hugepage allocation while preserving the
benefits of batched vmemmap freeing.
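The control flow is easiest to see in isolation. Below is a minimal
sketch of the retry pattern, not the patch itself; alloc_batch() and
reclaim_possible() are hypothetical stand-ins for
padata_do_multithreaded() and hugetlb_vmemmap_optimizable():

	/*
	 * Sketch only: alloc_batch() plays the role of
	 * padata_do_multithreaded() and reclaim_possible() the role of
	 * hugetlb_vmemmap_optimizable(). Both are hypothetical.
	 */
	unsigned long alloc_batch(unsigned long n);	/* allocates up to n pages, returns how many */
	bool reclaim_possible(void);			/* can a retry find freed memory? */

	static unsigned long alloc_with_retry(unsigned long target)
	{
		unsigned long allocated = 0;
		unsigned long remaining;

		do {
			remaining = target - allocated;
			allocated += alloc_batch(remaining);	/* may fall short */

			if (allocated == target)
				break;			/* fully allocated */
			if (!reclaim_possible())
				break;			/* retrying cannot help */

			/* retry, but only while the last pass made progress */
		} while (remaining != target - allocated);

		return allocated;
	}

The loop terminates either on success, when reclaim cannot possibly
release more memory, or when an iteration makes no forward progress,
so it cannot spin forever.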
Testing on a 256G machine allocating 252G of hugepages:
Before: 128056/129024 hugepages allocated
After: Successfully allocated all 129024 hugepages
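For reference, that reservation corresponds to 2 MiB hugepages
(129024 * 2 MiB = 252 GiB), i.e. a boot command line along these lines
(the exact parameters are an assumption, not stated in the patch):

	default_hugepagesz=2M hugepagesz=2M hugepages=129024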
Link: https://lkml.kernel.org/r/20250901082052.3247-1-lirongqing@baidu.com
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 2a8f3f44f5
parent 2b79cb3eac

 mm/hugetlb.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3593,10 +3593,9 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 
 	unsigned long jiffies_start;
 	unsigned long jiffies_end;
+	unsigned long remaining;
 
 	job.thread_fn	= hugetlb_pages_alloc_boot_node;
-	job.start	= 0;
-	job.size	= h->max_huge_pages;
 
 	/*
 	 * job.max_threads is 25% of the available cpu threads by default.
@@ -3620,10 +3619,29 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
 	}
 
 	job.max_threads	= hugepage_allocation_threads;
-	job.min_chunk	= h->max_huge_pages / hugepage_allocation_threads;
 
 	jiffies_start = jiffies;
-	padata_do_multithreaded(&job);
+	do {
+		remaining = h->max_huge_pages - h->nr_huge_pages;
+
+		job.start     = h->nr_huge_pages;
+		job.size      = remaining;
+		job.min_chunk = remaining / hugepage_allocation_threads;
+		padata_do_multithreaded(&job);
+
+		if (h->nr_huge_pages == h->max_huge_pages)
+			break;
+
+		/*
+		 * Retry only if the vmemmap optimization might have been able to free
+		 * some memory back to the system.
+		 */
+		if (!hugetlb_vmemmap_optimizable(h))
+			break;
+
+		/* Continue if progress was made in last iteration */
+	} while (remaining != (h->max_huge_pages - h->nr_huge_pages));
+
 	jiffies_end = jiffies;
 
 	pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
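After boot, whether the full reservation succeeded can be confirmed
via the standard hugetlb counters (a generic procfs interface, not
specific to this patch):

	$ grep HugePages_Total /proc/meminfo
	HugePages_Total:  129024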