Mirror of https://github.com/torvalds/linux.git (synced 2025-11-03 10:10:33 +02:00)
mm/madvise: batch tlb flushes for MADV_DONTNEED[_LOCKED]
MADV_DONTNEED[_LOCKED] handling for [process_]madvise() flushes the TLB for each VMA of each address range. Update the logic to do the TLB flushes in a batched way.

Initialize an mmu_gather object from do_madvise() and vector_madvise(), which are the entry-level functions for madvise() and process_madvise() respectively, and pass it to the per-VMA work via the madvise_behavior struct. Make the per-VMA logic not flush the TLB on its own but only save the TLB entries to the received mmu_gather object. For this internal change, make zap_page_range_single_batched() non-static and use it directly from madvise_dontneed_single_vma(). Finally, the entry-level functions flush the TLB entries gathered for the entire user request at once.

Link: https://lkml.kernel.org/r/20250410000022.1901-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in: commit 43c4cfde7e (parent de8efdf8cd)

3 changed files with 13 additions and 5 deletions
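The batching pays off when a single request covers many VMAs or address ranges, since before this change each per-VMA zap triggered its own TLB flush. The user-space snippet below (illustrative only, not from the patch) exercises exactly that path: it hands two discontiguous ranges to process_madvise(MADV_DONTNEED) in one call. It uses raw syscall() for pidfd_open() and process_madvise() since libc wrapper availability varies, and it assumes a kernel where process_madvise() accepts MADV_DONTNEED for the calling process, the combination this series optimizes.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;
	char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;
	memset(a, 1, len);	/* fault the pages in so there is work to do */
	memset(b, 1, len);

	/* Two discontiguous ranges, handed to the kernel in one call. */
	struct iovec ranges[2] = {
		{ .iov_base = a, .iov_len = len },
		{ .iov_base = b, .iov_len = len },
	};
	long pidfd = syscall(SYS_pidfd_open, getpid(), 0);
	if (pidfd < 0 || syscall(SYS_process_madvise, pidfd, ranges, 2,
				 MADV_DONTNEED, 0) < 0) {
		perror("process_madvise");
		return 1;
	}
	return 0;
}

With this patch applied, both ranges are zapped under one mmu_gather, so the request ends with a single batched TLB flush instead of one flush per VMA.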
include/linux/mm.h (3 changes)

@@ -430,6 +430,9 @@ void unmap_page_range(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long addr, unsigned long end,
 			  struct zap_details *details);
+void zap_page_range_single_batched(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, unsigned long addr,
+		unsigned long size, struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 		gfp_t gfp);
 
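The newly exported declaration implies a contract for callers: set up the mmu_gather first, zap each VMA without flushing, and flush once at the end. A minimal caller-side sketch in kernel context (not a standalone program; assumes the mmap lock is held), using the real tlb_gather_mmu()/tlb_finish_mmu() and VMA iterator APIs:

/* Sketch: walk the VMAs covering [start, end) and zap each one under a
 * single shared mmu_gather, so the TLB is flushed once per request.
 */
static void zap_range_batched_sketch(struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	struct zap_details details = { .even_cows = true };
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);		/* init once per request */
	for_each_vma_range(vmi, vma, end) {
		unsigned long s = max(start, vma->vm_start);
		unsigned long e = min(end, vma->vm_end);

		/* records entries in @tlb; no flush happens here */
		zap_page_range_single_batched(&tlb, vma, s, e - s, &details);
	}
	tlb_finish_mmu(&tlb);			/* one batched TLB flush */
}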
mm/madvise.c (11 changes)

@@ -851,7 +851,8 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior,
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct madvise_behavior *madv_behavior,
+		struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
 	struct zap_details details = {
@@ -859,7 +860,8 @@ static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 		.even_cows = true,
 	};
 
-	zap_page_range_single(vma, start, end - start, &details);
+	zap_page_range_single_batched(
+			madv_behavior->tlb, vma, start, end - start, &details);
 	return 0;
 }
 
@@ -950,7 +952,8 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
 	}
 
 	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
-		return madvise_dontneed_single_vma(vma, start, end);
+		return madvise_dontneed_single_vma(
+				madv_behavior, vma, start, end);
 	else if (behavior == MADV_FREE)
 		return madvise_free_single_vma(madv_behavior, vma, start, end);
 	else
@@ -1628,6 +1631,8 @@ static void madvise_unlock(struct mm_struct *mm, int behavior)
 static bool madvise_batch_tlb_flush(int behavior)
 {
 	switch (behavior) {
+	case MADV_DONTNEED:
+	case MADV_DONTNEED_LOCKED:
 	case MADV_FREE:
 		return true;
 	default:
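Tying the mm/madvise.c changes together: the entry-level functions consult madvise_batch_tlb_flush() to decide whether to set up a shared mmu_gather, and the per-VMA handlers only record entries into it via madv_behavior->tlb. A rough sketch of that flow follows; madvise_walk_vmas_sketch() is a hypothetical placeholder for the VMA-walking step, and the exact layout of struct madvise_behavior beyond the ->tlb pointer shown in the diff is assumed:

/* Entry-level sketch of the batched flow in do_madvise()/vector_madvise().
 * Names marked hypothetical are illustrative, not taken from the patch.
 */
static int do_madvise_sketch(struct mm_struct *mm, unsigned long start,
			     size_t len_in, int behavior)
{
	struct mmu_gather tlb;
	struct madvise_behavior madv_behavior = {
		.behavior = behavior,	/* assumed field */
	};
	int error;

	if (madvise_batch_tlb_flush(behavior)) {
		tlb_gather_mmu(&tlb, mm);	/* one gather per request */
		madv_behavior.tlb = &tlb;
	}
	/* Per-VMA work only saves TLB entries into madv_behavior.tlb. */
	error = madvise_walk_vmas_sketch(mm, start, start + len_in,
					 &madv_behavior);	/* hypothetical */
	if (madv_behavior.tlb)
		tlb_finish_mmu(&tlb);		/* flush once, at the end */
	return error;
}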
mm/memory.c (4 changes)

@@ -1998,7 +1998,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 	mmu_notifier_invalidate_range_end(&range);
 }
 
-/*
+/**
  * zap_page_range_single_batched - remove user pages in a given range
  * @tlb: pointer to the caller's struct mmu_gather
  * @vma: vm_area_struct holding the applicable pages
@@ -2009,7 +2009,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
  * @tlb shouldn't be NULL. The range must fit into one VMA. If @vma is for
  * hugetlb, @tlb is flushed and re-initialized by this function.
  */
-static void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_page_range_single_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {