Mirror of https://github.com/torvalds/linux.git (synced 2025-10-29 07:46:20 +02:00)
7 hotfixes. All 7 are cc:stable and all 7 are for MM. All singletons, please
see the changelogs for details.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaOmCTwAKCRDdBJ7gKXxA
jtQZAQC9sRd+4LNYothoXlY9avKNYR4YvN3ogiIFJqHwiENu/QD/ec/57KME9dA4
H4SqK/49Rs/tVCYmkPTO7IWRmxo9/AA=
=h9On
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-10-10-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "7 hotfixes. All 7 are cc:stable and all 7 are for MM.

  All singletons, please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-10-10-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: hugetlb: avoid soft lockup when mprotect to large memory area
  fsnotify: pass correct offset to fsnotify_mmap_perm()
  mm/ksm: fix flag-dropping behavior in ksm_madvise
  mm/damon/vaddr: do not repeat pte_offset_map_lock() until success
  mm/rmap: fix soft-dirty and uffd-wp bit loss when remapping zero-filled mTHP subpage to shared zeropage
  mm/thp: fix MTE tag mismatch when replacing zero-filled subpages
  memcg: skip cgroup_file_notify if spinning is not allowed
Commit 971370a88c
9 changed files with 45 additions and 42 deletions

@@ -1001,22 +1001,28 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 	count_memcg_events_mm(mm, idx, 1);
 }
 
-static inline void memcg_memory_event(struct mem_cgroup *memcg,
-				      enum memcg_memory_event event)
+static inline void __memcg_memory_event(struct mem_cgroup *memcg,
+					enum memcg_memory_event event,
+					bool allow_spinning)
 {
 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
 			  event == MEMCG_SWAP_FAIL;
 
+	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
+	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
+
 	atomic_long_inc(&memcg->memory_events_local[event]);
-	if (!swap_event)
+	if (!swap_event && allow_spinning)
 		cgroup_file_notify(&memcg->events_local_file);
 
 	do {
 		atomic_long_inc(&memcg->memory_events[event]);
-		if (swap_event)
-			cgroup_file_notify(&memcg->swap_events_file);
-		else
-			cgroup_file_notify(&memcg->events_file);
+		if (allow_spinning) {
+			if (swap_event)
+				cgroup_file_notify(&memcg->swap_events_file);
+			else
+				cgroup_file_notify(&memcg->events_file);
+		}
 
 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 			break;

@@ -1026,6 +1032,12 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
 		 !mem_cgroup_is_root(memcg));
 }
 
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+				      enum memcg_memory_event event)
+{
+	__memcg_memory_event(memcg, event, true);
+}
+
 static inline void memcg_memory_event_mm(struct mm_struct *mm,
 					 enum memcg_memory_event event)
 {
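
The allow_spinning argument exists because cgroup_file_notify() takes locks that must not be acquired from a context that cannot spin; the counters are still updated, only the notification is skipped. A minimal sketch of the intended caller pattern, assuming only that gfpflags_allow_spinning() is available on the charge path; note_charge_failure() is a hypothetical stand-in for the real call site, which is the try_charge_memcg() change further down:

/* Hedged sketch, not from the patch: route an event through the new helper. */
static void note_charge_failure(struct mem_cgroup *memcg, gfp_t gfp_mask)
{
	/*
	 * Notification (and thus taking kernfs locks) is only safe when the
	 * GFP context permits spinning; otherwise just bump the counters.
	 */
	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);

	/* MEMCG_MAX is the only event expected from !allow_spinning context. */
	__memcg_memory_event(memcg, MEMCG_MAX, allow_spinning);
}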
@@ -323,7 +323,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
-#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
+#define VM_MERGEABLE	BIT(31)		/* KSM may merge identical pages */
 
 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
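
The functional difference between 0x80000000 and BIT(31): the hex literal has type unsigned int, so ~VM_MERGEABLE is a 32-bit mask that, once zero-extended to unsigned long, clears every flag above bit 31 (the VM_HIGH_ARCH_* bits) whenever code does vm_flags &= ~VM_MERGEABLE; this is the flag-dropping behavior the ksm_madvise fix in this pull refers to. BIT(31) is 1UL << 31, so its complement keeps the high bits. A small standalone demonstration of the promotion behavior (userspace C, macro names invented for the example):

#include <stdio.h>

#define VM_MERGEABLE_OLD 0x80000000	/* unsigned int literal */
#define VM_MERGEABLE_NEW (1UL << 31)	/* unsigned long, like BIT(31) */

int main(void)
{
	/* Pretend two high (64-bit only) VMA flags are set: bits 32 and 35. */
	unsigned long vm_flags = (1UL << 32) | (1UL << 35);

	/* ~0x80000000u == 0x7fffffffu: zero-extended, it clears bits 31..63. */
	unsigned long with_old = vm_flags & ~VM_MERGEABLE_OLD;
	/* ~(1UL << 31) keeps all other bits of the unsigned long intact. */
	unsigned long with_new = vm_flags & ~VM_MERGEABLE_NEW;

	printf("old mask result: %#lx  new mask result: %#lx\n", with_old, with_new);
	return 0;
}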
@@ -328,10 +328,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 	}
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte) {
-		walk->action = ACTION_AGAIN;
+	if (!pte)
 		return 0;
-	}
 	if (!pte_present(ptep_get(pte)))
 		goto out;
 	damon_ptep_mkold(pte, walk->vma, addr);

@@ -481,10 +479,8 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte) {
-		walk->action = ACTION_AGAIN;
+	if (!pte)
 		return 0;
-	}
 	ptent = ptep_get(pte);
 	if (!pte_present(ptent))
 		goto out;
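
Previously both DAMON walkers reacted to a failed pte_offset_map_lock() by setting walk->action = ACTION_AGAIN, which asks the page-walk core to retry the same PMD and can loop without making progress; returning 0 simply skips the range. A minimal sketch of the resulting pattern for a pte-level walker (illustrative only, example_pte_entry() is hypothetical):

/* Hedged sketch of the pte_offset_map_lock() failure handling pattern. */
static int example_pte_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;	/* PMD changed under us: skip, do not retry forever */

	if (pte_present(ptep_get(pte))) {
		/* ... inspect or update the mapping here ... */
	}

	pte_unmap_unlock(pte, ptl);
	return 0;
}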
@@ -4104,32 +4104,23 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
 static bool thp_underused(struct folio *folio)
 {
 	int num_zero_pages = 0, num_filled_pages = 0;
-	void *kaddr;
 	int i;
 
 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
 		return false;
 
 	for (i = 0; i < folio_nr_pages(folio); i++) {
-		kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
-		if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
-			num_zero_pages++;
-			if (num_zero_pages > khugepaged_max_ptes_none) {
-				kunmap_local(kaddr);
-				return true;
-			}
+		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
+			if (++num_zero_pages > khugepaged_max_ptes_none)
+				return true;
 		} else {
 			/*
 			 * Another path for early exit once the number
 			 * of non-zero filled pages exceeds threshold.
 			 */
-			num_filled_pages++;
-			if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
-				kunmap_local(kaddr);
-				return false;
-			}
+			if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
+				return false;
 		}
-		kunmap_local(kaddr);
 	}
 	return false;
 }
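
memchr_inv() only inspects the data bytes of a subpage, while pages_identical() goes through memcmp_pages(), which architectures may override; on arm64 with MTE the comparison also fails when the memory tags differ, so a zero-filled page carrying non-default tags is no longer treated as replaceable by the shared zero page. A hedged illustration of the distinction (subpage_reusable_as_zero() is a made-up helper, not kernel code):

/*
 * Hedged sketch: why the scan switched from a byte comparison to
 * pages_identical().
 */
static bool subpage_reusable_as_zero(struct page *page)
{
	/*
	 * A byte scan such as !memchr_inv(kaddr, 0, PAGE_SIZE) sees only the
	 * data, so a zero-filled page with non-default MTE tags would pass.
	 * pages_identical() uses memcmp_pages(), letting the architecture
	 * also compare tags, so such a page is correctly rejected.
	 */
	return pages_identical(page, ZERO_PAGE(0));
}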
@@ -7222,6 +7222,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 					      psize);
 		}
 		spin_unlock(ptl);
+
+		cond_resched();
 	}
 	/*
 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
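
hugetlb_change_protection() walks the range one huge page at a time, and on a very large mapping the loop previously ran without a scheduling point, which can trigger soft-lockup warnings during mprotect(); the added cond_resched() yields between pages. A generic sketch of the pattern (illustrative, not the hugetlb code itself; the function and parameters are hypothetical):

/*
 * Hedged sketch: any loop over a user-sized range should contain an
 * explicit scheduling point.
 */
static void change_range_protection(unsigned long start, unsigned long end,
				    unsigned long psize)
{
	unsigned long address;

	for (address = start; address < end; address += psize) {
		/* ... update one huge page's PTE under its page table lock ... */

		cond_resched();	/* avoid soft lockups on very large ranges */
	}
}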
@@ -2307,12 +2307,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	bool drained = false;
 	bool raised_max_event = false;
 	unsigned long pflags;
+	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
 
 retry:
 	if (consume_stock(memcg, nr_pages))
 		return 0;
 
-	if (!gfpflags_allow_spinning(gfp_mask))
+	if (!allow_spinning)
 		/* Avoid the refill and flush of the older stock */
 		batch = nr_pages;
 

@@ -2348,7 +2349,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (!gfpflags_allow_blocking(gfp_mask))
 		goto nomem;
 
-	memcg_memory_event(mem_over_limit, MEMCG_MAX);
+	__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
 	raised_max_event = true;
 
 	psi_memstall_enter(&pflags);

@@ -2415,7 +2416,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * a MEMCG_MAX event.
 	 */
 	if (!raised_max_event)
-		memcg_memory_event(mem_over_limit, MEMCG_MAX);
+		__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
 
 	/*
 	 * The allocation either can't fail or will lead to more memory
mm/migrate.c (23 changed lines)
@@ -296,19 +296,16 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 }
 
 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
-					  struct folio *folio,
-					  unsigned long idx)
+					  struct folio *folio, pte_t old_pte, unsigned long idx)
 {
 	struct page *page = folio_page(folio, idx);
-	bool contains_data;
 	pte_t newpte;
-	void *addr;
 
 	if (PageCompound(page))
 		return false;
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
+	VM_BUG_ON_PAGE(pte_present(old_pte), page);
 
 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 	    mm_forbids_zeropage(pvmw->vma->vm_mm))

@@ -319,15 +316,17 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	 * this subpage has been non present. If the subpage is only zero-filled
 	 * then map it to the shared zeropage.
 	 */
-	addr = kmap_local_page(page);
-	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
-	kunmap_local(addr);
-
-	if (contains_data)
+	if (!pages_identical(page, ZERO_PAGE(0)))
 		return false;
 
 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
 					pvmw->vma->vm_page_prot));
+
+	if (pte_swp_soft_dirty(old_pte))
+		newpte = pte_mksoft_dirty(newpte);
+	if (pte_swp_uffd_wp(old_pte))
+		newpte = pte_mkuffd_wp(newpte);
+
 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
 
 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));

@@ -370,13 +369,13 @@ static bool remove_migration_pte(struct folio *folio,
 			continue;
 		}
 #endif
+		old_pte = ptep_get(pvmw.pte);
 		if (rmap_walk_arg->map_unused_to_zeropage &&
-		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
 			continue;
 
 		folio_get(folio);
 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
-		old_pte = ptep_get(pvmw.pte);
 
 		entry = pte_to_swp_entry(old_pte);
 		if (!is_migration_entry_young(entry))
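
The core of the rmap fix: a migration entry is a non-present, swap-style PTE that carries the soft-dirty and uffd-wp software bits, and those bits must be re-applied when the subpage is remapped to the shared zeropage, otherwise soft-dirty tracking and userfaultfd write-protect state are silently lost. A condensed sketch of the bit transfer, distilled from the hunk above (the helper name is invented):

/* Hedged sketch: build the zeropage PTE while preserving swap-PTE soft bits. */
static pte_t zeropage_pte_preserving_bits(struct vm_area_struct *vma,
					  unsigned long address, pte_t old_pte)
{
	pte_t newpte = pte_mkspecial(pfn_pte(my_zero_pfn(address),
					     vma->vm_page_prot));

	if (pte_swp_soft_dirty(old_pte))	/* soft-dirty was set on the swap PTE */
		newpte = pte_mksoft_dirty(newpte);
	if (pte_swp_uffd_wp(old_pte))		/* uffd write-protect was set */
		newpte = pte_mkuffd_wp(newpte);

	return newpte;
}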
@@ -566,6 +566,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long pgoff)
 {
+	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 	unsigned long ret;
 	struct mm_struct *mm = current->mm;
 	unsigned long populate;

@@ -573,7 +574,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 
 	ret = security_mmap_file(file, prot, flag);
 	if (!ret)
-		ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
+		ret = fsnotify_mmap_perm(file, prot, off, len);
 	if (!ret) {
 		if (mmap_write_lock_killable(mm))
 			return -EINTR;
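
fsnotify_mmap_perm() expects a byte offset into the file, but the old call passed pgoff >> PAGE_SHIFT, which divides the page index by the page size again and is 0 for any mapping offset below 16 MiB with 4 KiB pages; the fix computes the byte offset once as (loff_t)pgoff << PAGE_SHIFT. A standalone demonstration of the two expressions (userspace C, PAGE_SHIFT assumed to be 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long pgoff = 3;	/* mmap offset expressed in pages */

	long long off = (long long)pgoff << PAGE_SHIFT;	/* 12288: correct byte offset */
	unsigned long old_arg = pgoff >> PAGE_SHIFT;	/* 0: what the old code passed */

	printf("byte offset %lld, old argument %lu\n", off, old_arg);
	return 0;
}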
@@ -108,6 +108,7 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT = XA_PRESENT;
 
 const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
 const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
 
 #if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_RUST)
 #include "../../drivers/android/binder/rust_binder.h"