
Merge tag 'mm-hotfixes-stable-2025-10-10-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "7 hotfixes.  All 7 are cc:stable and all 7 are for MM.

  All singletons, please see the changelogs for details"

* tag 'mm-hotfixes-stable-2025-10-10-15-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: hugetlb: avoid soft lockup when mprotect to large memory area
  fsnotify: pass correct offset to fsnotify_mmap_perm()
  mm/ksm: fix flag-dropping behavior in ksm_madvise
  mm/damon/vaddr: do not repeat pte_offset_map_lock() until success
  mm/rmap: fix soft-dirty and uffd-wp bit loss when remapping zero-filled mTHP subpage to shared zeropage
  mm/thp: fix MTE tag mismatch when replacing zero-filled subpages
  memcg: skip cgroup_file_notify if spinning is not allowed
commit 971370a88c by Linus Torvalds, 2025-10-11 10:14:55 -07:00
9 changed files with 45 additions and 42 deletions


@@ -1001,22 +1001,28 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 	count_memcg_events_mm(mm, idx, 1);
 }
 
-static inline void memcg_memory_event(struct mem_cgroup *memcg,
-				      enum memcg_memory_event event)
+static inline void __memcg_memory_event(struct mem_cgroup *memcg,
+					enum memcg_memory_event event,
+					bool allow_spinning)
 {
 	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
 			  event == MEMCG_SWAP_FAIL;
 
+	/* For now only MEMCG_MAX can happen with !allow_spinning context. */
+	VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
+
 	atomic_long_inc(&memcg->memory_events_local[event]);
-	if (!swap_event)
+	if (!swap_event && allow_spinning)
 		cgroup_file_notify(&memcg->events_local_file);
 
 	do {
 		atomic_long_inc(&memcg->memory_events[event]);
-		if (swap_event)
-			cgroup_file_notify(&memcg->swap_events_file);
-		else
-			cgroup_file_notify(&memcg->events_file);
+		if (allow_spinning) {
+			if (swap_event)
+				cgroup_file_notify(&memcg->swap_events_file);
+			else
+				cgroup_file_notify(&memcg->events_file);
+		}
 
 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
 			break;
@@ -1026,6 +1032,12 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
 					!mem_cgroup_is_root(memcg));
 }
 
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+				      enum memcg_memory_event event)
+{
+	__memcg_memory_event(memcg, event, true);
+}
+
 static inline void memcg_memory_event_mm(struct mm_struct *mm,
 					 enum memcg_memory_event event)
 {


@@ -323,7 +323,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 #define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 #define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
-#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
+#define VM_MERGEABLE	BIT(31)		/* KSM may merge identical pages */
 
 #ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 #define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
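
The BIT(31) spelling matters because the plain literal 0x80000000 has type unsigned int: when ksm_madvise() clears the flag with *vm_flags &= ~VM_MERGEABLE, the complement is computed in 32 bits and then zero-extended, which also wipes every VMA flag above bit 31 on 64-bit kernels. BIT(31) is an unsigned long, so the complement keeps the high bits. A minimal user-space sketch of that promotion rule (the macro names below are illustrative, not kernel code):

#include <stdio.h>

/* Mimics the old and new spellings of the flag; illustrative only. */
#define OLD_MERGEABLE	0x80000000	/* unsigned int literal            */
#define NEW_MERGEABLE	(1UL << 31)	/* roughly what BIT(31) expands to */

int main(void)
{
	/* Pretend a high (>= bit 32) VMA flag is set alongside bit 31. */
	unsigned long flags = (1UL << 32) | (1UL << 31);

	unsigned long a = flags & ~OLD_MERGEABLE; /* ~ done in 32 bits, then zero-extended */
	unsigned long b = flags & ~NEW_MERGEABLE; /* ~ done in 64 bits */

	printf("old-style mask: %#lx\n", a);	/* prints 0: the bit-32 flag is dropped */
	printf("new-style mask: %#lx\n", b);	/* prints 0x100000000: preserved        */
	return 0;
}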


@@ -328,10 +328,8 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 	}
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte) {
-		walk->action = ACTION_AGAIN;
+	if (!pte)
 		return 0;
-	}
 	if (!pte_present(ptep_get(pte)))
 		goto out;
 	damon_ptep_mkold(pte, walk->vma, addr);
@@ -481,10 +479,8 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	if (!pte) {
-		walk->action = ACTION_AGAIN;
+	if (!pte)
 		return 0;
-	}
 	ptent = ptep_get(pte);
 	if (!pte_present(ptent))
 		goto out;


@@ -4104,32 +4104,23 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
 static bool thp_underused(struct folio *folio)
 {
 	int num_zero_pages = 0, num_filled_pages = 0;
-	void *kaddr;
 	int i;
 
 	if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
 		return false;
 
 	for (i = 0; i < folio_nr_pages(folio); i++) {
-		kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
-		if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
-			num_zero_pages++;
-			if (num_zero_pages > khugepaged_max_ptes_none) {
-				kunmap_local(kaddr);
+		if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
+			if (++num_zero_pages > khugepaged_max_ptes_none)
 				return true;
-			}
 		} else {
 			/*
 			 * Another path for early exit once the number
 			 * of non-zero filled pages exceeds threshold.
 			 */
-			num_filled_pages++;
-			if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
-				kunmap_local(kaddr);
+			if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
 				return false;
-			}
 		}
-		kunmap_local(kaddr);
 	}
 	return false;
 }


@@ -7222,6 +7222,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 					psize);
 		}
 		spin_unlock(ptl);
+
+		cond_resched();
 	}
 	/*
 	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
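
The cond_resched() bounds how long one mprotect() call can hold a CPU: hugetlb_change_protection() walks the range one huge PTE at a time, so a sufficiently large mapping (for example, 1 TiB of 2 MiB pages is 524,288 iterations) could previously keep the task in the kernel past the soft-lockup threshold. A rough user-space sketch of the pattern that exercises this path, assuming enough huge pages are reserved (the size and flags below are illustrative, not taken from the changelog):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 30;	/* 64 GiB; scale up to taste */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);			/* fault in every huge page         */
	if (mprotect(p, len, PROT_READ))	/* long per-huge-PTE loop in kernel */
		perror("mprotect");
	munmap(p, len);
	return 0;
}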


@@ -2307,12 +2307,13 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	bool drained = false;
 	bool raised_max_event = false;
 	unsigned long pflags;
+	bool allow_spinning = gfpflags_allow_spinning(gfp_mask);
 
 retry:
 	if (consume_stock(memcg, nr_pages))
 		return 0;
 
-	if (!gfpflags_allow_spinning(gfp_mask))
+	if (!allow_spinning)
 		/* Avoid the refill and flush of the older stock */
 		batch = nr_pages;
 
@@ -2348,7 +2349,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (!gfpflags_allow_blocking(gfp_mask))
 		goto nomem;
 
-	memcg_memory_event(mem_over_limit, MEMCG_MAX);
+	__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
 	raised_max_event = true;
 
 	psi_memstall_enter(&pflags);
@@ -2415,7 +2416,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * a MEMCG_MAX event.
 	 */
 	if (!raised_max_event)
-		memcg_memory_event(mem_over_limit, MEMCG_MAX);
+		__memcg_memory_event(mem_over_limit, MEMCG_MAX, allow_spinning);
 
 	/*
 	 * The allocation either can't fail or will lead to more memory


@@ -296,19 +296,16 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
 }
 
 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
-					  struct folio *folio,
-					  unsigned long idx)
+		struct folio *folio, pte_t old_pte, unsigned long idx)
 {
 	struct page *page = folio_page(folio, idx);
-	bool contains_data;
 	pte_t newpte;
-	void *addr;
 
 	if (PageCompound(page))
 		return false;
 	VM_BUG_ON_PAGE(!PageAnon(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
+	VM_BUG_ON_PAGE(pte_present(old_pte), page);
 
 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -319,15 +316,17 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 	 * this subpage has been non present. If the subpage is only zero-filled
 	 * then map it to the shared zeropage.
 	 */
-	addr = kmap_local_page(page);
-	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
-	kunmap_local(addr);
-	if (contains_data)
+	if (!pages_identical(page, ZERO_PAGE(0)))
 		return false;
 
 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
 					pvmw->vma->vm_page_prot));
 
+	if (pte_swp_soft_dirty(old_pte))
+		newpte = pte_mksoft_dirty(newpte);
+	if (pte_swp_uffd_wp(old_pte))
+		newpte = pte_mkuffd_wp(newpte);
+
 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
 
 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
@@ -370,13 +369,13 @@ static bool remove_migration_pte(struct folio *folio,
 			continue;
 		}
 #endif
+		old_pte = ptep_get(pvmw.pte);
 
 		if (rmap_walk_arg->map_unused_to_zeropage &&
-		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
+		    try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
 			continue;
 
 		folio_get(folio);
 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
-		old_pte = ptep_get(pvmw.pte);
 		entry = pte_to_swp_entry(old_pte);
 		if (!is_migration_entry_young(entry))
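
The added pte_mksoft_dirty()/pte_mkuffd_wp() calls carry the soft-dirty and userfaultfd write-protect markers from the old swap PTE over to the freshly built zeropage PTE; before this, remapping a zero-filled mTHP subpage to the shared zeropage silently cleared them, so dirty tracking or uffd-wp users could miss a page. Soft-dirty is what userspace reads back from /proc/<pid>/pagemap (bit 55 of each 64-bit entry, with CONFIG_MEM_SOFT_DIRTY); a minimal reader of that bit, as an illustration of the kind of consumer that loses information when the kernel drops the flag (the scenario is mine, not from the changelog):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Return the soft-dirty flag (pagemap bit 55) for one mapped address;
 * writing "4" to /proc/self/clear_refs resets these bits. */
static int page_soft_dirty(void *addr)
{
	uint64_t entry;
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  ((uintptr_t)addr / psize) * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);
	return (entry >> 55) & 1;
}

int main(void)
{
	static char buf[4096];	/* one page of scratch memory */

	buf[0] = 1;	/* write so the page is mapped and marked soft-dirty */
	printf("soft-dirty: %d\n", page_soft_dirty(buf));
	return 0;
}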


@@ -566,6 +566,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long pgoff)
 {
+	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 	unsigned long ret;
 	struct mm_struct *mm = current->mm;
 	unsigned long populate;
@@ -573,7 +574,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 
 	ret = security_mmap_file(file, prot, flag);
 	if (!ret)
-		ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len);
+		ret = fsnotify_mmap_perm(file, prot, off, len);
 	if (!ret) {
 		if (mmap_write_lock_killable(mm))
 			return -EINTR;
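
fsnotify_mmap_perm() takes the start of the mapped file range as a byte offset (loff_t), while vm_mmap_pgoff() carries it as a page index. The old expression shifted the page index down a second time, so with 4 KiB pages any mmap() offset below 16 MiB was reported to listeners as 0, and larger offsets were wrong as well; the fix widens to loff_t and shifts the other way so the byte offset is correct and cannot overflow a 32-bit long. A worked example of the two conversions (plain C, values illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, the common configuration */

int main(void)
{
	unsigned long pgoff = 16;	/* mmap() offset of 16 pages, i.e. 64 KiB */

	/* old, wrong: treats the page index as if it were a byte count */
	unsigned long wrong = pgoff >> PAGE_SHIFT;	/* 16 >> 12 == 0 */
	/* fixed: widen, then convert the page index to a byte offset */
	unsigned long long right = (unsigned long long)pgoff << PAGE_SHIFT; /* 65536 */

	printf("old: %lu  new: %llu\n", wrong, right);
	return 0;
}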


@@ -108,6 +108,7 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT = XA_PRESENT;
 
 const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
 const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE = VM_MERGEABLE;
 
 #if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_RUST)
 #include "../../drivers/android/binder/rust_binder.h"