Merge tag 'mm-hotfixes-stable-2025-04-16-19-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "31 hotfixes. 9 are cc:stable and the remainder address post-6.15
  issues or aren't considered necessary for -stable kernels.

  22 patches are for MM, 9 are otherwise"

* tag 'mm-hotfixes-stable-2025-04-16-19-59' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (31 commits)
  MAINTAINERS: update HUGETLB reviewers
  mm: fix apply_to_existing_page_range()
  selftests/mm: fix compiler -Wmaybe-uninitialized warning
  alloc_tag: handle incomplete bulk allocations in vm_module_tags_populate
  mailmap: add entry for Jean-Michel Hautbois
  mm: (un)track_pfn_copy() fix + doc improvements
  mm: fix filemap_get_folios_contig returning batches of identical folios
  mm/hugetlb: add a line break at the end of the format string
  selftests: mincore: fix tmpfs mincore test failure
  mm/hugetlb: fix set_max_huge_pages() when there are surplus pages
  mm/cma: report base address of single range correctly
  mm: page_alloc: speed up fallbacks in rmqueue_bulk()
  kunit: slub: add module description
  mm/kasan: add module description
  ucs2_string: add module description
  zlib: add module description
  fpga: tests: add module descriptions
  samples/livepatch: add module descriptions
  ASN.1: add module description
  mm/vma: add give_up_on_oom option on modify/merge, use in uffd release
  ...
commit cfb2e2c57a
38 changed files with 359 additions and 284 deletions

.mailmap (3 changed lines)

@@ -322,6 +322,7 @@ Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
 Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
 Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
 <jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
+Jean-Michel Hautbois <jeanmichel.hautbois@yoseli.org> <jeanmichel.hautbois@ideasonboard.com>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeevan Shriram <quic_jshriram@quicinc.com> <jshriram@codeaurora.org>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
@@ -438,6 +439,8 @@ Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lior David <quic_liord@quicinc.com> <liord@codeaurora.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org>
+Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com>
 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
 Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com>
 Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>

@@ -27,7 +27,7 @@ SYSCALL
 =======
 mseal syscall signature
 -----------------------
-``int mseal(void \* addr, size_t len, unsigned long flags)``
+``int mseal(void *addr, size_t len, unsigned long flags)``
 
 **addr**/**len**: virtual memory address range.
 The address range set by **addr**/**len** must meet:

@@ -10956,6 +10956,7 @@ F: include/linux/platform_data/huawei-gaokun-ec.h
 
 HUGETLB SUBSYSTEM
 M:	Muchun Song <muchun.song@linux.dev>
+R:	Oscar Salvador <osalvador@suse.de>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
@@ -12812,6 +12813,7 @@ F: lib/Kconfig.kcsan
 F:	scripts/Makefile.kcsan
 
 KDUMP
+M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Baoquan He <bhe@redhat.com>
 R:	Vivek Goyal <vgoyal@redhat.com>
 R:	Dave Young <dyoung@redhat.com>
@@ -13113,6 +13115,8 @@ F: fs/kernfs/
 F:	include/linux/kernfs.h
 
 KEXEC
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	Baoquan He <bhe@redhat.com>
 L:	kexec@lists.infradead.org
 W:	http://kernel.org/pub/linux/utils/kernel/kexec/
 F:	include/linux/kexec.h

@@ -170,4 +170,5 @@ static struct kunit_suite fpga_bridge_suite = {
 
 kunit_test_suite(fpga_bridge_suite);
 
+MODULE_DESCRIPTION("KUnit test for the FPGA Bridge");
 MODULE_LICENSE("GPL");

@@ -330,4 +330,5 @@ static struct kunit_suite fpga_mgr_suite = {
 
 kunit_test_suite(fpga_mgr_suite);
 
+MODULE_DESCRIPTION("KUnit test for the FPGA Manager");
 MODULE_LICENSE("GPL");

@@ -214,4 +214,5 @@ static struct kunit_suite fpga_region_suite = {
 
 kunit_test_suite(fpga_region_suite);
 
+MODULE_DESCRIPTION("KUnit test for the FPGA Region");
 MODULE_LICENSE("GPL");

@@ -52,43 +52,22 @@
 	__local_unlock_irqrestore(lock, flags)
 
 /**
- * localtry_lock_init - Runtime initialize a lock instance
+ * local_lock_init - Runtime initialize a lock instance
  */
-#define localtry_lock_init(lock)	__localtry_lock_init(lock)
+#define local_trylock_init(lock)	__local_trylock_init(lock)
 
 /**
- * localtry_lock - Acquire a per CPU local lock
- * @lock:	The lock variable
- */
-#define localtry_lock(lock)		__localtry_lock(lock)
-
-/**
- * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
- * @lock:	The lock variable
- */
-#define localtry_lock_irq(lock)		__localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- *			   interrupts
- * @lock:	The lock variable
- * @flags:	Storage for interrupt flags
- */
-#define localtry_lock_irqsave(lock, flags)			\
-	__localtry_lock_irqsave(lock, flags)
-
-/**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
  * @lock:	The lock variable
  *
  * The function can be used in any context such as NMI or HARDIRQ. Due to
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define localtry_trylock(lock)		__localtry_trylock(lock)
+#define local_trylock(lock)		__local_trylock(lock)
 
 /**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
  *			      interrupts if acquired
  * @lock:	The lock variable
  * @flags:	Storage for interrupt flags
@@ -97,29 +76,8 @@
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define localtry_trylock_irqsave(lock, flags)			\
-	__localtry_trylock_irqsave(lock, flags)
+#define local_trylock_irqsave(lock, flags)			\
+	__local_trylock_irqsave(lock, flags)
 
-/**
- * local_unlock - Release a per CPU local lock
- * @lock:	The lock variable
- */
-#define localtry_unlock(lock)		__localtry_unlock(lock)
-
-/**
- * local_unlock_irq - Release a per CPU local lock and enable interrupts
- * @lock:	The lock variable
- */
-#define localtry_unlock_irq(lock)	__localtry_unlock_irq(lock)
-
-/**
- * localtry_unlock_irqrestore - Release a per CPU local lock and restore
- *				interrupt flags
- * @lock:	The lock variable
- * @flags:	Interrupt flags to restore
- */
-#define localtry_unlock_irqrestore(lock, flags)			\
-	__localtry_unlock_irqrestore(lock, flags)
-
 DEFINE_GUARD(local_lock, local_lock_t __percpu*,
 	     local_lock(_T),
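
The kernel-doc above only states the local_trylock()/local_trylock_irqsave() contract. As a rough usage sketch, hedged and not part of this merge (the my_stats structure and helper are hypothetical; the pattern mirrors the memcg_stock conversion further down):

/* Hypothetical example: a per-CPU counter updated opportunistically from
 * any context, including NMI, by skipping the update when the lock is busy. */
struct my_stats {
	local_trylock_t	lock;
	u64		events;
};

static DEFINE_PER_CPU(struct my_stats, my_stats) = {
	.lock = INIT_LOCAL_TRYLOCK(lock),
};

static bool my_stats_inc(void)
{
	unsigned long flags;

	/* Fails instead of deadlocking if this CPU already holds the lock,
	 * and always fails in NMI/HARDIRQ context on PREEMPT_RT. */
	if (!local_trylock_irqsave(&my_stats.lock, flags))
		return false;
	this_cpu_inc(my_stats.events);
	local_unlock_irqrestore(&my_stats.lock, flags);
	return true;
}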

@@ -15,10 +15,11 @@ typedef struct {
 #endif
 } local_lock_t;
 
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
 typedef struct {
 	local_lock_t	llock;
-	unsigned int	acquired;
-} localtry_lock_t;
+	u8		acquired;
+} local_trylock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
@@ -29,6 +30,9 @@ typedef struct {
 	},						\
 	.owner = NULL,
 
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)		\
+	.llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+
 static inline void local_lock_acquire(local_lock_t *l)
 {
 	lock_map_acquire(&l->dep_map);
@@ -56,6 +60,7 @@ static inline void local_lock_debug_init(local_lock_t *l)
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
 static inline void local_trylock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
@@ -63,7 +68,7 @@ static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
-#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
+#define INIT_LOCAL_TRYLOCK(lockname)	{ LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
 
 #define __local_lock_init(lock)					\
 do {								\
@@ -76,6 +81,8 @@ do {								\
 	local_lock_debug_init(lock);				\
 } while (0)
 
+#define __local_trylock_init(lock) __local_lock_init(lock.llock)
+
 #define __spinlock_nested_bh_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
@@ -87,39 +94,105 @@ do {								\
 	local_lock_debug_init(lock);				\
 } while (0)
 
+#define __local_lock_acquire(lock)					\
+	do {								\
+		local_trylock_t *tl;					\
+		local_lock_t *l;					\
+									\
+		l = (local_lock_t *)this_cpu_ptr(lock);			\
+		tl = (local_trylock_t *)l;				\
+		_Generic((lock),					\
+			 local_trylock_t *: ({				\
+				 lockdep_assert(tl->acquired == 0);	\
+				 WRITE_ONCE(tl->acquired, 1);		\
+			 }),						\
+			 default:(void)0);				\
+		local_lock_acquire(l);					\
+	} while (0)
+
 #define __local_lock(lock)					\
 	do {							\
 		preempt_disable();				\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		__local_lock_acquire(lock);			\
 	} while (0)
 
 #define __local_lock_irq(lock)					\
 	do {							\
 		local_irq_disable();				\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		__local_lock_acquire(lock);			\
 	} while (0)
 
 #define __local_lock_irqsave(lock, flags)			\
 	do {							\
 		local_irq_save(flags);				\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		__local_lock_acquire(lock);			\
 	} while (0)
 
+#define __local_trylock(lock)					\
+	({							\
+		local_trylock_t *tl;				\
+								\
+		preempt_disable();				\
+		tl = this_cpu_ptr(lock);			\
+		if (READ_ONCE(tl->acquired)) {			\
+			preempt_enable();			\
+			tl = NULL;				\
+		} else {					\
+			WRITE_ONCE(tl->acquired, 1);		\
+			local_trylock_acquire(			\
+				(local_lock_t *)tl);		\
+		}						\
+		!!tl;						\
+	})
+
+#define __local_trylock_irqsave(lock, flags)			\
+	({							\
+		local_trylock_t *tl;				\
+								\
+		local_irq_save(flags);				\
+		tl = this_cpu_ptr(lock);			\
+		if (READ_ONCE(tl->acquired)) {			\
+			local_irq_restore(flags);		\
+			tl = NULL;				\
+		} else {					\
+			WRITE_ONCE(tl->acquired, 1);		\
+			local_trylock_acquire(			\
+				(local_lock_t *)tl);		\
+		}						\
+		!!tl;						\
+	})
+
+#define __local_lock_release(lock)					\
+	do {								\
+		local_trylock_t *tl;					\
+		local_lock_t *l;					\
+									\
+		l = (local_lock_t *)this_cpu_ptr(lock);			\
+		tl = (local_trylock_t *)l;				\
+		local_lock_release(l);					\
+		_Generic((lock),					\
+			 local_trylock_t *: ({				\
+				 lockdep_assert(tl->acquired == 1);	\
+				 WRITE_ONCE(tl->acquired, 0);		\
+			 }),						\
+			 default:(void)0);				\
+	} while (0)
+
 #define __local_unlock(lock)					\
 	do {							\
-		local_lock_release(this_cpu_ptr(lock));		\
+		__local_lock_release(lock);			\
 		preempt_enable();				\
 	} while (0)
 
 #define __local_unlock_irq(lock)				\
 	do {							\
-		local_lock_release(this_cpu_ptr(lock));		\
+		__local_lock_release(lock);			\
 		local_irq_enable();				\
 	} while (0)
 
 #define __local_unlock_irqrestore(lock, flags)			\
 	do {							\
-		local_lock_release(this_cpu_ptr(lock));		\
+		__local_lock_release(lock);			\
 		local_irq_restore(flags);			\
 	} while (0)
 
@@ -132,104 +205,6 @@ do {								\
 #define __local_unlock_nested_bh(lock)				\
 	local_lock_release(this_cpu_ptr(lock))
 
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)				\
-do {								\
-	__local_lock_init(&(lock)->llock);			\
-	WRITE_ONCE((lock)->acquired, 0);			\
-} while (0)
-
-#define __localtry_lock(lock)					\
-	do {							\
-		localtry_lock_t *lt;				\
-		preempt_disable();				\
-		lt = this_cpu_ptr(lock);			\
-		local_lock_acquire(&lt->llock);			\
-		WRITE_ONCE(lt->acquired, 1);			\
-	} while (0)
-
-#define __localtry_lock_irq(lock)				\
-	do {							\
-		localtry_lock_t *lt;				\
-		local_irq_disable();				\
-		lt = this_cpu_ptr(lock);			\
-		local_lock_acquire(&lt->llock);			\
-		WRITE_ONCE(lt->acquired, 1);			\
-	} while (0)
-
-#define __localtry_lock_irqsave(lock, flags)			\
-	do {							\
-		localtry_lock_t *lt;				\
-		local_irq_save(flags);				\
-		lt = this_cpu_ptr(lock);			\
-		local_lock_acquire(&lt->llock);			\
-		WRITE_ONCE(lt->acquired, 1);			\
-	} while (0)
-
-#define __localtry_trylock(lock)				\
-	({							\
-		localtry_lock_t *lt;				\
-		bool _ret;					\
-								\
-		preempt_disable();				\
-		lt = this_cpu_ptr(lock);			\
-		if (!READ_ONCE(lt->acquired)) {			\
-			WRITE_ONCE(lt->acquired, 1);		\
-			local_trylock_acquire(&lt->llock);	\
-			_ret = true;				\
-		} else {					\
-			_ret = false;				\
-			preempt_enable();			\
-		}						\
-		_ret;						\
-	})
-
-#define __localtry_trylock_irqsave(lock, flags)			\
-	({							\
-		localtry_lock_t *lt;				\
-		bool _ret;					\
-								\
-		local_irq_save(flags);				\
-		lt = this_cpu_ptr(lock);			\
-		if (!READ_ONCE(lt->acquired)) {			\
-			WRITE_ONCE(lt->acquired, 1);		\
-			local_trylock_acquire(&lt->llock);	\
-			_ret = true;				\
-		} else {					\
-			_ret = false;				\
-			local_irq_restore(flags);		\
-		}						\
-		_ret;						\
-	})
-
-#define __localtry_unlock(lock)					\
-	do {							\
-		localtry_lock_t *lt;				\
-		lt = this_cpu_ptr(lock);			\
-		WRITE_ONCE(lt->acquired, 0);			\
-		local_lock_release(&lt->llock);			\
-		preempt_enable();				\
-	} while (0)
-
-#define __localtry_unlock_irq(lock)				\
-	do {							\
-		localtry_lock_t *lt;				\
-		lt = this_cpu_ptr(lock);			\
-		WRITE_ONCE(lt->acquired, 0);			\
-		local_lock_release(&lt->llock);			\
-		local_irq_enable();				\
-	} while (0)
-
-#define __localtry_unlock_irqrestore(lock, flags)		\
-	do {							\
-		localtry_lock_t *lt;				\
-		lt = this_cpu_ptr(lock);			\
-		WRITE_ONCE(lt->acquired, 0);			\
-		local_lock_release(&lt->llock);			\
-		local_irq_restore(flags);			\
-	} while (0)
-
 #else /* !CONFIG_PREEMPT_RT */
 
 /*
@@ -237,16 +212,18 @@ do {								\
  * critical section while staying preemptible.
  */
 typedef spinlock_t local_lock_t;
-typedef spinlock_t localtry_lock_t;
+typedef spinlock_t local_trylock_t;
 
 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
 
 #define __local_lock_init(l)					\
 	do {							\
 		local_spin_lock_init((l));			\
 	} while (0)
 
+#define __local_trylock_init(l)	__local_lock_init(l)
+
 #define __local_lock(__lock)					\
 	do {							\
 		migrate_disable();				\
@@ -283,17 +260,7 @@ do {								\
 		spin_unlock(this_cpu_ptr((lock)));		\
 	} while (0)
 
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)			__local_lock_init(lock)
-#define __localtry_lock(lock)				__local_lock(lock)
-#define __localtry_lock_irq(lock)			__local_lock(lock)
-#define __localtry_lock_irqsave(lock, flags)		__local_lock_irqsave(lock, flags)
-#define __localtry_unlock(lock)				__local_unlock(lock)
-#define __localtry_unlock_irq(lock)			__local_unlock(lock)
-#define __localtry_unlock_irqrestore(lock, flags)	__local_unlock_irqrestore(lock, flags)
-
-#define __localtry_trylock(lock)				\
+#define __local_trylock(lock)					\
 	({							\
 		int __locked;					\
 								\
@@ -308,11 +275,11 @@ do {								\
 		__locked;					\
 	})
 
-#define __localtry_trylock_irqsave(lock, flags)			\
+#define __local_trylock_irqsave(lock, flags)			\
 	({							\
 		typecheck(unsigned long, flags);		\
 		flags = 0;					\
-		__localtry_trylock(lock);			\
+		__local_trylock(lock);				\
 	})
 
 #endif /* CONFIG_PREEMPT_RT */
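
The new __local_lock_acquire()/__local_lock_release() helpers above lean on C11 _Generic() so the extra "acquired" bookkeeping is only compiled in when the caller really passes a local_trylock_t. A standalone, userspace-only illustration of that dispatch (hypothetical names, not kernel code):

#include <stdio.h>

struct plain_lock { int dummy; };
struct try_lock   { int acquired; };

/* _Generic picks an expression at compile time based on the pointer type. */
#define lock_kind(p) _Generic((p),				\
	struct try_lock *: "try_lock: track ->acquired",	\
	default:           "plain_lock: nothing to track")

int main(void)
{
	struct plain_lock pl;
	struct try_lock tl;

	puts(lock_kind(&pl));	/* plain_lock: nothing to track */
	puts(lock_kind(&tl));	/* try_lock: track ->acquired */
	return 0;
}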

@@ -1511,8 +1511,9 @@ static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 
 /*
  * track_pfn_copy is called when a VM_PFNMAP VMA is about to get the page
- * tables copied during copy_page_range(). On success, stores the pfn to be
- * passed to untrack_pfn_copy().
+ * tables copied during copy_page_range(). Will store the pfn to be
+ * passed to untrack_pfn_copy() only if there is something to be untracked.
+ * Callers should initialize the pfn to 0.
  */
 static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
 		struct vm_area_struct *src_vma, unsigned long *pfn)
@@ -1522,7 +1523,9 @@ static inline int track_pfn_copy(struct vm_area_struct *dst_vma,
 
 /*
  * untrack_pfn_copy is called when a VM_PFNMAP VMA failed to copy during
- * copy_page_range(), but after track_pfn_copy() was already called.
+ * copy_page_range(), but after track_pfn_copy() was already called. Can
+ * be called even if track_pfn_copy() did not actually track anything:
+ * handled internally.
  */
 static inline void untrack_pfn_copy(struct vm_area_struct *dst_vma,
 		unsigned long pfn)
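
The comments above describe a calling convention rather than show it. A hedged sketch of the caller side follows; the error-path structure is illustrative, not copied from the real caller (copy_page_range() in mm/memory.c), and do_copy_page_tables() is a made-up placeholder:

static int copy_range_sketch(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	unsigned long pfn = 0;	/* callers should initialize the pfn to 0 */
	int ret;

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		ret = track_pfn_copy(dst_vma, src_vma, &pfn);
		if (ret)
			return ret;
	}

	ret = do_copy_page_tables(dst_vma, src_vma);	/* hypothetical helper */
	if (ret && unlikely(src_vma->vm_flags & VM_PFNMAP))
		untrack_pfn_copy(dst_vma, pfn);	/* safe even if nothing was tracked */

	return ret;
}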

@@ -422,11 +422,20 @@ static int vm_module_tags_populate(void)
 		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
 		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
 		unsigned long more_pages;
-		unsigned long nr;
+		unsigned long nr = 0;
 
 		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
-		nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
-					   NUMA_NO_NODE, more_pages, next_page);
+		while (nr < more_pages) {
+			unsigned long allocated;
+
+			allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+				NUMA_NO_NODE, more_pages - nr, next_page + nr);
+
+			if (!allocated)
+				break;
+			nr += allocated;
+		}
+
 		if (nr < more_pages ||
 		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
 				     next_page, PAGE_SHIFT) < 0) {

@@ -518,4 +518,5 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
 }
 EXPORT_SYMBOL_GPL(asn1_ber_decoder);
 
+MODULE_DESCRIPTION("Decoder for ASN.1 BER/DER/CER encoded bytestream");
 MODULE_LICENSE("GPL");

@@ -1191,7 +1191,7 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
 			return -ENOMEM;
 		p = *pages;
 		for (int k = 0; k < n; k++) {
-			struct folio *folio = page_folio(page);
+			struct folio *folio = page_folio(page + k);
 
 			p[k] = page + k;
 			if (!folio_test_slab(folio))
 				folio_get(folio);

@@ -325,4 +325,5 @@ static struct kunit_suite test_suite = {
 };
 kunit_test_suite(test_suite);
 
+MODULE_DESCRIPTION("Kunit tests for slub allocator");
 MODULE_LICENSE("GPL");

@@ -165,4 +165,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
 }
 EXPORT_SYMBOL(ucs2_as_utf8);
 
+MODULE_DESCRIPTION("UCS2 string handling");
 MODULE_LICENSE("GPL v2");

@@ -18,4 +18,5 @@ EXPORT_SYMBOL(zlib_inflateEnd);
 EXPORT_SYMBOL(zlib_inflateReset);
 EXPORT_SYMBOL(zlib_inflateIncomp);
 EXPORT_SYMBOL(zlib_inflate_blob);
+MODULE_DESCRIPTION("Data decompression using the deflation algorithm");
 MODULE_LICENSE("GPL");

mm/cma.c (19 changed lines)

@@ -35,7 +35,7 @@
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned int cma_area_count;
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
@@ -370,7 +370,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
 			phys_addr_t align, unsigned int order_per_bit,
 			const char *name, struct cma **res_cma, int nid)
 {
-	phys_addr_t start, end;
+	phys_addr_t start = 0, end;
 	phys_addr_t size, sizesum, sizeleft;
 	struct cma_init_memrange *mrp, *mlp, *failed;
 	struct cma_memrange *cmrp;
@@ -384,7 +384,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
 	/*
 	 * First, try it the normal way, producing just one range.
 	 */
-	ret = __cma_declare_contiguous_nid(0, total_size, 0, align,
+	ret = __cma_declare_contiguous_nid(&start, total_size, 0, align,
 			order_per_bit, false, name, res_cma, nid);
 	if (ret != -ENOMEM)
 		goto out;
@@ -580,7 +580,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 {
 	int ret;
 
-	ret = __cma_declare_contiguous_nid(base, size, limit, alignment,
+	ret = __cma_declare_contiguous_nid(&base, size, limit, alignment,
 			order_per_bit, fixed, name, res_cma, nid);
 	if (ret != 0)
 		pr_err("Failed to reserve %ld MiB\n",
@@ -592,14 +592,14 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	return ret;
 }
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t base,
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, const char *name, struct cma **res_cma,
 			int nid)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
-	phys_addr_t highmem_start;
+	phys_addr_t highmem_start, base = *basep;
 	int ret;
 
 	/*
@@ -722,12 +722,15 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base,
 	}
 
 	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
-	if (ret)
+	if (ret) {
 		memblock_phys_free(base, size);
+		return ret;
+	}
 
 	(*res_cma)->nid = nid;
+	*basep = base;
 
-	return ret;
+	return 0;
 }
 
 static void cma_debug_show_areas(struct cma *cma)

@@ -981,13 +981,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		}
 
 		if (PageHuge(page)) {
+			const unsigned int order = compound_order(page);
 			/*
 			 * skip hugetlbfs if we are not compacting for pages
 			 * bigger than its order. THPs and other compound pages
 			 * are handled below.
 			 */
 			if (!cc->alloc_contig) {
-				const unsigned int order = compound_order(page);
 
 				if (order <= MAX_PAGE_ORDER) {
 					low_pfn += (1UL << order) - 1;
@@ -1011,8 +1011,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			/* Do not report -EBUSY down the chain */
 			if (ret == -EBUSY)
 				ret = 0;
-			low_pfn += compound_nr(page) - 1;
-			nr_scanned += compound_nr(page) - 1;
+			low_pfn += (1UL << order) - 1;
+			nr_scanned += (1UL << order) - 1;
 			goto isolate_fail;
 		}
 

@@ -2244,6 +2244,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
 			*start = folio->index + nr;
 			goto out;
 		}
+		xas_advance(&xas, folio_next_index(folio) - 1);
 		continue;
 put_folio:
 		folio_put(folio);

mm/hugetlb.c (23 changed lines)

@@ -2271,7 +2271,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
 	 * as surplus_pages, otherwise it might confuse
 	 * persistent_huge_pages() momentarily.
 	 */
-	__prep_account_new_huge_page(h, nid);
+	__prep_account_new_huge_page(h, folio_nid(folio));
 
 	/*
 	 * We could have raced with the pool size change.
@@ -3825,6 +3825,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
 {
+	unsigned long persistent_free_count;
 	unsigned long min_count;
 	unsigned long allocated;
 	struct folio *folio;
@@ -3959,8 +3960,24 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * though, we'll note that we're not allowed to exceed surplus
 	 * and won't grow the pool anywhere else. Not until one of the
 	 * sysctls are changed, or the surplus pages go out of use.
+	 *
+	 * min_count is the expected number of persistent pages, we
+	 * shouldn't calculate min_count by using
+	 * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
+	 * because there may exist free surplus huge pages, and this will
+	 * lead to subtracting twice. Free surplus huge pages come from HVO
+	 * failing to restore vmemmap, see comments in the callers of
+	 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
+	 * persistent free count first.
 	 */
-	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
+	persistent_free_count = h->free_huge_pages;
+	if (h->free_huge_pages > persistent_huge_pages(h)) {
+		if (h->free_huge_pages > h->surplus_huge_pages)
+			persistent_free_count -= h->surplus_huge_pages;
+		else
+			persistent_free_count = 0;
+	}
+	min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
 	min_count = max(count, min_count);
 	try_to_free_low(h, min_count, nodes_allowed);
 
@@ -4630,7 +4647,7 @@ static void __init hugetlb_sysfs_init(void)
 		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
 					       hstate_kobjs, &hstate_attr_group);
 		if (err)
-			pr_err("HugeTLB: Unable to add hstate %s", h->name);
+			pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
 	}
 
 #ifdef CONFIG_NUMA

@@ -2127,4 +2127,5 @@ static struct kunit_suite kasan_kunit_test_suite = {
 
 kunit_test_suite(kasan_kunit_test_suite);
 
+MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
 MODULE_LICENSE("GPL");

@@ -1759,7 +1759,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 struct memcg_stock_pcp {
-	localtry_lock_t stock_lock;
+	local_trylock_t stock_lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -1774,7 +1774,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
+	.stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1805,11 +1805,10 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
-		if (!gfpflags_allow_spinning(gfp_mask))
-			return ret;
-		localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
-	}
+	if (gfpflags_allow_spinning(gfp_mask))
+		local_lock_irqsave(&memcg_stock.stock_lock, flags);
+	else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+		return ret;
 
 	stock = this_cpu_ptr(&memcg_stock);
 	stock_pages = READ_ONCE(stock->nr_pages);
@@ -1818,7 +1817,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
 		ret = true;
 	}
 
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -1857,14 +1856,14 @@ static void drain_local_stock(struct work_struct *dummy)
 	 * drain_stock races is that we always operate on local CPU stock
 	 * here with IRQ disabled
 	 */
-	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	old = drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -1894,7 +1893,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	unsigned long flags;
 
-	if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+	if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
 		/*
 		 * In case of unlikely failure to lock percpu stock_lock
 		 * uncharge memcg directly.
@@ -1907,7 +1906,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		return;
 	}
 	__refill_stock(memcg, nr_pages);
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 }
 
 /*
@@ -1964,9 +1963,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 	stock = &per_cpu(memcg_stock, cpu);
 
 	/* drain_obj_stock requires stock_lock */
-	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 	old = drain_obj_stock(stock);
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	drain_stock(stock);
 	obj_cgroup_put(old);
@@ -2787,7 +2786,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	unsigned long flags;
 	int *bytes;
 
-	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 	stock = this_cpu_ptr(&memcg_stock);
 
 	/*
@@ -2836,7 +2835,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
 	if (nr)
 		__mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 }
 
@@ -2846,7 +2845,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 	unsigned long flags;
 	bool ret = false;
 
-	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2854,7 +2853,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
 		ret = true;
 	}
 
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
 	return ret;
 }
@@ -2946,7 +2945,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 	unsigned long flags;
 	unsigned int nr_pages = 0;
 
-	localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+	local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2960,7 +2959,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
 		stock->nr_bytes &= (PAGE_SIZE - 1);
 	}
 
-	localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+	local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 	obj_cgroup_put(old);
 
 	if (nr_pages)

@@ -1361,7 +1361,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	struct mm_struct *src_mm = src_vma->vm_mm;
 	struct mmu_notifier_range range;
-	unsigned long next, pfn;
+	unsigned long next, pfn = 0;
 	bool is_cow;
 	int ret;
 
@@ -2938,11 +2938,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	if (fn) {
 		do {
 			if (create || !pte_none(ptep_get(pte))) {
-				err = fn(pte++, addr, data);
+				err = fn(pte, addr, data);
 				if (err)
 					break;
 			}
-		} while (addr += PAGE_SIZE, addr != end);
+		} while (pte++, addr += PAGE_SIZE, addr != end);
 	}
 	*mask |= PGTBL_PTE_MODIFIED;

mm/page_alloc.c (124 changed lines)

@@ -1400,11 +1400,12 @@ static void free_one_page(struct zone *zone, struct page *page,
 	struct llist_head *llhead;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+	if (unlikely(fpi_flags & FPI_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags)) {
 			add_page_to_zone_llist(zone, page, order);
 			return;
 		}
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 
@@ -2182,23 +2183,15 @@ try_to_claim_block(struct zone *zone, struct page *page,
 }
 
 /*
- * Try finding a free buddy page on the fallback list.
- *
- * This will attempt to claim a whole pageblock for the requested type
- * to ensure grouping of such requests in the future.
- *
- * If a whole block cannot be claimed, steal an individual page, regressing to
- * __rmqueue_smallest() logic to at least break up as little contiguity as
- * possible.
+ * Try to allocate from some fallback migratetype by claiming the entire block,
+ * i.e. converting it to the allocation's start migratetype.
  *
  * The use of signed ints for order and current_order is a deliberate
  * deviation from the rest of this file, to make the for loop
  * condition simpler.
- *
- * Return the stolen page, or NULL if none can be found.
  */
 static __always_inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
 						unsigned int alloc_flags)
 {
 	struct free_area *area;
@@ -2236,14 +2229,29 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 		page = try_to_claim_block(zone, page, current_order, order,
 					  start_migratetype, fallback_mt,
 					  alloc_flags);
-		if (page)
-			goto got_one;
+		if (page) {
+			trace_mm_page_alloc_extfrag(page, order, current_order,
+						    start_migratetype, fallback_mt);
+			return page;
+		}
 	}
 
-	if (alloc_flags & ALLOC_NOFRAGMENT)
-		return NULL;
+	return NULL;
+}
 
-	/* No luck claiming pageblock. Find the smallest fallback page */
+/*
+ * Try to steal a single page from some fallback migratetype. Leave the rest of
+ * the block as its current migratetype, potentially causing fragmentation.
+ */
+static __always_inline struct page *
+__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
+{
+	struct free_area *area;
+	int current_order;
+	struct page *page;
+	int fallback_mt;
+	bool claim_block;
+
 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
@@ -2253,25 +2261,28 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 		page = get_page_from_free_area(area, fallback_mt);
 		page_del_and_expand(zone, page, order, current_order, fallback_mt);
-		goto got_one;
+		trace_mm_page_alloc_extfrag(page, order, current_order,
+					    start_migratetype, fallback_mt);
+		return page;
 	}
 
 	return NULL;
-
-got_one:
-	trace_mm_page_alloc_extfrag(page, order, current_order,
-		start_migratetype, fallback_mt);
-
-	return page;
 }
 
+enum rmqueue_mode {
+	RMQUEUE_NORMAL,
+	RMQUEUE_CMA,
+	RMQUEUE_CLAIM,
+	RMQUEUE_STEAL,
+};
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
 static __always_inline struct page *
 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
-						unsigned int alloc_flags)
+	  unsigned int alloc_flags, enum rmqueue_mode *mode)
 {
 	struct page *page;
 
@@ -2290,16 +2301,48 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 		}
 	}
 
-	page = __rmqueue_smallest(zone, order, migratetype);
-	if (unlikely(!page)) {
-		if (alloc_flags & ALLOC_CMA)
-			page = __rmqueue_cma_fallback(zone, order);
-
-		if (!page)
-			page = __rmqueue_fallback(zone, order, migratetype,
-						  alloc_flags);
-	}
-	return page;
+	/*
+	 * First try the freelists of the requested migratetype, then try
+	 * fallbacks modes with increasing levels of fragmentation risk.
+	 *
+	 * The fallback logic is expensive and rmqueue_bulk() calls in
+	 * a loop with the zone->lock held, meaning the freelists are
+	 * not subject to any outside changes. Remember in *mode where
+	 * we found pay dirt, to save us the search on the next call.
+	 */
+	switch (*mode) {
+	case RMQUEUE_NORMAL:
+		page = __rmqueue_smallest(zone, order, migratetype);
+		if (page)
+			return page;
+		fallthrough;
+	case RMQUEUE_CMA:
+		if (alloc_flags & ALLOC_CMA) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page) {
+				*mode = RMQUEUE_CMA;
+				return page;
+			}
+		}
+		fallthrough;
+	case RMQUEUE_CLAIM:
+		page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
+		if (page) {
+			/* Replenished preferred freelist, back to normal mode. */
+			*mode = RMQUEUE_NORMAL;
+			return page;
+		}
+		fallthrough;
+	case RMQUEUE_STEAL:
+		if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
+			page = __rmqueue_steal(zone, order, migratetype);
+			if (page) {
+				*mode = RMQUEUE_STEAL;
+				return page;
+			}
+		}
+	}
+	return NULL;
 }
 
 /*
@@ -2311,17 +2354,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, unsigned int alloc_flags)
 {
+	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
 	unsigned long flags;
 	int i;
 
-	if (!spin_trylock_irqsave(&zone->lock, flags)) {
-		if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+	if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+		if (!spin_trylock_irqsave(&zone->lock, flags))
 			return 0;
+	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 	}
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype,
-							      alloc_flags);
+							      alloc_flags, &rmqm);
 		if (unlikely(page == NULL))
 			break;
@@ -2937,15 +2982,18 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 
 	do {
 		page = NULL;
-		if (!spin_trylock_irqsave(&zone->lock, flags)) {
-			if (unlikely(alloc_flags & ALLOC_TRYLOCK))
+		if (unlikely(alloc_flags & ALLOC_TRYLOCK)) {
+			if (!spin_trylock_irqsave(&zone->lock, flags))
 				return NULL;
+		} else {
 			spin_lock_irqsave(&zone->lock, flags);
 		}
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+			enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
+
+			page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
 
 			/*
 			 * If the allocation fails, allow OOM handling and

@@ -1902,6 +1902,14 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
 					 unsigned long end)
 {
 	struct vm_area_struct *ret;
+	bool give_up_on_oom = false;
+
+	/*
+	 * If we are modifying only and not splitting, just give up on the merge
+	 * if OOM prevents us from merging successfully.
+	 */
+	if (start == vma->vm_start && end == vma->vm_end)
+		give_up_on_oom = true;
 
 	/* Reset ptes for the whole vma range if wr-protected */
 	if (userfaultfd_wp(vma))
@@ -1909,7 +1917,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
 
 	ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
 				    vma->vm_flags & ~__VM_UFFD_FLAGS,
-				    NULL_VM_UFFD_CTX);
+				    NULL_VM_UFFD_CTX, give_up_on_oom);
 
 	/*
 	 * In the vma_merge() successful mprotect-like case 8:
@@ -1960,7 +1968,8 @@ int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
 		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
 		vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
 					    new_flags,
-					    (struct vm_userfaultfd_ctx){ctx});
+					    (struct vm_userfaultfd_ctx){ctx},
+					    /* give_up_on_oom = */false);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
47 mm/vma.c
@@ -666,6 +666,9 @@ static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
 /*
  * Actually perform the VMA merge operation.
  *
+ * IMPORTANT: We guarantee that, should vmg->give_up_on_oom is set, to not
+ * modify any VMAs or cause inconsistent state should an OOM condition arise.
+ *
  * Returns 0 on success, or an error value on failure.
  */
 static int commit_merge(struct vma_merge_struct *vmg)
@@ -685,6 +688,12 @@ static int commit_merge(struct vma_merge_struct *vmg)

 	init_multi_vma_prep(&vp, vma, vmg);

+	/*
+	 * If vmg->give_up_on_oom is set, we're safe, because we don't actually
+	 * manipulate any VMAs until we succeed at preallocation.
+	 *
+	 * Past this point, we will not return an error.
+	 */
 	if (vma_iter_prealloc(vmg->vmi, vma))
 		return -ENOMEM;
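The comment added to commit_merge() relies on a common ordering: perform every allocation that can fail before touching shared state, so a failure leaves everything exactly as it was. A self-contained sketch of that ordering on an invented linked list:

#include <stdlib.h>
#include <string.h>

struct node {
	struct node *next;
	char data[32];
};

/*
 * Insert a copy of 'data' at the head of *head. The only fallible step
 * (malloc) happens before the list is touched, so on failure the caller
 * sees the list exactly as it was.
 */
static int insert_copy(struct node **head, const char *data)
{
	struct node *n = malloc(sizeof(*n));	/* preallocate first */

	if (!n)
		return -1;			/* nothing was modified */

	/* Past this point no step can fail; now mutate the list. */
	strncpy(n->data, data, sizeof(n->data) - 1);
	n->data[sizeof(n->data) - 1] = '\0';
	n->next = *head;
	*head = n;
	return 0;
}

int main(void)
{
	struct node *head = NULL;

	if (insert_copy(&head, "hello") || insert_copy(&head, "world"))
		return 1;
	return 0;
}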
@@ -915,6 +924,12 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
 		if (anon_dup)
 			unlink_anon_vmas(anon_dup);

-		vmg->state = VMA_MERGE_ERROR_NOMEM;
+		/*
+		 * We've cleaned up any cloned anon_vma's, no VMAs have been
+		 * modified, no harm no foul if the user requests that we not
+		 * report this and just give up, leaving the VMAs unmerged.
+		 */
+		if (!vmg->give_up_on_oom)
+			vmg->state = VMA_MERGE_ERROR_NOMEM;
 		return NULL;
 	}
@@ -926,6 +941,14 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
 abort:
 	vma_iter_set(vmg->vmi, start);
 	vma_iter_load(vmg->vmi);

-	vmg->state = VMA_MERGE_ERROR_NOMEM;
+	/*
+	 * This means we have failed to clone anon_vma's correctly, but no
+	 * actual changes to VMAs have occurred, so no harm no foul - if the
+	 * user doesn't want this reported and instead just wants to give up on
+	 * the merge, allow it.
+	 */
+	if (!vmg->give_up_on_oom)
+		vmg->state = VMA_MERGE_ERROR_NOMEM;
 	return NULL;
 }
@@ -1068,6 +1091,10 @@ int vma_expand(struct vma_merge_struct *vmg)
 		/* This should already have been checked by this point. */
 		VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
 		vma_start_write(next);
+		/*
+		 * In this case we don't report OOM, so vmg->give_up_on_mm is
+		 * safe.
+		 */
 		ret = dup_anon_vma(middle, next, &anon_dup);
 		if (ret)
 			return ret;
@@ -1090,9 +1117,15 @@ int vma_expand(struct vma_merge_struct *vmg)
 	return 0;

 nomem:
-	vmg->state = VMA_MERGE_ERROR_NOMEM;
 	if (anon_dup)
 		unlink_anon_vmas(anon_dup);
+	/*
+	 * If the user requests that we just give upon OOM, we are safe to do so
+	 * here, as commit merge provides this contract to us. Nothing has been
+	 * changed - no harm no foul, just don't report it.
+	 */
+	if (!vmg->give_up_on_oom)
+		vmg->state = VMA_MERGE_ERROR_NOMEM;
 	return -ENOMEM;
 }
@@ -1534,6 +1567,13 @@ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
 	if (vmg_nomem(vmg))
 		return ERR_PTR(-ENOMEM);

+	/*
+	 * Split can fail for reasons other than OOM, so if the user requests
+	 * this it's probably a mistake.
+	 */
+	VM_WARN_ON(vmg->give_up_on_oom &&
+		   (vma->vm_start != start || vma->vm_end != end));
+
 	/* Split any preceding portion of the VMA. */
 	if (vma->vm_start < start) {
 		int err = split_vma(vmg->vmi, vma, start, 1);
@@ -1602,12 +1642,15 @@ struct vm_area_struct
 					struct vm_area_struct *vma,
 					unsigned long start, unsigned long end,
 					unsigned long new_flags,
-					struct vm_userfaultfd_ctx new_ctx)
+					struct vm_userfaultfd_ctx new_ctx,
+					bool give_up_on_oom)
 {
 	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

 	vmg.flags = new_flags;
 	vmg.uffd_ctx = new_ctx;
+	if (give_up_on_oom)
+		vmg.give_up_on_oom = true;

 	return vma_modify(&vmg);
 }
9 mm/vma.h
@@ -114,6 +114,12 @@ struct vma_merge_struct {
 	 */
 	bool just_expand :1;

+	/*
+	 * If a merge is possible, but an OOM error occurs, give up and don't
+	 * execute the merge, returning NULL.
+	 */
+	bool give_up_on_oom :1;
+
 	/* Internal flags set during merge process: */

 	/*
@@ -255,7 +261,8 @@ __must_check struct vm_area_struct
 		       struct vm_area_struct *vma,
 		       unsigned long start, unsigned long end,
 		       unsigned long new_flags,
-		       struct vm_userfaultfd_ctx new_ctx);
+		       struct vm_userfaultfd_ctx new_ctx,
+		       bool give_up_on_oom);

 __must_check struct vm_area_struct
 	*vma_merge_new_range(struct vma_merge_struct *vmg);
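The give_up_on_oom bit treats merging as an optimization: if memory for the merge cannot be obtained, the caller keeps the unmerged VMAs and no error is reported. A toy model of that contract, with types and the helper invented purely for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct range {
	long start, end;
};

/*
 * Best-effort merge of two adjacent ranges. Returns a freshly allocated
 * merged range, or NULL both when the ranges are not adjacent and when
 * allocation fails - the caller keeps using the originals and no error
 * is reported, mirroring the "give up on OOM" behaviour.
 */
static struct range *merge_best_effort(const struct range *a,
				       const struct range *b)
{
	struct range *m;

	if (a->end != b->start)
		return NULL;		/* not mergeable, not an error */

	m = malloc(sizeof(*m));
	if (!m)
		return NULL;		/* OOM: give up silently, nothing changed */

	m->start = a->start;
	m->end = b->end;
	return m;
}

int main(void)
{
	struct range a = { 0, 4096 }, b = { 4096, 8192 };
	struct range *m = merge_best_effort(&a, &b);

	if (m)
		printf("merged [%ld, %ld)\n", m->start, m->end);
	else
		printf("kept the two ranges as they were\n");
	free(m);
	return 0;
}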
@@ -56,4 +56,5 @@ static void livepatch_callbacks_mod_exit(void)

 module_init(livepatch_callbacks_mod_init);
 module_exit(livepatch_callbacks_mod_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks, support module");
 MODULE_LICENSE("GPL");
@@ -192,5 +192,6 @@ static void livepatch_callbacks_demo_exit(void)

 module_init(livepatch_callbacks_demo_init);
 module_exit(livepatch_callbacks_demo_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks");
 MODULE_LICENSE("GPL");
 MODULE_INFO(livepatch, "Y");
@@ -38,4 +38,5 @@ static void livepatch_callbacks_mod_exit(void)

 module_init(livepatch_callbacks_mod_init);
 module_exit(livepatch_callbacks_mod_exit);
+MODULE_DESCRIPTION("Live patching demo for (un)patching callbacks, support module");
 MODULE_LICENSE("GPL");
@@ -66,5 +66,6 @@ static void livepatch_exit(void)

 module_init(livepatch_init);
 module_exit(livepatch_exit);
+MODULE_DESCRIPTION("Kernel Live Patching Sample Module");
 MODULE_LICENSE("GPL");
 MODULE_INFO(livepatch, "Y");
@@ -168,5 +168,6 @@ static void livepatch_shadow_fix1_exit(void)

 module_init(livepatch_shadow_fix1_init);
 module_exit(livepatch_shadow_fix1_exit);
+MODULE_DESCRIPTION("Live patching demo for shadow variables");
 MODULE_LICENSE("GPL");
 MODULE_INFO(livepatch, "Y");
@@ -128,5 +128,6 @@ static void livepatch_shadow_fix2_exit(void)

 module_init(livepatch_shadow_fix2_init);
 module_exit(livepatch_shadow_fix2_exit);
+MODULE_DESCRIPTION("Live patching demo for shadow variables");
 MODULE_LICENSE("GPL");
 MODULE_INFO(livepatch, "Y");
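The livepatch hunks only add the MODULE_DESCRIPTION() tags the samples were missing. For orientation, a minimal module skeleton showing where that tag conventionally sits; the demo_* names are placeholders and not part of these patches:

// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;	/* nothing to do, just a skeleton */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Skeleton module illustrating the metadata block");
MODULE_LICENSE("GPL");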
@@ -283,8 +283,7 @@ TEST(check_file_mmap)

 /*
  * Test mincore() behavior on a page backed by a tmpfs file. This test
- * performs the same steps as the previous one. However, we don't expect
- * any readahead in this case.
+ * performs the same steps as the previous one.
  */
 TEST(check_tmpfs_mmap)
 {
@@ -295,7 +294,6 @@ TEST(check_tmpfs_mmap)
 	int page_size;
 	int fd;
 	int i;
-	int ra_pages = 0;

 	page_size = sysconf(_SC_PAGESIZE);
 	vec_size = FILE_SIZE / page_size;
@@ -338,8 +336,7 @@ TEST(check_tmpfs_mmap)
 	}

 	/*
-	 * Touch a page in the middle of the mapping. We expect only
-	 * that page to be fetched into memory.
+	 * Touch a page in the middle of the mapping.
 	 */
 	addr[FILE_SIZE / 2] = 1;
 	retval = mincore(addr, FILE_SIZE, vec);
@@ -348,15 +345,6 @@ TEST(check_tmpfs_mmap)
 		TH_LOG("Page not found in memory after use");
 	}

-	i = FILE_SIZE / 2 / page_size + 1;
-	while (i < vec_size && vec[i]) {
-		ra_pages++;
-		i++;
-	}
-	ASSERT_EQ(ra_pages, 0) {
-		TH_LOG("Read-ahead pages found in memory");
-	}
-
 	munmap(addr, FILE_SIZE);
 	close(fd);
 	free(vec);
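The dropped assertion depended on readahead behaviour that tmpfs does not provide. Checking page residency with mincore() itself is straightforward; a standalone example over anonymous memory, independent of the selftest harness above:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page_size;
	unsigned char vec[8];
	char *addr;
	int resident = 0, i;

	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return 1;

	addr[0] = 1;			/* fault in only the first page */

	if (mincore(addr, len, vec))
		return 1;
	for (i = 0; i < 8; i++)
		if (vec[i] & 1)		/* bit 0: page resident in memory */
			resident++;

	printf("%d of 8 pages resident\n", resident);
	munmap(addr, len);
	return 0;
}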
@@ -29,7 +29,7 @@ fi
 if [[ $cgroup2 ]]; then
   cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
   if [[ -z "$cgroup_path" ]]; then
-    cgroup_path=/dev/cgroup/memory
+    cgroup_path=$(mktemp -d)
     mount -t cgroup2 none $cgroup_path
     do_umount=1
   fi
@@ -37,7 +37,7 @@ if [[ $cgroup2 ]]; then
 else
   cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
   if [[ -z "$cgroup_path" ]]; then
-    cgroup_path=/dev/cgroup/memory
+    cgroup_path=$(mktemp -d)
     mount -t cgroup memory,hugetlb $cgroup_path
     do_umount=1
   fi
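Using mktemp -d instead of the hard-coded /dev/cgroup/memory gives the test a private mount point that is guaranteed to exist and cannot collide with anything else on the box. The C-level equivalent is mkdtemp(3); a short sketch of creating and removing such a scratch directory:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char template[] = "/tmp/scratch-mount.XXXXXX";
	char *dir = mkdtemp(template);	/* creates a unique, empty directory */

	if (!dir) {
		perror("mkdtemp");
		return 1;
	}

	/* ... a test would mount something on 'dir' and run here ... */
	printf("using scratch directory %s\n", dir);

	rmdir(dir);			/* remove it once unmounted */
	return 0;
}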
@@ -293,7 +293,7 @@ static void do_test_vmsplice_in_parent(char *mem, size_t size,
 		.iov_base = mem,
 		.iov_len = size,
 	};
-	ssize_t cur, total, transferred;
+	ssize_t cur, total, transferred = 0;
 	struct comm_pipes comm_pipes;
 	char *old, *new;
 	int ret, fds[2];
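Initializing transferred to 0 resolves a -Wmaybe-uninitialized warning: the variable is only ever accumulated into by the copy loop, so it has to start at zero. The usual shape of such a loop, reduced to a standalone partial-read helper:

#include <stdio.h>
#include <unistd.h>

/* Read exactly 'total' bytes (or fail), accumulating partial reads. */
static ssize_t read_all(int fd, char *buf, ssize_t total)
{
	ssize_t transferred = 0;	/* must start at 0 before accumulating */

	while (transferred < total) {
		ssize_t cur = read(fd, buf + transferred, total - transferred);

		if (cur <= 0)
			return -1;	/* error or unexpected EOF */
		transferred += cur;
	}
	return transferred;
}

int main(void)
{
	char buf[16];

	/* Example: pull 16 bytes from stdin (pipe something in). */
	if (read_all(STDIN_FILENO, buf, sizeof(buf)) < 0)
		return 1;
	return 0;
}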
@@ -23,7 +23,7 @@ fi
 if [[ $cgroup2 ]]; then
   CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
   if [[ -z "$CGROUP_ROOT" ]]; then
-    CGROUP_ROOT=/dev/cgroup/memory
+    CGROUP_ROOT=$(mktemp -d)
     mount -t cgroup2 none $CGROUP_ROOT
     do_umount=1
   fi
@@ -150,7 +150,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
 {
 	if (kmalloc_verbose)
-		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);

 	pthread_mutex_lock(&cachep->lock);
 	for (int i = 0; i < size; i++)
@@ -168,7 +168,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
 	size_t i;

 	if (kmalloc_verbose)
-		pr_debug("Bulk alloc %lu\n", size);
+		pr_debug("Bulk alloc %zu\n", size);

 	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs >= size) {
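%zu is the printf conversion defined for size_t; %lu only happens to match on LP64 targets and can trip -Wformat on 32-bit builds, where size_t is typically unsigned int. A two-line illustration:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t n = 42;

	/* %zu is correct for size_t on every ABI; %lu only works by accident on LP64. */
	printf("n = %zu (sizeof(size_t) = %zu)\n", n, sizeof(n));
	return 0;
}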
2 tools/testing/shared/linux/cleanup.h (new file)
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include "../../../../include/linux/cleanup.h"