Partially revert commit 0aaddfb068 ("locking/local_lock: Introduce localtry_lock_t"). Remove the localtry_*() helpers, since the localtry_lock() name might be misinterpreted as "try lock". Introduce local_trylock[_irqsave]() helpers that only work with the newly introduced local_trylock_t type. Note that an attempt to use local_trylock[_irqsave]() with a local_lock_t will cause a compilation failure.

Usage and behavior in !PREEMPT_RT:

local_lock_t lock;                     // sizeof(lock) == 0
local_lock(&lock);                     // preempt disable
local_lock_irqsave(&lock, ...);        // irq save
if (local_trylock_irqsave(&lock, ...)) // compilation error

local_trylock_t lock;                  // sizeof(lock) == 4
local_lock(&lock);                     // preempt disable, acquired = 1
local_lock_irqsave(&lock, ...);        // irq save, acquired = 1
if (local_trylock(&lock))              // if (!acquired) preempt disable, acquired = 1
if (local_trylock_irqsave(&lock, ...)) // if (!acquired) irq save, acquired = 1

The existing local_lock_*() macros can be used either with local_lock_t or local_trylock_t. With local_trylock_t they set acquired = 1, while local_unlock_*() clears it.

In !PREEMPT_RT, local_lock_irqsave(local_lock_t *) disables interrupts to protect the critical section, but it doesn't prevent NMIs, so fully reentrant code cannot use local_lock_irqsave(local_lock_t *) for exclusive access. The local_lock_irqsave(local_trylock_t *) helper disables interrupts and sets acquired = 1, so a local_trylock_irqsave(local_trylock_t *) from NMI attempting to acquire the same lock will return false.

In PREEMPT_RT, local_lock_irqsave() maps to a preemptible spin_lock(). Map local_trylock_irqsave() to a preemptible spin_trylock(). When in hard IRQ or NMI, return false right away, since spin_trylock() is not safe there due to explicit locking in the underlying rt_spin_trylock() implementation. Removing this explicit locking and attempting only "trylock" is undesired due to PI implications.

The local_trylock() without _irqsave can be used to avoid the cost of disabling/enabling interrupts by only disabling preemption; a local_trylock() in an interrupt attempting to acquire the same lock will then return false.

Note there is no need to use local_inc for the acquired variable, since it's a percpu variable with strict nesting scopes.

Note that guard(local_lock)(&lock) works only for "local_lock_t lock".

The patch also makes sure that local_lock_release(l) is called before WRITE_ONCE(l->acquired, 0). Though IRQs are disabled at this point, a local_trylock() from NMI would still succeed, and local_lock_acquire(l) would then warn.

Link: https://lkml.kernel.org/r/20250403025514.41186-1-alexei.starovoitov@gmail.com
Fixes: 0aaddfb068 ("locking/local_lock: Introduce localtry_lock_t")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin KaFai Lau <martin.lau@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
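For illustration, here is a minimal sketch of the resulting API on a per-CPU structure. It is not part of the patch: the names pcpu_event, pcpu_event_add and pcpu_event_add_nmi are hypothetical, and it assumes the INIT_LOCAL_TRYLOCK() static initializer from local_lock_internal.h.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	struct pcpu_event {
		local_trylock_t lock;	/* carries the acquired flag in !PREEMPT_RT */
		u64 count;
	};

	static DEFINE_PER_CPU(struct pcpu_event, pcpu_event) = {
		.lock = INIT_LOCAL_TRYLOCK(lock),
	};

	/* Task/softirq context: unconditional acquire. */
	static void pcpu_event_add(u64 n)
	{
		unsigned long flags;

		local_lock_irqsave(&pcpu_event.lock, flags);	/* irq save, acquired = 1 */
		this_cpu_add(pcpu_event.count, n);
		local_unlock_irqrestore(&pcpu_event.lock, flags);
	}

	/* NMI context: may interrupt the critical section above, so trylock only. */
	static bool pcpu_event_add_nmi(u64 n)
	{
		unsigned long flags;

		if (!local_trylock_irqsave(&pcpu_event.lock, flags))
			return false;	/* this CPU was interrupted mid-section */
		this_cpu_add(pcpu_event.count, n);
		local_unlock_irqrestore(&pcpu_event.lock, flags);
		return true;
	}

In !PREEMPT_RT the NMI path fails only while the same CPU already holds the lock; in PREEMPT_RT local_trylock_irqsave() always returns false in NMI or hard IRQ context, so callers need a fallback path.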
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
#define _LINUX_LOCAL_LOCK_H

#include <linux/local_lock_internal.h>

/**
 * local_lock_init - Runtime initialize a lock instance
 */
#define local_lock_init(lock)		__local_lock_init(lock)

/**
 * local_lock - Acquire a per CPU local lock
 * @lock:	The lock variable
 */
#define local_lock(lock)		__local_lock(lock)

/**
 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
 * @lock:	The lock variable
 */
#define local_lock_irq(lock)		__local_lock_irq(lock)

/**
 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
 *			interrupts
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 */
#define local_lock_irqsave(lock, flags)				\
	__local_lock_irqsave(lock, flags)

/**
 * local_unlock - Release a per CPU local lock
 * @lock:	The lock variable
 */
#define local_unlock(lock)		__local_unlock(lock)

/**
 * local_unlock_irq - Release a per CPU local lock and enable interrupts
 * @lock:	The lock variable
 */
#define local_unlock_irq(lock)		__local_unlock_irq(lock)

/**
 * local_unlock_irqrestore - Release a per CPU local lock and restore
 *			     interrupt flags
 * @lock:	The lock variable
 * @flags:	Interrupt flags to restore
 */
#define local_unlock_irqrestore(lock, flags)			\
	__local_unlock_irqrestore(lock, flags)
/**
 * local_trylock_init - Runtime initialize a lock instance
 */
#define local_trylock_init(lock)	__local_trylock_init(lock)
/**
 * local_trylock - Try to acquire a per CPU local lock
 * @lock:	The lock variable
 *
 * The function can be used in any context such as NMI or HARDIRQ. Due to
 * locking constraints it will _always_ fail to acquire the lock in NMI or
 * HARDIRQ context on PREEMPT_RT.
 */
#define local_trylock(lock)		__local_trylock(lock)
/**
 * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
 *			   interrupts if acquired
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 *
 * The function can be used in any context such as NMI or HARDIRQ. Due to
 * locking constraints it will _always_ fail to acquire the lock in NMI or
 * HARDIRQ context on PREEMPT_RT.
 */
#define local_trylock_irqsave(lock, flags)			\
	__local_trylock_irqsave(lock, flags)
DEFINE_GUARD(local_lock, local_lock_t __percpu*,
	     local_lock(_T),
	     local_unlock(_T))
DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
	     local_lock_irq(_T),
	     local_unlock_irq(_T))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
		    local_lock_irqsave(_T->lock, _T->flags),
		    local_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

#define local_lock_nested_bh(_lock)				\
	__local_lock_nested_bh(_lock)

#define local_unlock_nested_bh(_lock)				\
	__local_unlock_nested_bh(_lock)

DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
	     local_lock_nested_bh(_T),
	     local_unlock_nested_bh(_T))

#endif
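As an aside, a minimal sketch of the scoped guard mentioned in the commit message, which works only with a plain local_lock_t. This snippet is not part of local_lock.h; the pcpu_stat names are hypothetical.

	#include <linux/local_lock.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	struct pcpu_stat {
		local_lock_t lock;	/* sizeof == 0 in !PREEMPT_RT without lockdep */
		u64 events;
	};

	static DEFINE_PER_CPU(struct pcpu_stat, pcpu_stat) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void pcpu_stat_inc(void)
	{
		/* Acquired here, released automatically at end of scope. */
		guard(local_lock)(&pcpu_stat.lock);
		this_cpu_inc(pcpu_stat.events);
	}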