forked from mirrors/linux

commit c5bcab7558
Add local_lock_nested_bh() locking. It is based on local_lock_t and the
naming follows the preempt_disable_nested() example.

For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
assumptions based on local_bh_disable(). The macro is optimized away
during compilation.

For !PREEMPT_RT + LOCKDEP the local_lock_nested_bh() is reduced to the
usual lock-acquire plus lockdep_assert_in_softirq() - ensuring that BH
is disabled.

For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
lock. It does not disable CPU migration because it relies on
local_bh_disable() disabling CPU migration. With LOCKDEP it performs
the usual lockdep checks as with !PREEMPT_RT. Due to include hell the
softirq check has been moved to spinlock.c.

The intention is to use this locking in places where locking of a
per-CPU variable relies on BH being disabled. Instead of treating
disabled bottom halves as a big per-CPU lock, PREEMPT_RT can use this
to reduce the locking scope to what actually needs protecting. A side
effect is that it also documents the protection scope of the per-CPU
variables.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-3-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
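As a minimal usage sketch of the API this commit introduces (the struct and
function names below are invented for illustration and are not part of the
patch): a per-CPU structure whose only protection used to be "BH is disabled"
gains a local_lock_t member, and the nested-BH lock pair is taken inside the
BH-disabled region.

/*
 * Hypothetical usage sketch, not part of this header or patch:
 * a per-CPU counter whose only protection used to be disabled
 * bottom halves. The local_lock_t member documents that scope
 * and, on PREEMPT_RT, becomes a real per-CPU lock.
 */
#include <linux/bottom_half.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct pcpu_stats {
	local_lock_t	bh_lock;	/* protects @count */
	unsigned long	count;
};

static DEFINE_PER_CPU(struct pcpu_stats, pcpu_stats) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void pcpu_stats_inc(void)
{
	local_bh_disable();		/* or already running in softirq context */
	local_lock_nested_bh(&pcpu_stats.bh_lock);
	this_cpu_inc(pcpu_stats.count);
	local_unlock_nested_bh(&pcpu_stats.bh_lock);
	local_bh_enable();
}

On !PREEMPT_RT the lock/unlock pair compiles away (or turns into lockdep
checks); on PREEMPT_RT it becomes a real per-CPU spinlock scoped to just this
structure instead of the whole BH-disabled section.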
172 lines · 4.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

#ifndef CONFIG_PREEMPT_RT

typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname)		\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
		.lock_type = LD_LOCK_PERCPU,		\
	},						\
	.owner = NULL,

static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
	l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }

#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_PERCPU);			\
	local_lock_debug_init(lock);				\
} while (0)

#define __spinlock_nested_bh_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
			      0, LD_WAIT_CONFIG, LD_WAIT_INV,	\
			      LD_LOCK_NORMAL);			\
	local_lock_debug_init(lock);				\
} while (0)

#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)

#define __local_lock_nested_bh(lock)				\
	do {							\
		lockdep_assert_in_softirq();			\
		local_lock_acquire(this_cpu_ptr(lock));	\
	} while (0)

#define __local_unlock_nested_bh(lock)				\
	local_lock_release(this_cpu_ptr(lock))

#else /* !CONFIG_PREEMPT_RT */

/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;

#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))

#define __local_lock_init(l)					\
	do {							\
		local_spin_lock_init((l));			\
	} while (0)

#define __local_lock(__lock)					\
	do {							\
		migrate_disable();				\
		spin_lock(this_cpu_ptr((__lock)));		\
	} while (0)

#define __local_lock_irq(lock)			__local_lock(lock)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__local_lock(lock);				\
	} while (0)

#define __local_unlock(__lock)					\
	do {							\
		spin_unlock(this_cpu_ptr((__lock)));		\
		migrate_enable();				\
	} while (0)

#define __local_unlock_irq(lock)		__local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)

#define __local_lock_nested_bh(lock)				\
do {								\
	lockdep_assert_in_softirq_func();			\
	spin_lock(this_cpu_ptr(lock));				\
} while (0)

#define __local_unlock_nested_bh(lock)				\
do {								\
	spin_unlock(this_cpu_ptr((lock)));			\
} while (0)

#endif /* CONFIG_PREEMPT_RT */