commit 91710728d1
			preempt_disable() and local_irq_disable/save() are in principle per CPU big
kernel locks. This has several downsides:
  - The protection scope is unknown
  - Violation of protection rules is hard to detect by instrumentation
  - For PREEMPT_RT such sections, unless they are in low level critical code,
    can violate the preemptability constraints.
To address this, PREEMPT_RT introduced the concept of local_locks, which are
strictly per CPU.
The lock operations map to preempt_disable(), local_irq_disable/save() and
the enabling counterparts on non-RT enabled kernels.
If lockdep is enabled, local locks gain a lock map which tracks the usage
context. This will catch cases where an area is protected by
preempt_disable() but the access also happens from interrupt context. Local
locks have identified quite a few such issues over the years; the most
recent example is:
  b7d5dc2107 ("random: add a spinlock_t to struct batched_entropy")
Aside from the lockdep coverage, this also improves code readability as it
precisely annotates the protection scope.
PREEMPT_RT substitutes these local locks with 'sleeping' spinlocks to
protect such sections while maintaining preemptability and CPU locality.
Local locks can replace:
  - preempt_disable()/enable() pairs
  - local_irq_disable/enable() pairs
  - local_irq_save/restore() pairs
They are also used to replace code which implicitly disables preemption,
such as:
  - get_cpu()/put_cpu()
  - get_cpu_var()/put_cpu_var()
with PREEMPT_RT friendly constructs; a conversion sketch follows below.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200527201119.1692513-2-bigeasy@linutronix.de
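
To make the conversion pattern concrete, here is a minimal, hypothetical
sketch. It assumes that the local_lock_t type and the INIT_LOCAL_LOCK()
static initializer are provided by <linux/local_lock_internal.h> (not shown
on this page); the struct and function names are made up for illustration.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per CPU data, previously guarded by preempt_disable()/enable() */
struct example_counters {
	local_lock_t	lock;	/* names the data this lock protects */
	unsigned long	events;
};

static DEFINE_PER_CPU(struct example_counters, example_counters) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_count_event(void)
{
	/*
	 * Previously:
	 *	preempt_disable();
	 *	__this_cpu_inc(example_counters.events);
	 *	preempt_enable();
	 *
	 * With a local lock the protection scope is annotated and lockdep
	 * can check it. On !PREEMPT_RT this still maps to
	 * preempt_disable()/enable(); on PREEMPT_RT it becomes a per CPU
	 * 'sleeping' spinlock, so the section stays preemptible.
	 */
	local_lock(&example_counters.lock);
	this_cpu_inc(example_counters.events);
	local_unlock(&example_counters.lock);
}
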
include/linux/local_lock.h (54 lines, 1.3 KiB, C)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
#define _LINUX_LOCAL_LOCK_H

#include <linux/local_lock_internal.h>

/**
 * local_lock_init - Runtime initialize a lock instance
 */
#define local_lock_init(lock)		__local_lock_init(lock)

/**
 * local_lock - Acquire a per CPU local lock
 * @lock:	The lock variable
 */
#define local_lock(lock)		__local_lock(lock)

/**
 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
 * @lock:	The lock variable
 */
#define local_lock_irq(lock)		__local_lock_irq(lock)

/**
 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
 *			 interrupts
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 */
#define local_lock_irqsave(lock, flags)				\
	__local_lock_irqsave(lock, flags)

/**
 * local_unlock - Release a per CPU local lock
 * @lock:	The lock variable
 */
#define local_unlock(lock)		__local_unlock(lock)

/**
 * local_unlock_irq - Release a per CPU local lock and enable interrupts
 * @lock:	The lock variable
 */
#define local_unlock_irq(lock)		__local_unlock_irq(lock)

/**
 * local_unlock_irqrestore - Release a per CPU local lock and restore
 *			      interrupt flags
 * @lock:	The lock variable
 * @flags:      Interrupt flags to restore
 */
#define local_unlock_irqrestore(lock, flags)			\
	__local_unlock_irqrestore(lock, flags)

#endif
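
As a closing illustration of the interface defined above, here is a hedged
usage sketch of the irqsave variants replacing a local_irq_save() protected
per CPU section, loosely modeled on the batched_entropy case mentioned in
the changelog. As before, local_lock_t and INIT_LOCAL_LOCK() are assumed to
come from <linux/local_lock_internal.h>, and every name below is
illustrative only.

#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Illustrative per CPU batch; all names are hypothetical */
struct example_batch {
	local_lock_t	lock;
	unsigned int	pos;
	u64		buf[16];
};

static DEFINE_PER_CPU(struct example_batch, example_batches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static u64 example_take_one(void)
{
	struct example_batch *b;
	unsigned long flags;
	u64 val;

	/*
	 * Previously something like:
	 *	local_irq_save(flags);
	 *	b = this_cpu_ptr(&example_batches);
	 *	...
	 *	local_irq_restore(flags);
	 *
	 * On !PREEMPT_RT local_lock_irqsave() still disables interrupts,
	 * so same-CPU interrupt context users stay excluded; on PREEMPT_RT
	 * the section remains preemptible while keeping CPU locality.
	 */
	local_lock_irqsave(&example_batches.lock, flags);
	b = this_cpu_ptr(&example_batches);
	val = b->buf[b->pos++ & 15];
	local_unlock_irqrestore(&example_batches.lock, flags);

	return val;
}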