Mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00
commit ccfbb5bed4
There are in-tree users of atomic_dec_and_lock() which must acquire the
spin lock with interrupts disabled. To work around the lack of an irqsave
variant of atomic_dec_and_lock() they use local_irq_save() at the call
site. This causes extra code and in some places creates unnecessarily long
interrupt-disabled sections. These places also need extra treatment for
PREEMPT_RT due to the disconnect between the irq disabling and the lock
function.

Implement the missing irqsave variant of the function.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20180612161621.22645-3-bigeasy@linutronix.de
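To make the problem concrete, the sketch below contrasts the open-coded
call-site pattern with the new helper. It is illustrative only: struct
my_obj, the my_obj_put_*() functions and the kfree() teardown are made up;
only atomic_dec_and_lock(), atomic_dec_and_lock_irqsave() (the
<linux/spinlock.h> wrapper around the function exported in the file below)
and the irq helpers are real kernel APIs.

	#include <linux/spinlock.h>
	#include <linux/atomic.h>
	#include <linux/slab.h>

	/* Hypothetical refcounted object; the names are illustrative. */
	struct my_obj {
		atomic_t	refcnt;
		spinlock_t	lock;	/* must be taken with irqs disabled */
	};

	/*
	 * Before: interrupts are disabled by hand around the whole
	 * sequence, so they stay off even on the fast path where the
	 * refcount does not drop to zero.
	 */
	static void my_obj_put_old(struct my_obj *obj)
	{
		unsigned long flags;

		local_irq_save(flags);
		if (atomic_dec_and_lock(&obj->refcnt, &obj->lock)) {
			/* last reference: tear down under the lock */
			spin_unlock(&obj->lock);
			kfree(obj);
		}
		local_irq_restore(flags);
	}

	/*
	 * After: irq disabling is coupled to the lock acquisition, so
	 * the fast path never disables interrupts at all.
	 */
	static void my_obj_put_new(struct my_obj *obj)
	{
		unsigned long flags;

		if (atomic_dec_and_lock_irqsave(&obj->refcnt, &obj->lock, flags)) {
			spin_unlock_irqrestore(&obj->lock, flags);
			kfree(obj);
		}
	}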
lib/dec_and_lock.c (51 lines, 1.2 KiB, C)
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);

int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
				 unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
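The "NOTE NOTE NOTE" comment is easiest to see against a lookup path that
takes new references under the same lock. A minimal sketch, reusing the
hypothetical struct my_obj from the earlier example; my_table_lock and
my_table_head are likewise made up:

	/* Hypothetical lookup that takes a new reference under the lock. */
	static struct my_obj *my_obj_get(void)
	{
		struct my_obj *obj;

		spin_lock(&my_table_lock);
		obj = my_table_head;		/* look the object up */
		if (obj)
			atomic_inc(&obj->refcnt); /* 0 -> 1 cannot happen here */
		spin_unlock(&my_table_lock);
		return obj;
	}

With the naive dec_and_test-then-lock sequence from the comment, this
lookup could run in the window between the count reaching zero and the
lock being taken, handing out a reference to an object another CPU is
already committed to freeing. _atomic_dec_and_lock() closes that window:
the fast path atomic_add_unless(atomic, -1, 1) refuses to perform the
final decrement, so the 1 -> 0 transition only ever happens with the lock
held, where the object can also be unlinked from the table before the lock
is dropped.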