locking/spinlocks: Remove an instruction from spin and write locks

Both spin locks and write locks currently do:

 f0 0f b1 17	lock cmpxchg %edx,(%rdi)
 85 c0		test   %eax,%eax
 75 05		jne    [slowpath]

This 'test' insn is superfluous; the cmpxchg insn sets the Z flag
appropriately.  Peter pointed out that using atomic_try_cmpxchg_acquire()
will let the compiler know this is true.  Comparing before/after
disassemblies shows the only effect is to remove this insn.

Take this opportunity to make the spin & write lock code resemble each
other more closely and have similar likely() hints.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Link: http://lkml.kernel.org/r/20180820162639.GC25153@bombadil.infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:

parent cb92173d1f
commit 27df89689e

2 changed files with 13 additions and 10 deletions
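As a hedged illustration of the codegen point the commit message makes, here is a minimal user-space sketch, not the kernel's implementation. The helpers cmpxchg_acquire()/try_cmpxchg_acquire() are illustrative stand-ins for the kernel's atomic_cmpxchg_acquire()/atomic_try_cmpxchg_acquire(), built on the GCC/Clang __atomic builtins:

#include <stdbool.h>
#include <stdint.h>

/* cmpxchg style: returns the old value; the caller must compare it. */
static inline uint32_t cmpxchg_acquire(uint32_t *p, uint32_t old, uint32_t new)
{
	__atomic_compare_exchange_n(p, &old, new, false,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	return old;	/* on failure, updated to the value actually observed */
}

/* try_cmpxchg style: returns success directly; *old is updated on failure. */
static inline bool try_cmpxchg_acquire(uint32_t *p, uint32_t *old, uint32_t new)
{
	return __atomic_compare_exchange_n(p, old, new, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

bool trylock_before(uint32_t *lock)
{
	/* x86: lock cmpxchg %edx,(%rdi); test %eax,%eax; jne ... */
	return cmpxchg_acquire(lock, 0, 1) == 0;
}

bool trylock_after(uint32_t *lock)
{
	uint32_t val = 0;

	/* x86: lock cmpxchg %edx,(%rdi); jne ...  (ZF set by cmpxchg itself) */
	return try_cmpxchg_acquire(lock, &val, 1);
}

Compiling both functions with -O2 on x86-64 should show trylock_after() losing only the 'test' instruction, mirroring the before/after disassemblies described above.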
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-					     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
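A note on why the queued_write_trylock() hunk above is an equivalent transformation: cnts is known to be zero when the cmpxchg is reached (the unlikely(cnts) test has already returned 0 otherwise), so the old comparand-and-new-value pair (cnts, cnts | _QW_LOCKED) collapses to (0, _QW_LOCKED). A trivial stand-alone check of that identity, using the _QW_LOCKED value defined in asm-generic/qrwlock.h:

#include <assert.h>
#include <stdint.h>

#define _QW_LOCKED	0x0ff	/* a writer holds the lock (qrwlock.h) */

int main(void)
{
	uint32_t cnts = 0;	/* guaranteed by the early 'return 0' above */

	assert((cnts | _QW_LOCKED) == _QW_LOCKED);
	return 0;
}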
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
-	return 0;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
+		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
 
 	queued_spin_lock_slowpath(lock, val);
 }
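One detail worth calling out in the queued_spin_lock() hunk above: when atomic_try_cmpxchg_acquire() fails, it writes the value it observed back into val, so queued_spin_lock_slowpath(lock, val) still receives the same snapshot that the old atomic_cmpxchg_acquire() return value carried. A small stand-alone demonstration of that write-back behaviour, using the same __atomic builtin as the illustrative helpers above (the values here are hypothetical):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	uint32_t lock = 5;	/* pretend another CPU already holds the lock */
	uint32_t val = 0;	/* the 'expected' value, as in queued_spin_lock() */

	bool locked = __atomic_compare_exchange_n(&lock, &val, 1, false,
						  __ATOMIC_ACQUIRE,
						  __ATOMIC_RELAXED);

	assert(!locked);	/* the exchange failed ... */
	assert(val == 5);	/* ... and val now holds the observed value,
				 * ready to hand to the slowpath */
	return 0;
}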