Both spin locks and write locks currently do:

	f0 0f b1 17	lock cmpxchg %edx,(%rdi)
	85 c0		test   %eax,%eax
	75 05		jne    [slowpath]

This 'test' insn is superfluous; the cmpxchg insn sets the Z flag
appropriately. Peter pointed out that using atomic_try_cmpxchg_acquire()
will let the compiler know this is true. Comparing before/after
disassemblies shows the only effect is to remove this insn.

Take this opportunity to make the spin & write lock code resemble each
other more closely and have similar likely() hints.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Link: http://lkml.kernel.org/r/20180820162639.GC25153@bombadil.infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
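To make the effect concrete, here is a sketch of the fast-path change in queued_spin_lock(). The "before" version is reconstructed from the description above rather than copied from the old tree, so treat it as an assumption; the "after" version matches the header below.

/*
 * Before (reconstructed sketch): atomic_cmpxchg_acquire() returns the old
 * value, so the compiler has to compare it against 0 itself, which on x86
 * emits the redundant 'test %eax,%eax' after 'lock cmpxchg'.
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;

	queued_spin_lock_slowpath(lock, val);
}

/*
 * After: atomic_try_cmpxchg_acquire() returns a success boolean, which the
 * compiler can derive directly from the Z flag set by 'lock cmpxchg', so
 * the separate 'test' instruction disappears.
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}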
/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code to avoid lock stealing by the lockref
 *      code and change things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
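The #ifndef guards around queued_spin_unlock() and virt_spin_lock() above exist so an architecture can supply its own variants before this generic header is pulled in. A minimal sketch of what such an override might look like in a hypothetical arch header (the file name, the include guard, and the unlock body are illustrative assumptions, not taken from this tree):

/* Hypothetical arch/foo/include/asm/qspinlock.h (illustrative sketch). */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock
/*
 * Arch-specific unlock: a release store that clears the locked byte is
 * enough to drop the lock; a real architecture would typically substitute
 * a cheaper store or a paravirt-aware call here.
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */

Defining the queued_spin_unlock macro to its own name is what makes the generic "#ifndef queued_spin_unlock" block above compile out in favour of the arch version.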