asm-generic: ticket-lock: Optimize arch_spin_value_unlocked()
The ticket-lock implementation of arch_spin_value_unlocked() causes the
compiler to generate inefficient asm code on the riscv architecture,
due to an unnecessary memory access to the contended lock value.
Before the patch:
	void lockref_get(struct lockref *lockref)
	{
	  78:   fd010113                add     sp,sp,-48
	  7c:   02813023                sd      s0,32(sp)
	  80:   02113423                sd      ra,40(sp)
	  84:   03010413                add     s0,sp,48
	0000000000000088 <.LBB296>:
		CMPXCHG_LOOP(
	  88:   00053783                ld      a5,0(a0)
After the patch:
	void lockref_get(struct lockref *lockref)
	{
		CMPXCHG_LOOP(
	  78:   00053783                ld      a5,0(a0)
After the patch, lockref_get() enters the fast path directly instead of
going through the function's prologue first. Previously, the ticket
lock's complex logic limited the compiler's ability to optimize the
spinlock fast path, whereas qspinlock's does not.
Any caller of arch_spin_value_unlocked() can benefit from this change;
currently, the only caller is lockref (a simplified sketch of its fast
path follows the tags below).
Signed-off-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230908154339.3250567-1-guoren@kernel.org
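For context, lockref packs a spinlock and a reference count into a single
64-bit word and speculatively updates the count with a 64-bit cmpxchg while
the lock is observed to be unlocked. What follows is a simplified paraphrase
of that fast path from lib/lockref.c, not verbatim kernel code; the exact
macros, retry limit, and helpers vary across kernel versions:

	/* Simplified paraphrase of the lockref fast path (abbreviated). */
	struct lockref {
		union {
			u64 lock_count;		/* lock and count as one 64-bit word */
			struct {
				spinlock_t lock;
				int count;
			};
		};
	};

	void lockref_get(struct lockref *lockref)
	{
		struct lockref old;

		/* Single load of the whole lock+count word. */
		old.lock_count = READ_ONCE(lockref->lock_count);

		/*
		 * arch_spin_value_unlocked() inspects the snapshot by value,
		 * so no further memory access to the contended lock word is
		 * required; the compiler can work on a register copy.
		 */
		while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
			struct lockref new = old;

			new.count++;
			/* On failure, old is refreshed and the loop re-checks. */
			if (try_cmpxchg64_relaxed(&lockref->lock_count,
						  &old.lock_count, new.lock_count))
				return;	/* fast path: count bumped locklessly */
		}

		/* Slow path: fall back to taking the spinlock. */
		spin_lock(&lockref->lock);
		lockref->count++;
		spin_unlock(&lockref->lock);
	}

Because the snapshot is a plain struct copy, the unlocked check no longer
forces a reload of lockref memory, which is what moves the initial load into
the fast path shown in the disassembly above.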
			
			
parent fbeb558b0d
commit c6f4a90022

1 changed file with 9 additions and 7 deletions
include/asm-generic/spinlock.h

@@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 	smp_store_release(ptr, (u16)val + 1);
 }
 
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	u32 val = lock.counter;
+
+	return ((val >> 16) == (val & 0xffff));
+}
+
 static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	u32 val = atomic_read(lock);
+	arch_spinlock_t val = READ_ONCE(*lock);
 
-	return ((val >> 16) != (val & 0xffff));
+	return !arch_spin_value_unlocked(val);
 }
 
 static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
 	return (s16)((val >> 16) - (val & 0xffff)) > 1;
 }
 
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return !arch_spin_is_locked(&lock);
-}
-
 #include <asm/qrwlock.h>
 
 #endif /* __ASM_GENERIC_SPINLOCK_H */
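The new check depends only on the ticket-lock value layout, which the diff
itself confirms: the "next" ticket lives in the high 16 bits of the counter
and the currently served "owner" ticket in the low 16 bits, so the lock is
free exactly when the two halves match. A minimal user-space sketch of that
encoding (hypothetical names, not kernel code):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Stand-in for the kernel's arch_spinlock_t: "next" (the ticket to
	 * hand out) sits in the high 16 bits, "owner" (the ticket being
	 * served) in the low 16 bits of one 32-bit counter.
	 */
	typedef struct { uint32_t counter; } spinlock_demo_t;

	/*
	 * Mirrors the patched arch_spin_value_unlocked(): it inspects a
	 * by-value copy, so no memory access to the (possibly contended)
	 * lock word is needed. Unlocked means next == owner.
	 */
	static int value_unlocked(spinlock_demo_t lock)
	{
		uint32_t val = lock.counter;

		return (val >> 16) == (val & 0xffff);
	}

	int main(void)
	{
		spinlock_demo_t lock = { .counter = 0 };	/* next=0, owner=0 */

		assert(value_unlocked(lock));		/* fresh lock is free */

		lock.counter += 1 << 16;		/* take a ticket: next=1 */
		assert(!value_unlocked(lock));		/* owner=0 != next=1: held */

		lock.counter += 1;			/* unlock: owner=1 */
		assert(value_unlocked(lock));		/* next == owner again */

		puts("ticket-lock encoding demo passed");
		return 0;
	}

Because value_unlocked() takes its argument by value, the compiler can
evaluate the check on a register copy without touching the lock word in
memory, which is precisely the codegen improvement shown in the disassembly
in the commit message.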