	The arch_{read,spin,write}_lock_flags() macros are simply mapped to the
non-flags versions by the majority of architectures, so do this in core
code and remove the dummy implementations. Also remove the implementation
in spinlock_up.h, since all callers of do_raw_spin_lock_flags() call
local_irq_save(flags) anyway.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
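For illustration, a minimal sketch of the core-code fallback described above. The #ifndef-guard pattern and its exact placement are assumptions based on the macro names in the message: they let an architecture that genuinely needs flags-aware locking keep its own definition, while everyone else falls back to the plain variants and ignores the flags argument.

/*
 * Sketch of the core-code fallback described in the commit message
 * (guard names assumed): if an architecture does not provide its own
 * arch_*_lock_flags(), map it to the plain lock operation.
 */
#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

#ifndef arch_read_lock_flags
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#endif

#ifndef arch_write_lock_flags
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
#endif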
include/linux/spinlock_up.h · 72 lines · 2.2 KiB · C
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */
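As for the second half of the change, a simplified sketch of the caller pattern the commit message relies on, modelled on the kernel's __raw_spin_lock_irqsave() with lockdep details omitted: interrupts are already saved and disabled before the raw lock operation runs, so a UP implementation of do_raw_spin_lock_flags() that did its own local_irq_save() was redundant.

/*
 * Simplified sketch of the caller pattern referenced in the commit
 * message (lockdep handling omitted): flags are saved and interrupts
 * disabled before do_raw_spin_lock_flags() is ever reached, so the
 * arch hook never needed to touch @flags itself.
 */
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);			/* flags saved here ... */
	preempt_disable();
	do_raw_spin_lock_flags(lock, &flags);	/* ... before the lock op runs */
	return flags;
}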