forked from mirrors/linux
		
	The conditional inc/dec ops differ for atomic_t and atomic64_t: - atomic_inc_unless_positive() is optional for atomic_t, and doesn't exist for atomic64_t. - atomic_dec_unless_negative() is optional for atomic_t, and doesn't exist for atomic64_t. - atomic_dec_if_positive is optional for atomic_t, and is mandatory for atomic64_t. Let's make these consistently optional for both. At the same time, let's clean up the existing fallbacks to use atomic_try_cmpxchg(). The instrumented atomics are updated accordingly. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Will Deacon <will.deacon@arm.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Boqun Feng <boqun.feng@gmail.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: https://lore.kernel.org/lkml/20180621121321.4761-18-mark.rutland@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
			
				
	
	
		
			67 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			67 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/* SPDX-License-Identifier: GPL-2.0 */
 | 
						|
/* atomic.h: Thankfully the V9 is at least reasonable for this
 | 
						|
 *           stuff.
 | 
						|
 *
 | 
						|
 * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
 | 
						|
 */
 | 
						|
 | 
						|
#ifndef __ARCH_SPARC64_ATOMIC__
 | 
						|
#define __ARCH_SPARC64_ATOMIC__
 | 
						|
 | 
						|
#include <linux/types.h>
 | 
						|
#include <asm/cmpxchg.h>
 | 
						|
#include <asm/barrier.h>
 | 
						|
 | 
						|
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

/*
 * Plain load/store accessors for the counter.  READ_ONCE/WRITE_ONCE
 * keep the compiler from tearing, caching, or re-fetching the access;
 * they provide no ordering beyond that (use barriers where needed).
 */
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v, i)	WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op)							\
 | 
						|
void atomic_##op(int, atomic_t *);					\
 | 
						|
void atomic64_##op(long, atomic64_t *);
 | 
						|
 | 
						|
#define ATOMIC_OP_RETURN(op)						\
 | 
						|
int atomic_##op##_return(int, atomic_t *);				\
 | 
						|
long atomic64_##op##_return(long, atomic64_t *);
 | 
						|
 | 
						|
#define ATOMIC_FETCH_OP(op)						\
 | 
						|
int atomic_fetch_##op(int, atomic_t *);					\
 | 
						|
long atomic64_fetch_##op(long, atomic64_t *);
 | 
						|
 | 
						|
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 | 
						|
 | 
						|
ATOMIC_OPS(add)
 | 
						|
ATOMIC_OPS(sub)
 | 
						|
 | 
						|
#undef ATOMIC_OPS
 | 
						|
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
 | 
						|
 | 
						|
ATOMIC_OPS(and)
 | 
						|
ATOMIC_OPS(or)
 | 
						|
ATOMIC_OPS(xor)
 | 
						|
 | 
						|
#undef ATOMIC_OPS
 | 
						|
#undef ATOMIC_FETCH_OP
 | 
						|
#undef ATOMIC_OP_RETURN
 | 
						|
#undef ATOMIC_OP
 | 
						|
 | 
						|
/* Compare-and-swap on the 32-bit counter; returns the value observed. */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
/* Atomically replace v->counter with @new, returning the prior value. */
static inline int atomic_xchg(atomic_t *v, int new)
{
	int *counter = &v->counter;

	return xchg(counter, new);
}
/*
 * 64-bit compare-and-swap / exchange.  The result of cmpxchg() is cast
 * back to the counter's type so the macro yields a long, matching the
 * atomic64_t counter width.
 */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/*
 * Decrement v only if the result would be >= 0; returns the new value
 * (negative when no decrement happened).  Implemented out of line; the
 * #define advertises the arch-specific version to the generic atomic
 * fallback machinery so it does not emit its own.
 */
long atomic64_dec_if_positive(atomic64_t *v);
#define atomic64_dec_if_positive atomic64_dec_if_positive

#endif /* !(__ARCH_SPARC64_ATOMIC__) */