// SPDX-License-Identifier: GPL-2.0
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		(&dummy)

#endif /* SMP */

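/*
 * The hash above spreads unrelated atomic objects across ATOMIC_HASH_SIZE
 * spinlocks: with ATOMIC_HASH_SIZE == 4, address bits 8-9 select the lock,
 * so for example an atomic_t at 0xf0000100 maps to __atomic_hash[1]. On UP
 * builds a single dummy lock is enough, since spin_lock_irqsave() there
 * essentially reduces to disabling interrupts on the only CPU.
 */
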
#define ATOMIC_FETCH_OP(op, c_op)					\
int arch_atomic_fetch_##op(int i, atomic_t *v)				\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = v->counter;						\
	v->counter c_op i;						\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(arch_atomic_fetch_##op);

#define ATOMIC_OP_RETURN(op, c_op)					\
int arch_atomic_##op##_return(int i, atomic_t *v)			\
{									\
	int ret;							\
	unsigned long flags;						\
	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
									\
	ret = (v->counter c_op i);					\
									\
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(arch_atomic_##op##_return);

ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

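/*
 * For reference, ATOMIC_FETCH_OP(add, +=) above expands to roughly
 *
 *	int arch_atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		int ret;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(ATOMIC_HASH(v), flags);
 *		ret = v->counter;
 *		v->counter += i;
 *		spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 *		return ret;
 *	}
 *
 * i.e. it returns the pre-add value, with the whole read-modify-write done
 * under the per-hash-bucket lock, while arch_atomic_add_return() returns
 * the post-add value.
 */
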
int arch_atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(arch_atomic_xchg);

int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(arch_atomic_cmpxchg);

int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(arch_atomic_fetch_add_unless);

/* Atomic operations are already serializing */
void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(arch_atomic_set);

unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___set_bit);

unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___clear_bit);

unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(sp32___change_bit);

#define CMPXCHG(T)						\
	T __cmpxchg_##T(volatile T *ptr, T old, T new)		\
	{							\
		unsigned long flags;				\
		T prev;						\
								\
		spin_lock_irqsave(ATOMIC_HASH(ptr), flags);	\
		if ((prev = *ptr) == old)			\
			*ptr = new;				\
		spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
								\
		return prev;					\
	}

CMPXCHG(u8)
CMPXCHG(u16)
CMPXCHG(u32)
CMPXCHG(u64)
EXPORT_SYMBOL(__cmpxchg_u8);
EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);

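/*
 * Note: CMPXCHG(u32) above expands to a plain C function,
 *
 *	u32 __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new),
 *
 * which performs the compare-and-swap under the hashed spinlock rather
 * than with a hardware compare-and-swap primitive; the u8/u16/u64 variants
 * are generated the same way.
 */
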
unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);