The ror32 implementation (word >> shift) | (word << (32 - shift)) has
undefined behaviour if shift is outside the [1, 31] range.  Similarly
for the 64-bit variants.  Most callers pass a compile-time constant
(naturally in that range), but there's an UBSAN report that these may
actually be called with a shift count of 0.

Instead of special-casing that, we can make them DTRT for all values of
shift while also avoiding UB.  For some reason, this was already partly
done for rol32 (which was well-defined for [0, 31]).  gcc 8 recognizes
these patterns as rotates, so for example

  __u32 rol32(__u32 word, unsigned int shift)
  {
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
  }

compiles to

0000000000000020 <rol32>:
  20:   89 f8                   mov    %edi,%eax
  22:   89 f1                   mov    %esi,%ecx
  24:   d3 c0                   rol    %cl,%eax
  26:   c3                      retq

Older compilers unfortunately do not do as well, but this only affects
the small minority of users that don't pass constants.

Due to integer promotions, ro[lr]8 were already well-defined for shifts
in [0, 8], and ro[lr]16 were mostly well-defined for shifts in [0, 16]
(only mostly - u16 gets promoted to _signed_ int, so if bit 15 is set,
word << 16 is undefined).  For consistency, update those as well.
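
As a quick sketch of why the masked form is also safe at shift == 0
(illustrative walkthrough, not part of the patch): (-0) & 31 == 0, so
both shift counts stay within [0, 31] and the expression reduces to
word | word, i.e. word.  The same holds for the 16-bit variants, where
the masked counts never reach the problematic promoted-int shift by 16:

  /* the patch's new ror16, shown here for the walkthrough */
  __u16 ror16(__u16 word, unsigned int shift)
  {
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
  }
  /* ror16(0x8001, 0) == 0x8001, with no 16-bit shift count of 16 */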
Link: http://lkml.kernel.org/r/20190410211906.2190-1-linux@rasmusvillemoes.dk
Signed-off-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Reported-by: Ido Schimmel <idosch@mellanox.com>
Tested-by: Ido Schimmel <idosch@mellanox.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Cc: Vadim Pasternak <vadimp@mellanox.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Jacek Anaszewski <jacek.anaszewski@gmail.com>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
#include <linux/bits.h>

#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

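/*
 * Illustrative example (not part of the original header): iterate the
 * set bits of a one-word bitmap.  With map[0] == 0x29, the loop visits
 * bits 0, 3 and 5.
 *
 *	unsigned long map[1] = { 0x29 };
 *	unsigned int bit;
 *
 *	for_each_set_bit(bit, map, BITS_PER_LONG)
 *		pr_info("bit %u is set\n", bit);
 */
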
static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

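/*
 * Illustrative example (not part of the original header): with the
 * masked shifts above, shift == 0 is well-defined, since (-0) & 31 == 0
 * and the expression reduces to word | word.
 *
 *	rol32(0x80000001, 0) == 0x80000001
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x80000001, 1) == 0xc0000000
 */
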
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

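/*
 * Illustrative example (not part of the original header): treating
 * bit 7 as the sign bit,
 *
 *	sign_extend32(0x000000a5, 7) == (__s32)0xffffffa5 == -91
 *	sign_extend32(0x00000025, 7) == 0x25
 */
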
static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * Same as get_count_order(), but with an unsigned long parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	else if (l & (l - 1UL))
		return (int)fls_long(l);
	else
		return (int)fls_long(l) - 1;
}

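/*
 * Illustrative example (not part of the original header):
 *
 *	get_count_order(16) == 4		(16 is already 2^4)
 *	get_count_order(17) == 5		(rounded up to 32 == 2^5)
 *	get_count_order_long(0UL) == -1
 */
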
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

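/*
 * Illustrative example (not part of the original header): on a 32-bit
 * kernel the low word is tested first, so
 *
 *	__ffs64(0x0000000100000000ULL) == 32
 *	__ffs64(0x0000000100000001ULL) == 0
 */
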
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

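/*
 * Illustrative example (not part of the original header): assign_bit()
 * is the atomic form, __assign_bit() the non-atomic one.
 *
 *	assign_bit(3, &flags, true);	equivalent to set_bit(3, &flags)
 *	__assign_bit(3, &flags, false);	equivalent to __clear_bit(3, &flags)
 */
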
#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)	\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif

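/*
 * Illustrative example (not part of the original header): atomically
 * replace the low byte of word with 0x5a, leaving the other bits
 * untouched; the previous value of the whole word is returned.
 *
 *	old = set_mask_bits(&word, 0xff, 0x5a);
 */
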
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)	\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif

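/*
 * Illustrative example (not part of the original header), with LOCK_BIT
 * and WAITER_BIT as hypothetical single-bit masks: clear LOCK_BIT
 * unless WAITER_BIT is set; the result is true iff the clear happened.
 *
 *	cleared = bit_clear_unless(&word, LOCK_BIT, WAITER_BIT);
 */
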
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The number of bits to search
 *
 * Returns the bit number of the last set bit, or size.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif