forked from mirrors/linux

commit 416eb60317

There's a somewhat common pattern of using FIELD_PREP() even for
single bits, e.g.

  cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
                           !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));

which might as well be written as

  if (params.flags & HAL_SRNG_FLAGS_MSI_SWAP)
    cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP;

(since info1 is fully initialized to start with), but in a long chain
of FIELD_PREP() calls this really seems fine.

However, it triggers a sparse warning from the check in the macro for
whether a constant value fits into the mask, since that check contains
a "& (_val)". In this case the construct is always intentional, so just
suppress the warning by adding "0+" to the expression, indicating
explicitly that this is correct.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://msgid.link/20240223100146.d243b6b1a9a1.I033828b1187c6bccf086e31400f7e933bb8373e7@changeid
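
To make the change concrete, here is a small standalone sketch of the
pattern, not kernel code: the EX_* names are invented for illustration,
and only the "0 +" trick itself comes from this patch. Running sparse
over a file shaped like this should flag the first form (bitwise AND
with a logical-not operand) but not the second, which computes the same
value.

/*
 * Standalone illustration (not kernel code); EX_* names are made up.
 * The check mimics the constant-fit test in __BF_FIELD_CHECK() for a
 * single-bit mask (bit 4), where the value being checked is a boolean
 * such as !!(flags & SOME_FLAG).
 */
#define EX_FIT_CHECK(m, v)	(~((m) >> 4) & (v))		/* sparse: dubious x & !y */
#define EX_FIT_CHECK_OK(m, v)	(~((m) >> 4) & (0 + (v)))	/* same value, no warning */

unsigned int ex_check(unsigned int flags)
{
	unsigned int a = EX_FIT_CHECK(0x10u, !!(flags & 0x4));
	unsigned int b = EX_FIT_CHECK_OK(0x10u, !!(flags & 0x4));

	return a + b;
}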

206 lines · 6.4 KiB · C

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <asm/byteorder.h>

/*
 * Bitfield access macros
 *
 * FIELD_{GET,PREP} macros take as first parameter shifted mask
 * from which they extract the base mask and shift amount.
 * Mask must be a compilation time constant.
 *
 * Example:
 *
 *  #include <linux/bitfield.h>
 *  #include <linux/bits.h>
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *	  FIELD_PREP(REG_FIELD_B, 0) |
 *	  FIELD_PREP(REG_FIELD_C, c) |
 *	  FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */

#define __bf_shf(x) (__builtin_ffsll(x) - 1)

#define __scalar_type_to_unsigned_cases(type)				\
		unsigned type:	(unsigned type)0,			\
		signed type:	(unsigned type)0

#define __unsigned_scalar_typeof(x) typeof(				\
		_Generic((x),						\
			char:	(unsigned char)0,			\
			__scalar_type_to_unsigned_cases(char),		\
			__scalar_type_to_unsigned_cases(short),		\
			__scalar_type_to_unsigned_cases(int),		\
			__scalar_type_to_unsigned_cases(long),		\
			__scalar_type_to_unsigned_cases(long long),	\
			default: (x)))

#define __bf_cast_unsigned(type, x)	((__unsigned_scalar_typeof(type))(x))

#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) &	\
					(0 + (_val)) : 0,		\
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) >	\
				 __bf_cast_unsigned(_reg, ~0ull),	\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})

/**
 * FIELD_MAX() - produce the maximum value representable by a field
 * @_mask: shifted mask defining the field's length and position
 *
 * FIELD_MAX() returns the maximum value that can be held in the field
 * specified by @_mask.
 */
#define FIELD_MAX(_mask)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: ");	\
		(typeof(_mask))((_mask) >> __bf_shf(_mask));		\
	})

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})

/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using logical OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

#define __BF_CHECK_POW2(n)	BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)

/**
 * FIELD_PREP_CONST() - prepare a constant bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP_CONST() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using logical OR.
 *
 * Unlike FIELD_PREP() this is a constant expression and can therefore
 * be used in initializers. Error checking is less comfortable for this
 * version, and non-constant masks cannot be used.
 */
#define FIELD_PREP_CONST(_mask, _val)					\
	(								\
		/* mask must be non-zero */				\
		BUILD_BUG_ON_ZERO((_mask) == 0) +			\
		/* check if value fits */				\
		BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
		/* check if mask is contiguous */			\
		__BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) +	\
		/* and create the value */				\
		(((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask))	\
	)

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg:  value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
	})

extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
static __always_inline u64 field_multiplier(u64 field)
{
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;
}
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
#define field_max(field)	((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
	____MAKE_OP(u##size,u##size,,)
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP

#endif