forked from mirrors/linux

commit eb5c2d4b45

The kernel test robot reports that moving READ_ONCE() out into its own
header breaks a W=1 build for parisc, which is relying on the definition
of compiletime_assert() being available:

| In file included from ./arch/parisc/include/generated/asm/rwonce.h:1,
|                  from ./include/asm-generic/barrier.h:16,
|                  from ./arch/parisc/include/asm/barrier.h:29,
|                  from ./arch/parisc/include/asm/atomic.h:11,
|                  from ./include/linux/atomic.h:7,
|                  from kernel/locking/percpu-rwsem.c:2:
| ./arch/parisc/include/asm/atomic.h: In function 'atomic_read':
| ./include/asm-generic/rwonce.h:36:2: error: implicit declaration of function 'compiletime_assert' [-Werror=implicit-function-declaration]
|    36 |  compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
|       |  ^~~~~~~~~~~~~~~~~~
| ./include/asm-generic/rwonce.h:49:2: note: in expansion of macro 'compiletime_assert_rwonce_type'
|    49 |  compiletime_assert_rwonce_type(x); \
|       |  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| ./arch/parisc/include/asm/atomic.h:73:9: note: in expansion of macro 'READ_ONCE'
|    73 |  return READ_ONCE((v)->counter);
|       |         ^~~~~~~~~

Move these macros into compiler_types.h, so that they are available to
READ_ONCE() and friends.

Link: http://lists.infradead.org/pipermail/linux-arm-kernel/2020-July/587094.html
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Will Deacon <will@kernel.org>
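For context, a minimal sketch of the dependency the robot tripped over,
assuming the asm-generic/rwonce.h lines quoted in the log above (the
assert message string is paraphrased from memory, not taken from this
page):

	/* include/asm-generic/rwonce.h (sketch) */
	#define compiletime_assert_rwonce_type(t)				\
		compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
			"Unsupported access size for {READ,WRITE}_ONCE().")

compiletime_assert() therefore has to be visible before <asm/rwonce.h>
is pulled in. Note how the file below includes <linux/compiler_types.h>
at the very top but <asm/rwonce.h> at the very bottom, which is why
moving the assert macros into compiler_types.h resolves the ordering.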
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
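
/*
 * Illustrative example (editor's sketch, not part of the original file):
 * a typical use is annotating an error path expected to be cold, e.g.
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *
 * With branch profiling enabled, ftrace_likely_update() records how
 * often each annotation was right, and the hit/miss counts are exposed
 * through tracefs.
 */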

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */
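
/*
 * Illustrative note (editor's sketch): with CONFIG_PROFILE_ALL_BRANCHES,
 * the preprocessor rewrites every
 *
 *	if (x > 0) { ... }
 *
 * into roughly
 *
 *	if (__trace_if_var(!!(x > 0))) { ... }
 *
 * so each non-constant condition bumps a per-site miss/hit counter,
 * while constant conditions are passed through untouched.
 */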

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
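
/*
 * Illustrative example (editor's sketch of a typical caller, not from
 * this file): barrier_data() is what keeps a sensitive-memory wipe from
 * being eliminated as a dead store:
 *
 *	memset(s, 0, count);
 *	barrier_data(s);
 */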

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 * noinstr void foo()
 * {
 *	instrumentation_begin();
 *	...
 *	if (cond) {
 *		instrumentation_begin();
 *		...
 *		instrumentation_end();
 *	}
 *	bar();
 *	instrumentation_end();
 * }
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then compute the sum along the !cond path, we'll see that the call
 * to bar() happens with a value of 0, even though we meant it to happen with
 * a positive value.
 *
 * To avoid this, have _end() be a NOP instruction; this ensures it is part
 * of the conditional block and does not escape.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while(0)
#define instrumentation_end()		do { } while(0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
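
/*
 * Illustrative example (editor's sketch, not from this file):
 * unreachable() typically follows an instruction that cannot return, so
 * the compiler stops warning about a missing return and can drop dead
 * code, e.g.
 *
 *	asm volatile("ud2");	// trapping instruction on x86
 *	unreachable();
 */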

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
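
/*
 * Illustrative example (editor's sketch; 'my_vector_table' is a
 * hypothetical symbol): data reached only from assembly could be kept
 * alive with
 *
 *	KENTRY(my_vector_table);
 */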

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
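
/*
 * Illustrative note (editor's sketch; 'my_cpu_offset' is hypothetical):
 * laundering the pointer through an unsigned long stops the compiler
 * from assuming the result still points into the original object, e.g.
 * for per-CPU style offsetting:
 *
 *	p = RELOC_HIDE(&base, my_cpu_offset);
 */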

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
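
/*
 * Illustrative example (editor's sketch): defeating value tracking keeps
 * a constant-time comparison from being short-circuited by the compiler:
 *
 *	OPTIMIZER_HIDE_VAR(neq);
 *	return neq;
 */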

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
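
/*
 * Editor's note (illustrative expansion): with this fallback,
 * __UNIQUE_ID(foo) on line 120 pastes to __UNIQUE_ID_foo120 -- hence
 * "not-quite-unique": two expansions on the same line (or on the same
 * line number of two different files) still collide.
 */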

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
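
/*
 * Illustrative example (editor's sketch; 'sq->pending' is hypothetical):
 * a racy read that is fine for diagnostics but would otherwise be
 * flagged by KCSAN:
 *
 *	pr_info("pending=%d\n", data_race(sq->pending));
 */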

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
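
/*
 * Editor's note (illustrative expansion): __ADDRESSABLE(foo) on line 42
 * becomes roughly
 *
 *	static void *__addressable_foo42 = (void *)&foo;
 *
 * placed in .discard.addressable, so the reference is dropped from the
 * final image but still forces 'foo' to be emitted.
 */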

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
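
/*
 * Illustrative example (editor's sketch; 'table' is hypothetical): this
 * is the read side of storing a 32-bit self-relative offset, i.e.
 * entry = target - &entry; given the entry's address, the original
 * pointer is recovered with
 *
 *	void *target = offset_to_ptr(&table[i]);
 */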

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
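
/*
 * Editor's note (illustrative; ARRAY_SIZE() itself lives elsewhere, in
 * linux/kernel.h at this point in history): the classic user is
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 *
 * which fails to compile once 'arr' has decayed to a pointer.
 */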

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */