0372007f5a
context tracking lacks a few protection mechanisms against instrumentation:

 - While the core functions are marked NOKPROBE, they lack protection
   against function tracing, which is required as the function entry/exit
   points can be utilized by BPF.

 - Static functions invoked from the protected functions need to be marked
   as well, as they can otherwise be instrumented.

 - Using plain inline allows the compiler to emit traceable and probe-able
   functions.

Fix this by marking the functions noinstr and converting the plain inlines
to __always_inline.

The NOKPROBE_SYMBOL() annotations are removed as the .noinstr.text section
is already excluded from being probed.

Cures the following objtool warnings:

 vmlinux.o: warning: objtool: enter_from_user_mode()+0x34: call to __context_tracking_exit() leaves .noinstr.text section
 vmlinux.o: warning: objtool: prepare_exit_to_usermode()+0x29: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: syscall_return_slowpath()+0x29: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_syscall_64()+0x7f: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_int80_syscall_32()+0x3d: call to __context_tracking_enter() leaves .noinstr.text section
 vmlinux.o: warning: objtool: do_fast_syscall_32()+0x9c: call to __context_tracking_enter() leaves .noinstr.text section

and generates new ones...

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200505134340.811520478@linutronix.de
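For illustration only, a minimal sketch of the kind of annotation change the message describes, assuming the kernel's noinstr and __always_inline markers from <linux/compiler_types.h>. The function names my_ct_helper() and my_ct_enter() are invented and do not appear in the patch:

#include <linux/compiler_types.h>	/* noinstr, __always_inline */

/*
 * Helper: a plain "static inline" may still be emitted out of line and
 * thus become traceable; __always_inline forces it into its caller.
 */
static __always_inline void my_ct_helper(void)
{
}

/*
 * Core function: noinstr places it in .noinstr.text, which is excluded
 * from kprobes and function tracing (and therefore BPF attachment), so a
 * separate NOKPROBE_SYMBOL(my_ct_enter) annotation is no longer needed.
 */
noinstr void my_ct_enter(void)
{
	my_ct_helper();
}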
55 lines · 1.6 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
#define _LINUX_CONTEXT_TRACKING_STATE_H

#include <linux/percpu.h>
#include <linux/static_key.h>

struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	int recursion;
	enum ctx_state {
		CONTEXT_DISABLED = -1,	/* returned by ct_state() if unknown */
		CONTEXT_KERNEL = 0,
		CONTEXT_USER,
		CONTEXT_GUEST,
	} state;
};

#ifdef CONFIG_CONTEXT_TRACKING
extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);

static __always_inline bool context_tracking_enabled(void)
{
	return static_branch_unlikely(&context_tracking_key);
}

static __always_inline bool context_tracking_enabled_cpu(int cpu)
{
	return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}

static inline bool context_tracking_enabled_this_cpu(void)
{
	return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}

static __always_inline bool context_tracking_in_user(void)
{
	return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
}
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_enabled(void) { return false; }
static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
static inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */

#endif
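As a usage sketch (not code from the tree), a caller on the kernel-entry path might guard its tracking work with the static-key helper above. The names my_enter_from_user_mode() and my_ct_user_exit() are hypothetical:

#include <linux/compiler_types.h>
#include <linux/context_tracking_state.h>

/* Hypothetical noinstr worker, standing in for the real exit function. */
noinstr void my_ct_user_exit(void)
{
}

static __always_inline void my_enter_from_user_mode(void)
{
	/*
	 * context_tracking_enabled() is backed by a static branch, so when
	 * tracking is disabled this check is a patched NOP and no call is
	 * emitted on the hot path.
	 */
	if (context_tracking_enabled())
		my_ct_user_exit();
}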