forked from mirrors/linux
		
	- Mark arch_cpu_idle_dead() __noreturn, make all architectures & drivers that did
    this inconsistently follow this new, common convention, and fix all the fallout
    that objtool can now detect statically.
 
  - Fix/improve the ORC unwinder becoming unreliable due to UNWIND_HINT_EMPTY ambiguity,
    split it into UNWIND_HINT_END_OF_STACK and UNWIND_HINT_UNDEFINED to resolve it.
 
  - Fix noinstr violations in the KCSAN code and the lkdtm/stackleak code.
 
  - Generate ORC data for __pfx code
 
  - Add more __noreturn annotations to various kernel startup/shutdown/panic functions.
 
  - Misc improvements & fixes.
 
 Signed-off-by: Ingo Molnar <mingo@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmRK1x0RHG1pbmdvQGtl
 cm5lbC5vcmcACgkQEnMQ0APhK1ghxQ/+IkCynMYtdF5OG9YwbcGJqsPSfOPMEcEM
 pUSFYg+gGPBDT/fJfcVSqvUtdnWbLC2kXt9yiswXz3X3J2nmNkBk5YKQftsNDcul
 TmKeqIIAK51XTncpegKH0EGnOX63oZ9Vxa8CTPdDlb+YF23Km2FoudGRI9F5qbUd
 LoraXqGYeiaeySkGyWmZVl6Uc8dIxnMkTN3H/oI9aB6TOrsi059hAtFcSaFfyemP
 c4LqXXCH7k2baiQt+qaLZ8cuZVG/+K5r2N2cmjO5kmJc6ynIaFnfMe4XxZLjp5LT
 /PulYI15bXkvSARKx5CRh/CDHMOx5Blw+ASO0RhWbdy0WH4ZhhcaVF5AeIpPW86a
 1LBcz97rMp72WmvKgrJeVO1r9+ll4SI6/YKGJRsxsCMdP3hgFpqntXyVjTFNdTM1
 0gH6H5v55x06vJHvhtTk8SR3PfMTEM2fRU5jXEOrGowoGifx+wNUwORiwj6LE3KQ
 SKUdT19RNzoW3VkFxhgk65ThK1S7YsJUKRoac3YdhttpqqqtFV//erenrZoR4k/p
 vzvKy68EQ7RCNyD5wNWNFe0YjeJl5G8gQ8bUm4Xmab7djjgz+pn4WpQB8yYKJLAo
 x9dqQ+6eUbw3Hcgk6qQ9E+r/svbulnAL0AeALAWK/91DwnZ2mCzKroFkLN7napKi
 fRho4CqzrtM=
 =NwEV
 -----END PGP SIGNATURE-----
Merge tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull objtool updates from Ingo Molnar:
 - Mark arch_cpu_idle_dead() __noreturn, make all architectures &
   drivers that did this inconsistently follow this new, common
   convention, and fix all the fallout that objtool can now detect
   statically
 - Fix/improve the ORC unwinder becoming unreliable due to
   UNWIND_HINT_EMPTY ambiguity, split it into UNWIND_HINT_END_OF_STACK
   and UNWIND_HINT_UNDEFINED to resolve it
 - Fix noinstr violations in the KCSAN code and the lkdtm/stackleak code
 - Generate ORC data for __pfx code
 - Add more __noreturn annotations to various kernel startup/shutdown
   and panic functions
 - Misc improvements & fixes
* tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  x86/hyperv: Mark hv_ghcb_terminate() as noreturn
  scsi: message: fusion: Mark mpt_halt_firmware() __noreturn
  x86/cpu: Mark {hlt,resume}_play_dead() __noreturn
  btrfs: Mark btrfs_assertfail() __noreturn
  objtool: Include weak functions in global_noreturns check
  cpu: Mark nmi_panic_self_stop() __noreturn
  cpu: Mark panic_smp_self_stop() __noreturn
  arm64/cpu: Mark cpu_park_loop() and friends __noreturn
  x86/head: Mark *_start_kernel() __noreturn
  init: Mark start_kernel() __noreturn
  init: Mark [arch_call_]rest_init() __noreturn
  objtool: Generate ORC data for __pfx code
  x86/linkage: Fix padding for typed functions
  objtool: Separate prefix code from stack validation code
  objtool: Remove superfluous dead_end_function() check
  objtool: Add symbol iteration helpers
  objtool: Add WARN_INSN()
  scripts/objdump-func: Support multiple functions
  context_tracking: Fix KCSAN noinstr violation
  objtool: Add stackleak instrumentation to uaccess safe list
  ...
		
	
			
		
			
				
	
	
		
			166 lines
		
	
	
	
		
			4.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			166 lines
		
	
	
	
		
			4.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

/*
 * Context tracking: declarations for tracking transitions between
 * kernel, user and guest context (and, further below, idle/RCU
 * extended quiescent states).
 */

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>


#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled.  */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);
static inline void user_enter(void)
 | 
						|
{
 | 
						|
	if (context_tracking_enabled())
 | 
						|
		ct_user_enter(CONTEXT_USER);
 | 
						|
 | 
						|
}
 | 
						|
static inline void user_exit(void)
 | 
						|
{
 | 
						|
	if (context_tracking_enabled())
 | 
						|
		ct_user_exit(CONTEXT_USER);
 | 
						|
}
 | 
						|
 | 
						|
/* Called with interrupts disabled.  */
 | 
						|
static __always_inline void user_enter_irqoff(void)
 | 
						|
{
 | 
						|
	if (context_tracking_enabled())
 | 
						|
		__ct_user_enter(CONTEXT_USER);
 | 
						|
 | 
						|
}
 | 
						|
static __always_inline void user_exit_irqoff(void)
 | 
						|
{
 | 
						|
	if (context_tracking_enabled())
 | 
						|
		__ct_user_exit(CONTEXT_USER);
 | 
						|
}
 | 
						|
 | 
						|
static inline enum ctx_state exception_enter(void)
 | 
						|
{
 | 
						|
	enum ctx_state prev_ctx;
 | 
						|
 | 
						|
	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
 | 
						|
	    !context_tracking_enabled())
 | 
						|
		return 0;
 | 
						|
 | 
						|
	prev_ctx = __ct_state();
 | 
						|
	if (prev_ctx != CONTEXT_KERNEL)
 | 
						|
		ct_user_exit(prev_ctx);
 | 
						|
 | 
						|
	return prev_ctx;
 | 
						|
}
 | 
						|
 | 
						|
static inline void exception_exit(enum ctx_state prev_ctx)
 | 
						|
{
 | 
						|
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
 | 
						|
	    context_tracking_enabled()) {
 | 
						|
		if (prev_ctx != CONTEXT_KERNEL)
 | 
						|
			ct_user_enter(prev_ctx);
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
/*
 * context_tracking_guest_enter - note a transition into guest context.
 *
 * Returns whether context tracking is enabled on this CPU, so the
 * caller can decide whether a matching exit notification is needed.
 */
static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CONTEXT_GUEST);

	return context_tracking_enabled_this_cpu();
}
static __always_inline void context_tracking_guest_exit(void)
 | 
						|
{
 | 
						|
	if (context_tracking_enabled())
 | 
						|
		__ct_user_exit(CONTEXT_GUEST);
 | 
						|
}
 | 
						|
 | 
						|
/* Warn on @cond, but only when context tracking is runtime-enabled. */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
#else
 | 
						|
static inline void user_enter(void) { }
 | 
						|
static inline void user_exit(void) { }
 | 
						|
static inline void user_enter_irqoff(void) { }
 | 
						|
static inline void user_exit_irqoff(void) { }
 | 
						|
static inline int exception_enter(void) { return 0; }
 | 
						|
static inline void exception_exit(enum ctx_state prev_ctx) { }
 | 
						|
static inline int ct_state(void) { return -1; }
 | 
						|
static inline int __ct_state(void) { return -1; }
 | 
						|
static __always_inline bool context_tracking_guest_enter(void) { return false; }
 | 
						|
static __always_inline void context_tracking_guest_exit(void) { }
 | 
						|
#define CT_WARN_ON(cond) do { } while (0)
 | 
						|
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
 | 
						|
 | 
						|
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
/*
 | 
						|
 * Is the current CPU in an extended quiescent state?
 | 
						|
 *
 | 
						|
 * No ordering, as we are sampling CPU-local information.
 | 
						|
 */
 | 
						|
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 | 
						|
{
 | 
						|
	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
 | 
						|
}
 | 
						|
 | 
						|
/*
 | 
						|
 * Increment the current CPU's context_tracking structure's ->state field
 | 
						|
 * with ordering.  Return the new value.
 | 
						|
 */
 | 
						|
static __always_inline unsigned long ct_state_inc(int incby)
 | 
						|
{
 | 
						|
	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
 | 
						|
}
 | 
						|
 | 
						|
/*
 * warn_rcu_enter - temporarily make RCU watch so a warning can be
 * reported from an extended quiescent state.
 *
 * Returns true if the EQS was left (caller must pass this to
 * warn_rcu_exit() to restore the state).  Disables preemption
 * (notrace) in all cases; warn_rcu_exit() re-enables it.
 */
static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive RCU isn't watching fail since
	 * lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		ret = true;
		ct_state_inc(RCU_DYNTICKS_IDX);
	}

	return ret;
}
/*
 * warn_rcu_exit - undo warn_rcu_enter().
 * @rcu: the value warn_rcu_enter() returned
 *
 * Re-enters the EQS if warn_rcu_enter() left it, then re-enables
 * preemption.
 */
static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(RCU_DYNTICKS_IDX);
	preempt_enable_notrace();
}
#else
 | 
						|
static inline void ct_idle_enter(void) { }
 | 
						|
static inline void ct_idle_exit(void) { }
 | 
						|
 | 
						|
static __always_inline bool warn_rcu_enter(void) { return false; }
 | 
						|
static __always_inline void warn_rcu_exit(bool rcu) { }
 | 
						|
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
 | 
						|
 | 
						|
#endif
 |