mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
	x86/entry: Clean up idtentry_enter/exit() leftovers
Now that everything is converted to conditional RCU handling remove
idtentry_enter/exit() and tidy up the conditional functions.

This does not remove rcu_irq_exit_preempt(), to avoid conflicts with the
RCU tree. Will be removed once all of this hits Linus's tree.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20200521202117.473597954@linutronix.de
This commit is contained in:
parent fa95d7dc1a
commit 9ee01e0f69

2 changed files with 30 additions and 49 deletions
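With the cond_rcu argument gone, the entry side alone decides whether
rcu_irq_enter() is needed and reports that through its return value, which
the caller then feeds back into idtentry_exit_cond_rcu(). A minimal sketch
of the resulting calling convention (illustrative only; the handler name
and body are hypothetical placeholders, only the enter/exit signatures come
from this patch):

/*
 * Illustrative sketch, not code from this patch: how an entry point
 * pairs the two calls after this cleanup.
 */
__visible noinstr void example_idtentry(struct pt_regs *regs)
{
	/* Entry side decides whether RCU had to be woken */
	bool rcu_exit = idtentry_enter_cond_rcu(regs);

	instrumentation_begin();
	/* the actual handler body would run here */
	instrumentation_end();

	/* Exit side undoes exactly what the entry side did */
	idtentry_exit_cond_rcu(regs, rcu_exit);
}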
				
			
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -515,7 +515,6 @@ SYSCALL_DEFINE0(ni_syscall)
  * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
  *			     RCU handling
  * @regs:	Pointer to pt_regs of interrupted context
- * @cond_rcu:	Invoke rcu_irq_enter() only if RCU is not watching
  *
  * Invokes:
  *  - lockdep irqflag state tracking as low level ASM entry disabled
@@ -545,14 +544,14 @@ SYSCALL_DEFINE0(ni_syscall)
  * The return value must be fed into the rcu_exit argument of
  * idtentry_exit_cond_rcu().
  */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu)
+bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
 		enter_from_user_mode();
 		return false;
 	}
 
-	if (!cond_rcu || !__rcu_is_watching()) {
+	if (!__rcu_is_watching()) {
 		/*
 		 * If RCU is not watching then the same careful
 		 * sequence vs. lockdep and tracing is required
@@ -608,52 +607,44 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 	if (user_mode(regs)) {
 		prepare_exit_to_usermode(regs);
 	} else if (regs->flags & X86_EFLAGS_IF) {
-		/* Check kernel preemption, if enabled */
-		if (IS_ENABLED(CONFIG_PREEMPTION)) {
-			/*
-			 * This needs to be done very carefully.
-			 * idtentry_enter() invoked rcu_irq_enter(). This
-			 * needs to be undone before scheduling.
-			 *
-			 * Preemption is disabled inside of RCU idle
-			 * sections. When the task returns from
-			 * preempt_schedule_irq(), RCU is still watching.
-			 *
-			 * rcu_irq_exit_preempt() has additional state
-			 * checking if CONFIG_PROVE_RCU=y
-			 */
-			if (!preempt_count()) {
-				if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
-					WARN_ON_ONCE(!on_thread_stack());
-				instrumentation_begin();
-				if (rcu_exit)
-					rcu_irq_exit_preempt();
-				if (need_resched())
-					preempt_schedule_irq();
-				/* Covers both tracing and lockdep */
-				trace_hardirqs_on();
-				instrumentation_end();
-				return;
-			}
-		}
-		/*
-		 * If preemption is disabled then this needs to be done
-		 * carefully with respect to RCU. The exception might come
-		 * from a RCU idle section in the idle task due to the fact
-		 * that safe_halt() enables interrupts. So this needs the
-		 * same ordering of lockdep/tracing and RCU as the return
-		 * to user mode path.
+		/*
+		 * If RCU was not watching on entry this needs to be done
+		 * carefully and needs the same ordering of lockdep/tracing
+		 * and RCU as the return to user mode path.
 		 */
-		instrumentation_begin();
-		/* Tell the tracer that IRET will enable interrupts */
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-		instrumentation_end();
-		if (rcu_exit)
+		if (rcu_exit) {
+			instrumentation_begin();
+			/* Tell the tracer that IRET will enable interrupts */
+			trace_hardirqs_on_prepare();
+			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+			instrumentation_end();
 			rcu_irq_exit();
-		lockdep_hardirqs_on(CALLER_ADDR0);
+			lockdep_hardirqs_on(CALLER_ADDR0);
+			return;
+		}
+
+		instrumentation_begin();
+
+		/* Check kernel preemption, if enabled */
+		if (IS_ENABLED(CONFIG_PREEMPTION)) {
+			if (!preempt_count()) {
+				/* Sanity check RCU and thread stack */
+				rcu_irq_exit_check_preempt();
+				if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+					WARN_ON_ONCE(!on_thread_stack());
+				if (need_resched())
+					preempt_schedule_irq();
+			}
+		}
+		/* Covers both tracing and lockdep */
+		trace_hardirqs_on();
+
+		instrumentation_end();
 	} else {
-		/* IRQ flags state is correct already. Just tell RCU. */
+		/*
+		 * IRQ flags state is correct already. Just tell RCU if it
+		 * was not watching on entry.
+		 */
 		if (rcu_exit)
 			rcu_irq_exit();
 	}
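The handshake that the rcu_exit flag implements can be modeled outside the
kernel. A standalone toy model in plain C (all names hypothetical; the
rcu_watching flag stands in for RCU's real per-CPU state) showing that the
exit side undoes exactly what the entry side did:

#include <stdbool.h>
#include <stdio.h>

/* Pretend state: we interrupted an idle, RCU-idle kernel context */
static bool rcu_watching = false;

static bool model_enter(void)	/* models idtentry_enter_cond_rcu() */
{
	if (!rcu_watching) {		/* models !__rcu_is_watching() */
		rcu_watching = true;	/* models rcu_irq_enter() */
		printf("enter: woke RCU\n");
		return true;		/* caller must hand this to the exit side */
	}
	printf("enter: RCU already watching\n");
	return false;
}

static void model_exit(bool rcu_exit)	/* models idtentry_exit_cond_rcu() */
{
	if (rcu_exit) {
		rcu_watching = false;	/* models rcu_irq_exit() */
		printf("exit: put RCU back to sleep\n");
	} else {
		printf("exit: nothing to undo\n");
	}
}

int main(void)
{
	bool rcu_exit = model_enter();
	/* ... handler body would run here ... */
	model_exit(rcu_exit);
	return 0;
}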
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -10,19 +10,9 @@
 void idtentry_enter_user(struct pt_regs *regs);
 void idtentry_exit_user(struct pt_regs *regs);
 
-bool idtentry_enter_cond_rcu(struct pt_regs *regs, bool cond_rcu);
+bool idtentry_enter_cond_rcu(struct pt_regs *regs);
 void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
 
-static __always_inline void idtentry_enter(struct pt_regs *regs)
-{
-	idtentry_enter_cond_rcu(regs, false);
-}
-
-static __always_inline void idtentry_exit(struct pt_regs *regs)
-{
-	idtentry_exit_cond_rcu(regs, true);
-}
-
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *		      No error code pushed by hardware