sched,rcu: Rework try_invoke_on_locked_down_task()

Give try_invoke_on_locked_down_task() a saner name and have it return
an int so that the caller might distinguish between different reasons
of failure.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Tested-by: Vasily Gorbik <gor@linux.ibm.com> # on s390
Link: https://lkml.kernel.org/r/20210929152428.649944917@infradead.org
This commit is contained in:

parent f6ac18fafc
commit 9b3c4ab304

4 changed files with 15 additions and 14 deletions
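The new contract is easiest to see in a small sketch. Only task_call_f
and task_call_func() below come from this patch; sample_inspect(),
sample_caller() and the choice of -EBUSY are hypothetical illustration
(the rcu_read_lock_nesting read mirrors check_slow_task() further down
and assumes CONFIG_PREEMPT_RCU):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/wait.h>

/*
 * Callback in the new style: return 0 when the inspection succeeded,
 * or a negative errno saying why it was declined, so the caller can
 * tell the failure modes apart instead of seeing a single 'false'.
 */
static int sample_inspect(struct task_struct *t, void *arg)
{
	int *nesting = arg;

	if (task_curr(t))
		return -EBUSY;	/* running: not inspectable right now */

	*nesting = t->rcu_read_lock_nesting;	/* CONFIG_PREEMPT_RCU only */
	return 0;
}

static void sample_caller(struct task_struct *p)
{
	int nesting;
	int ret = task_call_func(p, sample_inspect, &nesting);

	if (ret == -EBUSY)
		pr_info("task %d is running; retry later\n", p->pid);
	else if (!ret)
		pr_info("task %d: reader nesting %d\n", p->pid, nesting);
}

task_call_func() pins @p in a fixed state and forwards whatever the
callback returns, which is what lets callers react differently to the
different negative errnos.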
				
			
diff --git a/include/linux/wait.h b/include/linux/wait.h
@@ -1160,6 +1160,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
 		(wait)->flags = 0;						\
 	} while (0)
 
-bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
+typedef int (*task_call_f)(struct task_struct *p, void *arg);
+extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
 
 #endif /* _LINUX_WAIT_H */
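The task_call_f typedef gives every callback one canonical prototype
that is checked at compile time. A small hypothetical sketch
(noop_probe(), probe_fn and run_probe() are made up; only task_call_f
and task_call_func() are from the patch):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* A do-nothing callback with the canonical task_call_f prototype. */
static int noop_probe(struct task_struct *p, void *arg)
{
	return p ? 0 : -EINVAL;
}

/*
 * Assignment through the typedef is type-checked: a stale callback
 * still returning bool would now trip an incompatible-pointer-types
 * diagnostic here, rather than being accepted at each call site.
 */
static const task_call_f probe_fn = noop_probe;

static int run_probe(struct task_struct *p)
{
	return task_call_func(p, probe_fn, NULL);
}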
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
@@ -928,7 +928,7 @@ static void trc_read_check_handler(void *t_in)
 }
 
 /* Callback function for scheduler to check locked-down task.  */
-static bool trc_inspect_reader(struct task_struct *t, void *arg)
+static int trc_inspect_reader(struct task_struct *t, void *arg)
 {
 	int cpu = task_cpu(t);
 	bool in_qs = false;
@@ -939,7 +939,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 
 		// If no chance of heavyweight readers, do it the hard way.
 		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
-			return false;
+			return -EINVAL;
 
 		// If heavyweight readers are enabled on the remote task,
 		// we can inspect its state despite its currently running.
@@ -947,7 +947,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 		n_heavy_reader_attempts++;
 		if (!ofl && // Check for "running" idle tasks on offline CPUs.
 		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
-			return false; // No quiescent state, do it the hard way.
+			return -EINVAL; // No quiescent state, do it the hard way.
 		n_heavy_reader_updates++;
 		if (ofl)
 			n_heavy_reader_ofl_updates++;
@@ -962,7 +962,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 	t->trc_reader_checked = true;
 
 	if (in_qs)
-		return true;  // Already in quiescent state, done!!!
+		return 0;  // Already in quiescent state, done!!!
 
 	// The task is in a read-side critical section, so set up its
 	// state so that it will awaken the grace-period kthread upon exit
@@ -970,7 +970,7 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
-	return true;
+	return 0;
 }
 
 /* Attempt to extract the state for the specified task. */
@@ -992,7 +992,7 @@ static void trc_wait_for_one_reader(struct task_struct *t,
 
 	// Attempt to nail down the task for inspection.
 	get_task_struct(t);
-	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
+	if (!task_call_func(t, trc_inspect_reader, NULL)) {
 		put_task_struct(t);
 		return;
 	}
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
@@ -240,16 +240,16 @@ struct rcu_stall_chk_rdr {
  * Report out the state of a not-running task that is stalling the
  * current RCU grace period.
  */
-static bool check_slow_task(struct task_struct *t, void *arg)
+static int check_slow_task(struct task_struct *t, void *arg)
 {
 	struct rcu_stall_chk_rdr *rscrp = arg;
 
 	if (task_curr(t))
-		return false; // It is running, so decline to inspect it.
+		return -EBUSY; // It is running, so decline to inspect it.
 	rscrp->nesting = t->rcu_read_lock_nesting;
 	rscrp->rs = t->rcu_read_unlock_special;
 	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
-	return true;
+	return 0;
 }
 
 /*
@@ -283,7 +283,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	while (i) {
 		t = ts[--i];
-		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
+		if (task_call_func(t, check_slow_task, &rscr))
 			pr_cont(" P%d", t->pid);
 		else
 			pr_cont(" P%d/%d:%c%c%c%c",
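Note the inverted tests at the call sites: under the old bool
convention success was truthy, under the new 0/-errno convention
success is zero, so each if () flips sense. Restating the
rcu_print_task_stall() change above schematically:

/* Before: bool convention, success == true, failure prints " P%d". */
if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
	pr_cont(" P%d", t->pid);

/* After: 0/-errno convention, success == 0, so the '!' goes away. */
if (task_call_func(t, check_slow_task, &rscr))
	pr_cont(" P%d", t->pid);

The same flip appears in trc_wait_for_one_reader() in
kernel/rcu/tasks.h, where 'if (try_invoke_on_locked_down_task(...))'
becomes 'if (!task_call_func(...))'.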
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -4110,7 +4110,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 }
 
 /**
- * try_invoke_on_locked_down_task - Invoke a function on task in fixed state
+ * task_call_func - Invoke a function on task in fixed state
  * @p: Process for which the function is to be invoked, can be @current.
  * @func: Function to invoke.
  * @arg: Argument to function.
@@ -4123,12 +4123,12 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
  * Returns:
  *   Whatever @func returns
  */
-bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg)
+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
 {
 	struct rq *rq = NULL;
 	unsigned int state;
 	struct rq_flags rf;
-	bool ret = false;
+	int ret;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 