mirror of https://github.com/torvalds/linux.git
synced 2025-11-04 02:30:34 +02:00
rcu-tasks: Split ->trc_reader_need_end
This commit splits ->trc_reader_need_end by using the rcu_special union.
This change permits readers to check to see if a memory barrier is
required without any added overhead in the common case where no such
barrier is required.  This commit also adds the read-side checking.
Later commits will add the machinery to properly set the new
->trc_reader_special.b.need_mb field.

This commit also makes rcu_read_unlock_trace_special() tolerate nested
read-side critical sections within interrupt and NMI handlers.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
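A minimal standalone illustration of the rcu_special trick (plain C, not
kernel code; the names merely mirror the kernel's union): overlaying
byte-sized flags with a single 32-bit word lets the common no-flags case
be tested with one load of .s instead of one test per flag.

	#include <stdint.h>
	#include <stdio.h>

	union special {
		struct {
			uint8_t blocked;
			uint8_t need_qs;
			uint8_t exp_hint;
			uint8_t need_mb;  /* The flag this commit adds. */
		} b;              /* Individual flags. */
		uint32_t s;       /* All of the above as one word. */
	};

	int main(void)
	{
		union special sp = { .s = 0 };

		/* Fast path: one 32-bit compare covers all four flags. */
		if (!sp.s)
			printf("no special handling needed\n");

		sp.b.need_mb = 1; /* Setting any flag makes .s nonzero. */
		if (sp.s)
			printf("slow path: check individual flags\n");
		return 0;
	}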
This commit is contained in:
parent b0afa0f056
commit 276c410448

5 changed files with 31 additions and 19 deletions
include/linux/rcupdate_trace.h
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)
 #ifdef CONFIG_TASKS_TRACE_RCU
 
-void rcu_read_unlock_trace_special(struct task_struct *t);
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
 
 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,6 +50,8 @@ static inline void rcu_read_lock_trace(void)
 	struct task_struct *t = current;
 
 	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+	if (t->trc_reader_special.b.need_mb)
+		smp_mb(); // Pairs with update-side barriers
 	rcu_lock_acquire(&rcu_trace_lock_map);
 }
 
@@ -69,10 +71,11 @@ static inline void rcu_read_unlock_trace(void)
 
 	rcu_lock_release(&rcu_trace_lock_map);
 	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
-	WRITE_ONCE(t->trc_reader_nesting, nesting);
-	if (likely(!READ_ONCE(t->trc_reader_need_end)) || nesting)
+	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+		WRITE_ONCE(t->trc_reader_nesting, nesting);
 		return;  // We assume shallow reader nesting.
-	rcu_read_unlock_trace_special(t);
+	}
+	rcu_read_unlock_trace_special(t, nesting);
 }
 
 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
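Assembled from the hunk above (the two declarations at the top come from
the unchanged context the hunk header implies), the post-patch unlock
path reads:

	static inline void rcu_read_unlock_trace(void)
	{
		int nesting;
		struct task_struct *t = current;

		rcu_lock_release(&rcu_trace_lock_map);
		nesting = READ_ONCE(t->trc_reader_nesting) - 1;
		/* One load of .s checks every special condition at once. */
		if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
			WRITE_ONCE(t->trc_reader_nesting, nesting);
			return;  // We assume shallow reader nesting.
		}
		rcu_read_unlock_trace_special(t, nesting);
	}

Note that the nesting update now happens only on the fast path; on the
slow path it is deferred to rcu_read_unlock_trace_special(), which
clears .need_qs before publishing the new nesting value.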
include/linux/sched.h
@@ -613,7 +613,7 @@ union rcu_special {
 		u8			blocked;
 		u8			need_qs;
 		u8			exp_hint; /* Hint for performance. */
-		u8			pad; /* No garbage from compiler! */
+		u8			need_mb; /* Readers need smp_mb(). */
 	} b; /* Bits. */
 	u32 s; /* Set of bits. */
 };
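One layout detail worth noting (an observation, not part of the patch):
need_mb reuses the byte that pad previously reserved, so the four u8
flags still exactly fill the u32 s, and the single
READ_ONCE(t->trc_reader_special.s) in the unlock fast path observes
every flag. A compile-time check of that assumption could look like:

	/* Sketch only: the bit view and the word view must coincide so
	 * that one 32-bit load of ->trc_reader_special.s sees all four
	 * flag bytes, including the new need_mb. */
	static_assert(sizeof(union rcu_special) == sizeof(u32),
		      "rcu_special flags must fit in one 32-bit word");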
@@ -727,7 +727,7 @@ struct task_struct {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	int				trc_reader_nesting;
 	int				trc_ipi_to_cpu;
-	bool				trc_reader_need_end;
+	union rcu_special		trc_reader_special;
 	bool				trc_reader_checked;
 	struct list_head		trc_holdout_list;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
init/init_task.c
@@ -143,6 +143,7 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_TASKS_TRACE_RCU
 	.trc_reader_nesting = 0,
+	.trc_reader_special.s = 0,
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
 #endif
 #ifdef CONFIG_CPUSETS
kernel/fork.c
@@ -1685,6 +1685,7 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 #ifdef CONFIG_TASKS_TRACE_RCU
 	p->trc_reader_nesting = 0;
+	p->trc_reader_special.s = 0;
 	INIT_LIST_HEAD(&p->trc_holdout_list);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
kernel/rcu/tasks.h
@@ -723,10 +723,17 @@ DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
 		 "RCU Tasks Trace");
 
 /* If we are the last reader, wake up the grace-period kthread. */
-void rcu_read_unlock_trace_special(struct task_struct *t)
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
 {
-	WRITE_ONCE(t->trc_reader_need_end, false);
-	if (atomic_dec_and_test(&trc_n_readers_need_end))
+	int nq = t->trc_reader_special.b.need_qs;
+
+	if (t->trc_reader_special.b.need_mb)
+		smp_mb(); // Pairs with update-side barriers.
+	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
+	if (nq)
+		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
+	WRITE_ONCE(t->trc_reader_nesting, nesting);
+	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
 		wake_up(&trc_wait);
 }
 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
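Why .need_qs is cleared before the new ->trc_reader_nesting value is
published (a reconstruction of the reasoning behind the in-code comment
above, not text from the patch): consider the interleaving sketched
below with the two writes reversed.

	/*
	 * Hypothetical interleaving (sketch only), assuming
	 * ->trc_reader_nesting were published before .need_qs cleared:
	 *
	 *   task: rcu_read_unlock_trace()             // outermost reader
	 *   task:   rcu_read_unlock_trace_special(t, 0)
	 *   task:     WRITE_ONCE(t->trc_reader_nesting, 0); // too early
	 *   irq:  rcu_read_lock_trace();              // nesting 0 -> 1
	 *   irq:  rcu_read_unlock_trace();            // nesting back to 0,
	 *                                             // .need_qs still set,
	 *                                             // so the handler also
	 *                                             // calls ..._special()
	 *   task:     WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	 *
	 * Result: trc_n_readers_need_end would be decremented twice for a
	 * single reader. Clearing .need_qs first closes this window, which
	 * is what lets nested readers in irq and NMI handlers be tolerated.
	 */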
@@ -777,8 +784,8 @@ static void trc_read_check_handler(void *t_in)
 	// Get here if the task is in a read-side critical section.  Set
 	// its state so that it will awaken the grace-period kthread upon
 	// exit from that critical section.
-	WARN_ON_ONCE(t->trc_reader_need_end);
-	WRITE_ONCE(t->trc_reader_need_end, true);
+	WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
 reset_ipi:
 	// Allow future IPIs to be sent on CPU and for task.
@@ -804,8 +811,8 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
 	// exit from that critical section.
 	if (unlikely(t->trc_reader_nesting)) {
 		atomic_inc(&trc_n_readers_need_end); // One more to wait on.
-		WARN_ON_ONCE(t->trc_reader_need_end);
-		WRITE_ONCE(t->trc_reader_need_end, true);
+		WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
+		WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 	}
 	return true;
 }
@@ -884,7 +891,7 @@ static void rcu_tasks_trace_pregp_step(void)
 static void rcu_tasks_trace_pertask(struct task_struct *t,
 				    struct list_head *hop)
 {
-	WRITE_ONCE(t->trc_reader_need_end, false);
+	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
 	WRITE_ONCE(t->trc_reader_checked, false);
 	t->trc_ipi_to_cpu = -1;
 	trc_wait_for_one_reader(t, hop);
@@ -916,7 +923,7 @@ static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
 		 ".i"[is_idle_task(t)],
 		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
 		 t->trc_reader_nesting,
-		 " N"[!!t->trc_reader_need_end],
+		 " N"[!!t->trc_reader_special.b.need_qs],
 		 cpu);
 	sched_show_task(t);
 }
@@ -980,11 +987,11 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
 			break;  // Count reached zero.
 		// Stall warning time, so make a list of the offenders.
 		for_each_process_thread(g, t)
-			if (READ_ONCE(t->trc_reader_need_end))
+			if (READ_ONCE(t->trc_reader_special.b.need_qs))
 				trc_add_holdout(t, &holdouts);
 		firstreport = true;
 		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
-			if (READ_ONCE(t->trc_reader_need_end)) {
+			if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
 				show_stalled_task_trace(t, &firstreport);
 				trc_del_holdout(t);
 			}
@@ -1003,8 +1010,8 @@ void exit_tasks_rcu_finish_trace(struct task_struct *t)
 	WRITE_ONCE(t->trc_reader_checked, true);
 	WARN_ON_ONCE(t->trc_reader_nesting);
 	WRITE_ONCE(t->trc_reader_nesting, 0);
-	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
-		rcu_read_unlock_trace_special(t);
+	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
+		rcu_read_unlock_trace_special(t, 0);
 }
 
 /**