forked from mirrors/linux

commit 5f8e320269
This commit adds the ability to output the CPU time consumed by the
grace-period kthread for the RCU variant under test.  The CPU time is
whatever is in the designated task's current->stime field, and thus is
controlled by whatever CPU-time accounting scheme is in effect.

This output appears in microseconds as follows on the console:

rcu_scale: Grace-period kthread CPU time: 42367.037

[ paulmck: Apply feedback from Stephen Rothwell and kernel test robot. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Yujie Liu <yujie.liu@intel.com>
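To make the reported number concrete: current kernels account a task's system time in the ->stime field in nanoseconds, so microseconds-with-three-decimals output is just a divide by 1000, with the remainder supplying the fractional digits. The sketch below illustrates that arithmetic; it is not the patch itself, and rcu_scale_print_gp_kthread_time() is an invented name. It leans on the get_rcu_tasks_trace_gp_kthread() accessor declared in the header shown below.

#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched.h>

/*
 * Illustrative sketch only: rcu_scale_print_gp_kthread_time() is an
 * invented name, not the function this commit adds.  Assumes ->stime
 * is accounted in nanoseconds, as on current kernels.
 */
static void rcu_scale_print_gp_kthread_time(void)
{
	struct task_struct *t = get_rcu_tasks_trace_gp_kthread();
	u64 us;
	u32 ns;

	if (!t)
		return;
	// 42367037 ns -> "42367.037": microseconds, then three fractional digits.
	us = div_u64_rem(t->stime, 1000, &ns);
	pr_alert("rcu_scale: Grace-period kthread CPU time: %llu.%03u\n", us, ns);
}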
		
			
				
	
	
		
101 lines · 3.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier();
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

#endif /* __LINUX_RCUPDATE_TRACE_H */
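To see how the pieces above fit together, here is a minimal usage sketch, not code from the kernel tree: my_data, my_global_ptr, read_example(), and update_example() are invented names. Note the use of rcu_dereference_check() with rcu_read_lock_trace_held(), since plain rcu_dereference() asserts vanilla-RCU protection under lockdep and would complain inside a tasks-trace reader.

#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct my_data {
	int value;
};

static struct my_data __rcu *my_global_ptr;

/* Reader: the structure observed inside the critical section cannot
 * be freed until after rcu_read_unlock_trace(). */
static int read_example(void)
{
	struct my_data *p;
	int v = 0;

	rcu_read_lock_trace();
	p = rcu_dereference_check(my_global_ptr, rcu_read_lock_trace_held());
	if (p)
		v = p->value;
	rcu_read_unlock_trace();
	return v;
}

/* Updater: synchronize_rcu_tasks_trace() waits for all pre-existing
 * readers, after which the old structure may safely be freed. */
static void update_example(struct my_data *newp)
{
	struct my_data *oldp;

	oldp = rcu_replace_pointer(my_global_ptr, newp, true);
	synchronize_rcu_tasks_trace();
	kfree(oldp);
}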