	tracing: Rename trace_active to disable_stack_tracer and inline its modification
In order to eliminate a function call, make "trace_active" into
"disable_stack_tracer" and convert stack_tracer_disable() and friends
into static inline functions.

Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
commit 8aaf1ee70e
parent 5367278cb7

2 changed files with 43 additions and 43 deletions
include/linux/ftrace.h

@@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos);
 
-void stack_tracer_disable(void);
-void stack_tracer_enable(void);
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There are a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+	/* Preemption or interrupts must be disabled */
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+	this_cpu_dec(disable_stack_tracer);
+}
 #else
 static inline void stack_tracer_disable(void) { }
 static inline void stack_tracer_enable(void) { }
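Although not part of the diff itself, a short sketch shows what the commit message means by eliminating the function call: a caller in the RCU paths can now pair the two inlines around a critical section and pay only two per-cpu counter operations instead of two calls. The caller name below is hypothetical, for illustration only.

	/* Hypothetical caller -- illustration only, not code from this commit. */
	#include <linux/ftrace.h>
	#include <linux/preempt.h>

	static void example_rcu_eqs_enter(void)
	{
		preempt_disable();		/* satisfies the precondition the WARN checks */
		stack_tracer_disable();		/* inlined: just a per-cpu increment, no call */

		/* ... work that must not be stack-traced ... */

		stack_tracer_enable();		/* inlined: per-cpu decrement, still preempt-off */
		preempt_enable();
	}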
kernel/trace/trace_stack.c

@@ -35,44 +35,12 @@ unsigned long stack_trace_max_size;
 arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-static DEFINE_PER_CPU(int, trace_active);
+DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-/**
- * stack_tracer_disable - temporarily disable the stack tracer
- *
- * There's a few locations (namely in RCU) where stack tracing
- * cannot be executed. This function is used to disable stack
- * tracing during those critical sections.
- *
- * This function must be called with preemption or interrupts
- * disabled and stack_tracer_enable() must be called shortly after
- * while preemption or interrupts are still disabled.
- */
-void stack_tracer_disable(void)
-{
-	/* Preemption or interupts must be disabled */
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_inc(trace_active);
-}
-
-/**
- * stack_tracer_enable - re-enable the stack tracer
- *
- * After stack_tracer_disable() is called, stack_tracer_enable()
- * must be called shortly afterward.
- */
-void stack_tracer_enable(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
-		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
-	this_cpu_dec(trace_active);
-}
-
 void stack_trace_print(void)
 {
 	long i;
@@ -243,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	preempt_disable_notrace();
 
 	/* no atomic needed, we only modify this variable by this cpu */
-	__this_cpu_inc(trace_active);
-	if (__this_cpu_read(trace_active) != 1)
+	__this_cpu_inc(disable_stack_tracer);
+	if (__this_cpu_read(disable_stack_tracer) != 1)
 		goto out;
 
 	ip += MCOUNT_INSN_SIZE;
@@ -252,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	check_stack(ip, &stack);
 
  out:
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	/* prevent recursion in schedule */
 	preempt_enable_notrace();
 }
@@ -294,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	/*
 	 * In case we trace inside arch_spin_lock() or after (NMI),
 	 * we will cause circular lock, so we also need to increase
-	 * the percpu trace_active here.
+	 * the percpu disable_stack_tracer here.
 	 */
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 	*ptr = val;
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 	local_irq_restore(flags);
 
 	return count;
@@ -338,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 {
 	local_irq_disable();
 
-	__this_cpu_inc(trace_active);
+	__this_cpu_inc(disable_stack_tracer);
 
 	arch_spin_lock(&stack_trace_max_lock);
 
@@ -352,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
 {
 	arch_spin_unlock(&stack_trace_max_lock);
 
-	__this_cpu_dec(trace_active);
+	__this_cpu_dec(disable_stack_tracer);
 
 	local_irq_enable();
 }
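The renamed counter doubles as the recursion guard in stack_trace_call() above: the callback increments it on entry and only samples the stack when it is the sole holder (counter == 1), so reentrant invocations and disabled sections fall through straight to the decrement. A minimal user-space sketch of that pattern, with thread-local storage standing in for the per-cpu variable (all names illustrative, not kernel API):

	#include <stdio.h>

	static _Thread_local int disable_stack_tracer;	/* stands in for the per-cpu counter */

	static void check_stack(void)
	{
		printf("measuring stack usage\n");
	}

	static void trace_callback(void)
	{
		/* mirrors __this_cpu_inc()/__this_cpu_read() in stack_trace_call() */
		disable_stack_tracer++;
		if (disable_stack_tracer != 1)
			goto out;	/* nested call or tracer disabled: do nothing */

		check_stack();
	out:
		disable_stack_tracer--;
	}

	int main(void)
	{
		trace_callback();	/* counter reaches 1: samples the stack */

		disable_stack_tracer++;	/* what stack_tracer_disable() does */
		trace_callback();	/* counter reaches 2 inside: skipped */
		disable_stack_tracer--;	/* what stack_tracer_enable() does */

		trace_callback();	/* samples again */
		return 0;
	}

The file-internal sites can use the non-atomic __this_cpu_inc()/__this_cpu_dec() forms because, as the "no atomic needed" comment notes, preemption or interrupts are already disabled at every such site, so the counter is only ever touched from its own CPU.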