tracing: Add support for preempt and irq enable/disable events
Preempt and irq trace events can be used to trace the start and end of
an atomic section. A trace viewer such as systrace can then display
these sections graphically and correlate them with latencies and
scheduling issues.

This also serves as a prelude to rewriting the preempt and irqsoff
tracers on top of synthetic events or probes, with the numerous
benefits that the trace events infrastructure brings to these events.

Link: http://lkml.kernel.org/r/20171006005432.14244-3-joelaf@google.com
Link: http://lkml.kernel.org/r/20171010225137.17370-1-joelaf@google.com

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent aaecaa0b5f
commit d59158162e

5 changed files with 118 additions and 2 deletions
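Once a kernel built with CONFIG_PREEMPTIRQ_EVENTS=y is running, the new events show up under the "preemptirq" event system in tracefs. The following userspace sketch is not part of the commit; it only illustrates one way the events might be enabled and read, and assumes tracefs is mounted at the conventional /sys/kernel/debug/tracing path.

/*
 * Illustrative sketch only: enable the preemptirq trace events and
 * stream the resulting trace records to stdout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/debug/tracing"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	/* || short-circuits, so a failed open() keeps its errno for perror() */
	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		exit(1);
	}
	close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Enable all events in the "preemptirq" system and turn tracing on. */
	write_str(TRACEFS "/events/preemptirq/enable", "1");
	write_str(TRACEFS "/tracing_on", "1");

	/* Stream trace records; each line carries caller= and parent= symbols. */
	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}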
				
			
include/linux/ftrace.h
@@ -769,7 +769,8 @@ static inline unsigned long get_lock_parent_ip(void)
 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
include/trace/events/preemptirq.h (new file, 70 lines)
@@ -0,0 +1,70 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+	TP_ARGS(ip, parent_ip),
+
+	TP_STRUCT__entry(
+		__field(u32, caller_offs)
+		__field(u32, parent_offs)
+	),
+
+	TP_fast_assign(
+		__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+		__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+	),
+
+	TP_printk("caller=%pF parent=%pF",
+		  (void *)((unsigned long)(_stext) + __entry->caller_offs),
+		  (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+#ifndef CONFIG_PROVE_LOCKING
+DEFINE_EVENT(preemptirq_template, irq_disable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+#endif
+
+#ifdef CONFIG_DEBUG_PREEMPT
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+#endif
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
+
+#endif
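The event class above records each address as a 32-bit offset from _stext rather than as a full pointer, halving the per-field ring-buffer footprint on 64-bit kernels; TP_printk() adds _stext back before symbolizing. The standalone sketch below shows that round trip with made-up addresses, purely for illustration.

/*
 * Illustrative only: compress a code address to a 32-bit offset from a
 * base symbol and recover it later. The addresses are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-in for the kernel's _stext symbol (hypothetical value). */
	const uint64_t stext = 0xffffffff81000000ULL;
	/* Stand-in for a caller address captured at the tracepoint. */
	const uint64_t ip = 0xffffffff811234c0ULL;

	/* TP_fast_assign(): store only the 32-bit offset from _stext. */
	uint32_t caller_offs = (uint32_t)(ip - stext);

	/* TP_printk(): reconstruct the full address for symbolization. */
	uint64_t restored = stext + caller_offs;

	printf("offset=0x%x restored=0x%llx (matches: %s)\n",
	       (unsigned int)caller_offs, (unsigned long long)restored,
	       restored == ip ? "yes" : "no");
	return 0;
}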
kernel/trace/Kconfig
@@ -160,6 +160,17 @@ config FUNCTION_GRAPH_TRACER
 	  address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+	bool "Enable trace events for preempt and irq disable/enable"
+	select TRACE_IRQFLAGS
+	depends on DEBUG_PREEMPT || !PROVE_LOCKING
+	default n
+	help
+	  Enable tracing of disable and enable events for preemption and irqs.
+	  For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+	  enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+	  be disabled.
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
kernel/trace/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
kernel/trace/trace_irqsoff.c
@@ -16,6 +16,9 @@
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array		*irqsoff_trace __read_mostly;
 static int				tracer_enabled __read_mostly;
@@ -777,26 +780,53 @@ static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
 void trace_hardirqs_on(void)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_on();
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_off();
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_on_caller(caller_addr);
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_off_caller(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
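The per-CPU tracing_irq_cpu flag added in the hunk above acts as a latch so that only real state transitions are reported: redundant trace_hardirqs_off()/trace_hardirqs_on() calls while interrupts are already traced as off do not emit extra events. The simplified, single-CPU userspace model below (not kernel code) shows only that filtering logic.

/*
 * Simplified single-CPU model of the tracing_irq_cpu latch: only the
 * transition into the IRQs-off state and the matching transition back
 * emit events; redundant calls in between are filtered.
 */
#include <stdio.h>

static int tracing_irq_cpu;	/* stands in for the per-CPU variable */

static void hardirqs_off(void)
{
	if (tracing_irq_cpu)
		return;		/* already traced as off: no second event */
	tracing_irq_cpu = 1;
	printf("irq_disable event\n");
}

static void hardirqs_on(void)
{
	if (!tracing_irq_cpu)
		return;		/* never traced as off: nothing to pair */
	printf("irq_enable event\n");
	tracing_irq_cpu = 0;
}

int main(void)
{
	hardirqs_off();		/* emits irq_disable */
	hardirqs_off();		/* filtered: already off */
	hardirqs_on();		/* emits irq_enable */
	hardirqs_on();		/* filtered: already on */
	return 0;
}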
@@ -818,14 +848,17 @@ inline void print_irqtrace_events(struct task_struct *curr)
 }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
+	trace_preempt_enable_rcuidle(a0, a1);
 	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
+	trace_preempt_disable_rcuidle(a0, a1);
 	tracer_preempt_off(a0, a1);
 }
 #endif
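Besides consuming the events through tracefs, built-in kernel code can attach probes to the new tracepoints via the registration helpers that DEFINE_EVENT() generates. The sketch below is illustrative only: it assumes CONFIG_PREEMPTIRQ_EVENTS=y with PROVE_LOCKING disabled (so the irq_* events exist), and that the caller is built into the kernel, since this commit does not export the tracepoints to modules.

/*
 * Illustrative sketch: attach a probe to the new irq_disable tracepoint
 * from built-in kernel code.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <trace/events/preemptirq.h>

static void probe_irq_disable(void *data, unsigned long ip,
			      unsigned long parent_ip)
{
	/* Called whenever IRQs are traced as newly disabled on a CPU. */
	trace_printk("irqs off at %pS (from %pS)\n",
		     (void *)ip, (void *)parent_ip);
}

static int __init preemptirq_probe_init(void)
{
	/* register_trace_irq_disable() is generated by DEFINE_EVENT(). */
	return register_trace_irq_disable(probe_irq_disable, NULL);
}
late_initcall(preemptirq_probe_init);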