Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 02:30:34 +02:00

	fgraph: Have set_graph_notrace only affect function_graph tracer
In order to make the function graph infrastructure more generic, there cannot be code specific to the function_graph tracer in the generic code. This includes the set_graph_notrace logic, which stops all graph calls when a function in set_graph_notrace is hit.

By using the trace_recursion mask, we can use a bit in the current task_struct to implement the notrace code, move the logic out of fgraph.c and into trace_functions_graph.c, and keep it affecting only the tracer and not all call graph callbacks.

Acked-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent d864a3ca88
commit 9cd2992f2d

3 changed files with 29 additions and 21 deletions
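The mechanism is easy to see in miniature. Below is a userspace sketch, not kernel code: the mask, the bit position, and the harness are invented for illustration, mirroring how the commit uses a per-task recursion bit. The bit is set when a function matching set_graph_notrace is entered, suppresses every nested entry callback, and is cleared by the matching return callback. The return callback only runs for functions whose entry callback returned nonzero, which is why the filtered entry itself must return 1.

#include <stdio.h>

static unsigned long recursion_mask;	/* stand-in for current->trace_recursion */

#define NOTRACE_BIT 10			/* illustrative bit position */

static int graph_entry(const char *func, int filtered)
{
	if (recursion_mask & (1UL << NOTRACE_BIT))
		return 0;		/* inside a notrace region: suppress */
	if (filtered) {
		recursion_mask |= 1UL << NOTRACE_BIT;
		return 1;		/* must return 1 so graph_return can clear the bit */
	}
	printf("enter %s\n", func);
	return 1;
}

static void graph_return(const char *func)
{
	if (recursion_mask & (1UL << NOTRACE_BIT)) {
		recursion_mask &= ~(1UL << NOTRACE_BIT);
		return;		/* leaving the filtered function: tracing resumes */
	}
	printf("exit  %s\n", func);
}

int main(void)
{
	int outer = graph_entry("filtered_fn", 1);	/* matches set_graph_notrace */
	int inner = graph_entry("child_fn", 0);		/* suppressed while bit is set */

	if (inner)
		graph_return("child_fn");		/* never runs: entry returned 0 */
	if (outer)
		graph_return("filtered_fn");		/* silently clears the bit */

	graph_entry("visible_fn", 0);			/* traced normally again */
	graph_return("visible_fn");
	return 0;
}

Compiled and run, this prints only the enter/exit pair for visible_fn; the filtered function and its child leave no trace.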
kernel/trace/fgraph.c
@@ -64,30 +64,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
 		return -EBUSY;
 	}
 
-	/*
-	 * The curr_ret_stack is an index to ftrace return stack of
-	 * current task.  Its value should be in [0, FTRACE_RETFUNC_
-	 * DEPTH) when the function graph tracer is used.  To support
-	 * filtering out specific functions, it makes the index
-	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
-	 * so when it sees a negative index the ftrace will ignore
-	 * the record.  And the index gets recovered when returning
-	 * from the filtered function by adding the FTRACE_NOTRACE_
-	 * DEPTH and then it'll continue to record functions normally.
-	 *
-	 * The curr_ret_stack is initialized to -1 and get increased
-	 * in this function.  So it can be less than -1 only if it was
-	 * filtered out via ftrace_graph_notrace_addr() which can be
-	 * set from set_graph_notrace file in tracefs by user.
-	 */
-	if (current->curr_ret_stack < -1)
-		return -EBUSY;
-
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
-	if (ftrace_graph_notrace_addr(func))
-		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
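The comment deleted above describes the trick being retired: rather than a separate flag, the notrace state was folded into curr_ret_stack itself by subtracting a large constant, so any code consuming the index had to know that a negative value meant "filtered". A toy sketch of that encoding (the constant matches the kernel's FTRACE_NOTRACE_DEPTH of this era, but treat the sketch as illustrative):

#include <stdio.h>

#define FTRACE_NOTRACE_DEPTH 65536	/* the "huge value" from the deleted comment */

int main(void)
{
	int curr_ret_stack = 3;			/* normal index into ret_stack */

	/* entering a function listed in set_graph_notrace */
	curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	printf("encoded: %d (negative => ignore this record)\n", curr_ret_stack);

	/* the matching return recovers the real index */
	curr_ret_stack += FTRACE_NOTRACE_DEPTH;
	printf("recovered: %d\n", curr_ret_stack);
	return 0;
}

Every generic consumer of curr_ret_stack had to special-case the negative range, which is exactly why the commit moves this state into a tracer-private recursion bit instead.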
kernel/trace/trace.h
@@ -534,6 +534,13 @@ enum {
 
 	TRACE_GRAPH_DEPTH_START_BIT,
 	TRACE_GRAPH_DEPTH_END_BIT,
+
+	/*
+	 * To implement set_graph_notrace, if this bit is set, we ignore
+	 * function graph tracing of called functions, until the return
+	 * function is called to clear it.
+	 */
+	TRACE_GRAPH_NOTRACE_BIT,
 };
 
 #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
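Only trace_recursion_set() appears in this hunk's context, but the code added below in trace_functions_graph.c also relies on its siblings, which are defined alongside it in trace.h with the same shape (shown here for context; not part of this diff):

#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))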
kernel/trace/trace_functions_graph.c
@@ -188,6 +188,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	int cpu;
 	int pc;
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+		return 0;
+
+	if (ftrace_graph_notrace_addr(trace->func)) {
+		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
+		/*
+		 * Need to return 1 to have the return called
+		 * that will clear the NOTRACE bit.
+		 */
+		return 1;
+	}
+
 	if (!ftrace_trace_task(tr))
 		return 0;
 
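ftrace_graph_notrace_addr() matches trace->func against the hash built from the set_graph_notrace file in tracefs, which the user populates with function names. A hypothetical usage sketch (assumes tracefs is mounted at /sys/kernel/tracing, root privileges, and kmalloc as the function to filter):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_graph_notrace", "w");

	if (!f) {
		perror("set_graph_notrace");	/* needs root and a mounted tracefs */
		return 1;
	}
	/* hide kmalloc, and everything it calls, from the function_graph tracer */
	fprintf(f, "kmalloc\n");
	fclose(f);
	return 0;
}

With that entry in place, trace_graph_entry() sets the NOTRACE bit when kmalloc is entered, every callee is skipped, and trace_graph_return() clears the bit when kmalloc returns.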
@@ -290,6 +302,11 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -315,6 +332,11 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
 	ftrace_graph_addr_finish(trace);
 
+	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
+		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+		return;
+	}
+
 	if (tracing_thresh &&
 	    (trace->rettime - trace->calltime < tracing_thresh))
 		return;