	tracing: Remove TRACE_EVENT_FL_FILTERED logic
After commit dcb0b5575d ("tracing: Remove TRACE_EVENT_FL_USE_CALL_FILTER
logic"), nothing sets TRACE_EVENT_FL_FILTERED or assigns call->filter
anymore, so remove the related logic.
Link: https://lore.kernel.org/20240911010026.2302849-1-zhengyejian@huaweicloud.com
Signed-off-by: Zheng Yejian <zhengyejian@huaweicloud.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
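
The change repeated across every call site below follows one pattern: since
nothing sets TRACE_EVENT_FL_FILTERED or assigns call->filter anymore,
call_filter_check_discard() can never discard an event, so each
"if (!call_filter_check_discard(...))" guard collapses into an unconditional
commit. A minimal standalone C sketch of that reasoning (userspace mock with
simplified stand-in types and names, not the kernel code itself):

/* Illustrative userspace mock of the removed pattern; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define TRACE_EVENT_FL_FILTERED (1 << 0)	/* the flag this commit removes */

struct event_filter { int dummy; };

struct trace_event_call {
	unsigned int flags;
	struct event_filter *filter;	/* never assigned after dcb0b5575d */
};

/* Stand-in for filter_match_preds(): would evaluate the filter predicates. */
static bool filter_match_preds(struct event_filter *filter, void *rec)
{
	(void)filter; (void)rec;
	return true;
}

/* Mock of the helper removed by this commit. */
static int call_filter_check_discard(struct trace_event_call *call, void *rec)
{
	if ((call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec))
		return 1;	/* discard the event */
	return 0;
}

static void buffer_unlock_commit(const char *site)
{
	printf("%s: event committed\n", site);
}

int main(void)
{
	/* With nothing setting FL_FILTERED or ->filter, every call is
	 * effectively in this state: */
	struct trace_event_call call = { .flags = 0, .filter = NULL };
	int rec = 0;

	/* Old call-site shape: the guard can never fire ... */
	if (!call_filter_check_discard(&call, &rec))
		buffer_unlock_commit("old");

	/* ... so it is equivalent to the new, unconditional commit. */
	buffer_unlock_commit("new");

	return 0;
}

The diff below therefore drops the helper and its extern declaration, the
filter pointer in struct trace_event_call, the FILTERED flag bit, and the
now-unused "call" locals at each event commit site.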
			
			
parent 2aa746ec02
commit 49e4154f4b
10 changed files with 20 additions and 78 deletions

include/linux/trace_events.h
@@ -326,7 +326,6 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
 
 enum {
-	TRACE_EVENT_FL_FILTERED_BIT,
 	TRACE_EVENT_FL_CAP_ANY_BIT,
 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
@@ -341,7 +340,6 @@ enum {
 
 /*
  * Event flags:
- *  FILTERED	  - The event has a filter attached
  *  CAP_ANY	  - Any user can enable for perf
  *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
@@ -356,7 +354,6 @@ enum {
  *                   to a tracepoint yet, then it is cleared when it is.
  */
 enum {
-	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),

kernel/trace/trace.c
@@ -593,19 +593,6 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 	return 0;
 }
 
-int call_filter_check_discard(struct trace_event_call *call, void *rec,
-			      struct trace_buffer *buffer,
-			      struct ring_buffer_event *event)
-{
-	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-	    !filter_match_preds(call->filter, rec)) {
-		__trace_event_discard_commit(buffer, event);
-		return 1;
-	}
-
-	return 0;
-}
-
 /**
  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
  * @filtered_pids: The list of pids to check
@@ -2889,7 +2876,6 @@ void
 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 	       parent_ip, unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_function;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
@@ -2902,11 +2888,9 @@ trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	if (!call_filter_check_discard(call, entry, buffer, event)) {
-		if (static_branch_unlikely(&trace_function_exports_enabled))
-			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
-		__buffer_unlock_commit(buffer, event);
-	}
+	if (static_branch_unlikely(&trace_function_exports_enabled))
+		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
+	__buffer_unlock_commit(buffer, event);
 }
 
 #ifdef CONFIG_STACKTRACE
@@ -2932,7 +2916,6 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 				 unsigned int trace_ctx,
 				 int skip, struct pt_regs *regs)
 {
-	struct trace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
 	unsigned int size, nr_entries;
 	struct ftrace_stack *fstack;
@@ -2986,8 +2969,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 	memcpy(&entry->caller, fstack->calls,
 	       flex_array_size(entry, caller, nr_entries));
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -3060,7 +3042,6 @@ static void
 ftrace_trace_userstack(struct trace_array *tr,
 		       struct trace_buffer *buffer, unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
 
@@ -3094,8 +3075,7 @@ ftrace_trace_userstack(struct trace_array *tr,
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
 	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -3264,7 +3244,6 @@ static void trace_printk_start_stop_comm(int enabled)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	struct trace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer;
 	struct trace_array *tr = READ_ONCE(printk_trace);
@@ -3308,10 +3287,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	entry->fmt			= fmt;
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-	if (!call_filter_check_discard(call, entry, buffer, event)) {
-		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
-	}
+	__buffer_unlock_commit(buffer, event);
+	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
 
 out:
 	ring_buffer_nest_end(buffer);
@@ -3331,7 +3308,6 @@ static int
 __trace_array_vprintk(struct trace_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
 {
-	struct trace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	int len = 0, size;
 	struct print_entry *entry;
@@ -3366,10 +3342,8 @@ __trace_array_vprintk(struct trace_buffer *buffer,
 	entry->ip = ip;
 
 	memcpy(&entry->buf, tbuffer, len + 1);
-	if (!call_filter_check_discard(call, entry, buffer, event)) {
-		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
-	}
+	__buffer_unlock_commit(buffer, event);
+	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
 
 out:
 	ring_buffer_nest_end(buffer);

kernel/trace/trace.h
@@ -1429,10 +1429,6 @@ struct trace_subsystem_dir {
 	int				nr_events;
 };
 
-extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
-				     struct trace_buffer *buffer,
-				     struct ring_buffer_event *event);
-
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct trace_buffer *buffer,
 				     struct ring_buffer_event *event,

kernel/trace/trace_branch.c
@@ -30,7 +30,6 @@ static struct trace_array *branch_tracer;
 static void
 probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
-	struct trace_event_call *call = &event_branch;
 	struct trace_array *tr = branch_tracer;
 	struct trace_buffer *buffer;
 	struct trace_array_cpu *data;
@@ -80,8 +79,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 	entry->line = f->data.line;
 	entry->correct = val == expect;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 
  out:
 	current->trace_recursion &= ~TRACE_BRANCH_BIT;

kernel/trace/trace_events.c
@@ -3149,8 +3149,6 @@ static void __trace_remove_event_call(struct trace_event_call *call)
 {
 	event_remove(call);
 	trace_destroy_fields(call);
-	free_event_filter(call->filter);
-	call->filter = NULL;
 }
 
 static int probe_remove_event_call(struct trace_event_call *call)

kernel/trace/trace_functions_graph.c
@@ -102,7 +102,6 @@ int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_funcgraph_entry;
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ftrace_graph_ent_entry *entry;
@@ -113,8 +112,7 @@ int __trace_graph_entry(struct trace_array *tr,
 		return 0;
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent			= *trace;
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 
 	return 1;
 }
@@ -223,7 +221,6 @@ void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_funcgraph_exit;
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ftrace_graph_ret_entry *entry;
@@ -234,8 +231,7 @@ void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	entry->ret				= *trace;
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace,

kernel/trace/trace_hwlat.c
@@ -130,7 +130,6 @@ static bool hwlat_busy;
 static void trace_hwlat_sample(struct hwlat_sample *sample)
 {
 	struct trace_array *tr = hwlat_trace;
-	struct trace_event_call *call = &event_hwlat;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct hwlat_entry *entry;
@@ -148,8 +147,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
 	entry->nmi_count		= sample->nmi_count;
 	entry->count			= sample->count;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /* Macros to encapsulate the time capturing infrastructure */

kernel/trace/trace_mmiotrace.c
@@ -294,7 +294,6 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_rw *rw)
 {
-	struct trace_event_call *call = &event_mmiotrace_rw;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_rw *entry;
@@ -310,8 +309,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->rw			= *rw;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -325,7 +323,6 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 				struct trace_array_cpu *data,
 				struct mmiotrace_map *map)
 {
-	struct trace_event_call *call = &event_mmiotrace_map;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct trace_mmiotrace_map *entry;
@@ -341,8 +338,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->map			= *map;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 void mmio_trace_mapping(struct mmiotrace_map *map)

kernel/trace/trace_osnoise.c
@@ -499,7 +499,6 @@ static void print_osnoise_headers(struct seq_file *s)
 static void
 __trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffer)
 {
-	struct trace_event_call *call = &event_osnoise;
 	struct ring_buffer_event *event;
 	struct osnoise_entry *entry;
 
@@ -517,8 +516,7 @@ __trace_osnoise_sample(struct osnoise_sample *sample, struct trace_buffer *buffe
 	entry->softirq_count	= sample->softirq_count;
 	entry->thread_count	= sample->thread_count;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /*
@@ -578,7 +576,6 @@ static void print_timerlat_headers(struct seq_file *s)
 static void
 __trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buffer)
 {
-	struct trace_event_call *call = &event_osnoise;
 	struct ring_buffer_event *event;
 	struct timerlat_entry *entry;
 
@@ -591,8 +588,7 @@ __trace_timerlat_sample(struct timerlat_sample *sample, struct trace_buffer *buf
 	entry->context			= sample->context;
 	entry->timer_latency		= sample->timer_latency;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /*
@@ -654,7 +650,6 @@ static void timerlat_save_stack(int skip)
 static void
 __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, unsigned int size)
 {
-	struct trace_event_call *call = &event_osnoise;
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 
@@ -668,8 +663,7 @@ __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, u
 	memcpy(&entry->caller, fstack->calls, size);
 	entry->size = fstack->nr_entries;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit_nostack(buffer, event);
+	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /*

kernel/trace/trace_sched_wakeup.c
@@ -378,7 +378,6 @@ tracing_sched_switch_trace(struct trace_array *tr,
 			   struct task_struct *next,
 			   unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_context_switch;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
@@ -396,8 +395,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state		= task_state_index(next);
 	entry->next_cpu	= task_cpu(next);
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 static void
@@ -406,7 +404,6 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 			   struct task_struct *curr,
 			   unsigned int trace_ctx)
 {
-	struct trace_event_call *call = &event_wakeup;
 	struct ring_buffer_event *event;
 	struct ctx_switch_entry *entry;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -424,8 +421,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state		= task_state_index(wakee);
 	entry->next_cpu			= task_cpu(wakee);
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
+	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
 }
 
 static void notrace