Mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00.
			
		
		
		
tracing: Remove TRACE_EVENT_FL_USE_CALL_FILTER logic

Nothing sets TRACE_EVENT_FL_USE_CALL_FILTER anymore. Remove it.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
This commit is contained in:
  parent: 904d1857ad
  commit: dcb0b5575d
2 changed files with 10 additions and 64 deletions
			
		|  | @ -214,7 +214,6 @@ enum { | ||||||
| 	TRACE_EVENT_FL_NO_SET_FILTER_BIT, | 	TRACE_EVENT_FL_NO_SET_FILTER_BIT, | ||||||
| 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT, | 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT, | ||||||
| 	TRACE_EVENT_FL_WAS_ENABLED_BIT, | 	TRACE_EVENT_FL_WAS_ENABLED_BIT, | ||||||
| 	TRACE_EVENT_FL_USE_CALL_FILTER_BIT, |  | ||||||
| 	TRACE_EVENT_FL_TRACEPOINT_BIT, | 	TRACE_EVENT_FL_TRACEPOINT_BIT, | ||||||
| 	TRACE_EVENT_FL_KPROBE_BIT, | 	TRACE_EVENT_FL_KPROBE_BIT, | ||||||
| 	TRACE_EVENT_FL_UPROBE_BIT, | 	TRACE_EVENT_FL_UPROBE_BIT, | ||||||
|  | @ -229,7 +228,6 @@ enum { | ||||||
|  *  WAS_ENABLED   - Set and stays set when an event was ever enabled |  *  WAS_ENABLED   - Set and stays set when an event was ever enabled | ||||||
|  *                    (used for module unloading, if a module event is enabled, |  *                    (used for module unloading, if a module event is enabled, | ||||||
|  *                     it is best to clear the buffers that used it). |  *                     it is best to clear the buffers that used it). | ||||||
|  *  USE_CALL_FILTER - For trace internal events, don't use file filter |  | ||||||
|  *  TRACEPOINT    - Event is a tracepoint |  *  TRACEPOINT    - Event is a tracepoint | ||||||
|  *  KPROBE        - Event is a kprobe |  *  KPROBE        - Event is a kprobe | ||||||
|  *  UPROBE        - Event is a uprobe |  *  UPROBE        - Event is a uprobe | ||||||
|  | @ -240,7 +238,6 @@ enum { | ||||||
| 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), | 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), | ||||||
| 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), | 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), | ||||||
| 	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), | 	TRACE_EVENT_FL_WAS_ENABLED	= (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), | ||||||
| 	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), |  | ||||||
| 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), | 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), | ||||||
| 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT), | 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT), | ||||||
| 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT), | 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT), | ||||||
|  |  | ||||||
|  | @ -689,10 +689,7 @@ static void append_filter_err(struct filter_parse_state *ps, | ||||||
| 
 | 
 | ||||||
| static inline struct event_filter *event_filter(struct trace_event_file *file) | static inline struct event_filter *event_filter(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 	return file->filter; | ||||||
| 		return file->event_call->filter; |  | ||||||
| 	else |  | ||||||
| 		return file->filter; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| /* caller must hold event_mutex */ | /* caller must hold event_mutex */ | ||||||
|  | @ -826,12 +823,7 @@ static void __free_preds(struct event_filter *filter) | ||||||
| 
 | 
 | ||||||
| static void filter_disable(struct trace_event_file *file) | static void filter_disable(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	file->flags &= ~EVENT_FILE_FL_FILTERED; | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		call->flags &= ~TRACE_EVENT_FL_FILTERED; |  | ||||||
| 	else |  | ||||||
| 		file->flags &= ~EVENT_FILE_FL_FILTERED; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void __free_filter(struct event_filter *filter) | static void __free_filter(struct event_filter *filter) | ||||||
|  | @ -883,13 +875,8 @@ static int __alloc_preds(struct event_filter *filter, int n_preds) | ||||||
| 
 | 
 | ||||||
| static inline void __remove_filter(struct trace_event_file *file) | static inline void __remove_filter(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; |  | ||||||
| 
 |  | ||||||
| 	filter_disable(file); | 	filter_disable(file); | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) | 	remove_filter_string(file->filter); | ||||||
| 		remove_filter_string(call->filter); |  | ||||||
| 	else |  | ||||||
| 		remove_filter_string(file->filter); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, | static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, | ||||||
|  | @ -906,15 +893,8 @@ static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, | ||||||
| 
 | 
 | ||||||
| static inline void __free_subsystem_filter(struct trace_event_file *file) | static inline void __free_subsystem_filter(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	__free_filter(file->filter); | ||||||
| 
 | 	file->filter = NULL; | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { |  | ||||||
| 		__free_filter(call->filter); |  | ||||||
| 		call->filter = NULL; |  | ||||||
| 	} else { |  | ||||||
| 		__free_filter(file->filter); |  | ||||||
| 		file->filter = NULL; |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, | static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, | ||||||
|  | @ -1718,69 +1698,38 @@ static int replace_preds(struct trace_event_call *call, | ||||||
| 
 | 
 | ||||||
| static inline void event_set_filtered_flag(struct trace_event_file *file) | static inline void event_set_filtered_flag(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	file->flags |= EVENT_FILE_FL_FILTERED; | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		call->flags |= TRACE_EVENT_FL_FILTERED; |  | ||||||
| 	else |  | ||||||
| 		file->flags |= EVENT_FILE_FL_FILTERED; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void event_set_filter(struct trace_event_file *file, | static inline void event_set_filter(struct trace_event_file *file, | ||||||
| 				    struct event_filter *filter) | 				    struct event_filter *filter) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	rcu_assign_pointer(file->filter, filter); | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		rcu_assign_pointer(call->filter, filter); |  | ||||||
| 	else |  | ||||||
| 		rcu_assign_pointer(file->filter, filter); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void event_clear_filter(struct trace_event_file *file) | static inline void event_clear_filter(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	RCU_INIT_POINTER(file->filter, NULL); | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		RCU_INIT_POINTER(call->filter, NULL); |  | ||||||
| 	else |  | ||||||
| 		RCU_INIT_POINTER(file->filter, NULL); |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void | static inline void | ||||||
| event_set_no_set_filter_flag(struct trace_event_file *file) | event_set_no_set_filter_flag(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	file->flags |= EVENT_FILE_FL_NO_SET_FILTER; | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; |  | ||||||
| 	else |  | ||||||
| 		file->flags |= EVENT_FILE_FL_NO_SET_FILTER; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline void | static inline void | ||||||
| event_clear_no_set_filter_flag(struct trace_event_file *file) | event_clear_no_set_filter_flag(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; | 	file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; | ||||||
| 
 |  | ||||||
| 	if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) |  | ||||||
| 		call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; |  | ||||||
| 	else |  | ||||||
| 		file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static inline bool | static inline bool | ||||||
| event_no_set_filter_flag(struct trace_event_file *file) | event_no_set_filter_flag(struct trace_event_file *file) | ||||||
| { | { | ||||||
| 	struct trace_event_call *call = file->event_call; |  | ||||||
| 
 |  | ||||||
| 	if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) | 	if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) | ||||||
| 		return true; | 		return true; | ||||||
| 
 | 
 | ||||||
| 	if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) && |  | ||||||
| 	    (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)) |  | ||||||
| 		return true; |  | ||||||
| 
 |  | ||||||
| 	return false; | 	return false; | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
Author: Steven Rostedt (Red Hat)