Mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
tracing: Have dynamic events have a ref counter

As dynamic events are not created by modules, if something is attached
to one, calling "try_module_get()" on its "mod" field is not going to
keep the dynamic event from going away.

Since dynamic events do not need the "mod" pointer of the event
structure, make a union out of it in order to save memory (there's one
structure for each of the thousand+ events in the kernel), and have any
event with the DYNAMIC flag set use a ref counter instead.

Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/
Link: https://lkml.kernel.org/r/20210817035027.174869074@goodmis.org

Suggested-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
commit 1d18538e6a (parent 8b0e6c744f)
9 changed files with 124 additions and 24 deletions
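The memory trick the message describes is a flag-discriminated union: the one
pointer-sized field in struct trace_event_call is a module back-pointer for
static events and a reference count for dynamic ones, and the
TRACE_EVENT_FL_DYNAMIC flag says which member is live. A minimal userspace
sketch of the pattern, with hypothetical names and C11 atomics standing in for
the kernel's atomic_t and module refcounting:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define EVENT_FL_DYNAMIC (1 << 0)	/* stands in for TRACE_EVENT_FL_DYNAMIC */

/* Hypothetical stand-in for struct trace_event_call */
struct event_call {
	unsigned int flags;
	/*
	 * One field, two owners: static events keep a pointer to the
	 * module that created them, dynamic events keep their own ref
	 * count.  The DYNAMIC flag selects the live union member.
	 */
	union {
		void *module;		/* live when !(flags & EVENT_FL_DYNAMIC) */
		atomic_int refcnt;	/* live when  (flags & EVENT_FL_DYNAMIC) */
	};
};

static bool event_try_get_ref(struct event_call *call)
{
	if (call->flags & EVENT_FL_DYNAMIC) {
		atomic_fetch_add(&call->refcnt, 1);
		return true;
	}
	/* the kernel would call try_module_get(call->module) here */
	return call->module != NULL;
}

static void event_put_ref(struct event_call *call)
{
	if (call->flags & EVENT_FL_DYNAMIC)
		atomic_fetch_sub(&call->refcnt, 1);
	/* else: module_put(call->module) in the kernel */
}

Because there is one struct trace_event_call per event and the kernel has over
a thousand of them, the union adds the counter at no per-event memory cost.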
				
			
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -350,7 +350,14 @@ struct trace_event_call {
 	struct trace_event	event;
 	char			*print_fmt;
 	struct event_filter	*filter;
-	void			*mod;
+	/*
+	 * Static events can disappear with modules,
+	 * whereas dynamic ones need their own ref count.
+	 */
+	union {
+		void				*module;
+		atomic_t			refcnt;
+	};
 	void			*data;
 
 	/* See the TRACE_EVENT_FL_* flags above */
@@ -366,6 +373,42 @@ struct trace_event_call {
 #endif
 };
 
+#ifdef CONFIG_DYNAMIC_EVENTS
+bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
+void trace_event_dyn_put_ref(struct trace_event_call *call);
+bool trace_event_dyn_busy(struct trace_event_call *call);
+#else
+static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
+{
+	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
+	return false;
+}
+static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
+{
+}
+static inline bool trace_event_dyn_busy(struct trace_event_call *call)
+{
+	/* Nothing should call this without DYNAMIC_EVENTS configured. */
+	return true;
+}
+#endif
+
+static inline bool trace_event_try_get_ref(struct trace_event_call *call)
+{
+	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+		return trace_event_dyn_try_get_ref(call);
+	else
+		return try_module_get(call->module);
+}
+
+static inline void trace_event_put_ref(struct trace_event_call *call)
+{
+	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+		trace_event_dyn_put_ref(call);
+	else
+		module_put(call->module);
+}
+
 #ifdef CONFIG_PERF_EVENTS
 static inline bool bpf_prog_array_valid(struct trace_event_call *call)
 {
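With these wrappers in place, callers no longer care what kind of event they
are holding: pin it, use it, drop it. An illustrative caller, where only
trace_event_try_get_ref() and trace_event_put_ref() come from the patch and
the rest is invented for the example:

/* Illustrative only: pin an event across a window of use */
static int attach_to_event(struct trace_event_call *call)
{
	if (!trace_event_try_get_ref(call))
		return -ENODEV;	/* module unloading or event being deleted */

	/*
	 * Safe window: a module-backed event cannot be unloaded and a
	 * dynamic event cannot be removed while the reference is held.
	 */

	trace_event_put_ref(call);
	return 0;
}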
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3697,11 +3697,11 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str)
 		return false;
 
 	event = container_of(trace_event, struct trace_event_call, event);
-	if (!event->mod)
+	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
 		return false;
 
 	/* Would rather have rodata, but this will suffice */
-	if (within_module_core(addr, event->mod))
+	if (within_module_core(addr, event->module))
 		return true;
 
 	return false;
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -13,11 +13,49 @@
 #include <linux/tracefs.h>
 
 #include "trace.h"
+#include "trace_output.h"	/* for trace_event_sem */
 #include "trace_dynevent.h"
 
 static DEFINE_MUTEX(dyn_event_ops_mutex);
 static LIST_HEAD(dyn_event_ops_list);
 
+bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call)
+{
+	struct trace_event_call *call;
+	bool ret = false;
+
+	if (WARN_ON_ONCE(!(dyn_call->flags & TRACE_EVENT_FL_DYNAMIC)))
+		return false;
+
+	down_read(&trace_event_sem);
+	list_for_each_entry(call, &ftrace_events, list) {
+		if (call == dyn_call) {
+			atomic_inc(&dyn_call->refcnt);
+			ret = true;
+		}
+	}
+	up_read(&trace_event_sem);
+	return ret;
+}
+
+void trace_event_dyn_put_ref(struct trace_event_call *call)
+{
+	if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))
+		return;
+
+	if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {
+		atomic_set(&call->refcnt, 0);
+		return;
+	}
+
+	atomic_dec(&call->refcnt);
+}
+
+bool trace_event_dyn_busy(struct trace_event_call *call)
+{
+	return atomic_read(&call->refcnt) != 0;
+}
+
 int dyn_event_register(struct dyn_event_operations *ops)
 {
 	if (!ops || !ops->create || !ops->show || !ops->is_busy ||
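Note the shape of trace_event_dyn_try_get_ref(): the caller may be holding a
pointer to an event that is concurrently being deleted, so rather than
trusting that pointer, the function walks the list of registered events under
trace_event_sem and only bumps the counter if the event is still present. A
generic userspace rendering of that validate-then-ref pattern, with
hypothetical names (the rwlock stands in for trace_event_sem):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct node {
	struct node *next;
	atomic_int refcnt;
};

static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *registry_head;

/* Take a reference only if 'target' is still on the registered list. */
static bool registry_try_get(struct node *target)
{
	bool ret = false;

	pthread_rwlock_rdlock(&registry_lock);
	for (struct node *n = registry_head; n; n = n->next) {
		if (n == target) {	/* pointer proven still valid */
			atomic_fetch_add(&n->refcnt, 1);
			ret = true;
			break;
		}
	}
	pthread_rwlock_unlock(&registry_lock);
	return ret;
}

The early break is a small liberty taken by the sketch; the kernel loop above
scans the whole list even after a match.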
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -177,7 +177,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
 		}
 	}
 out:
-	module_put(tp_event->mod);
+	trace_event_put_ref(tp_event);
 }
 
 static int perf_trace_event_open(struct perf_event *p_event)
@@ -224,10 +224,10 @@ int perf_trace_init(struct perf_event *p_event)
 	list_for_each_entry(tp_event, &ftrace_events, list) {
 		if (tp_event->event.type == event_id &&
 		    tp_event->class && tp_event->class->reg &&
-		    try_module_get(tp_event->mod)) {
+		    trace_event_try_get_ref(tp_event)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			if (ret)
-				module_put(tp_event->mod);
+				trace_event_put_ref(tp_event);
 			break;
 		}
 	}
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2525,7 +2525,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
 		return ret;
 
 	list_add(&call->list, &ftrace_events);
-	call->mod = mod;
+	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+		atomic_set(&call->refcnt, 0);
+	else
+		call->module = mod;
 
 	return 0;
 }
@@ -2839,7 +2842,9 @@ static void trace_module_remove_events(struct module *mod)
 
 	down_write(&trace_event_sem);
 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
-		if (call->mod == mod)
+		if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
+			continue;
+		if (call->module == mod)
 			__trace_remove_event_call(call);
 	}
 	up_write(&trace_event_sem);
@@ -2982,7 +2987,7 @@ struct trace_event_file *trace_get_event_file(const char *instance,
 	}
 
 	/* Don't let event modules unload while in use */
-	ret = try_module_get(file->event_call->mod);
+	ret = trace_event_try_get_ref(file->event_call);
 	if (!ret) {
 		trace_array_put(tr);
 		ret = -EBUSY;
@@ -3012,7 +3017,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file);
 void trace_put_event_file(struct trace_event_file *file)
 {
 	mutex_lock(&event_mutex);
-	module_put(file->event_call->mod);
+	trace_event_put_ref(file->event_call);
 	mutex_unlock(&event_mutex);
 
 	trace_array_put(file->tr);
@@ -3147,7 +3152,7 @@ static int free_probe_data(void *data)
 	if (!edata->ref) {
 		/* Remove the SOFT_MODE flag */
 		__ftrace_event_enable_disable(edata->file, 0, 1);
-		module_put(edata->file->event_call->mod);
+		trace_event_put_ref(edata->file->event_call);
 		kfree(edata);
 	}
 	return 0;
@@ -3280,7 +3285,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
 
  out_reg:
 	/* Don't let event modules unload while probe registered */
-	ret = try_module_get(file->event_call->mod);
+	ret = trace_event_try_get_ref(file->event_call);
 	if (!ret) {
 		ret = -EBUSY;
 		goto out_free;
@@ -3310,7 +3315,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
  out_disable:
 	__ftrace_event_enable_disable(file, 0, 1);
  out_put:
-	module_put(file->event_call->mod);
+	trace_event_put_ref(file->event_call);
  out_free:
 	kfree(data);
 	goto out;
@@ -3376,7 +3381,8 @@ void __trace_early_add_events(struct trace_array *tr)
 
 	list_for_each_entry(call, &ftrace_events, list) {
 		/* Early boot up should not have any modules loaded */
-		if (WARN_ON_ONCE(call->mod))
+		if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
+		    WARN_ON_ONCE(call->module))
 			continue;
 
 		ret = __trace_early_add_new_event(call, tr);
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -1369,13 +1369,15 @@ static int destroy_synth_event(struct synth_event *se)
 	int ret;
 
 	if (se->ref)
-		ret = -EBUSY;
-	else {
-		ret = unregister_synth_event(se);
-		if (!ret) {
-			dyn_event_remove(&se->devent);
-			free_synth_event(se);
-		}
-	}
+		return -EBUSY;
+
+	if (trace_event_dyn_busy(&se->call))
+		return -EBUSY;
+
+	ret = unregister_synth_event(se);
+	if (!ret) {
+		dyn_event_remove(&se->devent);
+		free_synth_event(se);
+	}
 
 	return ret;
@@ -2102,6 +2104,9 @@ static int synth_event_release(struct dyn_event *ev)
 	if (event->ref)
 		return -EBUSY;
 
+	if (trace_event_dyn_busy(&event->call))
+		return -EBUSY;
+
 	ret = unregister_synth_event(event);
 	if (ret)
 		return ret;
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -1334,7 +1334,7 @@ void event_enable_trigger_free(struct event_trigger_ops *ops,
 	if (!data->ref) {
 		/* Remove the SOFT_MODE flag */
 		trace_event_enable_disable(enable_data->file, 0, 1);
-		module_put(enable_data->file->event_call->mod);
+		trace_event_put_ref(enable_data->file->event_call);
 		trigger_data_free(data);
 		kfree(enable_data);
 	}
@@ -1481,7 +1481,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
 
  out_reg:
 	/* Don't let event modules unload while probe registered */
-	ret = try_module_get(event_enable_file->event_call->mod);
+	ret = trace_event_try_get_ref(event_enable_file->event_call);
 	if (!ret) {
 		ret = -EBUSY;
 		goto out_free;
@@ -1510,7 +1510,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
  out_disable:
 	trace_event_enable_disable(event_enable_file, 0, 1);
  out_put:
-	module_put(event_enable_file->event_call->mod);
+	trace_event_put_ref(event_enable_file->event_call);
  out_free:
 	if (cmd_ops->set_filter)
 		cmd_ops->set_filter(NULL, trigger_data, NULL);
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -543,6 +543,10 @@ static int unregister_trace_kprobe(struct trace_kprobe *tk)
 	if (trace_probe_is_enabled(&tk->tp))
 		return -EBUSY;
 
+	/* If there's a reference to the dynamic event */
+	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
+		return -EBUSY;
+
 	/* Will fail if probe is being used by ftrace or perf */
 	if (unregister_kprobe_event(tk))
 		return -EBUSY;
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -393,6 +393,10 @@ static int unregister_trace_uprobe(struct trace_uprobe *tu)
 	if (trace_probe_has_sibling(&tu->tp))
 		goto unreg;
 
+	/* If there's a reference to the dynamic event */
+	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
+		return -EBUSY;
+
 	ret = unregister_uprobe_event(tu);
 	if (ret)
 		return ret;
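The removal side is the mirror image: each dynamic-event flavor (synthetic,
kprobe, uprobe) now refuses to unregister while trace_event_dyn_busy() reports
outstanding references, returning -EBUSY much as a pinned module refuses to
unload. Continuing the hypothetical registry sketch from earlier:

#include <errno.h>

/* Refuse to unlink a node that still has holders (cf. -EBUSY above). */
static int registry_remove(struct node *target)
{
	int ret = -ENOENT;

	pthread_rwlock_wrlock(&registry_lock);
	for (struct node **pp = &registry_head; *pp; pp = &(*pp)->next) {
		if (*pp != target)
			continue;
		if (atomic_load(&target->refcnt) != 0) {
			ret = -EBUSY;	/* still referenced: deny removal */
		} else {
			*pp = target->next;	/* unlink */
			ret = 0;
		}
		break;
	}
	pthread_rwlock_unlock(&registry_lock);
	return ret;
}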