	bpf: support raw tracepoints in modules
Distributions build drivers as modules, including network and filesystem drivers which export numerous tracepoints. This enables bpf(BPF_RAW_TRACEPOINT_OPEN) to attach to those tracepoints.

Signed-off-by: Matt Mullins <mmullins@fb.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit a38d1107f9
parent a137401d85

5 changed files with 120 additions and 7 deletions
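For context, the interface the commit message refers to is the bpf() syscall's BPF_RAW_TRACEPOINT_OPEN command, which takes a tracepoint name and the fd of an already-loaded BPF_PROG_TYPE_RAW_TRACEPOINT program. A minimal user-space sketch of that call (the "xdp_exception" name and prog_fd below are illustrative assumptions, not part of this change):

/*
 * Sketch only: attach a loaded raw-tracepoint program to a tracepoint by name.
 * prog_fd is assumed to come from a prior BPF_PROG_LOAD; the tracepoint name
 * is just an example.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = (__u64)(unsigned long)name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	/* Returns an fd that keeps the attachment alive until it is closed. */
	return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}

/* e.g. int link_fd = raw_tracepoint_open("xdp_exception", prog_fd); */

With this commit, the kernel-side name lookup behind that call also searches tracepoints exported by loaded modules, and the returned fd holds a reference on the owning module until it is released.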
				
			
include/linux/module.h

@@ -432,6 +432,10 @@ struct module {
 	unsigned int num_tracepoints;
 	tracepoint_ptr_t *tracepoints_ptrs;
 #endif
+#ifdef CONFIG_BPF_EVENTS
+	unsigned int num_bpf_raw_events;
+	struct bpf_raw_event_map *bpf_raw_events;
+#endif
 #ifdef HAVE_JUMP_LABEL
 	struct jump_entry *jump_entries;
 	unsigned int num_jump_entries;
include/linux/trace_events.h

@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
 int perf_event_query_prog_array(struct perf_event *event, void __user *info);
 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
 			    u32 *fd_type, const char **buf,
 			    u64 *probe_offset, u64 *probe_addr);
@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
 {
 	return -EOPNOTSUPP;
 }
-static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
 {
 	return NULL;
 }
+static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+}
 static inline int bpf_get_perf_event_info(const struct perf_event *event,
 					  u32 *prog_id, u32 *fd_type,
 					  const char **buf, u64 *probe_offset,
kernel/bpf/syscall.c

@@ -1604,6 +1604,7 @@ static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
 		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
 		bpf_prog_put(raw_tp->prog);
 	}
+	bpf_put_raw_tracepoint(raw_tp->btp);
 	kfree(raw_tp);
 	return 0;
 }
@@ -1629,13 +1630,15 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 		return -EFAULT;
 	tp_name[sizeof(tp_name) - 1] = 0;
 
-	btp = bpf_find_raw_tracepoint(tp_name);
+	btp = bpf_get_raw_tracepoint(tp_name);
 	if (!btp)
 		return -ENOENT;
 
 	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
-	if (!raw_tp)
-		return -ENOMEM;
+	if (!raw_tp) {
+		err = -ENOMEM;
+		goto out_put_btp;
+	}
 	raw_tp->btp = btp;
 
 	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
@@ -1663,6 +1666,8 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 	bpf_prog_put(prog);
 out_free_tp:
 	kfree(raw_tp);
+out_put_btp:
+	bpf_put_raw_tracepoint(btp);
 	return err;
 }
 
kernel/module.c

@@ -3093,6 +3093,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 					     sizeof(*mod->tracepoints_ptrs),
 					     &mod->num_tracepoints);
 #endif
+#ifdef CONFIG_BPF_EVENTS
+	mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
+					   sizeof(*mod->bpf_raw_events),
+					   &mod->num_bpf_raw_events);
+#endif
 #ifdef HAVE_JUMP_LABEL
 	mod->jump_entries = section_objs(info, "__jump_table",
 					sizeof(*mod->jump_entries),
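For reference, a module does not need anything special to end up with entries in the "__bpf_raw_tp_map" section scanned above: when CONFIG_BPF_EVENTS is enabled, each tracepoint defined through the usual TRACE_EVENT() machinery emits a struct bpf_raw_event_map into that section. A hypothetical driver trace header might look like the sketch below (the foo names and file layout are illustrative, not taken from this commit); the module defines CREATE_TRACE_POINTS before including it in exactly one .c file.

/* foo_trace.h -- hypothetical driver trace header. The Makefile typically
 * needs CFLAGS_foo_trace.o := -I$(src) (or equivalent) so that the "."
 * include path below resolves. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_FOO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _FOO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(foo_did_work,
	TP_PROTO(unsigned int units),
	TP_ARGS(units),
	TP_STRUCT__entry(
		__field(unsigned int, units)
	),
	TP_fast_assign(
		__entry->units = units;
	),
	TP_printk("units=%u", __entry->units)
);

#endif /* _FOO_TRACE_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE foo_trace
#include <trace/define_trace.h>

Once such a module is loaded, the bpf_get_raw_tracepoint() path added in kernel/trace/bpf_trace.c below can resolve "foo_did_work" by name and take a reference on the module.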
kernel/trace/bpf_trace.c

@@ -17,6 +17,43 @@
 #include "trace_probe.h"
 #include "trace.h"
 
+#ifdef CONFIG_MODULES
+struct bpf_trace_module {
+	struct module *module;
+	struct list_head list;
+};
+
+static LIST_HEAD(bpf_trace_modules);
+static DEFINE_MUTEX(bpf_module_mutex);
+
+static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
+{
+	struct bpf_raw_event_map *btp, *ret = NULL;
+	struct bpf_trace_module *btm;
+	unsigned int i;
+
+	mutex_lock(&bpf_module_mutex);
+	list_for_each_entry(btm, &bpf_trace_modules, list) {
+		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
+			btp = &btm->module->bpf_raw_events[i];
+			if (!strcmp(btp->tp->name, name)) {
+				if (try_module_get(btm->module))
+					ret = btp;
+				goto out;
+			}
+		}
+	}
+out:
+	mutex_unlock(&bpf_module_mutex);
+	return ret;
+}
+#else
+static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
+{
+	return NULL;
+}
+#endif /* CONFIG_MODULES */
+
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
@@ -1076,7 +1113,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
 
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
 {
 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
 
@@ -1084,7 +1121,16 @@ struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
 		if (!strcmp(btp->tp->name, name))
 			return btp;
 	}
-	return NULL;
+
+	return bpf_get_raw_tracepoint_module(name);
+}
+
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+	struct module *mod = __module_address((unsigned long)btp);
+
+	if (mod)
+		module_put(mod);
 }
 
 static __always_inline
@@ -1222,3 +1268,52 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
 
 	return err;
 }
+
+#ifdef CONFIG_MODULES
+int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
+{
+	struct bpf_trace_module *btm, *tmp;
+	struct module *mod = module;
+
+	if (mod->num_bpf_raw_events == 0 ||
+	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
+		return 0;
+
+	mutex_lock(&bpf_module_mutex);
+
+	switch (op) {
+	case MODULE_STATE_COMING:
+		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
+		if (btm) {
+			btm->module = module;
+			list_add(&btm->list, &bpf_trace_modules);
+		}
+		break;
+	case MODULE_STATE_GOING:
+		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
+			if (btm->module == module) {
+				list_del(&btm->list);
+				kfree(btm);
+				break;
+			}
+		}
+		break;
+	}
+
+	mutex_unlock(&bpf_module_mutex);
+
+	return 0;
+}
+
+static struct notifier_block bpf_module_nb = {
+	.notifier_call = bpf_event_notify,
+};
+
+int __init bpf_event_init(void)
+{
+	register_module_notifier(&bpf_module_nb);
+	return 0;
+}
+
+fs_initcall(bpf_event_init);
+#endif /* CONFIG_MODULES */