	bpf: Enable BPF_PROG_TEST_RUN for raw_tracepoint
Add .test_run for raw_tracepoint. Also, introduce a new feature that runs
the target program on a specific CPU. This is achieved by a new flag in
bpf_attr.test, BPF_F_TEST_RUN_ON_CPU. When this flag is set, the program
is triggered on the cpu with id bpf_attr.test.cpu. This feature is needed
for BPF programs that handle perf_event and other percpu resources, as
such programs can then access those resources locally.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200925205432.1777-2-songliubraving@fb.com
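For context, a minimal sketch of driving the new interface from user space. Everything here comes from the UAPI additions in the diff below, except prog_fd and the two-u64 context, which are assumptions: prog_fd must refer to an already-loaded raw_tracepoint program, and ctx_in must carry the u64 argument array that program expects.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Run a loaded raw_tracepoint program on a chosen CPU.
 * prog_fd and the two-u64 context are illustrative assumptions.
 */
static int test_run_raw_tp_on_cpu(int prog_fd, uint32_t cpu)
{
	uint64_t args[2] = { 1, 2 };	/* ctx_in: the program's u64 args */
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (uint64_t)(uintptr_t)args;
	attr.test.ctx_size_in = sizeof(args);
	attr.test.flags = BPF_F_TEST_RUN_ON_CPU;	/* new flag */
	attr.test.cpu = cpu;				/* new field */

	/* On success, the program's return code is in attr.test.retval. */
	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}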
commit 1b4d60ec16
parent 1fd17c8cd0

 include/linux/bpf.h            |  3 ++
 include/uapi/linux/bpf.h       |  7 +++++
 kernel/bpf/syscall.c           |  2 +-
 kernel/trace/bpf_trace.c       |  1 +
 net/bpf/test_run.c             | 91 ++++++++++++++
 tools/include/uapi/linux/bpf.h |  7 +++++
 6 files changed, 110 insertions(+), 1 deletion(-)
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1396,6 +1396,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 				     const union bpf_attr *kattr,
 				     union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+			     const union bpf_attr *kattr,
+			     union bpf_attr __user *uattr);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		    const struct bpf_prog *prog,
 		    struct bpf_insn_access_aux *info);
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -424,6 +424,11 @@ enum {
  */
 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
 
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
+
 /* type for BPF_ENABLE_STATS */
 enum bpf_stats_type {
 	/* enabled run_time_ns and run_cnt */
@@ -566,6 +571,8 @@ union bpf_attr {
 						 */
 		__aligned_u64	ctx_in;
 		__aligned_u64	ctx_out;
+		__u32		flags;
+		__u32		cpu;
 	} test;
 
 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2979,7 +2979,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
 	}
 }
 
-#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
 
 static int bpf_prog_test_run(const union bpf_attr *attr,
 			     union bpf_attr __user *uattr)
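This one-line change is load-bearing: bpf_prog_test_run() starts with CHECK_ATTR(BPF_PROG_TEST_RUN), which returns -EINVAL if any byte of bpf_attr past the last recognized field is nonzero, so the two new fields are only accepted once the marker moves from test.ctx_out to test.cpu. For reference, a sketch of the macro as defined upstream in kernel/bpf/syscall.c:

/* CHECK_ATTR(CMD): true (i.e. reject) if any byte after the
 * CMD##_LAST_FIELD member of union bpf_attr is nonzero.
 */
#define CHECK_ATTR(CMD) \
	(memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		    sizeof(attr->CMD##_LAST_FIELD), 0, \
		    sizeof(*attr) - \
		    offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		    sizeof(attr->CMD##_LAST_FIELD)) != NULL)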
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1678,6 +1678,7 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
 };
 
 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
+	.test_run = bpf_prog_test_run_raw_tp,
 };
 
 const struct bpf_verifier_ops tracing_verifier_ops = {
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -11,6 +11,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <linux/error-injection.h>
+#include <linux/smp.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/bpf_test_run.h>
@@ -204,6 +205,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 	int b = 2, err = -EFAULT;
 	u32 retval = 0;
 
+	if (kattr->test.flags || kattr->test.cpu)
+		return -EINVAL;
+
 	switch (prog->expected_attach_type) {
 	case BPF_TRACE_FENTRY:
 	case BPF_TRACE_FEXIT:
@@ -236,6 +240,87 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 	return err;
 }
 
+struct bpf_raw_tp_test_run_info {
+	struct bpf_prog *prog;
+	void *ctx;
+	u32 retval;
+};
+
+static void
+__bpf_prog_test_run_raw_tp(void *data)
+{
+	struct bpf_raw_tp_test_run_info *info = data;
+
+	rcu_read_lock();
+	migrate_disable();
+	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
+	migrate_enable();
+	rcu_read_unlock();
+}
+
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+			     const union bpf_attr *kattr,
+			     union bpf_attr __user *uattr)
+{
+	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+	__u32 ctx_size_in = kattr->test.ctx_size_in;
+	struct bpf_raw_tp_test_run_info info;
+	int cpu = kattr->test.cpu, err = 0;
+
+	/* doesn't support data_in/out, ctx_out, duration, or repeat */
+	if (kattr->test.data_in || kattr->test.data_out ||
+	    kattr->test.ctx_out || kattr->test.duration ||
+	    kattr->test.repeat)
+		return -EINVAL;
+
+	if (ctx_size_in < prog->aux->max_ctx_offset)
+		return -EINVAL;
+
+	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
+		return -EINVAL;
+
+	if (ctx_size_in) {
+		info.ctx = kzalloc(ctx_size_in, GFP_USER);
+		if (!info.ctx)
+			return -ENOMEM;
+		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
+			err = -EFAULT;
+			goto out;
+		}
+	} else {
+		info.ctx = NULL;
+	}
+
+	info.prog = prog;
+
+	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
+	    cpu == smp_processor_id()) {
+		__bpf_prog_test_run_raw_tp(&info);
+	} else {
+		/* smp_call_function_single() also checks cpu_online()
+		 * after csd_lock(). However, since cpu is from user
+		 * space, let's do an extra quick check to filter out
+		 * invalid value before smp_call_function_single().
+		 */
+		if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+			err = -ENXIO;
+			goto out;
+		}
+
+		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
+					       &info, 1);
+		if (err)
+			goto out;
+	}
+
+	if (copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
+		err = -EFAULT;
+
+out:
+	kfree(info.ctx);
+	return err;
+}
+
 static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
 {
 	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
@@ -410,6 +495,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	void *data;
 	int ret;
 
+	if (kattr->test.flags || kattr->test.cpu)
+		return -EINVAL;
+
 	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
 			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
 	if (IS_ERR(data))
@@ -607,6 +695,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
 		return -EINVAL;
 
+	if (kattr->test.flags || kattr->test.cpu)
+		return -EINVAL;
+
 	if (size < ETH_HLEN)
 		return -EINVAL;
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -424,6 +424,11 @@ enum {
  */
 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
 
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
+
 /* type for BPF_ENABLE_STATS */
 enum bpf_stats_type {
 	/* enabled run_time_ns and run_cnt */
@@ -566,6 +571,8 @@ union bpf_attr {
 						 */
 		__aligned_u64	ctx_in;
 		__aligned_u64	ctx_out;
+		__u32		flags;
+		__u32		cpu;
 	} test;
 
 	struct { /* anonymous struct used by BPF_*_GET_*_ID */
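The same series also teaches libbpf about the new fields. A sketch of user-space usage via libbpf, assuming the bpf_prog_test_run_opts() helper and the .flags/.cpu members of struct bpf_test_run_opts that the libbpf patch in this series introduces:

#include <bpf/bpf.h>

/* Run a loaded raw_tracepoint program on a chosen CPU via libbpf.
 * The two-u64 context is a hypothetical example payload.
 */
static int run_raw_tp_on_cpu(int prog_fd, int cpu)
{
	__u64 args[2] = { 0x1234ULL, 0x5678ULL };
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
		.flags = BPF_F_TEST_RUN_ON_CPU,
		.cpu = cpu,
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}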