forked from mirrors/linux
		
	selftests/bpf: Test gen_prologue and gen_epilogue
This test adds a new struct_ops "bpf_testmod_st_ops" in bpf_testmod.
The ops of the bpf_testmod_st_ops are triggered by new kfunc calls
"bpf_kfunc_st_ops_test_*logue". These new kfunc calls are
primarily used by the SEC("syscall") program. The test triggering
sequence is like:
    SEC("syscall")
    syscall_prologue(struct st_ops_args *args)
    bpf_kfunc_st_ops_test_prologue(args)
	    st_ops->test_prologue(args)
.gen_prologue adds 1000 to args->a
.gen_epilogue adds 10000 to args->a
.gen_epilogue will also set the r0 to 2 * args->a.
The .gen_prologue and .gen_epilogue of the bpf_testmod_st_ops
will test the prog->aux->attach_func_name to decide if
it needs to generate codes.
The main programs of the pro_epilogue.c will call a
new kfunc bpf_kfunc_st_ops_inc10 which does "args->a += 10".
It will also call a subprog() which does "args->a += 1".
This patch uses the test_loader infra to check the __xlated
instructions patched after gen_prologue and/or gen_epilogue.
The __xlated check is based on Eduard's example (Thanks!) in v1.
args->a is returned by the struct_ops prog (either the main prog
or the epilogue). Thus, the __retval of the SEC("syscall") prog
is checked. For example, when triggering the ops in the
'SEC("struct_ops/test_epilogue") int test_epilogue'
The expected args->a is +1 (subprog call) + 10 (kfunc call)
    	     	     	+ 10000 (.gen_epilogue) = 10011.
The expected return value is 2 * 10011 (.gen_epilogue).
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240829210833.388152-7-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
			
			
This commit is contained in:
		
							parent
							
								
									a0dbf6d0b2
								
							
						
					
					
						commit
						47e69431b5
					
				
					 5 changed files with 371 additions and 0 deletions
				
			
		|  | @ -17,6 +17,7 @@ | |||
| #include <linux/in.h> | ||||
| #include <linux/in6.h> | ||||
| #include <linux/un.h> | ||||
| #include <linux/filter.h> | ||||
| #include <net/sock.h> | ||||
| #include <linux/namei.h> | ||||
| #include "bpf_testmod.h" | ||||
|  | @ -945,6 +946,51 @@ __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) | |||
| 	return err; | ||||
| } | ||||
| 
 | ||||
| static DEFINE_MUTEX(st_ops_mutex); | ||||
| static struct bpf_testmod_st_ops *st_ops; | ||||
| 
 | ||||
| __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) | ||||
| { | ||||
| 	int ret = -1; | ||||
| 
 | ||||
| 	mutex_lock(&st_ops_mutex); | ||||
| 	if (st_ops && st_ops->test_prologue) | ||||
| 		ret = st_ops->test_prologue(args); | ||||
| 	mutex_unlock(&st_ops_mutex); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| __bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) | ||||
| { | ||||
| 	int ret = -1; | ||||
| 
 | ||||
| 	mutex_lock(&st_ops_mutex); | ||||
| 	if (st_ops && st_ops->test_epilogue) | ||||
| 		ret = st_ops->test_epilogue(args); | ||||
| 	mutex_unlock(&st_ops_mutex); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
| __bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) | ||||
| { | ||||
| 	int ret = -1; | ||||
| 
 | ||||
| 	mutex_lock(&st_ops_mutex); | ||||
| 	if (st_ops && st_ops->test_pro_epilogue) | ||||
| 		ret = st_ops->test_pro_epilogue(args); | ||||
| 	mutex_unlock(&st_ops_mutex); | ||||
| 
 | ||||
| 	return ret; | ||||
| } | ||||
| 
 | ||||
/* Kfunc used by the test BPF programs: add 10 to args->a and return it.
 * Note args->a is u64 but the return type is int, so the value is
 * truncated; the tests only use small values, where this is lossless.
 */
__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}
| 
 | ||||
| BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) | ||||
| BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_call_test1) | ||||
|  | @ -981,6 +1027,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE) | |||
| BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) | ||||
| BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS) | ||||
| BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) | ||||
| 
 | ||||
| static int bpf_testmod_ops_init(struct btf *btf) | ||||
|  | @ -1100,6 +1150,144 @@ struct bpf_struct_ops bpf_testmod_ops2 = { | |||
| 	.owner = THIS_MODULE, | ||||
| }; | ||||
| 
 | ||||
/* Default (non-BPF) implementation of ->test_prologue; also serves as the
 * CFI stub.  Does nothing and reports success.
 */
static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}
| 
 | ||||
/* Default (non-BPF) implementation of ->test_epilogue; also serves as the
 * CFI stub.  Does nothing and reports success.
 */
static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}
| 
 | ||||
/* Default (non-BPF) implementation of ->test_pro_epilogue; also serves as
 * the CFI stub.  Does nothing and reports success.
 */
static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}
| 
 | ||||
/* Verifier hook: emit prologue instructions ahead of the struct_ops prog.
 * Only "test_prologue" and "test_pro_epilogue" attach points are patched;
 * for any other program this returns 0 (no prologue).
 * Returns the number of instructions written to insn_buf; the last emitted
 * instruction must be a copy of the program's original first instruction,
 * which the verifier then replaces.
 */
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0]; /* keep the prog's original first insn */

	return insn - insn_buf;
}
| 
 | ||||
/* Verifier hook: emit epilogue instructions run before the prog exits.
 * Only "test_epilogue" and "test_pro_epilogue" attach points are patched;
 * for any other program this returns 0 (no epilogue).
 * ctx_stack_off is the stack offset (relative to r10) where the verifier
 * saved the "u64 *ctx" pointer for the epilogue to reload.
 * The epilogue adds 10000 to args->a and sets r0 = 2 * args->a.
 */
static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}
| 
 | ||||
/* Verifier hook: allow BPF programs to read/write anywhere inside
 * struct st_ops_args; reject negative offsets and accesses that run past
 * the end of the struct.
 */
static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}
| 
 | ||||
/* Verifier ops for bpf_testmod_st_ops: wires in the prologue/epilogue
 * generators under test plus the struct-access checker above.
 */
static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};
| 
 | ||||
/* CFI stubs: native-code stand-ins with the same signatures as the BPF
 * struct_ops members, required for kCFI-compatible indirect calls.
 */
static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};
| 
 | ||||
| static int st_ops_reg(void *kdata, struct bpf_link *link) | ||||
| { | ||||
| 	int err = 0; | ||||
| 
 | ||||
| 	mutex_lock(&st_ops_mutex); | ||||
| 	if (st_ops) { | ||||
| 		pr_err("st_ops has already been registered\n"); | ||||
| 		err = -EEXIST; | ||||
| 		goto unlock; | ||||
| 	} | ||||
| 	st_ops = kdata; | ||||
| 
 | ||||
| unlock: | ||||
| 	mutex_unlock(&st_ops_mutex); | ||||
| 	return err; | ||||
| } | ||||
| 
 | ||||
/* struct_ops ->unreg callback: clear the global st_ops pointer so the
 * kfunc bridges fall back to returning -1.
 */
static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}
| 
 | ||||
/* struct_ops ->init callback: nothing to set up for this test ops. */
static int st_ops_init(struct btf *btf)
{
	return 0;
}
| 
 | ||||
/* struct_ops ->init_member callback: returning 0 lets the common
 * struct_ops code handle every member (all members are func ptrs here).
 */
static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}
| 
 | ||||
/* Registration record for the "bpf_testmod_st_ops" struct_ops type;
 * registered from bpf_testmod_init().
 */
static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};
| 
 | ||||
| extern int bpf_fentry_test1(int a); | ||||
| 
 | ||||
| static int bpf_testmod_init(void) | ||||
|  | @ -1117,8 +1305,10 @@ static int bpf_testmod_init(void) | |||
| 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); | ||||
| 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); | ||||
| 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); | ||||
| 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set); | ||||
| 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); | ||||
| 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); | ||||
| 	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops); | ||||
| 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, | ||||
| 						 ARRAY_SIZE(bpf_testmod_dtors), | ||||
| 						 THIS_MODULE); | ||||
|  |  | |||
|  | @ -94,4 +94,15 @@ struct bpf_testmod_ops2 { | |||
| 	int (*test_1)(void); | ||||
| }; | ||||
| 
 | ||||
/* Shared argument passed through the st_ops callbacks; 'a' is the
 * accumulator the prologue/epilogue/kfuncs all modify.
 */
struct st_ops_args {
	u64 a;
};
| 
 | ||||
/* struct_ops type for exercising gen_prologue/gen_epilogue:
 * test_prologue gets only a prologue, test_epilogue only an epilogue,
 * test_pro_epilogue gets both (see st_ops_gen_* in bpf_testmod.c).
 */
struct bpf_testmod_st_ops {
	int (*test_prologue)(struct st_ops_args *args);
	int (*test_epilogue)(struct st_ops_args *args);
	int (*test_pro_epilogue)(struct st_ops_args *args);
	struct module *owner;
};
| 
 | ||||
| #endif /* _BPF_TESTMOD_H */ | ||||
|  |  | |||
|  | @ -148,4 +148,10 @@ struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head | |||
| struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym; | ||||
| void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym; | ||||
| 
 | ||||
/* Kfuncs exported by bpf_testmod to trigger the bpf_testmod_st_ops
 * callbacks (and the inc10 helper) from BPF programs.
 */
struct st_ops_args;
int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym;
| 
 | ||||
| #endif /* _BPF_TESTMOD_KFUNC_H */ | ||||
|  |  | |||
							
								
								
									
										10
									
								
								tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
									
									
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,10 @@ | |||
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include "pro_epilogue.skel.h"

/* Run every __success/__retval/__xlated-annotated program in the
 * pro_epilogue skeleton through the test_loader infrastructure.
 */
void test_pro_epilogue(void)
{
	RUN_TESTS(pro_epilogue);
}
							
								
								
									
										154
									
								
								tools/testing/selftests/bpf/progs/pro_epilogue.c
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										154
									
								
								tools/testing/selftests/bpf/progs/pro_epilogue.c
									
									
									
									
									
										Normal file
									
								
							|  | @ -0,0 +1,154 @@ | |||
| // SPDX-License-Identifier: GPL-2.0
 | ||||
| /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ | ||||
| 
 | ||||
| #include <vmlinux.h> | ||||
| #include <bpf/bpf_tracing.h> | ||||
| #include "bpf_misc.h" | ||||
| #include "../bpf_testmod/bpf_testmod.h" | ||||
| #include "../bpf_testmod/bpf_testmod_kfunc.h" | ||||
| 
 | ||||
| char _license[] SEC("license") = "GPL"; | ||||
| 
 | ||||
/* NOTE(review): never attached/run; presumably keeps a reference to
 * bpf_kfunc_st_ops_inc10 so its BTF stays reachable — confirm intent.
 */
void __kfunc_btf_root(void)
{
	bpf_kfunc_st_ops_inc10(NULL);
}
| 
 | ||||
/* Kept out-of-line (__noinline) so the __xlated checks see a "call pc+N";
 * adds 1 to args->a and returns it (u64 truncated to int).
 */
static __noinline __used int subprog(struct st_ops_args *args)
{
	args->a += 1;
	return args->a;
}
| 
 | ||||
/* Prologue-only case: the __xlated lines must match the patched program
 * exactly — insns 0-3 are injected by st_ops_gen_prologue (args->a += 1000),
 * insns 4-9 are the original program below.  No epilogue is generated for
 * the "test_prologue" attach point.
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: r1 = *(u64 *)(r1 +0)")
__xlated("5: r6 = r1")
__xlated("6: call kernel-function")
__xlated("7: r1 = r6")
__xlated("8: call pc+1")
__xlated("9: exit")
SEC("struct_ops/test_prologue")
__naked int test_prologue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
| 
 | ||||
/* Epilogue-only case: insn 0 is the verifier spilling ctx (r1) to the
 * stack so the epilogue can reload it; insns 6-13 are injected by
 * st_ops_gen_epilogue (args->a += 10000; r0 = 2 * args->a; exit), which
 * replaces the program's own exit.
 */
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r6 = r1")
__xlated("3: call kernel-function")
__xlated("4: r1 = r6")
__xlated("5: call pc+")
/* epilogue */
__xlated("6: r1 = *(u64 *)(r10 -8)")
__xlated("7: r1 = *(u64 *)(r1 +0)")
__xlated("8: r6 = *(u64 *)(r1 +0)")
__xlated("9: r6 += 10000")
__xlated("10: *(u64 *)(r1 +0) = r6")
__xlated("11: r0 = r6")
__xlated("12: r0 *= 2")
__xlated("13: exit")
SEC("struct_ops/test_epilogue")
__naked int test_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
| 
 | ||||
/* Combined case: prologue (insns 0-3), ctx spill (insn 4), main prog
 * (insns 5-9), and epilogue (insns 10-17) are all present for the
 * "test_pro_epilogue" attach point.
 */
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: r1 = *(u64 *)(r1 +0)")
__xlated("6: r6 = r1")
__xlated("7: call kernel-function")
__xlated("8: r1 = r6")
__xlated("9: call pc+")
/* epilogue */
__xlated("10: r1 = *(u64 *)(r10 -8)")
__xlated("11: r1 = *(u64 *)(r1 +0)")
__xlated("12: r6 = *(u64 *)(r1 +0)")
__xlated("13: r6 += 10000")
__xlated("14: *(u64 *)(r1 +0) = r6")
__xlated("15: r0 = r6")
__xlated("16: r0 *= 2")
__xlated("17: exit")
SEC("struct_ops/test_pro_epilogue")
__naked int test_pro_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}
| 
 | ||||
/* Trigger test_prologue via the kfunc bridge; no epilogue, so the return
 * is subprog's args->a: 1000 (prologue) + 10 (kfunc) + 1 (subprog).
 */
SEC("syscall")
__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
int syscall_prologue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_prologue(&args);
}
| 
 | ||||
/* Trigger test_epilogue via the kfunc bridge; the generated epilogue
 * returns 2 * args->a = 2 * (10 + 1 + 10000).
 */
SEC("syscall")
__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_epilogue(&args);
}
| 
 | ||||
/* Trigger test_pro_epilogue via the kfunc bridge; return is
 * 2 * args->a = 2 * (1000 + 10 + 1 + 10000).
 */
SEC("syscall")
__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_pro_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_pro_epilogue(&args);
}
| 
 | ||||
/* struct_ops map binding the three BPF programs above to the
 * bpf_testmod_st_ops callbacks; attached by RUN_TESTS via a link.
 */
SEC(".struct_ops.link")
struct bpf_testmod_st_ops pro_epilogue = {
	.test_prologue = (void *)test_prologue,
	.test_epilogue = (void *)test_epilogue,
	.test_pro_epilogue = (void *)test_pro_epilogue,
};
		Loading…
	
		Reference in a new issue
	
	 Martin KaFai Lau
						Martin KaFai Lau