mirror of
https://github.com/torvalds/linux.git
synced 2025-11-01 09:09:47 +02:00
bpf: Add function to find program from stack trace
In preparation of figuring out the closest program that led to the current point in the kernel, implement a function that scans through the stack trace and finds out the closest BPF program when walking down the stack trace. Special care needs to be taken to skip over kernel and BPF subprog frames. We basically scan until we find a BPF main prog frame. The assumption is that if a program calls into us transitively, we'll hit it along the way. If not, we end up returning NULL. Contextually the function will be used in places where we know the program may have called into us. Due to reliance on arch_bpf_stack_walk(), this function only works on x86 with CONFIG_UNWINDER_ORC, arm64, and s390. Remove the warning from arch_bpf_stack_walk as well since we call it outside bpf_throw() context. Acked-by: Eduard Zingerman <eddyz87@gmail.com> Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Link: https://lore.kernel.org/r/20250703204818.925464-6-memxor@gmail.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
d090326860
commit
f0c53fd4a7
3 changed files with 34 additions and 1 deletions
|
|
@ -3845,7 +3845,6 @@ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp
|
|||
}
|
||||
return;
|
||||
#endif
|
||||
WARN(1, "verification of programs using bpf_throw should have failed\n");
|
||||
}
|
||||
|
||||
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
|
||||
|
|
|
|||
|
|
@ -3663,5 +3663,6 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
|
|||
|
||||
int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
|
||||
const char **linep, int *nump);
|
||||
struct bpf_prog *bpf_prog_find_from_stack(void);
|
||||
|
||||
#endif /* _LINUX_BPF_H */
|
||||
|
|
|
|||
|
|
@ -3262,4 +3262,37 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Cookie passed to find_from_stack_cb(); receives the closest BPF main
 * program found while walking down the stack trace (NULL if none).
 */
struct walk_stack_ctx {
	struct bpf_prog *prog;
};
|
||||
|
||||
/*
 * arch_bpf_stack_walk() callback used by bpf_prog_find_from_stack().
 *
 * Returns true to keep walking (frame is a kernel function or a BPF
 * subprog) and false to stop once a BPF main program frame is found;
 * the prog is reported through the walk_stack_ctx cookie.
 */
static bool find_from_stack_cb(void *cookie, u64 ip, u64 sp, u64 bp)
{
	struct walk_stack_ctx *ctxp = cookie;
	struct bpf_prog *prog;

	/*
	 * The RCU read lock is held to safely traverse the latch tree, but we
	 * don't need its protection when accessing the prog, since it has an
	 * active stack frame on the current stack trace, and won't disappear.
	 */
	rcu_read_lock();
	prog = bpf_prog_ksym_find(ip);
	rcu_read_unlock();
	if (!prog)
		return true;
	/* Skip over BPF subprog frames; only a main prog frame ends the walk. */
	if (bpf_is_subprog(prog))
		return true;
	ctxp->prog = prog;
	return false;
}
|
||||
|
||||
struct bpf_prog *bpf_prog_find_from_stack(void)
|
||||
{
|
||||
struct walk_stack_ctx ctx = {};
|
||||
|
||||
arch_bpf_stack_walk(find_from_stack_cb, &ctx);
|
||||
return ctx.prog;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
Loading…
Reference in a new issue