mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
Changes: avoid walking the stack when there is no room left in the buffer; generalize get_perf_callchain() so it can be called from a bpf helper. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
		
			
				
	
	
		
			150 lines
		
	
	
	
		
			3.6 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			150 lines
		
	
	
	
		
			3.6 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 | 
						|
 * Stack trace management functions
 | 
						|
 *
 | 
						|
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 | 
						|
 */
 | 
						|
#include <linux/sched.h>
 | 
						|
#include <linux/stacktrace.h>
 | 
						|
#include <linux/module.h>
 | 
						|
#include <linux/uaccess.h>
 | 
						|
#include <asm/stacktrace.h>
 | 
						|
 | 
						|
/*
 * stacktrace_ops::stack callback: called by the walker when it crosses
 * onto a different stack (the walker passes the stack's name).  We only
 * collect return addresses, so ignore these markers and keep walking.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
 | 
						|
 | 
						|
static int
 | 
						|
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
 | 
						|
{
 | 
						|
	struct stack_trace *trace = data;
 | 
						|
#ifdef CONFIG_FRAME_POINTER
 | 
						|
	if (!reliable)
 | 
						|
		return 0;
 | 
						|
#endif
 | 
						|
	if (nosched && in_sched_functions(addr))
 | 
						|
		return 0;
 | 
						|
	if (trace->skip > 0) {
 | 
						|
		trace->skip--;
 | 
						|
		return 0;
 | 
						|
	}
 | 
						|
	if (trace->nr_entries < trace->max_entries) {
 | 
						|
		trace->entries[trace->nr_entries++] = addr;
 | 
						|
		return 0;
 | 
						|
	} else {
 | 
						|
		return -1; /* no more room, stop walking the stack */
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
/*
 * stacktrace_ops::address callback for the default walk: record every
 * address, including scheduler functions (nosched = false).
 */
static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}
 | 
						|
 | 
						|
/*
 * stacktrace_ops::address callback used when tracing another task:
 * skip scheduler functions (nosched = true) so the trace shows where
 * the task really is, not the context-switch path.
 */
static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}
 | 
						|
 | 
						|
/* Walker callbacks for save_stack_trace() and save_stack_trace_regs(). */
static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};
 | 
						|
 | 
						|
/* Walker callbacks for save_stack_trace_tsk(): filter scheduler functions. */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};
 | 
						|
 | 
						|
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
 | 
						|
 | 
						|
/*
 * Like save_stack_trace(), but start the walk from the register state
 * in @regs instead of the current frame.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
 | 
						|
 | 
						|
/*
 * Save a stack trace of task @tsk, using the nosched callbacks so that
 * scheduler/context-switch frames are filtered out of the result.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
 | 
						|
 | 
						|
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * In-memory layout of a user-mode stack frame as read at the frame
 * pointer: the caller's frame pointer followed by the return address.
 */
struct stack_frame_user {
	const void __user	*next_fp;	/* caller's frame pointer */
	unsigned long		ret_addr;	/* return address into caller */
};
 | 
						|
 | 
						|
static int
 | 
						|
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 | 
						|
{
 | 
						|
	int ret;
 | 
						|
 | 
						|
	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
 | 
						|
		return 0;
 | 
						|
 | 
						|
	ret = 1;
 | 
						|
	pagefault_disable();
 | 
						|
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
 | 
						|
		ret = 0;
 | 
						|
	pagefault_enable();
 | 
						|
 | 
						|
	return ret;
 | 
						|
}
 | 
						|
 | 
						|
static inline void __save_stack_trace_user(struct stack_trace *trace)
 | 
						|
{
 | 
						|
	const struct pt_regs *regs = task_pt_regs(current);
 | 
						|
	const void __user *fp = (const void __user *)regs->bp;
 | 
						|
 | 
						|
	if (trace->nr_entries < trace->max_entries)
 | 
						|
		trace->entries[trace->nr_entries++] = regs->ip;
 | 
						|
 | 
						|
	while (trace->nr_entries < trace->max_entries) {
 | 
						|
		struct stack_frame_user frame;
 | 
						|
 | 
						|
		frame.next_fp = NULL;
 | 
						|
		frame.ret_addr = 0;
 | 
						|
		if (!copy_stack_frame(fp, &frame))
 | 
						|
			break;
 | 
						|
		if ((unsigned long)fp < regs->sp)
 | 
						|
			break;
 | 
						|
		if (frame.ret_addr) {
 | 
						|
			trace->entries[trace->nr_entries++] =
 | 
						|
				frame.ret_addr;
 | 
						|
		}
 | 
						|
		if (fp == frame.next_fp)
 | 
						|
			break;
 | 
						|
		fp = frame.next_fp;
 | 
						|
	}
 | 
						|
}
 | 
						|
 | 
						|
void save_stack_trace_user(struct stack_trace *trace)
 | 
						|
{
 | 
						|
	/*
 | 
						|
	 * Trace user stack if we are not a kernel thread
 | 
						|
	 */
 | 
						|
	if (current->mm) {
 | 
						|
		__save_stack_trace_user(trace);
 | 
						|
	}
 | 
						|
	if (trace->nr_entries < trace->max_entries)
 | 
						|
		trace->entries[trace->nr_entries++] = ULONG_MAX;
 | 
						|
}
 | 
						|
 |