x86/stacktrace: Use common infrastructure
Replace the stack_trace_save*() functions with the new arch_stack_walk()
interfaces.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: linux-arch@vger.kernel.org
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Link: https://lkml.kernel.org/r/20190425094803.816485461@linutronix.de
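The new interface inverts the old flow: instead of each stack_trace_save*() caller handing the architecture a struct stack_trace buffer to fill, the architecture walks the stack and feeds every return address to a consume_entry callback together with an opaque cookie, stopping as soon as the callback returns false. Below is a minimal userspace model of that contract; the fake_stack_walk() walker, the trace_buffer cookie and the frame values are illustrative stand-ins, only the consume_entry shape mirrors the stack_trace_consume_fn used in this commit.

#include <stdbool.h>
#include <stdio.h>

typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
				       bool reliable);

/* Illustrative stand-in for the cookie the generic code would pass around. */
struct trace_buffer {
	unsigned long entries[8];
	unsigned int nr;
};

/* Consumer: store addresses until the buffer is full, then ask the walker to stop. */
static bool store_entry(void *cookie, unsigned long addr, bool reliable)
{
	struct trace_buffer *buf = cookie;

	(void)reliable;
	if (buf->nr >= 8)
		return false;		/* false tells the walker to terminate */
	buf->entries[buf->nr++] = addr;
	return true;
}

/* Fake walker: stands in for the unwinder loop inside arch_stack_walk(). */
static void fake_stack_walk(stack_trace_consume_fn consume_entry, void *cookie)
{
	static const unsigned long fake_frames[] = {
		0xffffffff81000010UL, 0xffffffff81000200UL, 0xffffffff81000420UL,
	};

	for (unsigned int i = 0; i < sizeof(fake_frames) / sizeof(fake_frames[0]); i++)
		if (!consume_entry(cookie, fake_frames[i], false))
			break;
}

int main(void)
{
	struct trace_buffer buf = { .nr = 0 };

	fake_stack_walk(store_entry, &buf);
	for (unsigned int i = 0; i < buf.nr; i++)
		printf("  [<%016lx>]\n", buf.entries[i]);
	return 0;
}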
commit 3599fe12a1
parent 214d8ca6ee

2 changed files with 20 additions and 97 deletions
arch/x86/Kconfig
@@ -74,6 +74,7 @@ config X86
 	select ARCH_MIGHT_HAVE_ACPI_PDC		if ACPI
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ACPI
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING	if X86_64
arch/x86/kernel/stacktrace.c
@@ -12,75 +12,31 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
-			      bool nosched)
-{
-	if (nosched && in_sched_functions(addr))
-		return 0;
-
-	if (trace->skip > 0) {
-		trace->skip--;
-		return 0;
-	}
-
-	if (trace->nr_entries >= trace->max_entries)
-		return -1;
-
-	trace->entries[trace->nr_entries++] = addr;
-	return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
-			       struct task_struct *task, struct pt_regs *regs,
-			       bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+		     struct task_struct *task, struct pt_regs *regs)
 {
 	struct unwind_state state;
 	unsigned long addr;
 
-	if (regs)
-		save_stack_address(trace, regs->ip, nosched);
+	if (regs && !consume_entry(cookie, regs->ip, false))
+		return;
 
 	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 	     unwind_next_frame(&state)) {
 		addr = unwind_get_return_address(&state);
-		if (!addr || save_stack_address(trace, addr, nosched))
+		if (!addr || !consume_entry(cookie, addr, false))
 			break;
 	}
 }
 
 /*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack.  Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-void save_stack_trace(struct stack_trace *trace)
-{
-	trace->skip++;
-	__save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-	__save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-	if (!try_get_task_stack(tsk))
-		return;
-
-	if (tsk == current)
-		trace->skip++;
-	__save_stack_trace(trace, tsk, NULL, true);
-
-	put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
-			    struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+			     void *cookie, struct task_struct *task)
 {
 	struct unwind_state state;
 	struct pt_regs *regs;
@@ -117,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
 		if (!addr)
 			return -EINVAL;
 
-		if (save_stack_address(trace, addr, false))
+		if (!consume_entry(cookie, addr, false))
 			return -EINVAL;
 	}
 
@@ -132,32 +88,6 @@ __save_stack_trace_reliable(struct stack_trace *trace,
 	return 0;
 }
 
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack.  Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
-				  struct stack_trace *trace)
-{
-	int ret;
-
-	/*
-	 * If the task doesn't have a stack (e.g., a zombie), the stack is
-	 * "reliably" empty.
-	 */
-	if (!try_get_task_stack(tsk))
-		return 0;
-
-	ret = __save_stack_trace_reliable(trace, tsk);
-
-	put_task_stack(tsk);
-
-	return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
 
 struct stack_frame_user {
@@ -182,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 	return ret;
 }
 
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
 {
-	const struct pt_regs *regs = task_pt_regs(current);
 	const void __user *fp = (const void __user *)regs->bp;
 
-	if (trace->nr_entries < trace->max_entries)
-		trace->entries[trace->nr_entries++] = regs->ip;
+	if (!consume_entry(cookie, regs->ip, false))
+		return;
 
-	while (trace->nr_entries < trace->max_entries) {
+	while (1) {
 		struct stack_frame_user frame;
 
 		frame.next_fp = NULL;
@@ -200,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
 		if ((unsigned long)fp < regs->sp)
 			break;
 		if (frame.ret_addr) {
-			trace->entries[trace->nr_entries++] =
-				frame.ret_addr;
+			if (!consume_entry(cookie, frame.ret_addr, false))
+				return;
 		}
 		if (fp == frame.next_fp)
 			break;
@@ -209,11 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
 	}
 }
 
-void save_stack_trace_user(struct stack_trace *trace)
-{
-	/*
-	 * Trace user stack if we are not a kernel thread
-	 */
-	if (current->mm)
-		__save_stack_trace_user(trace);
-}
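For completeness, a hedged caller-side sketch: the series this commit belongs to moves users from the struct stack_trace API onto plain-buffer helpers built on top of arch_stack_walk(). The stack_trace_save()/stack_trace_print() prototypes are assumptions from that series, not part of this diff, and record_backtrace() is a hypothetical example function.

#include <linux/stacktrace.h>

#define NR_ENTRIES 16

void record_backtrace(void)
{
	unsigned long entries[NR_ENTRIES];
	unsigned int nr;

	/*
	 * Old style, removed by this series:
	 *
	 *	struct stack_trace trace = {
	 *		.entries	= entries,
	 *		.max_entries	= NR_ENTRIES,
	 *		.skip		= 1,
	 *	};
	 *	save_stack_trace(&trace);
	 */

	/* New style: the generic core fills the buffer via arch_stack_walk(). */
	nr = stack_trace_save(entries, NR_ENTRIES, 1 /* skip this frame */);
	stack_trace_print(entries, nr, 0);
}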