	lockdep: Simplify stack trace handling
Replace the indirection through struct stack_trace by using the storage
array based interfaces and storing the information in a small lockdep
specific data structure.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094802.891724020@linutronix.de
parent 76b14436b4
commit c120bce780

2 changed files with 35 additions and 29 deletions
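The shape of the change is easier to see outside the diff context. Below is a condensed, illustrative sketch of the pattern the patch applies. It is not the exact kernel code (lockdep's storage-exhaustion handling and surrounding locking are simplified, and the array size is only an example), but it shows the same idea: all traces are stored back to back in one flat array of return addresses, and struct lock_trace records nothing but an offset and a count into that array, filled via stack_trace_save() and printed via stack_trace_print().

/*
 * Condensed sketch of the new scheme (simplified from the patch below;
 * the array size and the return convention are illustrative).
 */
#include <linux/stacktrace.h>

#define MAX_STACK_TRACE_ENTRIES	(128 * 1024)	/* example size only */

static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static unsigned int nr_stack_trace_entries;

struct lock_trace {
	unsigned int	nr_entries;	/* entries captured for this trace */
	unsigned int	offset;		/* start index into stack_trace[] */
};

static int save_trace(struct lock_trace *trace)
{
	unsigned long *entries = stack_trace + nr_stack_trace_entries;
	unsigned int max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;

	trace->offset = nr_stack_trace_entries;
	/* stack_trace_save() stores raw return addresses, skipping 3 frames */
	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - 1)
		return 0;	/* storage exhausted; the real code also turns lockdep off */

	return 1;
}

static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
{
	unsigned long *entries = stack_trace + trace->offset;

	stack_trace_print(entries, trace->nr_entries, spaces);
}

Compared with the old struct stack_trace (a pointer plus nr_entries, max_entries and skip), each stored trace now needs only two unsigned ints of bookkeeping, and the capture goes through the common storage-array interface stack_trace_save() instead of filling a struct stack_trace for save_stack_trace().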
				
			
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
 
 extern struct lock_class_key __lockdep_no_validate__;
 
+struct lock_trace {
+	unsigned int		nr_entries;
+	unsigned int		offset;
+};
+
 #define LOCKSTAT_POINTS		4
 
 /*
@@ -100,7 +105,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
+	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
-	struct stack_trace		trace;
+	struct lock_trace		trace;
 	int				distance;
 
 	/*
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -434,18 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
 #endif
 }
 
-static int save_trace(struct stack_trace *trace)
+static int save_trace(struct lock_trace *trace)
 {
-	trace->nr_entries = 0;
-	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-	trace->entries = stack_trace + nr_stack_trace_entries;
-
-	trace->skip = 3;
-
-	save_stack_trace(trace);
-
-	trace->max_entries = trace->nr_entries;
+	unsigned long *entries = stack_trace + nr_stack_trace_entries;
+	unsigned int max_entries;
 
+	trace->offset = nr_stack_trace_entries;
+	max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+	trace->nr_entries = stack_trace_save(entries, max_entries, 3);
 	nr_stack_trace_entries += trace->nr_entries;
 
 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@ -1196,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
 static int add_lock_to_list(struct lock_class *this,
 			    struct lock_class *links_to, struct list_head *head,
 			    unsigned long ip, int distance,
-			    struct stack_trace *trace)
+			    struct lock_trace *trace)
 {
 	struct lock_list *entry;
 	/*
@@ -1415,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * checking.
  */
 
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+	unsigned long *entries = stack_trace + trace->offset;
+
+	stack_trace_print(entries, trace->nr_entries, spaces);
+}
+
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
@@ -1427,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
-	print_stack_trace(&target->trace, 6);
-
+	print_lock_trace(&target->trace, 6);
 	return 0;
 }
 
@@ -1740,7 +1742,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s   %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1765,7 +1767,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1878,14 +1880,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2158,7 +2160,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace)
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2196,7 +2198,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
 			 * If save_trace fails here, the printing might
 			 * trigger a WARN but because of the !nr_entries it
@@ -2252,7 +2254,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save_trace(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2284,14 +2286,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2719,6 +2716,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2815,7 +2816,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
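One behaviour the conversion keeps, visible in the check_prev_add()/check_prevs_add() hunks above, is that the trace is captured lazily and at most once per check_prevs_add() pass: the "not captured yet" test changes from !trace->entries to !trace->nr_entries, and check_prevs_add() only needs to zero that field in its on-stack struct lock_trace. An illustrative sketch of that idiom follows; record_dependency() and add_dependencies() are hypothetical stand-ins for check_prev_add() and check_prevs_add(), reusing save_trace() and struct lock_trace from the sketch further up.

/*
 * Lazy, capture-at-most-once idiom (illustrative): the first dependency
 * that needs a trace pays for the capture, later dependencies in the
 * same pass reuse it, and a pass that records nothing captures nothing.
 */
static int record_dependency(struct lock_trace *trace)
{
	if (!trace->nr_entries && !save_trace(trace))
		return 0;		/* trace storage exhausted */

	/* ... create the dependency entry and attach *trace to it ... */
	return 1;
}

static void add_dependencies(void)
{
	struct lock_trace trace = { .nr_entries = 0 };
	int i;

	for (i = 0; i < 3; i++)		/* several edges share one capture */
		record_dependency(&trace);
}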