mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00
	lockdep: sanitise CONFIG_PROVE_LOCKING
Ensure that all of the lock dependency tracking code is under
CONFIG_PROVE_LOCKING. This allows us to use the held lock tracking code for
other purposes.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 21f8ca3bf6
commit ca58abcb4a
2 changed files with 14 additions and 3 deletions
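The shape of the change is easiest to see in isolation before reading the hunks: the dependency-graph storage and its allocator get bracketed by the proving option, while the plain held-lock bookkeeping stays unconditional so other users can build on it. Below is a minimal standalone C sketch of that gating pattern; CONFIG_PROVE_LOCKING_EXAMPLE and every other identifier in it are illustrative stand-ins, not symbols from the patch.

#include <stdio.h>

#define CONFIG_PROVE_LOCKING_EXAMPLE 1	/* stand-in for the Kconfig option; set to 0 to compile the graph code out */

/* Held-lock bookkeeping stays unconditional, so other users can rely on it. */
static unsigned long nr_held_example;

#if CONFIG_PROVE_LOCKING_EXAMPLE
/* Dependency-graph storage and its allocator exist only when proving locking. */
static int graph_entries_example[16];
static unsigned long nr_graph_entries_example;

static int *alloc_graph_entry_example(void)
{
	if (nr_graph_entries_example >= 16)
		return NULL;	/* mirrors the "NULL on exhaustion" convention */
	return &graph_entries_example[nr_graph_entries_example++];
}
#endif

int main(void)
{
	nr_held_example++;	/* the tracking side works on its own */
#if CONFIG_PROVE_LOCKING_EXAMPLE
	printf("graph entry at %p\n", (void *)alloc_graph_entry_example());
#endif
	printf("held locks tracked: %lu\n", nr_held_example);
	return 0;
}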
kernel/lockdep.c
@@ -95,6 +95,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entry(void)
 	}
 	return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	}
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
 	return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
 		init_utsname()->version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	}
 	return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
@@ -1087,7 +1096,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 
 	return 0;
 }
-
+#endif
 
 /*
  * Is this the address of a static object:
@@ -1307,6 +1316,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 
 	return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
kernel/spinlock.c
@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
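The spinlock hunks above widen the guard in the irqsave paths from the proving option to CONFIG_LOCKDEP itself, matching the comment's point that it is lockdep's acquire tracking which assumes interrupts stay disabled. A minimal standalone C sketch of that selection follows; CONFIG_LOCKDEP_EXAMPLE and the two stub acquire functions are illustrative stand-ins, not the kernel's _raw_spin_lock helpers.

#include <stdio.h>

#define CONFIG_LOCKDEP_EXAMPLE 1	/* stand-in for CONFIG_LOCKDEP; 0 selects the flags-aware path */

static void raw_lock_example(void)
{
	/* plain acquire: interrupts stay disabled, matching the checker's assumption */
	puts("acquire with interrupts kept disabled");
}

static void raw_lock_flags_example(unsigned long *flags)
{
	/* hand-coded variant that may briefly re-enable interrupts while spinning */
	(void)flags;
	puts("acquire that may re-enable interrupts from the saved flags");
}

static unsigned long lock_irqsave_example(void)
{
	unsigned long flags = 0;	/* stand-in for the saved interrupt state */
#if CONFIG_LOCKDEP_EXAMPLE
	raw_lock_example();		/* keep the checker's model of irq state valid */
#else
	raw_lock_flags_example(&flags);
#endif
	return flags;
}

int main(void)
{
	lock_irqsave_example();
	return 0;
}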