	[PATCH] lockdep: better lock debugging
Generic lock debugging:
 - generalized lock debugging framework: for example, a bug detected in one
   lock subsystem now turns off lock debugging in all lock subsystems, so
   the first report is not drowned out by cascading warnings from the rest.
 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.
 - ability to do silent tests
 - check lock freeing in vfree too.
 - more fine-grained debugging options, to allow distributions to
   turn off more expensive debugging features.
There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes.  (this is independent of the lockdep validation stuff - lockdep first
checks whether we are holding a lock already)
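
As an editorial illustration (not part of the original commit text): the
"one bug turns off all lock debugging" behaviour is what DEBUG_LOCKS_WARN_ON()
in the new include/linux/debug_locks.h (added in the diff below) implements.
A minimal consumer, with a hypothetical lock type, might look like:

	#include <linux/debug_locks.h>

	struct my_lock {
		void *magic;	/* hypothetical debug field */
	};

	static void my_lock_sanity_check(struct my_lock *lock)
	{
		/*
		 * Warns only while the global debug_locks flag is still
		 * set; the first failed check calls debug_locks_off()
		 * internally, so every lock subsystem goes quiet after
		 * the first report instead of cascading:
		 */
		if (DEBUG_LOCKS_WARN_ON(lock->magic != lock))
			return;
	}
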
Here are the current debugging options:
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y
which do:
 config DEBUG_MUTEXES
          bool "Mutex debugging, basic checks"
 config DEBUG_LOCK_ALLOC
         bool "Detect incorrect freeing of live mutexes"
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
			
			
commit 9a11b49a80 (parent fb7e42413a)

25 changed files with 264 additions and 566 deletions

@@ -151,7 +151,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
 static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs,
 				struct tty_struct *tty)
 {
-	mutex_debug_show_all_locks();
+	debug_show_all_locks();
 }
 static struct sysrq_key_op sysrq_showlocks_op = {
 	.handler	= sysrq_handle_showlocks,

@@ -10,14 +10,9 @@
 #ifndef _ASM_GENERIC_MUTEX_NULL_H
 #define _ASM_GENERIC_MUTEX_NULL_H
 
-/* extra parameter only needed for mutex debugging: */
-#ifndef __IP__
-# define __IP__
-#endif
-
-#define __mutex_fastpath_lock(count, fail_fn)	      fail_fn(count __RET_IP__)
-#define __mutex_fastpath_lock_retval(count, fail_fn)  fail_fn(count __RET_IP__)
-#define __mutex_fastpath_unlock(count, fail_fn)       fail_fn(count __RET_IP__)
+#define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
+#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
 

							
								
								
									
include/linux/debug_locks.h (new file, 69 lines)

@@ -0,0 +1,69 @@
+#ifndef __LINUX_DEBUG_LOCKING_H
+#define __LINUX_DEBUG_LOCKING_H
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+extern int debug_locks_off(void);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we dont want the function argument overhead
+ * in the nondebug case - hence these macros:
+ */
+#define _RET_IP_		(unsigned long)__builtin_return_address(0)
+#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#define DEBUG_LOCKS_WARN_ON(c)						\
+({									\
+	int __ret = 0;							\
+									\
+	if (unlikely(c)) {						\
+		if (debug_locks_off())					\
+			WARN_ON(1);					\
+		__ret = 1;						\
+	}								\
+	__ret;								\
+})
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_LOCKS_WARN_ON(c)			DEBUG_LOCKS_WARN_ON(c)
+#else
+# define SMP_DEBUG_LOCKS_WARN_ON(c)			do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+  extern void locking_selftest(void);
+#else
+# define locking_selftest()	do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline void debug_show_all_locks(void)
+{
+}
+
+static inline void debug_show_held_locks(struct task_struct *task)
+{
+}
+
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+}
+
+static inline void
+debug_check_no_locks_held(struct task_struct *task)
+{
+}
+#endif
+
+#endif
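
A sketch of how the _RET_IP_ helper above replaces the removed
__IP__/__IP_DECL__ prototype hackery (editorial example; hypothetical
type and function names):

	#include <linux/debug_locks.h>

	struct my_lock {
		unsigned long acquire_ip;	/* hypothetical debug field */
	};

	static void my_lock_slowpath(struct my_lock *lock, unsigned long ip)
	{
		/* record the caller's return address for diagnostics: */
		lock->acquire_ip = ip;
	}

	void my_lock_acquire(struct my_lock *lock)
	{
		/*
		 * _RET_IP_ evaluates to the address my_lock_acquire()
		 * will return to, so no extra __IP_DECL__ parameter has
		 * to be threaded through every debug prototype:
		 */
		my_lock_slowpath(lock, _RET_IP_);
	}
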
@@ -124,7 +124,6 @@ extern struct group_info init_groups;
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.fs_excl	= ATOMIC_INIT(0),				\
 	.pi_lock	= SPIN_LOCK_UNLOCKED,				\
-	INIT_RT_MUTEXES(tsk)						\
 }
 
 

@@ -14,6 +14,7 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */
 
-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	mutex_debug_check_no_locks_freed(from, len);
-	rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)

@@ -8,16 +8,10 @@
  */
 
 #define __DEBUG_MUTEX_INITIALIZER(lockname)				\
-	, .held_list = LIST_HEAD_INIT(lockname.held_list), \
-	  .name = #lockname , .magic = &lockname
+	, .magic = &lockname
 
-#define mutex_init(sem)		__mutex_init(sem, __FUNCTION__)
+#define mutex_init(sem)		__mutex_init(sem, __FILE__":"#sem)
 
 extern void FASTCALL(mutex_destroy(struct mutex *lock));
 
-extern void mutex_debug_show_all_locks(void);
-extern void mutex_debug_show_held_locks(struct task_struct *filter);
-extern void mutex_debug_check_no_locks_held(struct task_struct *task);
-extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
-
 #endif

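The mutex_init() change above derives the debug name at compile time
instead of using __FUNCTION__. An editorial illustration (the file name
is assumed):

	static struct mutex log_lock;

	static void example_init(void)
	{
		mutex_init(&log_lock);
		/*
		 * expands to:
		 *
		 *	__mutex_init(&log_lock, "kernel/example.c" ":" "&log_lock");
		 *
		 * and adjacent string literals concatenate, so the lock's
		 * debug name becomes "kernel/example.c:&log_lock" -- built
		 * entirely by the preprocessor, at zero runtime cost.
		 */
	}
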
@@ -50,8 +50,6 @@ struct mutex {
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_MUTEXES
 	struct thread_info	*owner;
-	struct list_head	held_list;
-	unsigned long		acquire_ip;
 	const char 		*name;
 	void			*magic;
 #endif
@@ -76,10 +74,6 @@ struct mutex_waiter {
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
 # define mutex_init(mutex)			__mutex_init(mutex, NULL)
 # define mutex_destroy(mutex)				do { } while (0)
-# define mutex_debug_show_all_locks()			do { } while (0)
-# define mutex_debug_show_held_locks(p)			do { } while (0)
-# define mutex_debug_check_no_locks_held(task)		do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, len)	do { } while (0)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \

@@ -29,8 +29,6 @@ struct rt_mutex {
 	struct task_struct	*owner;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
-	struct list_head	held_list_entry;
-	unsigned long		acquire_ip;
 	const char 		*name, *file;
 	int			line;
 	void			*magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
 
 extern void rt_mutex_unlock(struct rt_mutex *lock);
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define INIT_RT_MUTEX_DEBUG(tsk)					\
-	.held_list_head	= LIST_HEAD_INIT(tsk.held_list_head),		\
-	.held_list_lock	= SPIN_LOCK_UNLOCKED
-#else
-# define INIT_RT_MUTEX_DEBUG(tsk)
-#endif
-
 #ifdef CONFIG_RT_MUTEXES
 # define INIT_RT_MUTEXES(tsk)						\
 	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock),	\

@@ -865,10 +865,6 @@ struct task_struct {
 	struct plist_head pi_waiters;
 	/* Deadlock detection and priority inheritance handling */
 	struct rt_mutex_waiter *pi_blocked_on;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
-	spinlock_t held_list_lock;
-	struct list_head held_list_head;
-# endif
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES

@@ -47,6 +47,7 @@
 #include <linux/key.h>
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
+#include <linux/debug_locks.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -511,6 +512,13 @@ asmlinkage void __init start_kernel(void)
 	console_init();
 	if (panic_later)
 		panic(panic_later, panic_param);
+	/*
+	 * Need to run this when irqs are enabled, because it wants
+	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
+	 * too:
+	 */
+	locking_selftest();
+
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 			initrd_start < min_low_pfn << PAGE_SHIFT) {

@@ -933,10 +933,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
 	/*
-	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+	 * Make sure we are holding no locks:
 	 */
-	mutex_debug_check_no_locks_held(tsk);
-	rt_mutex_debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held(tsk);
 
 	if (tsk->io_context)
 		exit_io_context();

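The exit path now covers every lock class with one call. A sketch of the
bug class it reports (editorial example; hypothetical thread function):

	static DEFINE_MUTEX(m);

	static int buggy_thread(void *unused)
	{
		mutex_lock(&m);
		/*
		 * Returning ends the thread via do_exit(), where the
		 * debug_check_no_locks_held(tsk) call above will then
		 * complain that 'm' is still held at exit time:
		 */
		return 0;
	}
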
@@ -919,10 +919,6 @@ static inline void rt_mutex_init_task(struct task_struct *p)
 	spin_lock_init(&p->pi_lock);
 	plist_head_init(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
-	spin_lock_init(&p->held_list_lock);
-	INIT_LIST_HEAD(&p->held_list_head);
-# endif
 #endif
 }
 

@@ -20,52 +20,19 @@
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 #include "mutex-debug.h"
 
-/*
- * We need a global lock when we walk through the multi-process
- * lock tree. Only used in the deadlock-debugging case.
- */
-DEFINE_SPINLOCK(debug_mutex_lock);
-
-/*
- * All locks held by all tasks, in a single global list:
- */
-LIST_HEAD(debug_mutex_held_locks);
-
-/*
- * In the debug case we carry the caller's instruction pointer into
- * other functions, but we dont want the function argument overhead
- * in the nondebug case - hence these macros:
- */
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
-/*
- * "mutex debugging enabled" flag. We turn it off when we detect
- * the first problem because we dont want to recurse back
- * into the tracing code when doing error printk or
- * executing a BUG():
- */
-int debug_mutex_on = 1;
-
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock,
-			   struct thread_info *new_owner __IP_DECL__)
+void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
 {
 	lock->owner = new_owner;
-	DEBUG_LOCKS_WARN_ON(!list_empty(&lock->held_list));
-	if (debug_mutex_on) {
-		list_add_tail(&lock->held_list, &debug_mutex_held_locks);
-		lock->acquire_ip = ip;
-	}
 }
 
-void debug_mutex_init_waiter(struct mutex_waiter *waiter)
+void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
 	waiter->magic = waiter;
@@ -87,9 +54,10 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			    struct thread_info *ti __IP_DECL__)
+			    struct thread_info *ti)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
 	waiter->lock = lock;
@@ -109,13 +77,10 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-	if (debug_mutex_on) {
-		DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-		list_del_init(&lock->held_list);
-	}
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name)
@@ -123,10 +88,8 @@ void debug_mutex_init(struct mutex *lock, const char *name)
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
-	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->owner = NULL;
-	INIT_LIST_HEAD(&lock->held_list);
-	lock->name = name;
 	lock->magic = lock;
 }
 

@@ -10,102 +10,44 @@
  * More details are in kernel/mutex-debug.c.
  */
 
-extern spinlock_t debug_mutex_lock;
-extern struct list_head debug_mutex_held_locks;
-extern int debug_mutex_on;
-
-/*
- * In the debug case we carry the caller's instruction pointer into
- * other functions, but we dont want the function argument overhead
- * in the nondebug case - hence these macros:
- */
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void debug_mutex_set_owner(struct mutex *lock,
-				  struct thread_info *new_owner __IP_DECL__);
+extern void
+debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
 
 static inline void debug_mutex_clear_owner(struct mutex *lock)
 {
 	lock->owner = NULL;
 }
 
-extern void debug_mutex_init_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_lock_common(struct mutex *lock,
+				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
-				   struct thread_info *ti __IP_DECL__);
+				   struct thread_info *ti);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 				struct thread_info *ti);
 extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name);
-
-#define debug_spin_lock_save(lock, flags)		\
-	do {						\
-		local_irq_save(flags);			\
-		if (debug_mutex_on)			\
-			spin_lock(lock);		\
-	} while (0)
-
-#define debug_spin_unlock_restore(lock, flags)		\
-	do {						\
-		if (debug_mutex_on)			\
-			spin_unlock(lock);		\
-		local_irq_restore(flags);		\
-		preempt_check_resched();		\
-	} while (0)
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+			     struct lock_class_key *key);
 
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
 							\
 		DEBUG_LOCKS_WARN_ON(in_interrupt());	\
-		debug_spin_lock_save(&debug_mutex_lock, flags); \
-		spin_lock(lock);			\
+		local_irq_save(flags);			\
+		__raw_spin_lock(&(lock)->raw_lock);	\
 		DEBUG_LOCKS_WARN_ON(l->magic != l);	\
 	} while (0)
 
 #define spin_unlock_mutex(lock, flags)			\
 	do {						\
-		spin_unlock(lock);			\
-		debug_spin_unlock_restore(&debug_mutex_lock, flags);	\
+		__raw_spin_unlock(&(lock)->raw_lock);	\
+		local_irq_restore(flags);		\
+		preempt_check_resched();		\
 	} while (0)
-
-#define DEBUG_OFF()					\
-do {							\
-	if (debug_mutex_on) {				\
-		debug_mutex_on = 0;			\
-		console_verbose();			\
-		if (spin_is_locked(&debug_mutex_lock))	\
-			spin_unlock(&debug_mutex_lock);	\
-	}						\
-} while (0)
-
-#define DEBUG_BUG()					\
-do {							\
-	if (debug_mutex_on) {				\
-		DEBUG_OFF();				\
-		BUG();					\
-	}						\
-} while (0)
-
-#define DEBUG_LOCKS_WARN_ON(c)				\
-do {							\
-	if (unlikely(c && debug_mutex_on)) {		\
-		DEBUG_OFF();				\
-		WARN_ON(1);				\
-	}						\
-} while (0)
-
-#ifdef CONFIG_SMP
-# define SMP_DEBUG_LOCKS_WARN_ON(c)			DEBUG_LOCKS_WARN_ON(c)
-#else
-# define SMP_DEBUG_LOCKS_WARN_ON(c)			do { } while (0)
-#endif
-

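Editorial note on the spin_lock_mutex() change above (rationale inferred,
not stated in the patch): the mutex's internal wait_lock is now taken with
the raw spinlock operations, so lock debugging never recurses into an
instrumented spinlock implementation while it is reporting on a mutex.
The pattern in isolation:

	#include <linux/spinlock.h>

	/* 'lock' would be a mutex's wait_lock: */
	static void wait_lock_pattern(spinlock_t *lock)
	{
		unsigned long flags;

		local_irq_save(flags);
		__raw_spin_lock(&lock->raw_lock);
		/* ... inspect or modify mutex debug state ... */
		__raw_spin_unlock(&lock->raw_lock);
		local_irq_restore(flags);
	}
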
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,7 +39,7 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
@@ -56,7 +57,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +80,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -92,7 +93,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -120,18 +121,17 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
 	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
-
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -173,7 +173,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	debug_mutex_set_owner(lock, task->thread_info);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +183,28 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static fastcall inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-
 	spin_lock_mutex(&lock->wait_lock, flags);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +214,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -236,12 +230,21 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
+/*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count);
+}
+
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
 */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
@@ -264,11 +267,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
 }
 
 /*
@@ -285,7 +288,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+		debug_mutex_set_owner(lock, current_thread_info());
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);

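Editorial note on the new 'subclass' parameter of __mutex_lock_common()
(an assumption about intent, not stated in this diff): every caller here
passes 0; the parameter is groundwork for annotating nested acquisition
of locks within one class in follow-up lockdep patches.

	/*
	 * In this patch, the slowpaths simply do:
	 *
	 *	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
	 *	__mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
	 *
	 * A hypothetical nested-locking annotation (API modeled on the
	 * later lockdep series, not defined in this patch) would pass a
	 * nonzero subclass instead:
	 *
	 *	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	 */
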
@@ -16,22 +16,15 @@
 #define mutex_remove_waiter(lock, waiter, ti) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)
 
-#define DEBUG_LOCKS_WARN_ON(c)				do { } while (0)
 #define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
 #define debug_mutex_clear_owner(lock)			do { } while (0)
-#define debug_mutex_init_waiter(waiter)			do { } while (0)
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
-#define debug_mutex_add_waiter(lock, waiter, ti, ip)	do { } while (0)
+#define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
 #define debug_mutex_unlock(lock)			do { } while (0)
 #define debug_mutex_init(lock, name)			do { } while (0)
 
-/*
- * Return-address parameters/declarations. They are very useful for
- * debugging, but add overhead in the !DEBUG case - so we go the
- * trouble of using this not too elegant but zero-cost solution:
- */
-#define __IP_DECL__
-#define __IP__
-#define __RET_IP__
-
+static inline void
+debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
+{
+}

@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/plist.h>
 #include <linux/fs.h>
+#include <linux/debug_locks.h>
 
 #include "rtmutex_common.h"
 
@@ -45,8 +46,6 @@ do {								\
 		console_verbose();				\
 		if (spin_is_locked(&current->pi_lock))		\
 			spin_unlock(&current->pi_lock);		\
-		if (spin_is_locked(&current->held_list_lock))	\
-			spin_unlock(&current->held_list_lock);	\
 	}							\
 } while (0)
 
@@ -105,14 +104,6 @@ static void printk_task(task_t *p)
 		printk("<none>");
 }
 
-static void printk_task_short(task_t *p)
-{
-	if (p)
-		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
-	else
-		printk("<none>");
-}
-
 static void printk_lock(struct rt_mutex *lock, int print_owner)
 {
 	if (lock->name)
@@ -128,222 +119,6 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 		printk_task(rt_mutex_owner(lock));
 		printk("\n");
 	}
-	if (rt_mutex_owner(lock)) {
-		printk("... acquired at:               ");
-		print_symbol("%s\n", lock->acquire_ip);
-	}
-}
-
-static void printk_waiter(struct rt_mutex_waiter *w)
-{
-	printk("-------------------------\n");
-	printk("| waiter struct %p:\n", w);
-	printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
-	       w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
-	       w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
-	       w->list_entry.prio);
-	printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
-	       w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
-	       w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
-	       w->pi_list_entry.prio);
-	printk("\n| lock:\n");
-	printk_lock(w->lock, 1);
-	printk("| w->ti->task:\n");
-	printk_task(w->task);
-	printk("| blocked at:  ");
-	print_symbol("%s\n", w->ip);
-	printk("-------------------------\n");
-}
-
-static void show_task_locks(task_t *p)
-{
-	switch (p->state) {
-	case TASK_RUNNING:		printk("R"); break;
-	case TASK_INTERRUPTIBLE:	printk("S"); break;
-	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
-	case TASK_STOPPED:		printk("T"); break;
-	case EXIT_ZOMBIE:		printk("Z"); break;
-	case EXIT_DEAD:			printk("X"); break;
-	default:			printk("?"); break;
-	}
-	printk_task(p);
-	if (p->pi_blocked_on) {
-		struct rt_mutex *lock = p->pi_blocked_on->lock;
-
-		printk(" blocked on:");
-		printk_lock(lock, 1);
-	} else
-		printk(" (not blocked)\n");
-}
-
-void rt_mutex_show_held_locks(task_t *task, int verbose)
-{
-	struct list_head *curr, *cursor = NULL;
-	struct rt_mutex *lock;
-	task_t *t;
-	unsigned long flags;
-	int count = 0;
-
-	if (!rt_trace_on)
-		return;
-
-	if (verbose) {
-		printk("------------------------------\n");
-		printk("| showing all locks held by: |  (");
-		printk_task_short(task);
-		printk("):\n");
-		printk("------------------------------\n");
-	}
-
-next:
-	spin_lock_irqsave(&task->held_list_lock, flags);
-	list_for_each(curr, &task->held_list_head) {
-		if (cursor && curr != cursor)
-			continue;
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-		t = rt_mutex_owner(lock);
-		WARN_ON(t != task);
-		count++;
-		cursor = curr->next;
-		spin_unlock_irqrestore(&task->held_list_lock, flags);
-
-		printk("\n#%03d:            ", count);
-		printk_lock(lock, 0);
-		goto next;
-	}
-	spin_unlock_irqrestore(&task->held_list_lock, flags);
-
-	printk("\n");
-}
-
-void rt_mutex_show_all_locks(void)
-{
-	task_t *g, *p;
-	int count = 10;
-	int unlock = 1;
-
-	printk("\n");
-	printk("----------------------\n");
-	printk("| showing all tasks: |\n");
-	printk("----------------------\n");
-
-	/*
-	 * Here we try to get the tasklist_lock as hard as possible,
-	 * if not successful after 2 seconds we ignore it (but keep
-	 * trying). This is to enable a debug printout even if a
-	 * tasklist_lock-holding task deadlocks or crashes.
-	 */
-retry:
-	if (!read_trylock(&tasklist_lock)) {
-		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
-		if (count) {
-			count--;
-			printk(" #%d", 10-count);
-			mdelay(200);
-			goto retry;
-		}
-		printk(" ignoring it.\n");
-		unlock = 0;
-	}
-	if (count != 10)
-		printk(" locked it.\n");
-
-	do_each_thread(g, p) {
-		show_task_locks(p);
-		if (!unlock)
-			if (read_trylock(&tasklist_lock))
-				unlock = 1;
-	} while_each_thread(g, p);
-
-	printk("\n");
-
-	printk("-----------------------------------------\n");
-	printk("| showing all locks held in the system: |\n");
-	printk("-----------------------------------------\n");
-
-	do_each_thread(g, p) {
-		rt_mutex_show_held_locks(p, 0);
-		if (!unlock)
-			if (read_trylock(&tasklist_lock))
-				unlock = 1;
-	} while_each_thread(g, p);
-
-
-	printk("=============================================\n\n");
-
-	if (unlock)
-		read_unlock(&tasklist_lock);
-}
-
-void rt_mutex_debug_check_no_locks_held(task_t *task)
-{
-	struct rt_mutex_waiter *w;
-	struct list_head *curr;
-	struct rt_mutex *lock;
-
-	if (!rt_trace_on)
-		return;
-	if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
-		printk("BUG: PI priority boost leaked!\n");
-		printk_task(task);
-		printk("\n");
-	}
-	if (list_empty(&task->held_list_head))
-		return;
-
-	spin_lock(&task->pi_lock);
-	plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
-		TRACE_OFF();
-
-		printk("hm, PI interest held at exit time? Task:\n");
-		printk_task(task);
-		printk_waiter(w);
-		return;
-	}
-	spin_unlock(&task->pi_lock);
-
-	list_for_each(curr, &task->held_list_head) {
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-
-		printk("BUG: %s/%d, lock held at task exit time!\n",
-		       task->comm, task->pid);
-		printk_lock(lock, 1);
-		if (rt_mutex_owner(lock) != task)
-			printk("exiting task is not even the owner??\n");
-	}
-}
-
-int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	const void *to = from + len;
-	struct list_head *curr;
-	struct rt_mutex *lock;
-	unsigned long flags;
-	void *lock_addr;
-
-	if (!rt_trace_on)
-		return 0;
-
-	spin_lock_irqsave(&current->held_list_lock, flags);
-	list_for_each(curr, &current->held_list_head) {
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-		lock_addr = lock;
-		if (lock_addr < from || lock_addr >= to)
-			continue;
-		TRACE_OFF();
-
-		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
-			current->comm, current->pid, lock, from, to);
-		dump_stack();
-		printk_lock(lock, 1);
-		if (rt_mutex_owner(lock) != current)
-			printk("freeing task is not even the owner??\n");
-		return 1;
-	}
-	spin_unlock_irqrestore(&current->held_list_lock, flags);
-
-	return 0;
 }
 
 void rt_mutex_debug_task_free(struct task_struct *task)
@@ -395,85 +170,41 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 	       current->comm, current->pid);
 	printk_lock(waiter->lock, 1);
 
-	printk("... trying at:                 ");
-	print_symbol("%s\n", waiter->ip);
-
 	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
 	printk_lock(waiter->deadlock_lock, 1);
 
-	rt_mutex_show_held_locks(current, 1);
-	rt_mutex_show_held_locks(task, 1);
+	debug_show_held_locks(current);
+	debug_show_held_locks(task);
 
 	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
 	show_stack(task, NULL);
 	printk("\n%s/%d's [current] stackdump:\n\n",
 	       current->comm, current->pid);
 	dump_stack();
-	rt_mutex_show_all_locks();
+	debug_show_all_locks();
+
 	printk("[ turning off deadlock detection."
 	       "Please report this trace. ]\n\n");
 	local_irq_disable();
 }
 
-void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
+void debug_rt_mutex_lock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&current->held_list_lock, flags);
-		list_add_tail(&lock->held_list_entry, &current->held_list_head);
-		spin_unlock_irqrestore(&current->held_list_lock, flags);
-
-		lock->acquire_ip = ip;
-	}
 }
 
 void debug_rt_mutex_unlock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
-		TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&current->held_list_lock, flags);
-		list_del_init(&lock->held_list_entry);
-		spin_unlock_irqrestore(&current->held_list_lock, flags);
-	}
+	TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
 }
 
-void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-			       struct task_struct *powner __IP_DECL__)
+void
+debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&powner->held_list_lock, flags);
-		list_add_tail(&lock->held_list_entry, &powner->held_list_head);
-		spin_unlock_irqrestore(&powner->held_list_lock, flags);
-
-		lock->acquire_ip = ip;
-	}
 }
 
 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		struct task_struct *owner = rt_mutex_owner(lock);
-
-		TRACE_WARN_ON_LOCKED(!owner);
-		TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&owner->held_list_lock, flags);
-		list_del_init(&lock->held_list_entry);
-		spin_unlock_irqrestore(&owner->held_list_lock, flags);
-	}
+	TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
 }
 
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -493,15 +224,12 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 
 void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
-	void *addr = lock;
-
-	if (rt_trace_on) {
-		rt_mutex_debug_check_no_locks_freed(addr,
-						    sizeof(struct rt_mutex));
-		INIT_LIST_HEAD(&lock->held_list_entry);
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->name = name;
 }
-}
 
 void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
 {

@@ -9,20 +9,16 @@
  * This file contains macros used solely by rtmutex.c. Debug version.
  */
 
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
 extern void
 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
 extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
-extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
+extern void debug_rt_mutex_lock(struct rt_mutex *lock);
 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-				      struct task_struct *powner __IP_DECL__);
+				      struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
 				    struct rt_mutex *lock);

|  | @ -161,8 +161,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task, | ||||||
| 				      int deadlock_detect, | 				      int deadlock_detect, | ||||||
| 				      struct rt_mutex *orig_lock, | 				      struct rt_mutex *orig_lock, | ||||||
| 				      struct rt_mutex_waiter *orig_waiter, | 				      struct rt_mutex_waiter *orig_waiter, | ||||||
| 				      struct task_struct *top_task | 				      struct task_struct *top_task) | ||||||
| 				      __IP_DECL__) |  | ||||||
| { | { | ||||||
| 	struct rt_mutex *lock; | 	struct rt_mutex *lock; | ||||||
| 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; | 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; | ||||||
|  | @ -357,7 +356,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock) | ||||||
|  * |  * | ||||||
|  * Must be called with lock->wait_lock held. |  * Must be called with lock->wait_lock held. | ||||||
|  */ |  */ | ||||||
| static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) | static int try_to_take_rt_mutex(struct rt_mutex *lock) | ||||||
| { | { | ||||||
| 	/*
 | 	/*
 | ||||||
| 	 * We have to be careful here if the atomic speedups are | 	 * We have to be careful here if the atomic speedups are | ||||||
|  | @ -384,7 +383,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	/* We got the lock. */ | 	/* We got the lock. */ | ||||||
| 	debug_rt_mutex_lock(lock __IP__); | 	debug_rt_mutex_lock(lock); | ||||||
| 
 | 
 | ||||||
| 	rt_mutex_set_owner(lock, current, 0); | 	rt_mutex_set_owner(lock, current, 0); | ||||||
|  |  | ||||||
|  | @ -402,8 +401,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__) | ||||||
|  */ |  */ | ||||||
| static int task_blocks_on_rt_mutex(struct rt_mutex *lock, | static int task_blocks_on_rt_mutex(struct rt_mutex *lock, | ||||||
| 				   struct rt_mutex_waiter *waiter, | 				   struct rt_mutex_waiter *waiter, | ||||||
| 				   int detect_deadlock | 				   int detect_deadlock) | ||||||
| 				   __IP_DECL__) |  | ||||||
| { | { | ||||||
| 	struct rt_mutex_waiter *top_waiter = waiter; | 	struct rt_mutex_waiter *top_waiter = waiter; | ||||||
| 	task_t *owner = rt_mutex_owner(lock); | 	task_t *owner = rt_mutex_owner(lock); | ||||||
|  | @ -454,7 +452,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, | ||||||
| 	spin_unlock(&lock->wait_lock); | 	spin_unlock(&lock->wait_lock); | ||||||
|  |  | ||||||
| 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, | 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, | ||||||
| 					 current __IP__); | 					 current); | ||||||
|  |  | ||||||
| 	spin_lock(&lock->wait_lock); | 	spin_lock(&lock->wait_lock); | ||||||
|  |  | ||||||
|  | @ -526,7 +524,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock) | ||||||
|  * Must be called with lock->wait_lock held |  * Must be called with lock->wait_lock held | ||||||
|  */ |  */ | ||||||
| static void remove_waiter(struct rt_mutex *lock, | static void remove_waiter(struct rt_mutex *lock, | ||||||
| 			  struct rt_mutex_waiter *waiter  __IP_DECL__) | 			  struct rt_mutex_waiter *waiter) | ||||||
| { | { | ||||||
| 	int first = (waiter == rt_mutex_top_waiter(lock)); | 	int first = (waiter == rt_mutex_top_waiter(lock)); | ||||||
| 	int boost = 0; | 	int boost = 0; | ||||||
|  | @ -568,7 +566,7 @@ static void remove_waiter(struct rt_mutex *lock, | ||||||
|  |  | ||||||
| 	spin_unlock(&lock->wait_lock); | 	spin_unlock(&lock->wait_lock); | ||||||
|  |  | ||||||
| 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__); | 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); | ||||||
|  |  | ||||||
| 	spin_lock(&lock->wait_lock); | 	spin_lock(&lock->wait_lock); | ||||||
| } | } | ||||||
|  | @ -595,7 +593,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) | ||||||
| 	get_task_struct(task); | 	get_task_struct(task); | ||||||
| 	spin_unlock_irqrestore(&task->pi_lock, flags); | 	spin_unlock_irqrestore(&task->pi_lock, flags); | ||||||
|  |  | ||||||
| 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__); | 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); | ||||||
| } | } | ||||||
|  |  | ||||||
| /* | /* | ||||||
|  | @ -604,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) | ||||||
| static int __sched | static int __sched | ||||||
| rt_mutex_slowlock(struct rt_mutex *lock, int state, | rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
| 		  struct hrtimer_sleeper *timeout, | 		  struct hrtimer_sleeper *timeout, | ||||||
| 		  int detect_deadlock __IP_DECL__) | 		  int detect_deadlock) | ||||||
| { | { | ||||||
| 	struct rt_mutex_waiter waiter; | 	struct rt_mutex_waiter waiter; | ||||||
| 	int ret = 0; | 	int ret = 0; | ||||||
|  | @ -615,7 +613,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
| 	spin_lock(&lock->wait_lock); | 	spin_lock(&lock->wait_lock); | ||||||
|  |  | ||||||
| 	/* Try to acquire the lock again: */ | 	/* Try to acquire the lock again: */ | ||||||
| 	if (try_to_take_rt_mutex(lock __IP__)) { | 	if (try_to_take_rt_mutex(lock)) { | ||||||
| 		spin_unlock(&lock->wait_lock); | 		spin_unlock(&lock->wait_lock); | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} | 	} | ||||||
|  | @ -629,7 +627,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
|  |  | ||||||
| 	for (;;) { | 	for (;;) { | ||||||
| 		/* Try to acquire the lock: */ | 		/* Try to acquire the lock: */ | ||||||
| 		if (try_to_take_rt_mutex(lock __IP__)) | 		if (try_to_take_rt_mutex(lock)) | ||||||
| 			break; | 			break; | ||||||
|  |  | ||||||
| 		/* | 		/* | ||||||
|  | @ -653,7 +651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
| 		 */ | 		 */ | ||||||
| 		if (!waiter.task) { | 		if (!waiter.task) { | ||||||
| 			ret = task_blocks_on_rt_mutex(lock, &waiter, | 			ret = task_blocks_on_rt_mutex(lock, &waiter, | ||||||
| 						      detect_deadlock __IP__); | 						      detect_deadlock); | ||||||
| 			/* | 			/* | ||||||
| 			 * If we got woken up by the owner then start loop | 			 * If we got woken up by the owner then start loop | ||||||
| 			 * all over without going into schedule to try | 			 * all over without going into schedule to try | ||||||
|  | @ -680,7 +678,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
| 	set_current_state(TASK_RUNNING); | 	set_current_state(TASK_RUNNING); | ||||||
|  |  | ||||||
| 	if (unlikely(waiter.task)) | 	if (unlikely(waiter.task)) | ||||||
| 		remove_waiter(lock, &waiter __IP__); | 		remove_waiter(lock, &waiter); | ||||||
|  |  | ||||||
| 	/* | 	/* | ||||||
| 	 * try_to_take_rt_mutex() sets the waiter bit | 	 * try_to_take_rt_mutex() sets the waiter bit | ||||||
|  | @ -711,7 +709,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, | ||||||
|  * Slow path try-lock function: |  * Slow path try-lock function: | ||||||
|  */ |  */ | ||||||
| static inline int | static inline int | ||||||
| rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) | rt_mutex_slowtrylock(struct rt_mutex *lock) | ||||||
| { | { | ||||||
| 	int ret = 0; | 	int ret = 0; | ||||||
|  |  | ||||||
|  | @ -719,7 +717,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__) | ||||||
|  |  | ||||||
| 	if (likely(rt_mutex_owner(lock) != current)) { | 	if (likely(rt_mutex_owner(lock) != current)) { | ||||||
|  |  | ||||||
| 		ret = try_to_take_rt_mutex(lock __IP__); | 		ret = try_to_take_rt_mutex(lock); | ||||||
| 		/* | 		/* | ||||||
| 		 * try_to_take_rt_mutex() sets the lock waiters | 		 * try_to_take_rt_mutex() sets the lock waiters | ||||||
| 		 * bit unconditionally. Clean this up. | 		 * bit unconditionally. Clean this up. | ||||||
|  | @ -769,13 +767,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, | ||||||
| 		  int detect_deadlock, | 		  int detect_deadlock, | ||||||
| 		  int (*slowfn)(struct rt_mutex *lock, int state, | 		  int (*slowfn)(struct rt_mutex *lock, int state, | ||||||
| 				struct hrtimer_sleeper *timeout, | 				struct hrtimer_sleeper *timeout, | ||||||
| 				int detect_deadlock __IP_DECL__)) | 				int detect_deadlock)) | ||||||
| { | { | ||||||
| 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||||||
| 		rt_mutex_deadlock_account_lock(lock, current); | 		rt_mutex_deadlock_account_lock(lock, current); | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} else | 	} else | ||||||
| 		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__); | 		return slowfn(lock, state, NULL, detect_deadlock); | ||||||
| } | } | ||||||
|  |  | ||||||
| static inline int | static inline int | ||||||
|  | @ -783,24 +781,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, | ||||||
| 			struct hrtimer_sleeper *timeout, int detect_deadlock, | 			struct hrtimer_sleeper *timeout, int detect_deadlock, | ||||||
| 			int (*slowfn)(struct rt_mutex *lock, int state, | 			int (*slowfn)(struct rt_mutex *lock, int state, | ||||||
| 				      struct hrtimer_sleeper *timeout, | 				      struct hrtimer_sleeper *timeout, | ||||||
| 				      int detect_deadlock __IP_DECL__)) | 				      int detect_deadlock)) | ||||||
| { | { | ||||||
| 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||||||
| 		rt_mutex_deadlock_account_lock(lock, current); | 		rt_mutex_deadlock_account_lock(lock, current); | ||||||
| 		return 0; | 		return 0; | ||||||
| 	} else | 	} else | ||||||
| 		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__); | 		return slowfn(lock, state, timeout, detect_deadlock); | ||||||
| } | } | ||||||
|  |  | ||||||
| static inline int | static inline int | ||||||
| rt_mutex_fasttrylock(struct rt_mutex *lock, | rt_mutex_fasttrylock(struct rt_mutex *lock, | ||||||
| 		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__)) | 		     int (*slowfn)(struct rt_mutex *lock)) | ||||||
| { | { | ||||||
| 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { | 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { | ||||||
| 		rt_mutex_deadlock_account_lock(lock, current); | 		rt_mutex_deadlock_account_lock(lock, current); | ||||||
| 		return 1; | 		return 1; | ||||||
| 	} | 	} | ||||||
| 	return slowfn(lock __RET_IP__); | 	return slowfn(lock); | ||||||
| } | } | ||||||
|  |  | ||||||
| static inline void | static inline void | ||||||
|  | @ -948,7 +946,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | ||||||
| 				struct task_struct *proxy_owner) | 				struct task_struct *proxy_owner) | ||||||
| { | { | ||||||
| 	__rt_mutex_init(lock, NULL); | 	__rt_mutex_init(lock, NULL); | ||||||
| 	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__); | 	debug_rt_mutex_proxy_lock(lock, proxy_owner); | ||||||
| 	rt_mutex_set_owner(lock, proxy_owner, 0); | 	rt_mutex_set_owner(lock, proxy_owner, 0); | ||||||
| 	rt_mutex_deadlock_account_lock(lock, proxy_owner); | 	rt_mutex_deadlock_account_lock(lock, proxy_owner); | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -10,9 +10,6 @@ | ||||||
|  * Non-debug version. |  * Non-debug version. | ||||||
|  */ |  */ | ||||||
|  |  | ||||||
| #define __IP_DECL__ |  | ||||||
| #define __IP__ |  | ||||||
| #define __RET_IP__ |  | ||||||
| #define rt_mutex_deadlock_check(l)			(0) | #define rt_mutex_deadlock_check(l)			(0) | ||||||
| #define rt_mutex_deadlock_account_lock(m, t)		do { } while (0) | #define rt_mutex_deadlock_account_lock(m, t)		do { } while (0) | ||||||
| #define rt_mutex_deadlock_account_unlock(l)		do { } while (0) | #define rt_mutex_deadlock_account_unlock(l)		do { } while (0) | ||||||
|  |  | ||||||
|  | @ -30,6 +30,7 @@ | ||||||
| #include <linux/capability.h> | #include <linux/capability.h> | ||||||
| #include <linux/completion.h> | #include <linux/completion.h> | ||||||
| #include <linux/kernel_stat.h> | #include <linux/kernel_stat.h> | ||||||
|  | #include <linux/debug_locks.h> | ||||||
| #include <linux/security.h> | #include <linux/security.h> | ||||||
| #include <linux/notifier.h> | #include <linux/notifier.h> | ||||||
| #include <linux/profile.h> | #include <linux/profile.h> | ||||||
|  | @ -3142,12 +3143,13 @@ void fastcall add_preempt_count(int val) | ||||||
| 	/* | 	/* | ||||||
| 	 * Underflow? | 	 * Underflow? | ||||||
| 	 */ | 	 */ | ||||||
| 	BUG_ON((preempt_count() < 0)); | 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) | ||||||
|  | 		return; | ||||||
| 	preempt_count() += val; | 	preempt_count() += val; | ||||||
| 	/* | 	/* | ||||||
| 	 * Spinlock count overflowing soon? | 	 * Spinlock count overflowing soon? | ||||||
| 	 */ | 	 */ | ||||||
| 	BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); | 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(add_preempt_count); | EXPORT_SYMBOL(add_preempt_count); | ||||||
|  |  | ||||||
|  | @ -3156,11 +3158,15 @@ void fastcall sub_preempt_count(int val) | ||||||
| 	/* | 	/* | ||||||
| 	 * Underflow? | 	 * Underflow? | ||||||
| 	 */ | 	 */ | ||||||
| 	BUG_ON(val > preempt_count()); | 	if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) | ||||||
|  | 		return; | ||||||
| 	/* | 	/* | ||||||
| 	 * Is the spinlock portion underflowing? | 	 * Is the spinlock portion underflowing? | ||||||
| 	 */ | 	 */ | ||||||
| 	BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK)); | 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && | ||||||
|  | 			!(preempt_count() & PREEMPT_MASK))) | ||||||
|  | 		return; | ||||||
|  |  | ||||||
| 	preempt_count() -= val; | 	preempt_count() -= val; | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(sub_preempt_count); | EXPORT_SYMBOL(sub_preempt_count); | ||||||
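The hunks above swap hard BUG_ON() crashes for DEBUG_LOCKS_WARN_ON() checks, so a corrupted preempt count now produces a single warning and an early return instead of killing the box. The macro itself lives in the new include/linux/debug_locks.h, which this page truncates; a minimal sketch of the shape it plausibly takes, built only on the debug_locks_off() helper added by this patch:

	/*
	 * Sketch, not the verbatim header: evaluate the condition and,
	 * on the first failure, disable all lock debugging via
	 * debug_locks_off() and emit one WARN_ON(). The nonzero return
	 * is what lets add_preempt_count()/sub_preempt_count() bail out.
	 */
	#define DEBUG_LOCKS_WARN_ON(c)					\
	({								\
		int __ret = 0;						\
									\
		if (unlikely(c)) {					\
			if (debug_locks_off())				\
				WARN_ON(1);				\
			__ret = 1;					\
		}							\
		__ret;							\
	})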
|  | @ -4690,7 +4696,7 @@ void show_state(void) | ||||||
| 	} while_each_thread(g, p); | 	} while_each_thread(g, p); | ||||||
|  |  | ||||||
| 	read_unlock(&tasklist_lock); | 	read_unlock(&tasklist_lock); | ||||||
| 	mutex_debug_show_all_locks(); | 	debug_show_all_locks(); | ||||||
| } | } | ||||||
|  |  | ||||||
| /** | /** | ||||||
|  |  | ||||||
|  | @ -11,7 +11,7 @@ lib-$(CONFIG_SMP) += cpumask.o | ||||||
|  |  | ||||||
| lib-y	+= kobject.o kref.o kobject_uevent.o klist.o | lib-y	+= kobject.o kref.o kobject_uevent.o klist.o | ||||||
|  |  | ||||||
| obj-y += sort.o parser.o halfmd4.o iomap_copy.o | obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o | ||||||
|  |  | ||||||
| ifeq ($(CONFIG_DEBUG_KOBJECT),y) | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | ||||||
| CFLAGS_kobject.o += -DDEBUG | CFLAGS_kobject.o += -DDEBUG | ||||||
|  |  | ||||||
45 lib/debug_locks.c Normal file
							|  | @ -0,0 +1,45 @@ | ||||||
|  | /* | ||||||
|  |  * lib/debug_locks.c | ||||||
|  |  * | ||||||
|  |  * Generic place for common debugging facilities for various locks: | ||||||
|  |  * spinlocks, rwlocks, mutexes and rwsems. | ||||||
|  |  * | ||||||
|  |  * Started by Ingo Molnar: | ||||||
|  |  * | ||||||
|  |  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||||||
|  |  */ | ||||||
|  | #include <linux/rwsem.h> | ||||||
|  | #include <linux/mutex.h> | ||||||
|  | #include <linux/module.h> | ||||||
|  | #include <linux/spinlock.h> | ||||||
|  | #include <linux/debug_locks.h> | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * We want to turn all lock-debugging facilities on/off at once, | ||||||
|  |  * via a global flag. The reason is that once a single bug has been | ||||||
|  |  * detected and reported, there might be a cascade of followup bugs | ||||||
|  |  * that would just muddy the log. So we report the first one and | ||||||
|  |  * shut up after that. | ||||||
|  |  */ | ||||||
|  | int debug_locks = 1; | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * The locking-testsuite uses <debug_locks_silent> to get a | ||||||
|  |  * 'silent failure': nothing is printed to the console when | ||||||
|  |  * a locking bug is detected. | ||||||
|  |  */ | ||||||
|  | int debug_locks_silent; | ||||||
|  |  | ||||||
|  | /* | ||||||
|  |  * Generic 'turn off all lock debugging' function: | ||||||
|  |  */ | ||||||
|  | int debug_locks_off(void) | ||||||
|  | { | ||||||
|  | 	if (xchg(&debug_locks, 0)) { | ||||||
|  | 		if (!debug_locks_silent) { | ||||||
|  | 			console_verbose(); | ||||||
|  | 			return 1; | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return 0; | ||||||
|  | } | ||||||
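Note the xchg() above: turning the flag off is atomic, so if several CPUs trip over lock bugs at once, only the first caller of debug_locks_off() returns 1, and only when debug_locks_silent is clear (that is how the locking self-tests run silently). A hedged usage sketch; report_lock_bug() is hypothetical, but it consumes debug_locks_off() exactly the way the reworked spin_bug() below does:

	/* Hypothetical reporting helper: print the first lock bug, stay
	 * quiet for all later ones and in silent-test mode. */
	static void report_lock_bug(const char *msg)
	{
		if (!debug_locks_off())
			return;
		printk(KERN_EMERG "BUG: %s\n", msg);
		dump_stack();
	}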
|  | @ -8,14 +8,17 @@ | ||||||
|  |  | ||||||
| #include <linux/spinlock.h> | #include <linux/spinlock.h> | ||||||
| #include <linux/interrupt.h> | #include <linux/interrupt.h> | ||||||
|  | #include <linux/debug_locks.h> | ||||||
| #include <linux/delay.h> | #include <linux/delay.h> | ||||||
|  | #include <linux/module.h> | ||||||
|  |  | ||||||
| static void spin_bug(spinlock_t *lock, const char *msg) | static void spin_bug(spinlock_t *lock, const char *msg) | ||||||
| { | { | ||||||
| 	static long print_once = 1; |  | ||||||
| 	struct task_struct *owner = NULL; | 	struct task_struct *owner = NULL; | ||||||
|  |  | ||||||
| 	if (xchg(&print_once, 0)) { | 	if (!debug_locks_off()) | ||||||
|  | 		return; | ||||||
|  |  | ||||||
| 	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | 	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) | ||||||
| 		owner = lock->owner; | 		owner = lock->owner; | ||||||
| 	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | 	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", | ||||||
|  | @ -28,18 +31,12 @@ static void spin_bug(spinlock_t *lock, const char *msg) | ||||||
| 		owner ? owner->pid : -1, | 		owner ? owner->pid : -1, | ||||||
| 		lock->owner_cpu); | 		lock->owner_cpu); | ||||||
| 	dump_stack(); | 	dump_stack(); | ||||||
| #ifdef CONFIG_SMP |  | ||||||
| 		/* |  | ||||||
| 		 * We cannot continue on SMP: |  | ||||||
| 		 */ |  | ||||||
| //		panic("bad locking"); |  | ||||||
| #endif |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
|  |  | ||||||
| #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) | ||||||
|  |  | ||||||
| static inline void debug_spin_lock_before(spinlock_t *lock) | static inline void | ||||||
|  | debug_spin_lock_before(spinlock_t *lock) | ||||||
| { | { | ||||||
| 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | 	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); | ||||||
| 	SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | 	SPIN_BUG_ON(lock->owner == current, lock, "recursion"); | ||||||
|  | @ -118,20 +115,13 @@ void _raw_spin_unlock(spinlock_t *lock) | ||||||
|  |  | ||||||
| static void rwlock_bug(rwlock_t *lock, const char *msg) | static void rwlock_bug(rwlock_t *lock, const char *msg) | ||||||
| { | { | ||||||
| 	static long print_once = 1; | 	if (!debug_locks_off()) | ||||||
|  | 		return; | ||||||
|  |  | ||||||
| 	if (xchg(&print_once, 0)) { |  | ||||||
| 	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", | 	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n", | ||||||
| 		msg, raw_smp_processor_id(), current->comm, | 		msg, raw_smp_processor_id(), current->comm, | ||||||
| 		current->pid, lock); | 		current->pid, lock); | ||||||
| 	dump_stack(); | 	dump_stack(); | ||||||
| #ifdef CONFIG_SMP |  | ||||||
| 		/* |  | ||||||
| 		 * We cannot continue on SMP: |  | ||||||
| 		 */ |  | ||||||
| 		panic("bad locking"); |  | ||||||
| #endif |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
|  |  | ||||||
| #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg) | ||||||
|  |  | ||||||
|  | @ -330,6 +330,8 @@ void __vunmap(void *addr, int deallocate_pages) | ||||||
| 		return; | 		return; | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	debug_check_no_locks_freed(addr, area->size); | ||||||
|  |  | ||||||
| 	if (deallocate_pages) { | 	if (deallocate_pages) { | ||||||
| 		int i; | 		int i; | ||||||
|  |  | ||||||
|  |  | ||||||
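The one-line hook in __vunmap() above is the "check lock freeing in vfree too" item from the changelog: before the area is torn down, debug_check_no_locks_freed() may complain about any live lock sitting in the region. Its body is not part of this excerpt; conceptually it applies a range test like the sketch below to each lock the debugging core tracks, warning on a hit:

	/* Conceptual sketch only, not the kernel implementation. */
	static int lock_inside_freed_range(const void *lock,
					   const void *from, unsigned long len)
	{
		const char *p = lock, *start = from;

		return p >= start && p < start + len;
	}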