	locking/lockdep: Fix "USED" <- "IN-NMI" inversions
During the LPC RCU BoF Paul asked how come the "USED" <- "IN-NMI"
detector doesn't trip over rcu_read_lock()'s lockdep annotation.
Looking into this I found a very embarrassing typo in
verify_lock_unused():
	-	if (!(class->usage_mask & LOCK_USED))
	+	if (!(class->usage_mask & LOCKF_USED))
Fixing that will indeed cause rcu_read_lock() to insta-splat :/
The above typo means that instead of testing for: 0x100
(1 << LOCK_USED), we test for 8 (LOCK_USED), which corresponds to
(1 << LOCK_ENABLED_HARDIRQ).
So instead of testing for _any_ used lock, it will only match locks
that were used with hardirqs enabled.
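
To make the two namespaces concrete, here is a minimal sketch (using
the values quoted above, not the actual kernel definitions): the LOCK_*
symbols are bit numbers in enum lock_usage_bit, while the LOCKF_*
constants are the corresponding masks:

	/* bit numbers */
	LOCK_ENABLED_HARDIRQ == 3
	LOCK_USED            == 8

	/* bit mask */
	LOCKF_USED == (1 << LOCK_USED) == 0x100

	class->usage_mask & LOCK_USED	/* & 8 == & (1 << 3): ENABLED_HARDIRQ */
	class->usage_mask & LOCKF_USED	/* & 0x100 == & (1 << 8): USED */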
The rcu_read_lock() annotation uses .check=0, which means it will not
set any of the interrupt bits and will thus never match.
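
For reference, the annotation in question is rcu_lock_acquire() in
include/linux/rcupdate.h, which (quoting the v5.9-era source from
memory, so treat it as illustrative) passes read=2 and check=0 to
lock_acquire():

	static inline void rcu_lock_acquire(struct lockdep_map *map)
	{
		/* subclass=0, trylock=0, read=2 (recursive read), check=0 */
		lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
	}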
To properly fix the situation and allow rcu_read_lock() to work
correctly, split LOCK_USED into LOCK_USED and LOCK_USED_READ: by having
.read users set USED_READ and test against USED, pure read-recursive
locks are permitted.
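
Concretely (a summary sketch of the hunks below, not new code):

	/* mark_lock(): a .read acquisition now records USED_READ, not USED */
	if (new_bit == LOCK_USED && this->read)
		new_bit = LOCK_USED_READ;

	/*
	 * verify_lock_unused(): a read acquisition in NMI only conflicts
	 * with prior write use; a write acquisition conflicts with both.
	 */
	mask = LOCKF_USED;
	if (!hlock->read)
		mask |= LOCKF_USED_READ;

Since rcu_read_lock() is always a .read acquisition, its class only
ever sets USED_READ, and the read-side NMI test against LOCKF_USED
therefore never fires.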
Fixes: f6f48e1804 ("lockdep: Teach lockdep about "USED" <- "IN-NMI" inversions")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20200902160323.GK1362448@hirez.programming.kicks-ass.net
			
			
This commit is contained in:
		
							parent
							
								
									fc3abb5325
								
							
						
					
					
						commit
						23870f1227
					
				
					 2 changed files with 31 additions and 6 deletions
				
			
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3969,13 +3969,18 @@ static int separate_irq_context(struct task_struct *curr,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit)
 {
-	unsigned int new_mask = 1 << new_bit, ret = 1;
+	unsigned int old_mask, new_mask, ret = 1;
 
 	if (new_bit >= LOCK_USAGE_STATES) {
 		DEBUG_LOCKS_WARN_ON(1);
 		return 0;
 	}
 
+	if (new_bit == LOCK_USED && this->read)
+		new_bit = LOCK_USED_READ;
+
+	new_mask = 1 << new_bit;
+
 	/*
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
@@ -3988,13 +3993,22 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
 	 * Make sure we didn't race:
 	 */
-	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
-		graph_unlock();
-		return 1;
-	}
+	if (unlikely(hlock_class(this)->usage_mask & new_mask))
+		goto unlock;
 
+	old_mask = hlock_class(this)->usage_mask;
 	hlock_class(this)->usage_mask |= new_mask;
 
+	/*
+	 * Save one usage_traces[] entry and map both LOCK_USED and
+	 * LOCK_USED_READ onto the same entry.
+	 */
+	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+			goto unlock;
+		new_bit = LOCK_USED;
+	}
+
 	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
 		return 0;
 
@@ -4008,6 +4022,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			return 0;
 	}
 
+unlock:
 	graph_unlock();
 
 	/*
@@ -4942,12 +4957,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
 {
 #ifdef CONFIG_PROVE_LOCKING
 	struct lock_class *class = look_up_lock_class(lock, subclass);
+	unsigned long mask = LOCKF_USED;
 
 	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
 	if (!class)
 		return;
 
-	if (!(class->usage_mask & LOCK_USED))
+	/*
+	 * READ locks only conflict with USED, such that if we only ever use
+	 * READ locks, there is no deadlock possible -- RCU.
+	 */
+	if (!hlock->read)
+		mask |= LOCKF_USED_READ;
+
+	if (!(class->usage_mask & mask))
 		return;
 
 	hlock->class_idx = class - lock_classes;
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -19,6 +19,7 @@ enum lock_usage_bit {
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	LOCK_USED,
+	LOCK_USED_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -40,6 +41,7 @@ enum {
 #include "lockdep_states.h"
 #undef LOCKDEP_STATE
 	__LOCKF(USED)
+	__LOCKF(USED_READ)
 };
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |