	sched: Make cond_resched_lock() variants RT aware
The __might_resched() checks in the cond_resched_lock() variants use
PREEMPT_LOCK_OFFSET for the preempt count offset check, which accounts for
the preemption disable of the spin_lock() that is still held at that point.

On PREEMPT_RT enabled kernels spin/rw_lock held sections stay preemptible,
which means PREEMPT_LOCK_OFFSET is 0, but the __might_resched() check still
triggers because it also takes RCU read side nesting into account.

On RT enabled kernels spin/read/write_lock() issue rcu_read_lock() to
resemble the !RT semantics, which means that in cond_resched_lock() the
might resched check will see preempt_count() == 0 and
rcu_preempt_depth() == 1.

Introduce PREEMPT_LOCK_RESCHED_OFFSETS for those might resched checks and
map it depending on CONFIG_PREEMPT_RT.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210923165358.305969211@linutronix.de
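The key idea is that a single "offsets" value can describe both expectations
at once: the low MIGHT_RESCHED_RCU_SHIFT bits carry the expected preempt
count and the bits above carry the expected RCU read side nesting depth,
matching the MIGHT_RESCHED_* layout in the diff below. The following is a
minimal userspace sketch of that encoding, not the kernel's actual
__might_resched() implementation; preempt_count() and rcu_preempt_depth()
are replaced by hypothetical fake_* variables, and PREEMPT_LOCK_OFFSET is
assumed to be 1 on !RT kernels.

#include <stdio.h>
#include <stdbool.h>

#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

/* Hypothetical stand-ins for the kernel's preempt_count() and rcu_preempt_depth(). */
static unsigned int fake_preempt_count;
static unsigned int fake_rcu_depth;

/* Would a __might_resched()-style check stay quiet for the given expected offsets? */
static bool resched_check_ok(unsigned int offsets)
{
	unsigned int expected_preempt = offsets & MIGHT_RESCHED_PREEMPT_MASK;
	unsigned int expected_rcu     = offsets >> MIGHT_RESCHED_RCU_SHIFT;

	return fake_preempt_count == expected_preempt &&
	       fake_rcu_depth == expected_rcu;
}

int main(void)
{
	/* !RT: the held spin_lock() disables preemption, no RCU read side section. */
	fake_preempt_count = 1;		/* assumes PREEMPT_LOCK_OFFSET == 1 here */
	fake_rcu_depth = 0;
	printf("!RT check passes: %d\n", resched_check_ok(1));

	/* RT: the lock held section stays preemptible but implies rcu_read_lock(). */
	fake_preempt_count = 0;		/* PREEMPT_LOCK_OFFSET is 0 on RT */
	fake_rcu_depth = 1;
	printf("RT  check passes: %d\n",
	       resched_check_ok(0 + (1U << MIGHT_RESCHED_RCU_SHIFT)));
	return 0;
}

With the combined offsets both configurations pass, whereas checking only
PREEMPT_LOCK_OFFSET would trip the warning on RT because of the nonzero RCU
nesting.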
parent 50e081b96e
commit 3e9cc688e5

2 changed files with 28 additions and 11 deletions
include/linux/preempt.h
@@ -124,6 +124,7 @@
 #if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET		PREEMPT_DISABLE_OFFSET
 #else
+/* Locks on RT do not disable preemption */
 #define PREEMPT_LOCK_OFFSET		0
 #endif
 
include/linux/sched.h
@@ -2049,18 +2049,34 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
+#define MIGHT_RESCHED_RCU_SHIFT		8
+#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS	\
+	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
 #define cond_resched_lock(lock) ({						\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_lock(lock);						\
 })
 
 #define cond_resched_rwlock_read(lock) ({					\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_rwlock_read(lock);					\
 })
 
 #define cond_resched_rwlock_write(lock) ({					\
-	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
 	__cond_resched_rwlock_write(lock);					\
 })
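For context, here is a schematic of the kind of caller these macros serve:
a loop that does per-item work while holding a spinlock and periodically
offers to reschedule. Everything in it (my_lock, process_item(), the mocked
locking primitives) is made up so the snippet compiles standalone; it is not
code from this commit. On !RT the loop body runs with preemption disabled,
on RT it runs preemptible inside an implied RCU read side section, which is
exactly the difference PREEMPT_LOCK_RESCHED_OFFSETS encodes.

#include <stdio.h>

typedef struct { int dummy; } spinlock_t;		/* mock, not the kernel type */

static void spin_lock(spinlock_t *l)   { (void)l; }	/* mock: the real one disables preemption on !RT */
static void spin_unlock(spinlock_t *l) { (void)l; }	/* mock */

static int cond_resched_lock(spinlock_t *l)		/* mock: pretend we never reschedule */
{
	(void)l;
	return 0;
}

static spinlock_t my_lock;				/* hypothetical lock */

static void process_item(int i)				/* hypothetical per-item work */
{
	printf("item %d\n", i);
}

int main(void)
{
	spin_lock(&my_lock);
	for (int i = 0; i < 4; i++) {
		process_item(i);
		/* Lets other tasks run without open-coding unlock/relock; this is
		 * the call site that the __might_resched() check instruments. */
		cond_resched_lock(&my_lock);
	}
	spin_unlock(&my_lock);
	return 0;
}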