mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	rcu: Add support for consolidated-RCU reader checking
This commit adds RCU-reader checks to list_for_each_entry_rcu() and hlist_for_each_entry_rcu(). These checks are optional, and are enabled by passing a lockdep expression to a new optional argument of these two macros. If this optional lockdep expression is omitted, the two macros act as before, checking only for an RCU read-side critical section.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
[ paulmck: Update to eliminate return within macro and update comment. ]
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
This commit is contained in:
		
							parent
							
								
									9147089bee
								
							
						
					
					
						commit
						28875945ba
					
				
					 4 changed files with 108 additions and 38 deletions
				
			
		| 
						 | 
				
			
			@ -40,6 +40,24 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 | 
			
		|||
 */
 | 
			
		||||
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Check during list traversal that we are within an RCU reader
 | 
			
		||||
 */
 | 
			
		||||
 | 
			
		||||
#define check_arg_count_one(dummy)
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_PROVE_RCU_LIST
 | 
			
		||||
#define __list_check_rcu(dummy, cond, extra...)				\
 | 
			
		||||
	({								\
 | 
			
		||||
	check_arg_count_one(extra);					\
 | 
			
		||||
	RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(),		\
 | 
			
		||||
			 "RCU-list traversed in non-reader section!");	\
 | 
			
		||||
	 })
 | 
			
		||||
#else
 | 
			
		||||
#define __list_check_rcu(dummy, cond, extra...)				\
 | 
			
		||||
	({ check_arg_count_one(extra); })
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Insert a new entry between two known consecutive entries.
 | 
			
		||||
 *
 | 
			
		||||
| 
						 | 
				
			
			@ -343,13 +361,15 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
 | 
			
		|||
 * @pos:	the type * to use as a loop cursor.
 | 
			
		||||
 * @head:	the head for your list.
 | 
			
		||||
 * @member:	the name of the list_head within the struct.
 | 
			
		||||
 * @cond:	optional lockdep expression if called from non-RCU protection.
 | 
			
		||||
 *
 | 
			
		||||
 * This list-traversal primitive may safely run concurrently with
 | 
			
		||||
 * the _rcu list-mutation primitives such as list_add_rcu()
 | 
			
		||||
 * as long as the traversal is guarded by rcu_read_lock().
 | 
			
		||||
 */
 | 
			
		||||
#define list_for_each_entry_rcu(pos, head, member) \
 | 
			
		||||
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
 | 
			
		||||
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
 | 
			
		||||
	for (__list_check_rcu(dummy, ## cond, 0),			\
 | 
			
		||||
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
 | 
			
		||||
		&pos->member != (head);					\
 | 
			
		||||
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -616,13 +636,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
 | 
			
		|||
 * @pos:	the type * to use as a loop cursor.
 | 
			
		||||
 * @head:	the head for your list.
 | 
			
		||||
 * @member:	the name of the hlist_node within the struct.
 | 
			
		||||
 * @cond:	optional lockdep expression if called from non-RCU protection.
 | 
			
		||||
 *
 | 
			
		||||
 * This list-traversal primitive may safely run concurrently with
 | 
			
		||||
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 | 
			
		||||
 * as long as the traversal is guarded by rcu_read_lock().
 | 
			
		||||
 */
 | 
			
		||||
#define hlist_for_each_entry_rcu(pos, head, member)			\
 | 
			
		||||
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
 | 
			
		||||
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
 | 
			
		||||
	for (__list_check_rcu(dummy, ## cond, 0),			\
 | 
			
		||||
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
 | 
			
		||||
			typeof(*(pos)), member);			\
 | 
			
		||||
		pos;							\
 | 
			
		||||
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void);
 | 
			
		|||
int rcu_read_lock_held(void);
 | 
			
		||||
int rcu_read_lock_bh_held(void);
 | 
			
		||||
int rcu_read_lock_sched_held(void);
 | 
			
		||||
int rcu_read_lock_any_held(void);
 | 
			
		||||
 | 
			
		||||
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void)
 | 
			
		|||
{
 | 
			
		||||
	return !preemptible();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static inline int rcu_read_lock_any_held(void)
 | 
			
		||||
{
 | 
			
		||||
	return !preemptible();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_PROVE_RCU
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -8,6 +8,17 @@ menu "RCU Debugging"
 | 
			
		|||
config PROVE_RCU
 | 
			
		||||
	def_bool PROVE_LOCKING
 | 
			
		||||
 | 
			
		||||
config PROVE_RCU_LIST
 | 
			
		||||
	bool "RCU list lockdep debugging"
 | 
			
		||||
	depends on PROVE_RCU && RCU_EXPERT
 | 
			
		||||
	default n
 | 
			
		||||
	help
 | 
			
		||||
	  Enable RCU lockdep checking for list usages. By default it is
 | 
			
		||||
	  turned off since there are several list RCU users that still
 | 
			
		||||
	  need to be converted to pass a lockdep expression. To prevent
 | 
			
		||||
	  false-positive splats, we keep it default disabled but once all
 | 
			
		||||
	  users are converted, we can remove this config option.
 | 
			
		||||
 | 
			
		||||
config TORTURE_TEST
 | 
			
		||||
	tristate
 | 
			
		||||
	default n
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -61,9 +61,15 @@ module_param(rcu_normal_after_boot, int, 0);
 | 
			
		|||
 | 
			
		||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
 | 
			
		||||
/**
 | 
			
		||||
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 | 
			
		||||
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 | 
			
		||||
 * @ret:	Best guess answer if lockdep cannot be relied on
 | 
			
		||||
 *
 | 
			
		||||
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 | 
			
		||||
 * Returns true if lockdep must be ignored, in which case *ret contains
 | 
			
		||||
 * the best guess described below.  Otherwise returns false, in which
 | 
			
		||||
 * case *ret tells the caller nothing and the caller should instead
 | 
			
		||||
 * consult lockdep.
 | 
			
		||||
 *
 | 
			
		||||
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set *ret to nonzero iff in an
 | 
			
		||||
 * RCU-sched read-side critical section.  In absence of
 | 
			
		||||
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 | 
			
		||||
 * critical section unless it can prove otherwise.  Note that disabling
 | 
			
		||||
| 
						 | 
				
			
			@ -75,30 +81,44 @@ module_param(rcu_normal_after_boot, int, 0);
 | 
			
		|||
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 | 
			
		||||
 * and while lockdep is disabled.
 | 
			
		||||
 *
 | 
			
		||||
 * Note that if the CPU is in the idle loop from an RCU point of
 | 
			
		||||
 * view (ie: that we are in the section between rcu_idle_enter() and
 | 
			
		||||
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 | 
			
		||||
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 | 
			
		||||
 * that are in such a section, considering these as in extended quiescent
 | 
			
		||||
 * state, so such a CPU is effectively never in an RCU read-side critical
 | 
			
		||||
 * section regardless of what RCU primitives it invokes.  This state of
 | 
			
		||||
 * affairs is required --- we need to keep an RCU-free window in idle
 | 
			
		||||
 * where the CPU may possibly enter into low power mode. This way we can
 | 
			
		||||
 * notice an extended quiescent state to other CPUs that started a grace
 | 
			
		||||
 * period. Otherwise we would delay any grace period as long as we run in
 | 
			
		||||
 * the idle task.
 | 
			
		||||
 * Note that if the CPU is in the idle loop from an RCU point of view (ie:
 | 
			
		||||
 * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
 | 
			
		||||
 * then rcu_read_lock_held() sets *ret to false even if the CPU did an
 | 
			
		||||
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 | 
			
		||||
 * in such a section, considering these as in extended quiescent state,
 | 
			
		||||
 * so such a CPU is effectively never in an RCU read-side critical section
 | 
			
		||||
 * regardless of what RCU primitives it invokes.  This state of affairs is
 | 
			
		||||
 * required --- we need to keep an RCU-free window in idle where the CPU may
 | 
			
		||||
 * possibly enter into low power mode. This way we can notice an extended
 | 
			
		||||
 * quiescent state to other CPUs that started a grace period. Otherwise
 | 
			
		||||
 * we would delay any grace period as long as we run in the idle task.
 | 
			
		||||
 *
 | 
			
		||||
 * Similarly, we avoid claiming an SRCU read lock held if the current
 | 
			
		||||
 * Similarly, we avoid claiming an RCU read lock held if the current
 | 
			
		||||
 * CPU is offline.
 | 
			
		||||
 */
 | 
			
		||||
static bool rcu_read_lock_held_common(bool *ret)
 | 
			
		||||
{
 | 
			
		||||
	if (!debug_lockdep_rcu_enabled()) {
 | 
			
		||||
		*ret = 1;
 | 
			
		||||
		return true;
 | 
			
		||||
	}
 | 
			
		||||
	if (!rcu_is_watching()) {
 | 
			
		||||
		*ret = 0;
 | 
			
		||||
		return true;
 | 
			
		||||
	}
 | 
			
		||||
	if (!rcu_lockdep_current_cpu_online()) {
 | 
			
		||||
		*ret = 0;
 | 
			
		||||
		return true;
 | 
			
		||||
	}
 | 
			
		||||
	return false;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
int rcu_read_lock_sched_held(void)
 | 
			
		||||
{
 | 
			
		||||
	if (!debug_lockdep_rcu_enabled())
 | 
			
		||||
		return 1;
 | 
			
		||||
	if (!rcu_is_watching())
 | 
			
		||||
		return 0;
 | 
			
		||||
	if (!rcu_lockdep_current_cpu_online())
 | 
			
		||||
		return 0;
 | 
			
		||||
	bool ret;
 | 
			
		||||
 | 
			
		||||
	if (rcu_read_lock_held_common(&ret))
 | 
			
		||||
		return ret;
 | 
			
		||||
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL(rcu_read_lock_sched_held);
 | 
			
		||||
| 
						 | 
				
			
			@ -257,12 +277,10 @@ NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 | 
			
		|||
 */
 | 
			
		||||
int rcu_read_lock_held(void)
 | 
			
		||||
{
 | 
			
		||||
	if (!debug_lockdep_rcu_enabled())
 | 
			
		||||
		return 1;
 | 
			
		||||
	if (!rcu_is_watching())
 | 
			
		||||
		return 0;
 | 
			
		||||
	if (!rcu_lockdep_current_cpu_online())
 | 
			
		||||
		return 0;
 | 
			
		||||
	bool ret;
 | 
			
		||||
 | 
			
		||||
	if (rcu_read_lock_held_common(&ret))
 | 
			
		||||
		return ret;
 | 
			
		||||
	return lock_is_held(&rcu_lock_map);
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
 | 
			
		||||
| 
						 | 
				
			
			@ -284,16 +302,28 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
 | 
			
		|||
 */
 | 
			
		||||
int rcu_read_lock_bh_held(void)
 | 
			
		||||
{
 | 
			
		||||
	if (!debug_lockdep_rcu_enabled())
 | 
			
		||||
		return 1;
 | 
			
		||||
	if (!rcu_is_watching())
 | 
			
		||||
		return 0;
 | 
			
		||||
	if (!rcu_lockdep_current_cpu_online())
 | 
			
		||||
		return 0;
 | 
			
		||||
	bool ret;
 | 
			
		||||
 | 
			
		||||
	if (rcu_read_lock_held_common(&ret))
 | 
			
		||||
		return ret;
 | 
			
		||||
	return in_softirq() || irqs_disabled();
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 | 
			
		||||
 | 
			
		||||
int rcu_read_lock_any_held(void)
 | 
			
		||||
{
 | 
			
		||||
	bool ret;
 | 
			
		||||
 | 
			
		||||
	if (rcu_read_lock_held_common(&ret))
 | 
			
		||||
		return ret;
 | 
			
		||||
	if (lock_is_held(&rcu_lock_map) ||
 | 
			
		||||
	    lock_is_held(&rcu_bh_lock_map) ||
 | 
			
		||||
	    lock_is_held(&rcu_sched_lock_map))
 | 
			
		||||
		return 1;
 | 
			
		||||
	return !preemptible();
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
 | 
			
		||||
 | 
			
		||||
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 | 
			
		||||
 | 
			
		||||
/**
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue