rcuwait_active() only returns whether w->task is not NULL.  This is
exactly one of the use cases mentioned in the documentation for
rcu_access_pointer() where it is correct to bypass lockdep checks.
This avoids a splat from kvm_vcpu_on_spin().

Reported-by: Wanpeng Li <kernellwp@gmail.com>
Tested-by: Wanpeng Li <kernellwp@gmail.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
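For background: rcu_access_pointer() fetches an RCU-protected pointer for
comparison only. Because the value is never dereferenced, no RCU read-side
critical section is required and lockdep is not consulted, which is why it
is the right primitive for rcuwait_active(). Below is a minimal sketch of
the distinction; the example_* names are hypothetical illustrations (the
dereferencing side mirrors what rcuwait_wake_up() has to do), not kernel
code:

	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	/* Hypothetical stand-in for struct rcuwait. */
	struct example_wait {
		struct task_struct __rcu *task;
	};

	/*
	 * Comparison only: rcu_access_pointer() returns the pointer value
	 * without requiring rcu_read_lock(), so calling this outside an RCU
	 * read-side critical section produces no lockdep splat.
	 */
	static inline bool example_active(struct example_wait *w)
	{
		return !!rcu_access_pointer(w->task);
	}

	/*
	 * Actual dereference: must use rcu_dereference() between
	 * rcu_read_lock() and rcu_read_unlock(), or lockdep complains.
	 */
	static void example_wake(struct example_wait *w)
	{
		struct task_struct *task;

		rcu_read_lock();
		task = rcu_dereference(w->task);
		if (task)
			wake_up_process(task);
		rcu_read_unlock();
	}

The patched include/linux/rcuwait.h in full: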
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCUWAIT_H_
#define _LINUX_RCUWAIT_H_

#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a user is blocked (or
 * checking if it needs to) on a condition, and reset as soon as we
 * know that the condition has succeeded and are awoken.
 */
struct rcuwait {
	struct task_struct __rcu *task;
};

#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }

static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}

/*
 * Note: this provides no serialization and, just as with waitqueues,
 * requires care to estimate as to whether or not the wait is active.
 */
static inline int rcuwait_active(struct rcuwait *w)
{
	return !!rcu_access_pointer(w->task);
}

extern int rcuwait_wake_up(struct rcuwait *w);

/*
 * The caller is responsible for locking around rcuwait_wait_event(),
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */

static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}

static inline void finish_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, NULL);
	__set_current_state(TASK_RUNNING);
}

#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})

#endif /* _LINUX_RCUWAIT_H_ */
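For completeness, a hedged usage sketch of the API this header exports: one
task blocks in rcuwait_wait_event() until a flag is set, and another task
sets the flag and calls rcuwait_wake_up(). The my_work structure and
functions are hypothetical; note the header's requirement that writes to
@task be serialized, i.e. at most one waiter may use a given rcuwait at a
time:

	#include <linux/rcuwait.h>
	#include <linux/sched.h>

	/* Hypothetical one-shot completion guarded by READ/WRITE_ONCE. */
	struct my_work {
		struct rcuwait	done_wait;
		bool		done;
	};

	static void my_work_init(struct my_work *work)
	{
		rcuwait_init(&work->done_wait);
		work->done = false;
	}

	/*
	 * Waiter side: the caller must ensure only one task waits on a
	 * given rcuwait at a time. Returns -EINTR if a signal arrives
	 * first (TASK_INTERRUPTIBLE), 0 once the condition holds.
	 */
	static int my_work_wait(struct my_work *work)
	{
		return rcuwait_wait_event(&work->done_wait,
					  READ_ONCE(work->done),
					  TASK_INTERRUPTIBLE);
	}

	/*
	 * Waker side: publish the condition, then wake. The barrier (B)
	 * in rcuwait_wake_up() pairs with set_current_state() in the
	 * wait loop, so the waiter cannot miss the update.
	 */
	static void my_work_complete(struct my_work *work)
	{
		WRITE_ONCE(work->done, true);
		rcuwait_wake_up(&work->done_wait);
	}

The point of the RCU protection on @task is that the waker may race with
the waiter clearing the pointer in finish_rcuwait(); rcu_access_pointer()
and rcu_dereference() let both sides proceed without extra locking on the
wake-up path.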