commit 89da3b94bb

With this patch rcu_sync has a single state variable and the transition
rules become really simple:

    GP_IDLE   - owned by the first rcu_sync_enter() which moves it to
    GP_ENTER  - owned by rcu-callback which moves it to
    GP_PASSED - owned by the last rcu_sync_exit() which moves it to
    GP_EXIT   - and this is the only "nontrivial" state.

                rcu-callback moves it back to GP_IDLE unless another
                enter() comes before a GP pass.

                If rcu-callback is invoked before the next rcu_sync_exit()
                it must see gp_count incremented by that enter() and set
                GP_PASSED. Otherwise, if the next rcu_sync_exit() wins the
                race, it will move it to

    GP_REPLAY - owned by rcu-callback which moves it to GP_EXIT

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
[ paulmck: While here, apply READ_ONCE() and WRITE_ONCE() to ->gp_state. ]
[ paulmck: Tweaks to make htmldocs happy. (Reported by kbuild test robot.) ]
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
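For orientation, below is a minimal, single-threaded userspace model of the
transition rules quoted above. It is a sketch only: the enum values mirror the
commit message, but the sync_model struct and the model_*() functions are
hypothetical names, all locking, waiting, and callback (re)queueing is omitted,
and this is not the kernel/rcu/sync.c implementation.

#include <assert.h>

/* Hypothetical model of the single-variable state machine described above. */
enum gp_state { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

struct sync_model {
	enum gp_state	gp_state;	/* the single state variable */
	int		gp_count;	/* updaters inside enter()/exit() */
};

/* rcu_sync_enter(): the first updater moves GP_IDLE to GP_ENTER and, in the
 * real code, then waits until the rcu-callback reports GP_PASSED. */
static void model_enter(struct sync_model *s)
{
	s->gp_count++;
	if (s->gp_state == GP_IDLE)
		s->gp_state = GP_ENTER;
}

/* rcu_sync_exit(): the last updater moves GP_PASSED to GP_EXIT, or asks for a
 * replay if the callback from a previous exit() has not yet run. */
static void model_exit(struct sync_model *s)
{
	if (--s->gp_count == 0) {
		if (s->gp_state == GP_PASSED)
			s->gp_state = GP_EXIT;
		else if (s->gp_state == GP_EXIT)
			s->gp_state = GP_REPLAY;
	}
}

/* rcu-callback, invoked once a grace period has elapsed. */
static void model_gp_callback(struct sync_model *s)
{
	if (s->gp_count)
		s->gp_state = GP_PASSED;	/* updaters present */
	else if (s->gp_state == GP_REPLAY)
		s->gp_state = GP_EXIT;		/* wait for one more GP */
	else
		s->gp_state = GP_IDLE;		/* fastpaths allowed again */
}

int main(void)
{
	struct sync_model s = { GP_IDLE, 0 };

	/* Uncontended enter -> GP -> exit -> GP sequence ends back in GP_IDLE. */
	model_enter(&s);	assert(s.gp_state == GP_ENTER);
	model_gp_callback(&s);	assert(s.gp_state == GP_PASSED);
	model_exit(&s);		assert(s.gp_state == GP_EXIT);
	model_gp_callback(&s);	assert(s.gp_state == GP_IDLE);
	return 0;
}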
		
			
				
	
	
		
56 lines · 1.5 KiB · C
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#ifndef _LINUX_RCU_SYNC_H_
#define _LINUX_RCU_SYNC_H_

#include <linux/wait.h>
#include <linux/rcupdate.h>

/* Structure to mediate between updaters and fastpath-using readers.  */
struct rcu_sync {
	int			gp_state;
	int			gp_count;
	wait_queue_head_t	gp_wait;

	struct rcu_head		cb_head;
};

/**
 * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Returns true if readers are permitted to use their fastpaths.  Must be
 * invoked within some flavor of RCU read-side critical section.
 */
static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
			 !rcu_read_lock_bh_held() &&
			 !rcu_read_lock_sched_held(),
			 "suspicious rcu_sync_is_idle() usage");
	return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
}

extern void rcu_sync_init(struct rcu_sync *);
extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);

#define __RCU_SYNC_INITIALIZER(name) {					\
		.gp_state = 0,						\
		.gp_count = 0,						\
		.gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait),	\
	}

#define	DEFINE_RCU_SYNC(name)	\
	struct rcu_sync name = __RCU_SYNC_INITIALIZER(name)

#endif /* _LINUX_RCU_SYNC_H_ */
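A hedged usage sketch of the API declared above: the my_rss, my_read_side(),
and my_write_side() names are hypothetical and the fastpath/slowpath bodies are
placeholders; the pairing simply illustrates how a writer can force readers off
their fastpaths with rcu_sync_enter()/rcu_sync_exit() while readers test
rcu_sync_is_idle() from inside an RCU read-side critical section, as the
kernel-doc comment on rcu_sync_is_idle() requires.

#include <linux/rcu_sync.h>
#include <linux/rcupdate.h>

static DEFINE_RCU_SYNC(my_rss);		/* hypothetical instance */

/* Reader: check the fastpath condition inside an RCU read-side section. */
static void my_read_side(void)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&my_rss)) {
		/* fastpath: no updater active, do the cheap per-reader work */
	} else {
		/* slowpath: coordinate with the updater (e.g. take a lock) */
	}
	rcu_read_unlock();
}

/* Updater: force readers onto their slowpaths for the critical section. */
static void my_write_side(void)
{
	rcu_sync_enter(&my_rss);	/* waits until readers observe non-idle state */
	/* ... perform the update; readers now take the slowpath ... */
	rcu_sync_exit(&my_rss);		/* fastpaths re-enabled after a later GP */
}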