srcu: Make Tiny SRCU explicitly disable preemption

Because Tiny SRCU is used only in kernels built with either
CONFIG_PREEMPT_NONE=y or CONFIG_PREEMPT_VOLUNTARY=y, there has not been
any need for Tiny SRCU to explicitly disable preemption.  However, the
prospect of lazy preemption changes that, and the lazy-preemption patches
do result in rcutorture runs finding both too-short grace periods and
grace-period hangs for Tiny SRCU.

This commit therefore adds the needed preempt_disable() and
preempt_enable() calls to Tiny SRCU.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Ankur Arora <ankur.a.arora@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
This commit is contained in: commit 65b4a59557 (parent c1ec7c1580)
					 2 changed files with 28 additions and 5 deletions
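As background for the diff below, here is a minimal, hypothetical Tiny SRCU
reader/updater pair (not part of this commit; the struct, variable, and
function names are invented for illustration).  The guarantee at stake is
that synchronize_srcu() must not return while readers that started earlier
are still running; the too-short grace periods mentioned above would let
kfree() below race with such a reader.

#include <linux/slab.h>
#include <linux/srcu.h>

/* Hypothetical data protected by SRCU. */
struct my_config {
	int threshold;
};

static struct my_config __rcu *cur_config;
DEFINE_STATIC_SRCU(my_srcu);

/* Reader: enters an SRCU read-side critical section via __srcu_read_lock(). */
static int read_threshold(void)
{
	struct my_config *cfg;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);
	cfg = srcu_dereference(cur_config, &my_srcu);
	if (cfg)
		val = cfg->threshold;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

/* Updater (single updater assumed): publish a new config, wait for a full
 * SRCU grace period, then free the old copy.  A too-short grace period
 * would free memory that read_threshold() may still be dereferencing.
 */
static void publish_config(struct my_config *newcfg)
{
	struct my_config *old = rcu_access_pointer(cur_config);

	rcu_assign_pointer(cur_config, newcfg);
	synchronize_srcu(&my_srcu);
	kfree(old);
}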
				
			
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
 
+	preempt_disable();  // Needed for PREEMPT_AUTO
 	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
 	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
+	preempt_enable();
 	return idx;
 }
 
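A note on read-side cost: in the CONFIG_PREEMPT_NONE=y and
CONFIG_PREEMPT_VOLUNTARY=y kernels that Tiny SRCU serves today (when preempt
counting is not otherwise enabled), preempt_disable() and preempt_enable()
compile down to compiler barriers, so the fast path above remains a plain
non-atomic increment; only preempt-counting builds, such as the
lazy-preemption (PREEMPT_AUTO) configurations that motivated this change,
pay for the count manipulation.  A rough, hedged sketch of the relevant
include/linux/preempt.h definitions (exact bodies vary by kernel version):

#ifdef CONFIG_PREEMPT_COUNT
/* Preempt-counting builds (including lazy preemption): bump and drop the
 * per-task preempt count; preempt_enable() also reschedules if preemption
 * became pending while it was disabled.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)
#else
/* !CONFIG_PREEMPT_COUNT: compiler barriers only, so Tiny SRCU's read-side
 * fast path is unchanged in cost.
 */
#define preempt_disable()	barrier()
#define preempt_enable()	barrier()
#endif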
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -96,9 +96,12 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
  */
 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
-	int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
+	int newval;
 
+	preempt_disable();  // Needed for PREEMPT_AUTO
+	newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
 	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
+	preempt_enable();
 	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
 		swake_up_one(&ssp->srcu_wq);
 }
@@ -117,8 +120,11 @@ void srcu_drive_gp(struct work_struct *wp)
 	struct srcu_struct *ssp;
 
 	ssp = container_of(wp, struct srcu_struct, srcu_work);
-	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+	preempt_disable();  // Needed for PREEMPT_AUTO
+	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
+		preempt_enable();
 		return; /* Already running or nothing to do. */
+	}
 
 	/* Remove recently arrived callbacks and wait for readers. */
 	WRITE_ONCE(ssp->srcu_gp_running, true);
@@ -130,9 +136,12 @@ void srcu_drive_gp(struct work_struct *wp)
 	idx = (ssp->srcu_idx & 0x2) / 2;
 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
 	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
+	preempt_enable();
 	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+	preempt_disable();  // Needed for PREEMPT_AUTO
 	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
+	preempt_enable();
 
 	/* Invoke the callbacks we removed above. */
 	while (lh) {
@@ -150,8 +159,11 @@ void srcu_drive_gp(struct work_struct *wp)
 	 * at interrupt level, but the ->srcu_gp_running checks will
 	 * straighten that out.
 	 */
+	preempt_disable();  // Needed for PREEMPT_AUTO
 	WRITE_ONCE(ssp->srcu_gp_running, false);
-	if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
+	idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));
+	preempt_enable();
+	if (idx)
 		schedule_work(&ssp->srcu_work);
 }
 EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -160,9 +172,12 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
 {
 	unsigned long cookie;
 
+	preempt_disable();  // Needed for PREEMPT_AUTO
 	cookie = get_state_synchronize_srcu(ssp);
-	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
+	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {
+		preempt_enable();
 		return;
+	}
 	WRITE_ONCE(ssp->srcu_idx_max, cookie);
 	if (!READ_ONCE(ssp->srcu_gp_running)) {
 		if (likely(srcu_init_done))
@@ -170,6 +185,7 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
 		else if (list_empty(&ssp->srcu_work.entry))
 			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
 	}
+	preempt_enable();
 }
 
 /*
@@ -183,11 +199,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 
 	rhp->func = func;
 	rhp->next = NULL;
+	preempt_disable();  // Needed for PREEMPT_AUTO
 	local_irq_save(flags);
 	*ssp->srcu_cb_tail = rhp;
 	ssp->srcu_cb_tail = &rhp->next;
 	local_irq_restore(flags);
 	srcu_gp_start_if_needed(ssp);
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(call_srcu);
 
@@ -241,9 +259,12 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
  */
 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
 {
-	unsigned long ret = get_state_synchronize_srcu(ssp);
+	unsigned long ret;
 
+	preempt_disable();  // Needed for PREEMPT_AUTO
+	ret = get_state_synchronize_srcu(ssp);
 	srcu_gp_start_if_needed(ssp);
+	preempt_enable();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
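For context on the call_srcu() hunk above, a typical caller embeds an
rcu_head in the protected object, unlinks the object so new readers cannot
find it, and frees it from the callback once a grace period has elapsed.
The sketch below is illustrative only; my_node, my_srcu, and the function
names are not from this commit:

#include <linux/slab.h>
#include <linux/srcu.h>

struct my_node {
	int key;
	struct rcu_head rh;	/* Passed to call_srcu(). */
};

DEFINE_STATIC_SRCU(my_srcu);

/* Runs after a full SRCU grace period; no reader can still hold a
 * reference obtained before call_srcu() was invoked.
 */
static void my_node_free_cb(struct rcu_head *rhp)
{
	struct my_node *node = container_of(rhp, struct my_node, rh);

	kfree(node);
}

static void my_node_retire(struct my_node *node)
{
	/* Unlink @node from whatever structure readers traverse, then: */
	call_srcu(&my_srcu, &node->rh, my_node_free_cb);
}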
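Likewise, for the start_poll_synchronize_srcu() hunk: the polled
grace-period interface starts a grace period without blocking and lets the
caller check later whether it has completed.  Again an illustrative sketch
with invented names:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static unsigned long my_gp_cookie;

/* Start (if needed) a grace period covering all current readers and
 * remember the point at which it will have completed.
 */
static void my_begin_cleanup(void)
{
	my_gp_cookie = start_poll_synchronize_srcu(&my_srcu);
}

/* Later, from any context: has that grace period completed? */
static bool my_cleanup_ready(void)
{
	return poll_state_synchronize_srcu(&my_srcu, my_gp_cookie);
}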