mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	workqueue: Always queue work items to the newest PWQ for order workqueues
To ensure non-reentrancy, __queue_work() attempts to enqueue a work
item to the pool of the currently executing worker. This is not only
unnecessary for an ordered workqueue, where order inherently suggests
non-reentrancy, but it could also disrupt the sequence if the item is
not enqueued on the newest PWQ.
Just queue it to the newest PWQ and let order management guarantee
non-reentrancy.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Fixes: 4c065dbce1 ("workqueue: Enable unbound cpumask update on ordered workqueues")
Cc: stable@vger.kernel.org # v6.9+
Signed-off-by: Tejun Heo <tj@kernel.org>
(cherry picked from commit 74347be3edfd11277799242766edf844c43dd5d3)
			
			
This commit is contained in:
		
							parent
							
								
									b2b1f93384
								
							
						
					
					
						commit
						58629d4871
					
				
					 1 changed file with 5 additions and 1 deletion
				
			
@@ -2274,9 +2274,13 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
+	 *
+	 * For ordered workqueue, work items must be queued on the newest pwq
+	 * for accurate order management.  Guaranteed order also guarantees
+	 * non-reentrancy.  See the comments above unplug_oldest_pwq().
	 */
	last_pool = get_work_pool(work);
-	if (last_pool && last_pool != pool) {
+	if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) {
		struct worker *worker;

		raw_spin_lock(&last_pool->lock);
	
		Reference in a new issue