mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	workqueue: implement WQ_NON_REENTRANT
With gcwq managing all the workers and work->data pointing to the last
gcwq it was on, non-reentrance can be easily implemented by checking
whether the work is still running on the previous gcwq on queueing.
Implement it.

Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent 7a22ad757e
commit 18aa9effad

2 changed files with 30 additions and 3 deletions
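Before the diff, a minimal sketch of how a driver might opt in to the new guarantee. This is illustrative only: alloc_workqueue() is the creation interface the cmwq rework ultimately exported (at this exact point in the series the flags are still passed to the older __create_workqueue() wrappers), and my_wq, my_work and my_work_fn are invented names, not code from this commit.

	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;	/* hypothetical */
	static struct work_struct my_work;

	static void my_work_fn(struct work_struct *work)
	{
		/*
		 * With WQ_NON_REENTRANT, this callback is never entered
		 * concurrently with itself for the same work item, even
		 * when the item is re-queued from another CPU while a
		 * previous invocation is still running.
		 */
	}

	static int __init my_driver_init(void)
	{
		/* third argument is max_active; 0 selects the default */
		my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 0);
		if (!my_wq)
			return -ENOMEM;

		INIT_WORK(&my_work, my_work_fn);
		queue_work(my_wq, &my_work);
		return 0;
	}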
include/linux/workqueue.h

@@ -225,6 +225,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
 	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
+	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 };
 
 extern struct workqueue_struct *

kernel/workqueue.c

@@ -534,10 +534,36 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	/* determine gcwq to use */
+	/*
+	 * Determine gcwq to use.  SINGLE_CPU is inherently
+	 * NON_REENTRANT, so test it first.
+	 */
 	if (!(wq->flags & WQ_SINGLE_CPU)) {
-		/* just use the requested cpu for multicpu workqueues */
+		struct global_cwq *last_gcwq;
+
+		/*
+		 * It's multi cpu.  If @wq is non-reentrant and @work
+		 * was previously on a different cpu, it might still
+		 * be running there, in which case the work needs to
+		 * be queued on that cpu to guarantee non-reentrance.
+		 */
 		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
+		if (wq->flags & WQ_NON_REENTRANT &&
+		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
+			struct worker *worker;
+
+			spin_lock_irqsave(&last_gcwq->lock, flags);
+
+			worker = find_worker_executing_work(last_gcwq, work);
+
+			if (worker && worker->current_cwq->wq == wq)
+				gcwq = last_gcwq;
+			else {
+				/* meh... not running there, queue here */
+				spin_unlock_irqrestore(&last_gcwq->lock, flags);
+				spin_lock_irqsave(&gcwq->lock, flags);
+			}
+		} else
+			spin_lock_irqsave(&gcwq->lock, flags);
 	} else {
 		unsigned int req_cpu = cpu;
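For context, this is the reentrancy hazard the new check closes, sketched as a hypothetical two-CPU timeline (wq is a multi-cpu workqueue without WQ_NON_REENTRANT; my_work and my_work_fn are invented names, not code from this commit):

	CPU0					CPU1
	----					----
	worker runs my_work_fn(&my_work)
						queue_work(wq, &my_work)
	... my_work_fn still running ...	worker dequeues my_work
						my_work_fn(&my_work) runs again  <-- reentered

With WQ_NON_REENTRANT set, __queue_work() instead uses get_work_gcwq() to find the gcwq @work last ran on, takes that gcwq's lock, and asks find_worker_executing_work() whether @work is still executing there on behalf of the same workqueue. If it is, the work is queued on that cpu, so a work item never runs concurrently with itself; otherwise the lock is dropped and the work is queued on the originally requested cpu as before.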