Mirror of https://github.com/torvalds/linux.git (synced 2025-10-31 16:48:26 +02:00)
	workqueue: Detach workers directly in idle_cull_fn()
The code to kick off the destruction of workers is now in a process
context (idle_cull_fn()), and the detaching of a worker is not required
to be inside the worker thread now, so just do the detaching directly
in idle_cull_fn().

wake_dying_workers() is renamed to detach_dying_workers() and the
unneeded wakeup in wake_dying_workers() is also removed.

Cc: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
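In short, the culler now owns the whole teardown from process context: it marks idle workers dying, detaches them from the pool under wq_pool_attach_mutex, and then reaps them; the dying worker thread no longer needs a wakeup just to detach itself. The user-space sketch below models that flow with pthreads. It is an illustrative analogy only: every toy_-prefixed name is hypothetical, not a kernel API.

```c
/* Toy user-space analogy of the new flow: a "culler" running in
 * process context detaches dying workers itself, then reaps them.
 * All toy_* names are illustrative, not kernel identifiers. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

struct toy_pool;

struct toy_worker {
	pthread_t task;
	struct toy_pool *pool;		/* NULL once detached */
	atomic_bool die;		/* set_worker_dying() analogue */
	struct toy_worker *next;	/* pool->workers list linkage */
};

struct toy_pool {
	pthread_mutex_t attach_mutex;	/* plays the role of wq_pool_attach_mutex */
	struct toy_worker *workers;
};

static void *toy_worker_thread(void *arg)
{
	struct toy_worker *w = arg;

	/* A real kworker processes work items; the toy just idles until
	 * it is marked dying. Note there is no self-detach on exit. */
	while (!atomic_load(&w->die))
		sched_yield();
	return NULL;
}

/* Counterpart of detach_worker(): unlink under the attach mutex. */
static void toy_detach_worker(struct toy_pool *pool, struct toy_worker *w)
{
	struct toy_worker **pp;

	for (pp = &pool->workers; *pp; pp = &(*pp)->next) {
		if (*pp == w) {
			*pp = w->next;
			break;
		}
	}
	w->pool = NULL;
}

/* Counterpart of idle_cull_fn(): runs in process context, so it can
 * mark workers dying, detach them directly, and then reap them. */
static void toy_idle_cull(struct toy_pool *pool, struct toy_worker *cull, int n)
{
	pthread_mutex_lock(&pool->attach_mutex);
	for (int i = 0; i < n; i++) {
		atomic_store(&cull[i].die, true);
		toy_detach_worker(pool, &cull[i]);
	}
	pthread_mutex_unlock(&pool->attach_mutex);

	for (int i = 0; i < n; i++)	/* reap_dying_workers() analogue */
		pthread_join(cull[i].task, NULL);
}

int main(void)
{
	struct toy_pool pool = { .attach_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct toy_worker w[NWORKERS] = { 0 };

	for (int i = 0; i < NWORKERS; i++) {
		w[i].pool = &pool;
		w[i].next = pool.workers;
		pool.workers = &w[i];
		pthread_create(&w[i].task, NULL, toy_worker_thread, &w[i]);
	}

	toy_idle_cull(&pool, w, NWORKERS);
	printf("all workers detached and reaped; list empty: %s\n",
	       pool.workers ? "no" : "yes");
	return 0;
}
```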
This commit is contained in:

    parent f45b1c3c33
    commit f4b7b53c94

1 changed file with 19 additions and 26 deletions
```diff
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2695,6 +2695,16 @@ static void unbind_worker(struct worker *worker)
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 }
 
+
+static void detach_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	unbind_worker(worker);
+	list_del(&worker->node);
+	worker->pool = NULL;
+}
+
 /**
  * worker_detach_from_pool() - detach a worker from its pool
  * @worker: worker which is attached to its pool
@@ -2711,11 +2721,7 @@ static void worker_detach_from_pool(struct worker *worker)
 	WARN_ON_ONCE(pool->flags & POOL_BH);
 
 	mutex_lock(&wq_pool_attach_mutex);
-
-	unbind_worker(worker);
-	list_del(&worker->node);
-	worker->pool = NULL;
-
+	detach_worker(worker);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	/* clear leftover flags without pool->lock after it is detached */
@@ -2807,24 +2813,12 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
-static void wake_dying_workers(struct list_head *cull_list)
+static void detach_dying_workers(struct list_head *cull_list)
 {
 	struct worker *worker;
 
-	list_for_each_entry(worker, cull_list, entry) {
-		unbind_worker(worker);
-		/*
-		 * If the worker was somehow already running, then it had to be
-		 * in pool->idle_list when set_worker_dying() happened or we
-		 * wouldn't have gotten here.
-		 *
-		 * Thus, the worker must either have observed the WORKER_DIE
-		 * flag, or have set its state to TASK_IDLE. Either way, the
-		 * below will be observed by the worker and is safe to do
-		 * outside of pool->lock.
-		 */
-		wake_up_process(worker->task);
-	}
+	list_for_each_entry(worker, cull_list, entry)
+		detach_worker(worker);
 }
 
 static void reap_dying_workers(struct list_head *cull_list)
@@ -2930,9 +2924,9 @@ static void idle_cull_fn(struct work_struct *work)
 
 	/*
 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
-	 * cannot proceed beyong worker_detach_from_pool() in its self-destruct
-	 * path. This is required as a previously-preempted worker could run after
-	 * set_worker_dying() has happened but before wake_dying_workers() did.
+	 * cannot proceed beyong set_pf_worker() in its self-destruct path.
+	 * This is required as a previously-preempted worker could run after
+	 * set_worker_dying() has happened but before detach_dying_workers() did.
 	 */
 	mutex_lock(&wq_pool_attach_mutex);
 	raw_spin_lock_irq(&pool->lock);
@@ -2953,7 +2947,7 @@ static void idle_cull_fn(struct work_struct *work)
 	}
 
 	raw_spin_unlock_irq(&pool->lock);
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 	mutex_unlock(&wq_pool_attach_mutex);
 
 	reap_dying_workers(&cull_list);
@@ -3336,7 +3330,6 @@ static int worker_thread(void *__worker)
 
 		set_task_comm(worker->task, "kworker/dying");
 		ida_free(&pool->worker_ida, worker->id);
-		worker_detach_from_pool(worker);
 		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
@@ -4921,7 +4914,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 	WARN_ON(pool->nr_workers || pool->nr_idle);
 	raw_spin_unlock_irq(&pool->lock);
 
-	wake_dying_workers(&cull_list);
+	detach_dying_workers(&cull_list);
 
 	mutex_unlock(&wq_pool_attach_mutex);
 
```
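The comment retained in idle_cull_fn() is the subtle part of this change: holding wq_pool_attach_mutex is what stops a previously-preempted dying worker from racing through its self-destruct path while the culler is still detaching it. Below is a minimal pthreads sketch of that serialization. It is an analogy under stated assumptions, with hypothetical demo_* names, not kernel code: the point it demonstrates is only that a path which takes the same mutex can never observe a half-done detach.

```c
/* Minimal sketch of the mutual exclusion idle_cull_fn() relies on:
 * the dying worker's exit path serializes on the same mutex the
 * culler holds while detaching, so the worker sees either the state
 * before the detach started or after it fully finished. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_attach_mutex = PTHREAD_MUTEX_INITIALIZER;
static int demo_detached;	/* written by the culler, read by the worker */

static void *demo_dying_worker(void *arg)
{
	(void)arg;
	/* Self-destruct path: this worker may have been preempted right
	 * after being marked dying. Taking the attach mutex here means
	 * it cannot run in the middle of the culler's detach sequence. */
	pthread_mutex_lock(&demo_attach_mutex);
	printf("worker resumes: detached=%d (never a half-done state)\n",
	       demo_detached);
	pthread_mutex_unlock(&demo_attach_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_dying_worker, NULL);

	/* Culler side: everything between lock and unlock is atomic with
	 * respect to the worker's exit path above. The store stands in
	 * for the whole detach_dying_workers() sequence. */
	pthread_mutex_lock(&demo_attach_mutex);
	demo_detached = 1;
	pthread_mutex_unlock(&demo_attach_mutex);

	pthread_join(t, NULL);
	return 0;
}
```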