	workqueue/hotplug: simplify workqueue_offline_cpu()
Since the recent cpu/hotplug refactoring, workqueue_offline_cpu() is
guaranteed to run on the local cpu which is going offline.

This also fixes the following deadlock by removing work item scheduling
and flushing from the CPU hotplug path.

http://lkml.kernel.org/r/1504764252-29091-1-git-send-email-prsood@codeaurora.org

tj: Description update.

Signed-off-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent c98a980509
commit e8b3f8db7a

1 changed file with 6 additions and 9 deletions
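For context before the diff: the pre-patch offline path bounced the unbind step through an on-stack work item queued onto the outgoing CPU and then flushed it from the hotplug callback. The sketch below shows that generic pattern in isolation, assuming made-up names (example_fn, run_on_cpu_sync); only INIT_WORK_ONSTACK(), queue_work_on(), system_highpri_wq, flush_work() and destroy_work_on_stack() are the real workqueue APIs that appear in the removed lines. Blocking in flush_work() from the CPU hotplug path is the deadlock the commit message refers to, and it is what the patch removes.

/*
 * Sketch only -- not kernel/workqueue.c.  example_fn() and run_on_cpu_sync()
 * are illustrative names; the calls mirror the lines removed by this patch.
 */
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* runs in worker context on whichever CPU the work was queued to */
}

static void run_on_cpu_sync(int cpu)
{
	struct work_struct w;

	INIT_WORK_ONSTACK(&w, example_fn);		/* on-stack work item */
	queue_work_on(cpu, system_highpri_wq, &w);	/* force execution on @cpu */
	flush_work(&w);					/* block until example_fn() has run */
	destroy_work_on_stack(&w);
}

After this patch, workqueue_offline_cpu() skips that round trip entirely and calls the unbind helper directly, since the hotplug callback itself already runs on the CPU that is going down.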
				
			
kernel/workqueue.c

@@ -1635,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running.  Because wq_unbind_fn() releases
+	 * Sanity check nr_running.  Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously.  Check iff
 	 * unbind is not in progress.
@@ -4511,9 +4511,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
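The hunk above only shows the prototype change and the dropped smp_processor_id() lookup; as a reading aid, this is the shape of the conversion, with the function body elided rather than reproduced:

/* Old: a work-item callback that infers the CPU from where it happens to run. */
static void wq_unbind_fn(struct work_struct *work)
{
	int cpu = smp_processor_id();
	/* ... unbind the per-cpu worker pools of @cpu ... */
}

/* New: an ordinary helper; the caller passes the CPU explicitly. */
static void unbind_workers(int cpu)
{
	/* ... same body, @cpu now comes from the caller ... */
}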
@@ -4710,12 +4709,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4723,9 +4723,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
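Putting the last two hunks together, workqueue_offline_cpu() after this patch reads roughly as follows. This is reconstructed from the diff as a reading aid, not a verbatim copy; the loop between mutex_lock() and wq_update_unbound_numa() sits in the elided context between the hunks and is only summarized in a comment here.

int workqueue_offline_cpu(unsigned int cpu)
{
	struct workqueue_struct *wq;

	/* unbinding per-cpu workers should happen on the local CPU */
	if (WARN_ON(cpu != smp_processor_id()))
		return -1;

	unbind_workers(cpu);

	/* update NUMA affinity of unbound workqueues */
	mutex_lock(&wq_pool_mutex);
	/* (elided context: for each unbound workqueue ...) */
		wq_update_unbound_numa(wq, cpu, false);
	mutex_unlock(&wq_pool_mutex);

	return 0;
}

No work item is scheduled or flushed any more, so the hotplug callback never waits on the workqueue machinery of the CPU it is tearing down.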