mirror of https://github.com/torvalds/linux.git
	workqueue: Tag bound workers with KTHREAD_IS_PER_CPU
Mark the per-cpu workqueue workers as KTHREAD_IS_PER_CPU.

Workqueues have unfortunate semantics in that per-cpu workers are not,
by default, flushed and parked during hotplug; however, a subset does a
manual flush on hotplug and hard-relies on them for correctness.
Therefore play silly games.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210121103506.693465814@infradead.org
parent ac687e6e8c
commit 5c25b5ff89

1 changed file with 9 additions and 2 deletions
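The whole change is a tagging exercise: a worker attached to an associated per-cpu pool is marked as a genuine per-cpu kthread with kthread_set_per_cpu(), the tag is dropped (cpu = -1) whenever the worker is detached or its CPU is unplugged, and it is restored when the CPU comes back online. Below is a minimal sketch of that pattern, not the actual kernel code: struct fake_pool, struct fake_worker and the sketch_*() helpers are hypothetical stand-ins for the workqueue-internal structures, while kthread_set_per_cpu(), set_cpus_allowed_ptr(), cpu_possible_mask and WARN_ON_ONCE() are the real kernel interfaces used by the patch.

/*
 * Minimal sketch of the tagging pattern. The fake_* types and sketch_*()
 * helpers are illustrative stand-ins, not kernel code.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/types.h>
#include <linux/bug.h>

struct fake_pool {			/* stand-in for struct worker_pool */
	int cpu;			/* bound CPU, -1 for unbound pools */
	bool disassociated;		/* mirrors POOL_DISASSOCIATED */
	const struct cpumask *cpumask;	/* mirrors pool->attrs->cpumask */
};

struct fake_worker {			/* stand-in for struct worker */
	struct task_struct *task;
	bool unbound;			/* mirrors WORKER_UNBOUND */
};

/* Attach: workers of an associated per-cpu pool are genuine per-cpu kthreads. */
static void sketch_attach(struct fake_worker *worker, struct fake_pool *pool)
{
	if (pool->disassociated)
		worker->unbound = true;
	else
		kthread_set_per_cpu(worker->task, pool->cpu);
}

/* Detach / CPU unplug: drop the tag, then let the task run anywhere. */
static void sketch_unbind(struct fake_worker *worker)
{
	kthread_set_per_cpu(worker->task, -1);
	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
}

/* CPU online: re-tag the worker and restore the pool's cpumask. */
static void sketch_rebind(struct fake_worker *worker, struct fake_pool *pool)
{
	kthread_set_per_cpu(worker->task, pool->cpu);
	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->cpumask) < 0);
}

The ordering mirrors the diff below: the tag is cleared before the affinity is widened on unplug and set again before the pool cpumask is restored on replug. The tag is what lets the hotplug and scheduler machinery (e.g. is_per_cpu_kthread()) distinguish genuinely pinned workers from unbound ones.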
kernel/workqueue.c

@@ -1861,6 +1861,8 @@ static void worker_attach_to_pool(struct worker *worker,
 	 */
 	if (pool->flags & POOL_DISASSOCIATED)
 		worker->flags |= WORKER_UNBOUND;
+	else
+		kthread_set_per_cpu(worker->task, pool->cpu);
 
 	list_add_tail(&worker->node, &pool->workers);
 	worker->pool = pool;
@@ -1883,6 +1885,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
 	mutex_lock(&wq_pool_attach_mutex);
 
+	kthread_set_per_cpu(worker->task, -1);
 	list_del(&worker->node);
 	worker->pool = NULL;
 
@@ -4919,8 +4922,10 @@ static void unbind_workers(int cpu)
 
 		raw_spin_unlock_irq(&pool->lock);
 
-		for_each_pool_worker(worker, pool)
+		for_each_pool_worker(worker, pool) {
+			kthread_set_per_cpu(worker->task, -1);
 			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+		}
 
 		mutex_unlock(&wq_pool_attach_mutex);
 
@@ -4972,9 +4977,11 @@ static void rebind_workers(struct worker_pool *pool)
 	 * of all workers first and then clear UNBOUND.  As we're called
 	 * from CPU_ONLINE, the following shouldn't fail.
 	 */
-	for_each_pool_worker(worker, pool)
+	for_each_pool_worker(worker, pool) {
+		kthread_set_per_cpu(worker->task, pool->cpu);
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
 						  pool->attrs->cpumask) < 0);
+	}
 
 	raw_spin_lock_irq(&pool->lock);