workqueue: reject adjusting max_active or applying attrs to ordered workqueues

Adjusting max_active of, or applying new workqueue_attrs to, an ordered
workqueue breaks its ordering guarantee. The former is obvious. The
latter is because applying attrs creates a new pwq (pool_workqueue) and
there is no ordering constraint between the old and new pwqs.

Make apply_workqueue_attrs() and workqueue_set_max_active() trigger
WARN_ON() if those operations are requested on an ordered workqueue,
and fail / ignore respectively.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 618b01eb42
commit 8719dceae2

2 changed files with 11 additions and 1 deletion
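For context, the guarantee this patch protects: an ordered workqueue runs
at most one work item at a time, in queueing order. The following minimal
kernel-module sketch is illustrative only (demo_wq, demo_step, and the two
work items are hypothetical names, not part of this patch); it shows the
kind of caller that depends on that ordering:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* ordered: one item at a time */

static void demo_step(struct work_struct *work)
{
	/* runs strictly after any previously queued item has finished */
	pr_info("step %p done\n", work);
}

static DECLARE_WORK(step1, demo_step);
static DECLARE_WORK(step2, demo_step);

static int __init demo_init(void)
{
	/* __WQ_ORDERED is set internally by alloc_ordered_workqueue() */
	demo_wq = alloc_ordered_workqueue("demo_wq", 0);
	if (!demo_wq)
		return -ENOMEM;

	/* step1 is guaranteed to complete before step2 starts */
	queue_work(demo_wq, &step1);
	queue_work(demo_wq, &step2);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");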
				
			
include/linux/workqueue.h
@@ -295,6 +295,7 @@ enum {
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
+	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -397,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
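The double-underscore prefix marks __WQ_ORDERED as an internal flag (bits
16 and up); callers pick it up only implicitly through
alloc_ordered_workqueue(). As an illustrative expansion (hypothetical
caller, not code from the patch):

/* a caller like this ... */
struct workqueue_struct *wq =
	alloc_ordered_workqueue("demo_wq", WQ_MEM_RECLAIM);

/* ... now expands to the equivalent of: */
struct workqueue_struct *wq =
	alloc_workqueue("demo_wq",
			WQ_UNBOUND | __WQ_ORDERED | WQ_MEM_RECLAIM, 1);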
kernel/workqueue.c
@@ -3494,9 +3494,14 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	struct pool_workqueue *pwq, *last_pwq;
 	struct worker_pool *pool;
 
+	/* only unbound workqueues can change attributes */
 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
 		return -EINVAL;
 
+	/* creating multiple pwqs breaks ordering guarantee */
+	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+		return -EINVAL;
+
 	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
 	if (!pwq)
 		return -ENOMEM;
@@ -3752,6 +3757,10 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
 	struct pool_workqueue *pwq;
 
+	/* disallow meddling with max_active for ordered workqueues */
+	if (WARN_ON(wq->flags & __WQ_ORDERED))
+		return;
+
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	spin_lock_irq(&workqueue_lock);
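With both checks in place, attempts to retune an ordered workqueue are
loudly rejected instead of silently breaking ordering. A hedged
caller-side sketch (reusing the hypothetical demo_wq from above):

struct workqueue_attrs *attrs;
int ret;

/* ignored, with a WARN_ON splat: __WQ_ORDERED is set on demo_wq */
workqueue_set_max_active(demo_wq, 16);

/* applying attrs fails once the ordered workqueue has its pwq */
attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (attrs) {
	ret = apply_workqueue_attrs(demo_wq, attrs);
	/* ret is -EINVAL; the ordering guarantee is preserved */
	free_workqueue_attrs(attrs);
}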