mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	io_uring: use private workqueue for exit work
Rather than use the system unbound event workqueue, use an io_uring specific one. This avoids dependencies with the tty, which also uses the system_unbound_wq, and issues flushes of said workqueue from inside its poll handling.

Cc: stable@vger.kernel.org
Reported-by: Rasmus Karlsson <rasmus.karlsson@pajlada.com>
Tested-by: Rasmus Karlsson <rasmus.karlsson@pajlada.com>
Tested-by: Iskren Chernev <me@iskren.info>
Link: https://github.com/axboe/liburing/issues/1113
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									bee1d5becd
								
							
						
					
					
						commit
						73eaa2b583
					
				
					 1 changed file with 4 additions and 1 deletion
				
			
		| 
						 | 
					@ -147,6 +147,7 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 | 
				
			||||||
static void io_queue_sqe(struct io_kiocb *req);
 | 
					static void io_queue_sqe(struct io_kiocb *req);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
struct kmem_cache *req_cachep;
 | 
					struct kmem_cache *req_cachep;
 | 
				
			||||||
 | 
					static struct workqueue_struct *iou_wq __ro_after_init;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int __read_mostly sysctl_io_uring_disabled;
 | 
					static int __read_mostly sysctl_io_uring_disabled;
 | 
				
			||||||
static int __read_mostly sysctl_io_uring_group = -1;
 | 
					static int __read_mostly sysctl_io_uring_group = -1;
 | 
				
			||||||
| 
						 | 
					@ -3166,7 +3167,7 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 | 
				
			||||||
	 * noise and overhead, there's no discernable change in runtime
 | 
						 * noise and overhead, there's no discernable change in runtime
 | 
				
			||||||
	 * over using system_wq.
 | 
						 * over using system_wq.
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	queue_work(system_unbound_wq, &ctx->exit_work);
 | 
						queue_work(iou_wq, &ctx->exit_work);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int io_uring_release(struct inode *inode, struct file *file)
 | 
					static int io_uring_release(struct inode *inode, struct file *file)
 | 
				
			||||||
| 
						 | 
					@ -4190,6 +4191,8 @@ static int __init io_uring_init(void)
 | 
				
			||||||
	io_buf_cachep = KMEM_CACHE(io_buffer,
 | 
						io_buf_cachep = KMEM_CACHE(io_buffer,
 | 
				
			||||||
					  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 | 
										  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#ifdef CONFIG_SYSCTL
 | 
					#ifdef CONFIG_SYSCTL
 | 
				
			||||||
	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
 | 
						register_sysctl_init("kernel", kernel_io_uring_disabled_table);
 | 
				
			||||||
#endif
 | 
					#endif
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue