	io_uring: cache req->apoll->events in req->cflags
When we arm poll on behalf of a different type of request, like a network
receive, then we allocate req->apoll as our poll entry. Running network
workloads shows io_poll_check_events() as the most expensive part of
io_uring, and it's all due to having to pull in req->apoll instead of just
the request which we have hot already.

Cache poll->events in req->cflags, which isn't used until the request
completes anyway. This isn't strictly needed for regular poll, where
req->poll.events is used and thus already hot, but for the sake of
unification we do it all around.

This saves 3-4% of overhead in certain request workloads.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
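Why this helps: on the wakeup/check path the kernel previously read
req->apoll->events, a pointer chase into the separately allocated poll
entry, while the io_kiocb itself is already cache-hot. Caching the mask in
req->cflags keeps that read inside the hot struct. Below is a minimal
userspace sketch of the idea, using simplified stand-in structs (these are
not the real io_uring definitions):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structs, not the real definitions. */
struct async_poll {
	unsigned int events;		/* lives in a separate allocation */
};

struct io_kiocb {
	int result;
	unsigned int cflags;		/* unused until completion, free to borrow */
	struct async_poll *apoll;	/* separate allocation -> likely cold line */
};

/* Before the patch: reading the mask chases apoll, touching its cache line. */
static unsigned int events_cold(const struct io_kiocb *req)
{
	return req->apoll->events;
}

/* After the patch: the mask was cached in req->cflags when poll was armed,
 * so the hot path stays within the already-hot io_kiocb. */
static unsigned int events_hot(const struct io_kiocb *req)
{
	return req->cflags;
}

int main(void)
{
	struct io_kiocb req = { 0 };

	req.apoll = malloc(sizeof(*req.apoll));
	if (!req.apoll)
		return 1;
	req.apoll->events = 0xc3;		/* arming: mask chosen... */
	req.cflags = req.apoll->events;		/* ...and cached on the request */

	printf("cold 0x%x, hot 0x%x\n", events_cold(&req), events_hot(&req));
	free(req.apoll);
	return 0;
}

The diff below does exactly this: __io_poll_execute() and io_poll_execute()
grow an events argument so every caller that has the mask in hand can stash
it in req->cflags, and io_poll_add_prep() seeds it at prep time for regular
poll.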
parent 521d61fc76
commit 81459350d5

1 changed file with 19 additions and 12 deletions
@@ -5876,13 +5876,13 @@ static int io_poll_check_events(struct io_kiocb *req)
 			return -ECANCELED;
 
 		if (!req->result) {
-			struct poll_table_struct pt = { ._key = poll->events };
+			struct poll_table_struct pt = { ._key = req->cflags };
 
-			req->result = vfs_poll(req->file, &pt) & poll->events;
+			req->result = vfs_poll(req->file, &pt) & req->cflags;
 		}
 
 		/* multishot, just fill an CQE and proceed */
-		if (req->result && !(poll->events & EPOLLONESHOT)) {
+		if (req->result && !(req->cflags & EPOLLONESHOT)) {
 			__poll_t mask = mangle_poll(req->result & poll->events);
 			bool filled;
 
@@ -5953,9 +5953,16 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 		io_req_complete_failed(req, ret);
 }
 
-static void __io_poll_execute(struct io_kiocb *req, int mask)
+static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 {
 	req->result = mask;
+	/*
+	 * This is useful for poll that is armed on behalf of another
+	 * request, and where the wakeup path could be on a different
+	 * CPU. We want to avoid pulling in req->apoll->events for that
+	 * case.
+	 */
+	req->cflags = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -5965,17 +5972,17 @@ static void __io_poll_execute(struct io_kiocb *req, int mask)
 	io_req_task_work_add(req, false);
 }
 
-static inline void io_poll_execute(struct io_kiocb *req, int res)
+static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
 {
 	if (io_poll_get_ownership(req))
-		__io_poll_execute(req, res);
+		__io_poll_execute(req, res, events);
 }
 
 static void io_poll_cancel_req(struct io_kiocb *req)
 {
 	io_poll_mark_cancelled(req);
 	/* kick tw, which should complete the request */
-	io_poll_execute(req, 0);
+	io_poll_execute(req, 0, 0);
 }
 
 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -5989,7 +5996,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	if (unlikely(mask & POLLFREE)) {
 		io_poll_mark_cancelled(req);
 		/* we have to kick tw in case it's not already */
-		io_poll_execute(req, 0);
+		io_poll_execute(req, 0, poll->events);
 
 		/*
 		 * If the waitqueue is being freed early but someone is already
@@ -6020,7 +6027,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 			list_del_init(&poll->wait.entry);
 			poll->head = NULL;
 		}
-		__io_poll_execute(req, mask);
+		__io_poll_execute(req, mask, poll->events);
 	}
 	return 1;
 }
@@ -6124,7 +6131,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 		/* can't multishot if failed, just queue the event we've got */
 		if (unlikely(ipt->error || !ipt->nr_entries))
 			poll->events |= EPOLLONESHOT;
-		__io_poll_execute(req, mask);
+		__io_poll_execute(req, mask, poll->events);
 		return 0;
 	}
 	io_add_napi(req->file, req->ctx);
@@ -6135,7 +6142,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 	 */
 	v = atomic_dec_return(&req->poll_refs);
 	if (unlikely(v & IO_POLL_REF_MASK))
-		__io_poll_execute(req, 0);
+		__io_poll_execute(req, 0, poll->events);
 	return 0;
 }
 
@@ -6333,7 +6340,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	poll->events = io_poll_parse_events(sqe, flags);
+	req->cflags = poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }
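For context on the final hunk: io_poll_add_prep() handles plain
IORING_OP_POLL_ADD requests, so the mask a user submits is what
io_poll_parse_events() returns, and it now also seeds req->cflags. A
minimal liburing program that exercises that path might look like this
(assumes liburing is available; error handling kept terse):

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* IORING_OP_POLL_ADD: the POLLIN mask submitted here is the value
	 * io_poll_parse_events() produces on the kernel side. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	io_uring_submit(&ring);

	/* Make the pipe readable so the poll fires. */
	if (write(fds[1], "x", 1) != 1)
		return 1;

	if (io_uring_wait_cqe(&ring, &cqe))
		return 1;
	printf("poll completed, res mask 0x%x\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

The mask passed to io_uring_prep_poll_add() is what ends up in both
poll->events and, with this patch, req->cflags on the kernel side.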