	io_uring/cancel: don't default to setting req->work.cancel_seq
Just leave it unset by default, avoiding dipping into the last cacheline
(which is otherwise untouched) for the fast path of using poll to drive
networked traffic. Add a flag that tells us if the sequence is valid or
not, and then we can defer actually assigning the flag and sequence until
someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
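For illustration only, here is a minimal userspace sketch of the pattern the message above describes and the diff further below implements: the cancel sequence starts out unset, a flag bit records whether it is valid, and the sequence is only assigned the first time a cancel-all pass inspects the request. The names used here (struct fake_req, FAKE_F_CANCEL_SEQ, fake_cancel_match_sequence) are invented for the example and are not kernel identifiers; the real helper is io_cancel_match_sequence() in the io_uring/cancel.h hunk.

/*
 * Userspace sketch of the lazily-assigned cancel sequence: the sequence
 * field is only written once a cancelation pass looks at the request,
 * and a flag bit records that it is now valid.  Illustrative names only.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	FAKE_F_CANCEL_SEQ = 1u << 0,	/* cancel sequence is set and valid */
};

struct fake_req {
	unsigned int flags;
	int cancel_seq;		/* only meaningful if FAKE_F_CANCEL_SEQ is set */
};

/*
 * Returns true if this request was already visited with the same
 * cancelation sequence (caller should skip it); otherwise records the
 * sequence lazily and returns false.
 */
static bool fake_cancel_match_sequence(struct fake_req *req, int sequence)
{
	if ((req->flags & FAKE_F_CANCEL_SEQ) && sequence == req->cancel_seq)
		return true;

	req->flags |= FAKE_F_CANCEL_SEQ;
	req->cancel_seq = sequence;
	return false;
}

int main(void)
{
	struct fake_req req = { 0 };

	/* First pass with sequence 1: no match, sequence gets recorded. */
	printf("first pass:  %d\n", fake_cancel_match_sequence(&req, 1));
	/* Same pass sees the request again: matches, so it is skipped. */
	printf("same pass:   %d\n", fake_cancel_match_sequence(&req, 1));
	/* A later cancelation with sequence 2: no match, sequence updated. */
	printf("second pass: %d\n", fake_cancel_match_sequence(&req, 2));
	return 0;
}

The benefit of deferring the assignment is visible in the diff: io_prep_async_work() and __io_arm_poll_handler() no longer read ctx->cancel_seq and write req->work.cancel_seq up front, so the request's last cacheline stays untouched on the fast path.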
commit 521223d7c2
parent 4bcb982cce

5 changed files with 15 additions and 8 deletions
include/linux/io_uring_types.h

@@ -463,6 +463,7 @@ enum {
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
 	REQ_F_POLL_NO_LAZY_BIT,
+	REQ_F_CANCEL_SEQ_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -535,6 +536,8 @@ enum {
 	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
 	/* don't use lazy poll wake for this request */
 	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+	/* cancel sequence is set and valid */
+	REQ_F_CANCEL_SEQ	= IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
io_uring/cancel.c

@@ -58,9 +58,8 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
 		return false;
 	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
 check_seq:
-		if (cd->seq == req->work.cancel_seq)
+		if (io_cancel_match_sequence(req, cd->seq))
 			return false;
-		req->work.cancel_seq = cd->seq;
 	}
 
 	return true;
io_uring/cancel.h

@@ -25,4 +25,14 @@ void init_hash_table(struct io_hash_table *table, unsigned size);
 int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
 bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
 
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+		return true;
+
+	req->flags |= REQ_F_CANCEL_SEQ;
+	req->work.cancel_seq = sequence;
+	return false;
+}
+
 #endif
io_uring/io_uring.c

@@ -463,7 +463,6 @@ static void io_prep_async_work(struct io_kiocb *req)
 
 	req->work.list.next = NULL;
 	req->work.flags = 0;
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	if (req->flags & REQ_F_FORCE_ASYNC)
 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
 
io_uring/poll.c

@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;