mirror of https://github.com/torvalds/linux.git, synced 2025-11-01 00:58:39 +02:00
commit 521223d7c2
Just leave it unset by default, avoiding dipping into the last
cacheline (which is otherwise untouched) for the fast path of using
poll to drive networked traffic. Add a flag that tells us if the
sequence is valid or not, and then we can defer actually assigning
the flag and sequence until someone runs cancelations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
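The header below implements this as io_cancel_match_sequence(): check the
flag plus sequence first, and only stamp both on a miss. As a minimal,
self-contained sketch of that check-and-stamp pattern (a userspace model
with hypothetical names, not the kernel's types):

/*
 * Userspace model of the deferred-sequence idea above; struct req and
 * match_sequence() are illustrative, not the kernel's definitions.
 * Build with: cc -Wall -o seq_demo seq_demo.c
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_F_CANCEL_SEQ	(1u << 0)	/* "cancel_seq is valid" */

struct req {
	unsigned int flags;	/* hot: checked on every match attempt */
	int cancel_seq;		/* cold: only written once canceling starts */
};

/*
 * Returns true if @req was already stamped with @sequence (i.e. it was
 * seen earlier in this cancelation pass); otherwise stamps it and
 * returns false.
 */
static bool match_sequence(struct req *req, int sequence)
{
	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->cancel_seq)
		return true;

	req->flags |= REQ_F_CANCEL_SEQ;
	req->cancel_seq = sequence;
	return false;
}

int main(void)
{
	struct req r = { 0 };	/* fast path never initializes cancel_seq */

	printf("%d\n", match_sequence(&r, 1));	/* 0: first visit, stamped */
	printf("%d\n", match_sequence(&r, 1));	/* 1: repeat in same pass */
	printf("%d\n", match_sequence(&r, 2));	/* 0: new pass, re-stamped */
	return 0;
}

The flag is what makes the deferral safe: until the first cancelation
pass stamps the request, cancel_seq holds garbage and is never read.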
		
			
				
	
	
		
38 lines · 978 B · C
// SPDX-License-Identifier: GPL-2.0
#ifndef IORING_CANCEL_H
#define IORING_CANCEL_H

#include <linux/io_uring_types.h>

struct io_cancel_data {
	struct io_ring_ctx *ctx;
	union {
		u64 data;
		struct file *file;
	};
	u8 opcode;
	u32 flags;
	int seq;
};

int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);

int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned int issue_flags);
void init_hash_table(struct io_hash_table *table, unsigned size);

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);

static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
		return true;

	req->flags |= REQ_F_CANCEL_SEQ;
	req->work.cancel_seq = sequence;
	return false;
}

#endif
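On the kernel side, the intended call pattern is a "seen this pass?"
test on the cancelation path; the real call site lives in
io_uring/cancel.c, so the fragment below is only a hedged sketch of how
a matcher might consult the helper:

	/* hypothetical matcher fragment, illustration only */
	if (io_cancel_match_sequence(req, cd->seq))
		return false;	/* already stamped with this sequence: skip */
	return true;		/* first visit this pass: cancel candidate */

Note the asymmetry: a false return has the side effect of stamping the
request, so a second sweep using the same sequence will not match (and
cancel) it twice.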