mirror of https://github.com/torvalds/linux.git

commit f31ecf671d
This adds support for a fully async version of waitid(2). If an event
isn't immediately available, wait for a callback to trigger a retry.

The format of the sqe is as follows:

sqe->len          The 'which', the idtype being queried/waited for.
sqe->fd           The 'pid' (or id) being waited for.
sqe->file_index   The 'options' being set.
sqe->addr2        A pointer to siginfo_t, if any, being filled in.

buf_index, addr3, and waitid_flags are reserved/unused for now.
waitid_flags will be used for options for this request type. One
interesting use case may be to add multi-shot support, so that the
request stays armed and posts a notification every time a monitored
process state change occurs.

Note that this does not support rusage, on Arnd's recommendation.

See the waitid(2) man page for details on the arguments.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
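As a quick illustration of the sqe layout above, here is a minimal userspace
sketch (not taken from the commit) that fills a raw sqe by hand and waits for
the completion with liburing. It assumes kernel and liburing headers new
enough to define IORING_OP_WAITID; the P_PID idtype, WEXITED options, and the
child's exit code are arbitrary placeholders. Newer liburing releases also
ship an io_uring_prep_waitid() helper that wraps these field assignments.

/*
 * Illustrative only: submit one async waitid for a forked child using the
 * sqe layout described above. Assumes IORING_OP_WAITID is available in the
 * installed headers; error handling is kept minimal.
 */
#include <liburing.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	siginfo_t info;
	pid_t child;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	child = fork();
	if (child == 0)
		_exit(42);		/* child exits immediately */

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_WAITID;
	sqe->len = P_PID;			/* 'which': the idtype */
	sqe->fd = child;			/* 'pid' (or id) being waited for */
	sqe->file_index = WEXITED;		/* 'options' */
	sqe->addr2 = (unsigned long)&info;	/* siginfo_t to fill in */

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("waitid: res=%d si_pid=%d si_status=%d\n",
		       cqe->res, info.si_pid, info.si_status);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}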
		
			
				
	
	
		
15 lines | 484 B | C
// SPDX-License-Identifier: GPL-2.0

#include "../kernel/exit.h"

struct io_waitid_async {
	struct io_kiocb *req;
	struct wait_opts wo;
};

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags);
bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
			  bool cancel_all);
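To connect the declarations above with the sqe layout from the commit
message, here is a minimal, hypothetical sketch of what the prep side might
do. Both struct io_waitid_example and io_waitid_prep_example are names
invented for illustration; the real command type and prep logic live in the
waitid.c added by this commit, which is not shown on this page.

/* Hypothetical sketch only -- not the waitid.c from this commit. */
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/io_uring.h>

/* Invented per-request state, sized to fit in the io_kiocb command area. */
struct io_waitid_example {
	struct file *file;
	int which;			/* idtype, from sqe->len */
	pid_t upid;			/* pid/id, from sqe->fd */
	int options;			/* waitid options, from sqe->file_index */
	struct siginfo __user *infop;	/* user siginfo_t, from sqe->addr2 */
};

static int io_waitid_prep_example(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_waitid_example *iw;

	iw = io_kiocb_to_cmd(req, struct io_waitid_example);

	/* buf_index, addr3, and waitid_flags are reserved for now. */
	if (sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}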