	io_uring: add io_add_aux_cqe() helper
This helper will post a CQE, and can be called from task_work where we know
that the ctx is already properly locked and that deferred completions will
get flushed later on.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c3ac76f9ca
commit f33096a3c9

2 changed files with 24 additions and 4 deletions
io_uring/io_uring.c

@@ -801,19 +801,38 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return false;
 }
 
+static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
+			      u32 cflags)
+{
+	bool filled;
+
+	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+	if (!filled)
+		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+
+	return filled;
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
 
 	io_cq_lock(ctx);
-	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
-	if (!filled)
-		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
-
+	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
 	io_cq_unlock_post(ctx);
 	return filled;
 }
 
+/*
+ * Must be called from inline task_work so we know a flush will happen later,
+ * and obviously with ctx->uring_lock held (tw always has that).
+ */
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+{
+	__io_post_aux_cqe(ctx, user_data, res, cflags);
+	ctx->submit_state.cq_flush = true;
+}
+
 /*
  * A helper for multishot requests posting additional CQEs.
  * Should only be used from a task_work including IO_URING_F_MULTISHOT.

io_uring/io_uring.h

@@ -65,6 +65,7 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
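To make the new helper's contract concrete, here is a minimal sketch of a
caller, assuming a task_work context; io_example_tw and its use of
req->cqe.user_data are hypothetical, not part of this commit. From any other
context, io_post_aux_cqe() remains the correct interface, since it takes
io_cq_lock() itself.

/*
 * Illustrative only: a hypothetical task_work callback showing the
 * intended call site for io_add_aux_cqe(). Task_work runs with
 * ctx->uring_lock held, and io_add_aux_cqe() sets
 * ctx->submit_state.cq_flush so the deferred completion flush will pick
 * the CQE up later; no io_cq_lock()/io_cq_unlock_post() pair is needed.
 */
static void io_example_tw(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* post an auxiliary CQE without taking the completion lock */
	io_add_aux_cqe(ctx, req->cqe.user_data, 0, 0);
}

Factoring the fill-or-overflow logic into __io_post_aux_cqe() lets both paths
share it: io_post_aux_cqe() wraps it in io_cq_lock()/io_cq_unlock_post() for
callers that hold no locks, while io_add_aux_cqe() skips the locking and
instead marks ctx->submit_state.cq_flush for the later flush.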