Mirror of https://github.com/torvalds/linux.git, synced 2025-11-02 09:40:27 +02:00
io_uring: add mshot helper for posting CQE32
Add a helper for posting 32 byte CQEs in multishot mode, and add a cmd
helper on top. As it specifically works with requests, the helper
ignores the passed-in cqe->user_data and sets it to the one stored in
the request.

The command helper is only valid for multishot requests.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c29d7720c16e1f981cfaa903df187138baa3946b.1750065793.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit ac479eac22 (parent b955754959)
4 changed files with 56 additions and 0 deletions
io_uring/io_uring.c:

@@ -793,6 +793,21 @@ bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
 	return true;
 }
 
+static bool io_fill_cqe_aux32(struct io_ring_ctx *ctx,
+			      struct io_uring_cqe src_cqe[2])
+{
+	struct io_uring_cqe *cqe;
+
+	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
+		return false;
+	if (unlikely(!io_get_cqe(ctx, &cqe)))
+		return false;
+
+	memcpy(cqe, src_cqe, 2 * sizeof(*cqe));
+	trace_io_uring_complete(ctx, NULL, cqe);
+	return true;
+}
+
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 			    u32 cflags)
 {

@@ -904,6 +919,31 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	return posted;
 }
 
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2])
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	bool posted;
+
+	lockdep_assert(!io_wq_current_is_worker());
+	lockdep_assert_held(&ctx->uring_lock);
+
+	cqe[0].user_data = req->cqe.user_data;
+	if (!ctx->lockless_cq) {
+		spin_lock(&ctx->completion_lock);
+		posted = io_fill_cqe_aux32(ctx, cqe);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		posted = io_fill_cqe_aux32(ctx, cqe);
+	}
+
+	ctx->submit_state.cq_flush = true;
+	return posted;
+}
+
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
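As a sketch of what a caller of io_req_post_cqe32() is expected to pass (not part of this commit; example_post_cqe32() and its result/payload values are hypothetical): with IORING_SETUP_CQE32 one CQ slot is 32 bytes, i.e. two adjacent 16-byte struct io_uring_cqe entries, so the helper takes a two-entry array where the second entry carries the extra 16 bytes userspace reads as big_cqe[0] and big_cqe[1].

static bool example_post_cqe32(struct io_kiocb *req, s32 res,
			       u64 extra1, u64 extra2)
{
	struct io_uring_cqe cqe[2];

	memset(cqe, 0, sizeof(cqe));
	cqe[0].res = res;			/* main completion result */
	cqe[0].flags = IORING_CQE_F_MORE;	/* multishot: more CQEs follow */
	/* second entry overlays big_cqe[0]/big_cqe[1] of the 32-byte CQE */
	memcpy(&cqe[1], &extra1, sizeof(extra1));
	memcpy((char *)&cqe[1] + sizeof(extra1), &extra2, sizeof(extra2));
	/* user_data in cqe[0] is ignored; the helper uses req->cqe.user_data */
	return io_req_post_cqe32(req, cqe);
}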
io_uring/io_uring.h:

@@ -81,6 +81,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
+bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
 void io_req_track_inflight(struct io_kiocb *req);
io_uring/uring_cmd.c:

@@ -328,3 +328,14 @@ int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
 	ret = io_arm_apoll(req, issue_flags, mask);
 	return ret == IO_APOLL_OK ? -EIOCBQUEUED : -ECANCELED;
 }
+
+bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
+				   unsigned int issue_flags,
+				   struct io_uring_cqe cqe[2])
+{
+	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+
+	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_MULTISHOT)))
+		return false;
+	return io_req_post_cqe32(req, cqe);
+}
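A hypothetical driver-side use of the new cmd helper (everything named my_* is made up for illustration): from a task_work completion path where issue_flags includes IO_URING_F_MULTISHOT, the backend posts one 32-byte CQE per event and disarms the multishot command if the post fails.

struct my_event {			/* hypothetical event record */
	u32 len;
	u64 cookie;
};

static void my_cmd_handle_event(struct io_uring_cmd *cmd,
				unsigned int issue_flags,
				struct my_event *ev)
{
	struct io_uring_cqe cqe[2];

	memset(cqe, 0, sizeof(cqe));
	cqe[0].res = ev->len;
	cqe[0].flags = IORING_CQE_F_MORE;	/* request stays armed */
	memcpy(&cqe[1], &ev->cookie, sizeof(ev->cookie));	/* big_cqe payload */

	if (!io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe))
		/* CQ full or not multishot: complete and terminate the command */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
}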
io_uring/uring_cmd.h:

@@ -17,6 +17,10 @@ void io_uring_cmd_cleanup(struct io_kiocb *req);
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 				   struct io_uring_task *tctx, bool cancel_all);
 
+bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
+				   unsigned int issue_flags,
+				   struct io_uring_cqe cqe[2]);
+
 void io_cmd_cache_free(const void *entry);
 
 int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
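For completeness, the userspace side as a liburing-based sketch (not part of this commit): a ring created with the IORING_SETUP_CQE32 flag receives 32-byte CQEs, and the payload written into the second kernel-side entry shows up as cqe->big_cqe[0] and cqe->big_cqe[1].

#include <liburing.h>
#include <stdio.h>

static void drain_big_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	/* ring set up with io_uring_queue_init(..., IORING_SETUP_CQE32) */
	while (io_uring_peek_cqe(ring, &cqe) == 0) {
		printf("res=%d big_cqe0=%llu big_cqe1=%llu more=%d\n",
		       cqe->res,
		       (unsigned long long)cqe->big_cqe[0],
		       (unsigned long long)cqe->big_cqe[1],
		       !!(cqe->flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(ring, cqe);	/* mark CQE as consumed */
	}
}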