io_uring: make io_alloc_ocqe() take a struct io_cqe pointer
The number of arguments to io_alloc_ocqe() is a bit unwieldy. Make it take a struct io_cqe pointer rather than three separate CQE args. One path already has that readily available, add an io_init_cqe() helper for the remaining two.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
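For context, the refactor leans on struct io_cqe, which already bundles the three fields the old signature passed separately. A minimal sketch of the type and of the new call shape, assuming the field layout in include/linux/io_uring_types.h at the time of this commit (the real definition wraps flags in a union with an int fd):

	/* One normal (non-CQE32) completion entry's worth of state;
	 * layout assumed from include/linux/io_uring_types.h */
	struct io_cqe {
		u64	user_data;	/* sqe->user_data, echoed back to userspace */
		s32	res;		/* operation result */
		u32	flags;		/* IORING_CQE_F_* flags */
	};

	/* Overflow path after this commit: build the CQE on the stack via
	 * the new helper, then hand a single pointer to io_alloc_ocqe() */
	struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
	ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);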
This commit is contained in:
parent 10f466abc4
commit 072d37b52c
1 changed file with 16 additions and 10 deletions
io_uring/io_uring.c

@@ -724,8 +724,8 @@ static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
 }
 
 static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
-					     u64 user_data, s32 res, u32 cflags,
-					     u64 extra1, u64 extra2, gfp_t gfp)
+					     struct io_cqe *cqe, u64 extra1,
+					     u64 extra2, gfp_t gfp)
 {
 	struct io_overflow_cqe *ocqe;
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
@@ -735,11 +735,11 @@ static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
 		ocq_size += sizeof(struct io_uring_cqe);
 
 	ocqe = kmalloc(ocq_size, gfp | __GFP_ACCOUNT);
-	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
+	trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
 	if (ocqe) {
-		ocqe->cqe.user_data = user_data;
-		ocqe->cqe.res = res;
-		ocqe->cqe.flags = cflags;
+		ocqe->cqe.user_data = cqe->user_data;
+		ocqe->cqe.res = cqe->res;
+		ocqe->cqe.flags = cqe->flags;
 		if (is_cqe32) {
 			ocqe->cqe.big_cqe[0] = extra1;
 			ocqe->cqe.big_cqe[1] = extra2;
@@ -806,6 +806,11 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return false;
 }
 
+static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
+{
+	return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
@@ -814,8 +819,9 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
 	if (unlikely(!filled)) {
 		struct io_overflow_cqe *ocqe;
+		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_ATOMIC);
+		ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_ATOMIC);
 		filled = io_cqring_add_overflow(ctx, ocqe);
 	}
 	io_cq_unlock_post(ctx);
@@ -833,8 +839,9 @@ void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 
 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
 		struct io_overflow_cqe *ocqe;
+		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, user_data, res, cflags, 0, 0, GFP_KERNEL);
+		ocqe = io_alloc_ocqe(ctx, &cqe, 0, 0, GFP_KERNEL);
 		spin_lock(&ctx->completion_lock);
 		io_cqring_add_overflow(ctx, ocqe);
 		spin_unlock(&ctx->completion_lock);
@@ -1444,8 +1451,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 			gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
 			struct io_overflow_cqe *ocqe;
 
-			ocqe = io_alloc_ocqe(ctx, req->cqe.user_data, req->cqe.res,
-					     req->cqe.flags, req->big_cqe.extra1,
+			ocqe = io_alloc_ocqe(ctx, &req->cqe, req->big_cqe.extra1,
 					     req->big_cqe.extra2, gfp);
 			if (ctx->lockless_cq) {
 				spin_lock(&ctx->completion_lock);
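A note on the gfp arguments visible above: io_post_aux_cqe() allocates under io_cq_lock() and so must use GFP_ATOMIC, while io_add_aux_cqe() allocates with GFP_KERNEL before taking the completion lock; __io_submit_flush_completions() picks between the two based on ctx->lockless_cq, presumably because a lockless CQ ring is not under the spinlock at allocation time. The refactor leaves this behavior unchanged; only the argument bundling differs.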