
io_uring/kbuf: limit legacy provided buffer lists to USHRT_MAX

The buffer ID for a provided buffer is an unsigned short, and hence
there can only be 64k added to any given buffer list before having
duplicate BIDs. Cap the legacy provided buffers at 64k in the list.
This is mostly to prevent silly stall reports from syzbot, which
likes to dump tons of buffers into a list and then have kernels with
lockdep and kasan churning through them and hitting long wait times
for buffer pruning at ring exit time.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe, 2025-06-03 07:42:28 -06:00
parent e931d3a9d5
commit 607d09d1a0
2 changed files with 18 additions and 2 deletions
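For illustration, a minimal userspace sketch of how an application would observe the new cap (assuming liburing and a kernel carrying this patch; the group ID, buffer length, and the provide() helper are arbitrary choices, not part of this commit). Once a legacy buffer list holds USHRT_MAX entries, a further IORING_OP_PROVIDE_BUFFERS completes with -EOVERFLOW:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

#define BGID	7	/* arbitrary buffer group ID */
#define BUFLEN	64	/* arbitrary buffer length */

/* submit one IORING_OP_PROVIDE_BUFFERS and return the CQE result */
static int provide(struct io_uring *ring, void *mem, int nr, int bid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int res;

	io_uring_prep_provide_buffers(sqe, mem, BUFLEN, nr, BGID, bid);
	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return res;
}

int main(void)
{
	struct io_uring ring;
	void *mem;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;
	if (!(mem = malloc((size_t)USHRT_MAX * BUFLEN + BUFLEN)))
		return 1;

	/* fill the list to the new cap: BIDs 0..USHRT_MAX-1 */
	printf("fill: %d\n", provide(&ring, mem, USHRT_MAX, 0));
	/* list is full, so nothing can be added: expect -EOVERFLOW */
	printf("over: %d\n", provide(&ring, mem, 1, 0));

	io_uring_queue_exit(&ring);
	return 0;
}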

io_uring/kbuf.c

@@ -108,6 +108,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	buf = req->kbuf;
 	bl = io_buffer_get_list(ctx, buf->bgid);
 	list_add(&buf->list, &bl->buf_list);
+	bl->nbufs++;
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	io_ring_submit_unlock(ctx, issue_flags);
@@ -122,6 +123,7 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
 	kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
 	list_del(&kbuf->list);
+	bl->nbufs--;
 	if (*len == 0 || *len > kbuf->len)
 		*len = kbuf->len;
 	if (list_empty(&bl->buf_list))
@@ -390,6 +392,7 @@ static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
 	for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
 		list_del(&nxt->list);
+		bl->nbufs--;
 		kfree(nxt);
 		cond_resched();
 	}
@@ -491,14 +494,24 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
 {
 	struct io_buffer *buf;
 	u64 addr = pbuf->addr;
-	int i, bid = pbuf->bid;
+	int ret = -ENOMEM, i, bid = pbuf->bid;
 
 	for (i = 0; i < pbuf->nbufs; i++) {
+		/*
+		 * Nonsensical to have more than sizeof(bid) buffers in a
+		 * buffer list, as the application then has no way of knowing
+		 * which duplicate bid refers to what buffer.
+		 */
+		if (bl->nbufs == USHRT_MAX) {
+			ret = -EOVERFLOW;
+			break;
+		}
 		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
 		if (!buf)
 			break;
 		list_add_tail(&buf->list, &bl->buf_list);
+		bl->nbufs++;
 		buf->addr = addr;
 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
 		buf->bid = bid;
@@ -508,7 +521,7 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
 		cond_resched();
 	}
 
-	return i ? 0 : -ENOMEM;
+	return i ? 0 : ret;
 }
 
 static int __io_manage_buffers_legacy(struct io_kiocb *req,
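One subtlety in the "return i ? 0 : ret;" change above: partial success still wins. A request that adds at least one buffer before hitting the cap (or an allocation failure) completes with 0; -EOVERFLOW is only surfaced when no buffer at all could be added. A standalone toy model of that convention (hypothetical helper, not kernel code; the counter increment stands in for the kmalloc() + list_add_tail() pair):

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* toy model of io_add_buffers(): *nbufs stands in for bl->nbufs */
static int add_buffers_model(unsigned int *nbufs, unsigned int to_add)
{
	int ret = -ENOMEM;
	unsigned int i;

	for (i = 0; i < to_add; i++) {
		if (*nbufs == USHRT_MAX) {	/* the new cap check */
			ret = -EOVERFLOW;
			break;
		}
		(*nbufs)++;
	}
	return i ? 0 : ret;	/* partial success still reports 0 */
}

int main(void)
{
	unsigned int nbufs = USHRT_MAX - 1;

	/* one slot left: adding two stops at the cap, yet reports success */
	printf("%d\n", add_buffers_model(&nbufs, 2));	/* 0 */
	/* list is now full: adding anything fails */
	printf("%d\n", add_buffers_model(&nbufs, 1));	/* -EOVERFLOW */
	return 0;
}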

io_uring/kbuf.h

@@ -21,6 +21,9 @@ struct io_buffer_list {
 		struct list_head buf_list;
 		struct io_uring_buf_ring *buf_ring;
 	};
+	/* count of classic/legacy buffers in buffer list */
+	int nbufs;
+
 	__u16 bgid;
 
 	/* below is for ring provided buffers */