mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	io_uring/rsrc: get rid of the empty node and dummy_ubuf
The empty node was used as a placeholder for a sparse entry, but it didn't really solve any issues. The caller still has to check whether it's the empty node or not; it may as well just check for a NULL return instead. The dummy_ubuf was used for a sparse buffer entry, but NULL will serve the same purpose there of ensuring an -EFAULT on attempted import. Just use NULL for a sparse node, regardless of whether or not it's a file or buffer resource. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
		
							parent
							
								
									4007c3d8c2
								
							
						
					
					
						commit
						d50f94d761
					
				
					 6 changed files with 40 additions and 50 deletions
				
			
		| 
						 | 
					@ -178,9 +178,14 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
 | 
						seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
 | 
				
			||||||
	for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
 | 
						for (i = 0; has_lock && i < ctx->buf_table.nr; i++) {
 | 
				
			||||||
		struct io_mapped_ubuf *buf = ctx->buf_table.nodes[i]->buf;
 | 
							struct io_mapped_ubuf *buf = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
 | 
							if (ctx->buf_table.nodes[i])
 | 
				
			||||||
 | 
								buf = ctx->buf_table.nodes[i]->buf;
 | 
				
			||||||
 | 
							if (buf)
 | 
				
			||||||
 | 
								seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
 | 
				
			||||||
 | 
							else
 | 
				
			||||||
 | 
								seq_printf(m, "%5u: <none>\n", i);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if (has_lock && !xa_empty(&ctx->personalities)) {
 | 
						if (has_lock && !xa_empty(&ctx->personalities)) {
 | 
				
			||||||
		unsigned long index;
 | 
							unsigned long index;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -947,8 +947,8 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 | 
				
			||||||
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 | 
					static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	req->ctx = ctx;
 | 
						req->ctx = ctx;
 | 
				
			||||||
	req->rsrc_nodes[IORING_RSRC_FILE] = rsrc_empty_node;
 | 
						req->rsrc_nodes[IORING_RSRC_FILE] = NULL;
 | 
				
			||||||
	req->rsrc_nodes[IORING_RSRC_BUFFER] = rsrc_empty_node;
 | 
						req->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
 | 
				
			||||||
	req->link = NULL;
 | 
						req->link = NULL;
 | 
				
			||||||
	req->async_data = NULL;
 | 
						req->async_data = NULL;
 | 
				
			||||||
	/* not necessary, but safer to zero */
 | 
						/* not necessary, but safer to zero */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -117,8 +117,8 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 | 
				
			||||||
	notif->file = NULL;
 | 
						notif->file = NULL;
 | 
				
			||||||
	notif->task = current;
 | 
						notif->task = current;
 | 
				
			||||||
	io_get_task_refs(1);
 | 
						io_get_task_refs(1);
 | 
				
			||||||
	notif->rsrc_nodes[IORING_RSRC_FILE] = rsrc_empty_node;
 | 
						notif->rsrc_nodes[IORING_RSRC_FILE] = NULL;
 | 
				
			||||||
	notif->rsrc_nodes[IORING_RSRC_BUFFER] = rsrc_empty_node;
 | 
						notif->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	nd = io_notif_to_data(notif);
 | 
						nd = io_notif_to_data(notif);
 | 
				
			||||||
	nd->zc_report = false;
 | 
						nd->zc_report = false;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -32,17 +32,6 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 | 
				
			||||||
#define IORING_MAX_FIXED_FILES	(1U << 20)
 | 
					#define IORING_MAX_FIXED_FILES	(1U << 20)
 | 
				
			||||||
#define IORING_MAX_REG_BUFFERS	(1U << 14)
 | 
					#define IORING_MAX_REG_BUFFERS	(1U << 14)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static const struct io_mapped_ubuf dummy_ubuf = {
 | 
					 | 
				
			||||||
	/* set invalid range, so io_import_fixed() fails meeting it */
 | 
					 | 
				
			||||||
	.ubuf = -1UL,
 | 
					 | 
				
			||||||
	.len = UINT_MAX,
 | 
					 | 
				
			||||||
};
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
const struct io_rsrc_node empty_node = {
 | 
					 | 
				
			||||||
	.type = IORING_RSRC_BUFFER,
 | 
					 | 
				
			||||||
	.buf = (struct io_mapped_ubuf *) &dummy_ubuf,
 | 
					 | 
				
			||||||
};
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 | 
					int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	unsigned long page_limit, cur_pages, new_pages;
 | 
						unsigned long page_limit, cur_pages, new_pages;
 | 
				
			||||||
| 
						 | 
					@ -116,7 +105,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	unsigned int i;
 | 
						unsigned int i;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (node->buf != &dummy_ubuf) {
 | 
						if (node->buf) {
 | 
				
			||||||
		struct io_mapped_ubuf *imu = node->buf;
 | 
							struct io_mapped_ubuf *imu = node->buf;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if (!refcount_dec_and_test(&imu->refs))
 | 
							if (!refcount_dec_and_test(&imu->refs))
 | 
				
			||||||
| 
						 | 
					@ -265,20 +254,21 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 | 
				
			||||||
		err = io_buffer_validate(iov);
 | 
							err = io_buffer_validate(iov);
 | 
				
			||||||
		if (err)
 | 
							if (err)
 | 
				
			||||||
			break;
 | 
								break;
 | 
				
			||||||
		if (!iov->iov_base && tag) {
 | 
					 | 
				
			||||||
			err = -EINVAL;
 | 
					 | 
				
			||||||
			break;
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 | 
							node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 | 
				
			||||||
		if (IS_ERR(node)) {
 | 
							if (IS_ERR(node)) {
 | 
				
			||||||
			err = PTR_ERR(node);
 | 
								err = PTR_ERR(node);
 | 
				
			||||||
			break;
 | 
								break;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
							if (tag) {
 | 
				
			||||||
 | 
								if (!node) {
 | 
				
			||||||
 | 
									err = -EINVAL;
 | 
				
			||||||
 | 
									break;
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
 | 
								node->tag = tag;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
 | 
							i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
 | 
				
			||||||
		io_reset_rsrc_node(&ctx->buf_table, i);
 | 
							io_reset_rsrc_node(&ctx->buf_table, i);
 | 
				
			||||||
		ctx->buf_table.nodes[i] = node;
 | 
							ctx->buf_table.nodes[i] = node;
 | 
				
			||||||
		if (tag)
 | 
					 | 
				
			||||||
			node->tag = tag;
 | 
					 | 
				
			||||||
		if (ctx->compat)
 | 
							if (ctx->compat)
 | 
				
			||||||
			user_data += sizeof(struct compat_iovec);
 | 
								user_data += sizeof(struct compat_iovec);
 | 
				
			||||||
		else
 | 
							else
 | 
				
			||||||
| 
						 | 
					@ -591,8 +581,11 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 | 
				
			||||||
	/* check previously registered pages */
 | 
						/* check previously registered pages */
 | 
				
			||||||
	for (i = 0; i < ctx->buf_table.nr; i++) {
 | 
						for (i = 0; i < ctx->buf_table.nr; i++) {
 | 
				
			||||||
		struct io_rsrc_node *node = ctx->buf_table.nodes[i];
 | 
							struct io_rsrc_node *node = ctx->buf_table.nodes[i];
 | 
				
			||||||
		struct io_mapped_ubuf *imu = node->buf;
 | 
							struct io_mapped_ubuf *imu;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							if (!node)
 | 
				
			||||||
 | 
								continue;
 | 
				
			||||||
 | 
							imu = node->buf;
 | 
				
			||||||
		for (j = 0; j < imu->nr_bvecs; j++) {
 | 
							for (j = 0; j < imu->nr_bvecs; j++) {
 | 
				
			||||||
			if (!PageCompound(imu->bvec[j].bv_page))
 | 
								if (!PageCompound(imu->bvec[j].bv_page))
 | 
				
			||||||
				continue;
 | 
									continue;
 | 
				
			||||||
| 
						 | 
					@ -742,7 +735,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 | 
				
			||||||
	bool coalesced;
 | 
						bool coalesced;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!iov->iov_base)
 | 
						if (!iov->iov_base)
 | 
				
			||||||
		return rsrc_empty_node;
 | 
							return NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 | 
						node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 | 
				
			||||||
	if (!node)
 | 
						if (!node)
 | 
				
			||||||
| 
						 | 
					@ -850,10 +843,6 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 | 
				
			||||||
				ret = -EFAULT;
 | 
									ret = -EFAULT;
 | 
				
			||||||
				break;
 | 
									break;
 | 
				
			||||||
			}
 | 
								}
 | 
				
			||||||
			if (tag && !iov->iov_base) {
 | 
					 | 
				
			||||||
				ret = -EINVAL;
 | 
					 | 
				
			||||||
				break;
 | 
					 | 
				
			||||||
			}
 | 
					 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 | 
							node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 | 
				
			||||||
| 
						 | 
					@ -861,8 +850,13 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 | 
				
			||||||
			ret = PTR_ERR(node);
 | 
								ret = PTR_ERR(node);
 | 
				
			||||||
			break;
 | 
								break;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		if (tag)
 | 
							if (tag) {
 | 
				
			||||||
 | 
								if (!node) {
 | 
				
			||||||
 | 
									ret = -EINVAL;
 | 
				
			||||||
 | 
									break;
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
			node->tag = tag;
 | 
								node->tag = tag;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
		data.nodes[i] = node;
 | 
							data.nodes[i] = node;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -957,8 +951,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
 | 
				
			||||||
		struct io_rsrc_node *dst_node, *src_node;
 | 
							struct io_rsrc_node *dst_node, *src_node;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
 | 
							src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
 | 
				
			||||||
		if (src_node == rsrc_empty_node) {
 | 
							if (!src_node) {
 | 
				
			||||||
			dst_node = rsrc_empty_node;
 | 
								dst_node = NULL;
 | 
				
			||||||
		} else {
 | 
							} else {
 | 
				
			||||||
			dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 | 
								dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 | 
				
			||||||
			if (!dst_node) {
 | 
								if (!dst_node) {
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -67,9 +67,6 @@ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
 | 
				
			||||||
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 | 
					int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
 | 
				
			||||||
			unsigned int size, unsigned int type);
 | 
								unsigned int size, unsigned int type);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
extern const struct io_rsrc_node empty_node;
 | 
					 | 
				
			||||||
#define rsrc_empty_node	(struct io_rsrc_node *) &empty_node
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
 | 
					static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
 | 
				
			||||||
						       int index)
 | 
											       int index)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
| 
						 | 
					@ -80,7 +77,7 @@ static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline void io_put_rsrc_node(struct io_rsrc_node *node)
 | 
					static inline void io_put_rsrc_node(struct io_rsrc_node *node)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (node != rsrc_empty_node && !--node->refs)
 | 
						if (node && !--node->refs)
 | 
				
			||||||
		io_free_rsrc_node(node);
 | 
							io_free_rsrc_node(node);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -97,23 +94,17 @@ static inline bool io_reset_rsrc_node(struct io_rsrc_data *data, int index)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
 | 
					static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (req->rsrc_nodes[IORING_RSRC_FILE] != rsrc_empty_node) {
 | 
						io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_FILE]);
 | 
				
			||||||
		io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_FILE]);
 | 
						io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_BUFFER]);
 | 
				
			||||||
		req->rsrc_nodes[IORING_RSRC_FILE] = rsrc_empty_node;
 | 
						req->rsrc_nodes[IORING_RSRC_FILE] = NULL;
 | 
				
			||||||
	}
 | 
						req->rsrc_nodes[IORING_RSRC_BUFFER] = NULL;
 | 
				
			||||||
	if (req->rsrc_nodes[IORING_RSRC_BUFFER] != rsrc_empty_node) {
 | 
					 | 
				
			||||||
		io_put_rsrc_node(req->rsrc_nodes[IORING_RSRC_BUFFER]);
 | 
					 | 
				
			||||||
		req->rsrc_nodes[IORING_RSRC_BUFFER] = rsrc_empty_node;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline void io_req_assign_rsrc_node(struct io_kiocb *req,
 | 
					static inline void io_req_assign_rsrc_node(struct io_kiocb *req,
 | 
				
			||||||
					   struct io_rsrc_node *node)
 | 
										   struct io_rsrc_node *node)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (node != rsrc_empty_node) {
 | 
						node->refs++;
 | 
				
			||||||
		node->refs++;
 | 
						req->rsrc_nodes[node->type] = node;
 | 
				
			||||||
		req->rsrc_nodes[node->type] = node;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
 | 
					int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -35,7 +35,7 @@ static int __io_splice_prep(struct io_kiocb *req,
 | 
				
			||||||
	if (unlikely(sp->flags & ~valid_flags))
 | 
						if (unlikely(sp->flags & ~valid_flags))
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
	sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
 | 
						sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
 | 
				
			||||||
	sp->rsrc_node = rsrc_empty_node;
 | 
						sp->rsrc_node = NULL;
 | 
				
			||||||
	req->flags |= REQ_F_FORCE_ASYNC;
 | 
						req->flags |= REQ_F_FORCE_ASYNC;
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue