mirror of https://github.com/torvalds/linux.git — synced 2025-10-31 16:48:26 +02:00
commit b3fdea6ecb
Support multishot receive for io_uring. Typical server applications will
run a loop where for each recv CQE it requeues another recv/recvmsg. This
can be simplified by using the existing multishot functionality combined
with io_uring's provided buffers.

The API is to add the IORING_RECV_MULTISHOT flag to the SQE. CQEs will
then be posted (with the IORING_CQE_F_MORE flag set) whenever data is
available and has been read. Once an error occurs or the socket ends, the
multishot will be removed and a completion without IORING_CQE_F_MORE will
be posted.

The benefit is that the recv is much more performant:

 * Subsequent receives are queued up straight away without requiring the
   application to finish a processing loop.
 * If there is more data in the socket (say the provided buffer size is
   smaller than the socket buffer) then the data is immediately returned,
   improving batching.
 * Poll is only armed once and reused, saving CPU cycles.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220630091231.1456789-11-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
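As a rough illustration of the userspace side of this API (not part of this
commit): a minimal receive loop sketch, assuming a liburing new enough to
provide io_uring_prep_recv_multishot() (added after this kernel series
landed); BGID, NBUFS and BUFSZ are arbitrary names chosen here.

/* hedged sketch, assuming liburing with io_uring_prep_recv_multishot() */
#include <liburing.h>

#define BGID	7
#define NBUFS	8
#define BUFSZ	4096

static void recv_loop(struct io_uring *ring, int sockfd, char *bufs)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* register NBUFS provided buffers in buffer group BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUFSZ, NBUFS, BGID, 0);
	io_uring_submit_and_wait(ring, 1);
	io_uring_cq_advance(ring, 1);

	/* a single SQE arms the multishot recv; buffers come from BGID */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		if (cqe->res <= 0) {
			/* error or EOF: the multishot has been terminated */
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		if (cqe->flags & IORING_CQE_F_BUFFER) {
			int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
			/* consume cqe->res bytes from bufs + bid * BUFSZ,
			 * then re-provide the buffer for later receives */
		}
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* kernel stopped the multishot; re-arm if desired */
		}
		io_uring_cqe_seen(ring, cqe);
	}
}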
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "net.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	int				msg_flags;
	size_t				len;
	size_t				done_io;
	unsigned int			flags;
};

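/*
 * Both flags set identifies a multishot request whose async poll handler
 * has already been armed; checked below as
 * (req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED.
 */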
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

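/*
 * A short transfer is only worth retrying for MSG_WAITALL, and then only
 * on socket types where more data can be expected to follow.
 */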
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

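/*
 * Stash the msghdr state in req->async_data so the request can be retried
 * after the on-stack copy is gone; completes with -EAGAIN to trigger the
 * async path, or -ENOMEM if the allocation fails.
 */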
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

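	/*
	 * IORING_RECVSEND_POLL_FIRST asks to arm poll and wait for
	 * readiness before attempting the first transfer.
	 */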
	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
					&iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (iov_len > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
				  &ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
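	/*
	 * Multishot receive requires provided buffers (REQ_F_BUFFER_SELECT),
	 * is incompatible with MSG_WAITALL, and for IORING_OP_RECV the sqe
	 * length must be zero since each buffer comes from the buffer group.
	 */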
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

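/* Reset state so the next multishot iteration picks a fresh provided buffer */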
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret, unsigned int cflags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (*ret > 0) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	return true;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg);

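	/*
	 * For multishot, each iteration selects a fresh provided buffer; on
	 * success an aux CQE with IORING_CQE_F_MORE is posted and we jump
	 * back here to re-issue the receive.
	 */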
retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, kmsg->uaddr, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg);
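			/*
			 * Poll is already armed for a polled multishot, so
			 * recycle the buffer and wait for the next wakeup
			 * rather than posting a completion.
			 */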
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags))
		goto retry_multishot;

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags))
		goto retry_multishot;

	return ret;
}

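/*
 * Multishot accept with a fixed file table only works with per-accept slot
 * allocation (IORING_FILE_INDEX_ALLOC), since a single fixed slot cannot
 * hold every accepted socket.
 */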
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
#endif