forked from mirrors/linux

aio: move sanity checks and request allocation to io_submit_one()

makes for somewhat cleaner control flow in __io_submit_one()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

parent fa0ca2aee3
commit 7316b49c2a

1 changed file with 53 additions and 66 deletions
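The cleaner control flow comes from a caller-owns-cleanup split: io_submit_one() now performs the sanity checks and allocates the request, while __io_submit_one() only prepares and dispatches it and reports failure by a plain return. A rough stand-alone sketch of that pattern follows; the names (struct request, request_alloc, submit_prepared, ...) are hypothetical illustrations, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for aio_kiocb and its helpers. */
struct request {
	char payload[64];
};

static struct request *request_alloc(void)
{
	return calloc(1, sizeof(struct request));
}

static void request_destroy(struct request *req)
{
	free(req);
}

/* Stand-in for the completion path that consumes the request on success. */
static void complete_request(struct request *req)
{
	printf("completed: %s\n", req->payload);
	request_destroy(req);
}

/*
 * Helper in the style of the new __io_submit_one(): it receives an
 * already-allocated request and on failure simply returns an error code;
 * there is no cleanup label and no teardown of caller-owned state.
 */
static int submit_prepared(struct request *req, const char *data)
{
	if (strlen(data) >= sizeof(req->payload))
		return -EINVAL;
	strcpy(req->payload, data);
	complete_request(req);		/* success: the request is consumed here */
	return 0;
}

/*
 * Caller in the style of the new io_submit_one(): sanity checks and
 * allocation happen up front, and the single error branch tears down
 * the allocation this function made.
 */
static int submit_one(const char *data)
{
	struct request *req;
	int err;

	if (!data)
		return -EINVAL;

	req = request_alloc();
	if (!req)
		return -EAGAIN;

	err = submit_prepared(req, data);
	if (err)
		request_destroy(req);
	return err;
}

int main(void)
{
	printf("ok: %d\n", submit_one("hello"));
	printf("oversized: %d\n", submit_one("a payload that is much longer than the sixty-four bytes the request can hold"));
	return 0;
}

The single error branch in the caller replaces the old out_put_req: label and the goto paths that used to live inside the helper.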
fs/aio.c | 119 (+53, -66)

--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1777,35 +1777,12 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 }
 
 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
-			   struct iocb __user *user_iocb, bool compat)
+			   struct iocb __user *user_iocb, struct aio_kiocb *req,
+			   bool compat)
 {
-	struct aio_kiocb *req;
-	int ret;
-
-	/* enforce forwards compatibility on users */
-	if (unlikely(iocb->aio_reserved2)) {
-		pr_debug("EINVAL: reserve field set\n");
-		return -EINVAL;
-	}
-
-	/* prevent overflows */
-	if (unlikely(
-	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
-	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
-	    ((ssize_t)iocb->aio_nbytes < 0)
-	   )) {
-		pr_debug("EINVAL: overflow check\n");
-		return -EINVAL;
-	}
-
-	req = aio_get_req(ctx);
-	if (unlikely(!req))
-		return -EAGAIN;
-
 	req->ki_filp = fget(iocb->aio_fildes);
-	ret = -EBADF;
 	if (unlikely(!req->ki_filp))
-		goto out_put_req;
+		return -EBADF;
 
 	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
 		struct eventfd_ctx *eventfd;
@@ -1816,17 +1793,15 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		 * event using the eventfd_signal() function.
 		 */
 		eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
-		if (IS_ERR(eventfd)) {
-			ret = PTR_ERR(eventfd);
-			goto out_put_req;
-		}
+		if (IS_ERR(eventfd))
+			return PTR_ERR(req->ki_eventfd);
+
 		req->ki_eventfd = eventfd;
 	}
 
-	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
-	if (unlikely(ret)) {
+	if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
 		pr_debug("EFAULT: aio_key\n");
-		goto out_put_req;
+		return -EFAULT;
 	}
 
 	req->ki_res.obj = (u64)(unsigned long)user_iocb;
@@ -1836,58 +1811,70 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
-		ret = aio_read(&req->rw, iocb, false, compat);
-		break;
+		return aio_read(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PWRITE:
-		ret = aio_write(&req->rw, iocb, false, compat);
-		break;
+		return aio_write(&req->rw, iocb, false, compat);
 	case IOCB_CMD_PREADV:
-		ret = aio_read(&req->rw, iocb, true, compat);
-		break;
+		return aio_read(&req->rw, iocb, true, compat);
 	case IOCB_CMD_PWRITEV:
-		ret = aio_write(&req->rw, iocb, true, compat);
-		break;
+		return aio_write(&req->rw, iocb, true, compat);
 	case IOCB_CMD_FSYNC:
-		ret = aio_fsync(&req->fsync, iocb, false);
-		break;
+		return aio_fsync(&req->fsync, iocb, false);
 	case IOCB_CMD_FDSYNC:
-		ret = aio_fsync(&req->fsync, iocb, true);
-		break;
+		return aio_fsync(&req->fsync, iocb, true);
 	case IOCB_CMD_POLL:
-		ret = aio_poll(req, iocb);
-		break;
+		return aio_poll(req, iocb);
 	default:
 		pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-
-	/* Done with the synchronous reference */
-	iocb_put(req);
-
-	/*
-	 * If ret is 0, we'd either done aio_complete() ourselves or have
-	 * arranged for that to be done asynchronously.  Anything non-zero
-	 * means that we need to destroy req ourselves.
-	 */
-	if (!ret)
-		return 0;
-
-out_put_req:
-	iocb_destroy(req);
-	put_reqs_available(ctx, 1);
-	return ret;
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 bool compat)
 {
+	struct aio_kiocb *req;
 	struct iocb iocb;
+	int err;
 
 	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
 		return -EFAULT;
 
-	return __io_submit_one(ctx, &iocb, user_iocb, compat);
+	/* enforce forwards compatibility on users */
+	if (unlikely(iocb.aio_reserved2)) {
+		pr_debug("EINVAL: reserve field set\n");
+		return -EINVAL;
+	}
+
+	/* prevent overflows */
+	if (unlikely(
+	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
+	    (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
+	    ((ssize_t)iocb.aio_nbytes < 0)
+	   )) {
+		pr_debug("EINVAL: overflow check\n");
+		return -EINVAL;
+	}
+
+	req = aio_get_req(ctx);
+	if (unlikely(!req))
+		return -EAGAIN;
+
+	err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
+
+	/* Done with the synchronous reference */
+	iocb_put(req);
+
+	/*
+	 * If err is 0, we'd either done aio_complete() ourselves or have
+	 * arranged for that to be done asynchronously.  Anything non-zero
+	 * means that we need to destroy req ourselves.
+	 */
+	if (unlikely(err)) {
+		iocb_destroy(req);
+		put_reqs_available(ctx, 1);
+	}
+	return err;
 }
 
 /* sys_io_submit:
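For reference, the path being reshuffled here is what a plain io_submit(2) call from user space ends up in. A minimal raw-syscall exercise of that path might look like the sketch below (no libaio; the file path and buffer size are arbitrary, and error handling is trimmed).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

/* Thin wrappers: glibc does not export the native AIO syscalls. */
static long sys_io_setup(unsigned nr, aio_context_t *ctxp)
{
	return syscall(SYS_io_setup, nr, ctxp);
}

static long sys_io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(SYS_io_submit, ctx, nr, iocbpp);
}

static long sys_io_getevents(aio_context_t ctx, long min_nr, long nr,
			     struct io_event *events, struct timespec *timeout)
{
	return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}

static long sys_io_destroy(aio_context_t ctx)
{
	return syscall(SYS_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd;

	if (sys_io_setup(8, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}

	fd = open("/etc/hostname", O_RDONLY);	/* any readable file will do */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The zeroed reserved fields are what the "forwards compatibility"
	 * and overflow checks moved by this commit are validating. */
	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	/* io_submit() -> io_submit_one() -> __io_submit_one() in fs/aio.c */
	if (sys_io_submit(ctx, 1, cbs) != 1) {
		perror("io_submit");
		return 1;
	}

	if (sys_io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("read %lld bytes\n", (long long)ev.res);

	close(fd);
	sys_io_destroy(ctx);
	return 0;
}

With this commit, any failure inside __io_submit_one() propagates back as a plain return value, and io_submit_one() is the one place that drops the request and returns the reserved slot via put_reqs_available().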