	io_uring: get rid of kiocb_wait_page_queue_init()
The 5.9 merge moved this function into io_uring, which means that we
don't need to retain the generic nature of it. Clean up this part by
removing redundant checks, and just inlining the small remainder in
io_rw_should_retry().

No functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
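Since the diff below spreads the change across three hunks, here is io_rw_should_retry() as it reads once the patch is applied, assembled from those hunks (the one "inlined remainder" comment is editorial; every code line appears in the diff):

static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct wait_page_queue *wait = &req->io->rw.wpq;
	struct kiocb *kiocb = &req->rw.kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	/* editorial: the inlined remainder of the removed helper */
	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_waitq = wait;

	io_get_req_task(req);
	return true;
}

Note how the helper's -EINVAL check for IOCB_HIPRI folds into the existing buffered-IO test, and its -EOPNOTSUPP path collapses into the FMODE_BUF_RASYNC check that io_rw_should_retry() was already doing; that is the redundancy the message refers to.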
This commit is contained in:
parent b711d4eaf0
commit 3b2a4439e0
1 changed file with 11 additions and 30 deletions
fs/io_uring.c
@@ -3074,27 +3074,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
 	return 1;
 }
 
-static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
-					     struct wait_page_queue *wait,
-					     wait_queue_func_t func,
-					     void *data)
-{
-	/* Can't support async wakeup with polled IO */
-	if (kiocb->ki_flags & IOCB_HIPRI)
-		return -EINVAL;
-	if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
-		wait->wait.func = func;
-		wait->wait.private = data;
-		wait->wait.flags = 0;
-		INIT_LIST_HEAD(&wait->wait.entry);
-		kiocb->ki_flags |= IOCB_WAITQ;
-		kiocb->ki_waitq = wait;
-		return 0;
-	}
-
-	return -EOPNOTSUPP;
-}
-
 /*
  * This controls whether a given IO request should be armed for async page
  * based retry. If we return false here, the request is handed to the async
@@ -3109,16 +3088,17 @@ static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
  */
 static bool io_rw_should_retry(struct io_kiocb *req)
 {
+	struct wait_page_queue *wait = &req->io->rw.wpq;
 	struct kiocb *kiocb = &req->rw.kiocb;
-	int ret;
 
 	/* never retry for NOWAIT, we just complete with -EAGAIN */
 	if (req->flags & REQ_F_NOWAIT)
 		return false;
 
 	/* Only for buffered IO */
-	if (kiocb->ki_flags & IOCB_DIRECT)
+	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
 		return false;
+
 	/*
 	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
@@ -3126,16 +3106,17 @@ static bool io_rw_should_retry(struct io_kiocb *req)
 	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
 		return false;
 
-	ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
-						io_async_buf_func, req);
-	if (!ret) {
-		io_get_req_task(req);
-		return true;
-	}
+	wait->wait.func = io_async_buf_func;
+	wait->wait.private = req;
+	wait->wait.flags = 0;
+	INIT_LIST_HEAD(&wait->wait.entry);
+	kiocb->ki_flags |= IOCB_WAITQ;
+	kiocb->ki_waitq = wait;
 
-	return false;
+	io_get_req_task(req);
+	return true;
 }
 
 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
 {
 	if (req->file->f_op->read_iter)
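For readers outside the kernel tree, here is a minimal userspace sketch of the wait-entry pattern the patch inlines: fill a wait entry with a wake callback and private data, hang it off the request, and let the "unlock" side fire it later. All names below are invented for illustration; the kernel side uses wait_page_queue, io_async_buf_func, and the page unlock path.

#include <stdio.h>

/* stands in for struct wait_page_queue's embedded wait entry */
struct demo_wait_entry {
	int	(*func)(struct demo_wait_entry *entry);	/* wake callback */
	void	*private;				/* owning request */
};

struct demo_req {
	const char		*name;
	struct demo_wait_entry	wait;
};

/* stands in for io_async_buf_func(): runs when the "page" unlocks */
static int demo_buf_func(struct demo_wait_entry *entry)
{
	struct demo_req *req = entry->private;

	printf("retrying buffered IO for request '%s'\n", req->name);
	return 1;
}

int main(void)
{
	struct demo_req req = { .name = "read0" };

	/* arming, as io_rw_should_retry() now does inline */
	req.wait.func = demo_buf_func;
	req.wait.private = &req;

	/* the unlocking side later invokes the armed callback */
	req.wait.func(&req.wait);
	return 0;
}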