mirror of https://github.com/torvalds/linux.git
new helper: copy_page_from_iter()

parallel to copy_page_to_iter().  pipe_write() switched to it (and became
->write_iter()).

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
commit f0d1bec9d5
parent 84c3d55cc4

3 changed files with 99 additions and 110 deletions
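For orientation only (this note and the sketch below are an editorial addition, not part of the commit): copy_page_from_iter() copies up to `bytes` bytes of user data into one page starting at `offset`, advances the iterator's segment/offset/count bookkeeping, and returns the number of bytes actually copied; a short return means the user memory faulted or the iterator ran out of data. The stand-alone user-space model below illustrates that contract. struct iter_model, model_copy_page_from_iter() and MODEL_PAGE_SIZE are invented stand-ins for the kernel's struct iov_iter, copy_page_from_iter() and PAGE_SIZE, and memcpy() stands in for the faulting __copy_from_user*() primitives.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

#define MODEL_PAGE_SIZE 4096

/* simplified stand-in for the kernel's struct iov_iter; assumes ->count
 * never exceeds the data actually described by the iovec segments */
struct iter_model {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments left, counting the current one */
	size_t iov_offset;		/* offset into the current segment */
	size_t count;			/* total bytes left in the iterator */
};

/* model of the copy_page_from_iter() contract: copy up to @bytes bytes of
 * iterator data into @page at @offset, advance the iterator, and return the
 * number of bytes actually consumed */
static size_t model_copy_page_from_iter(char *page, size_t offset, size_t bytes,
					struct iter_model *i)
{
	size_t wanted;

	if (bytes > i->count)		/* never copy more than the iterator holds */
		bytes = i->count;
	wanted = bytes;

	while (bytes) {
		size_t chunk = i->iov->iov_len - i->iov_offset;

		if (chunk > bytes)
			chunk = bytes;
		memcpy(page + offset, (char *)i->iov->iov_base + i->iov_offset, chunk);
		offset += chunk;
		bytes -= chunk;
		i->count -= chunk;
		i->iov_offset += chunk;
		if (i->iov_offset == i->iov->iov_len) {	/* segment exhausted, move on */
			i->iov++;
			i->nr_segs--;
			i->iov_offset = 0;
		}
	}
	/* in user space nothing can fault, so this equals wanted; the kernel
	 * helper may return less when the user pages cannot be read */
	return wanted - bytes;
}

int main(void)
{
	char page[MODEL_PAGE_SIZE];
	char a[] = "hello, ", b[] = "world";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) - 1 },
		{ .iov_base = b, .iov_len = sizeof(b) - 1 },
	};
	struct iter_model it = {
		.iov = iov, .nr_segs = 2, .iov_offset = 0,
		.count = sizeof(a) - 1 + sizeof(b) - 1,
	};
	size_t n = model_copy_page_from_iter(page, 0, MODEL_PAGE_SIZE, &it);

	printf("copied %zu bytes: \"%.*s\", %zu left in iterator\n",
	       n, (int)n, page, it.count);
	return 0;
}

The point of the interface, mirrored above, is that the caller never touches iovec segments directly: it asks for "up to this many bytes into this page" and the iterator advances itself, which is exactly what lets pipe_write() below drop its open-coded segment walking.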
				
			
		
							
								
								
									
 fs/pipe.c           | 129
 include/linux/uio.h |   2
 mm/iov_iter.c       |  78
fs/pipe.c

@@ -116,50 +116,6 @@ void pipe_wait(struct pipe_inode_info *pipe)
 	pipe_lock(pipe);
 }
 
-static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
-			int atomic)
-{
-	unsigned long copy;
-
-	while (len > 0) {
-		while (!iov->iov_len)
-			iov++;
-		copy = min_t(unsigned long, len, iov->iov_len);
-
-		if (atomic) {
-			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
-				return -EFAULT;
-		} else {
-			if (copy_from_user(to, iov->iov_base, copy))
-				return -EFAULT;
-		}
-		to += copy;
-		len -= copy;
-		iov->iov_base += copy;
-		iov->iov_len -= copy;
-	}
-	return 0;
-}
-
-/*
- * Pre-fault in the user memory, so we can use atomic copies.
- */
-static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
-{
-	while (!iov->iov_len)
-		iov++;
-
-	while (len > 0) {
-		unsigned long this_len;
-
-		this_len = min_t(unsigned long, len, iov->iov_len);
-		fault_in_pages_readable(iov->iov_base, this_len);
-		len -= this_len;
-		iov++;
-	}
-}
-
 static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
@@ -380,24 +336,19 @@ static inline int is_packetized(struct file *file)
 }
 
 static ssize_t
-pipe_write(struct kiocb *iocb, const struct iovec *_iov,
-	    unsigned long nr_segs, loff_t ppos)
+pipe_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *filp = iocb->ki_filp;
 	struct pipe_inode_info *pipe = filp->private_data;
-	ssize_t ret;
-	int do_wakeup;
-	struct iovec *iov = (struct iovec *)_iov;
-	size_t total_len;
+	ssize_t ret = 0;
+	int do_wakeup = 0;
+	size_t total_len = iov_iter_count(from);
 	ssize_t chars;
 
-	total_len = iov_length(iov, nr_segs);
 	/* Null write succeeds. */
 	if (unlikely(total_len == 0))
 		return 0;
 
-	do_wakeup = 0;
-	ret = 0;
 	__pipe_lock(pipe);
 
 	if (!pipe->readers) {
@@ -416,38 +367,19 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 		int offset = buf->offset + buf->len;
 
 		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
-			int error, atomic = 1;
-			void *addr;
-
-			error = ops->confirm(pipe, buf);
+			int error = ops->confirm(pipe, buf);
 			if (error)
 				goto out;
 
-			iov_fault_in_pages_read(iov, chars);
-redo1:
-			if (atomic)
-				addr = kmap_atomic(buf->page);
-			else
-				addr = kmap(buf->page);
-			error = pipe_iov_copy_from_user(offset + addr, iov,
-							chars, atomic);
-			if (atomic)
-				kunmap_atomic(addr);
-			else
-				kunmap(buf->page);
-			ret = error;
-			do_wakeup = 1;
-			if (error) {
-				if (atomic) {
-					atomic = 0;
-					goto redo1;
-				}
+			ret = copy_page_from_iter(buf->page, offset, chars, from);
+			if (unlikely(ret < chars)) {
+				error = -EFAULT;
 				goto out;
 			}
+			do_wakeup = 1;
 			buf->len += chars;
-			total_len -= chars;
-			ret = chars;
-			if (!total_len)
+			if (!iov_iter_count(from))
 				goto out;
 		}
 	}
@@ -466,8 +398,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
 			struct pipe_buffer *buf = pipe->bufs + newbuf;
 			struct page *page = pipe->tmp_page;
-			char *src;
-			int error, atomic = 1;
+			int copied;
 
 			if (!page) {
 				page = alloc_page(GFP_HIGHUSER);
@@ -483,40 +414,19 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 			 * FIXME! Is this really true?
 			 */
 			do_wakeup = 1;
-			chars = PAGE_SIZE;
-			if (chars > total_len)
-				chars = total_len;
-
-			iov_fault_in_pages_read(iov, chars);
-redo2:
-			if (atomic)
-				src = kmap_atomic(page);
-			else
-				src = kmap(page);
-
-			error = pipe_iov_copy_from_user(src, iov, chars,
-							atomic);
-			if (atomic)
-				kunmap_atomic(src);
-			else
-				kunmap(page);
-
-			if (unlikely(error)) {
-				if (atomic) {
-					atomic = 0;
-					goto redo2;
-				}
+			copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
+			if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
 				if (!ret)
-					ret = error;
+					ret = -EFAULT;
 				break;
 			}
-			ret += chars;
+			ret += copied;
 
 			/* Insert it into the buffer array */
 			buf->page = page;
 			buf->ops = &anon_pipe_buf_ops;
 			buf->offset = 0;
-			buf->len = chars;
+			buf->len = copied;
 			buf->flags = 0;
 			if (is_packetized(filp)) {
 				buf->ops = &packet_pipe_buf_ops;
@@ -525,8 +435,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 			pipe->nrbufs = ++bufs;
 			pipe->tmp_page = NULL;
 
-			total_len -= chars;
-			if (!total_len)
+			if (!iov_iter_count(from))
 				break;
 		}
 		if (bufs < pipe->buffers)
@@ -1040,8 +949,8 @@ const struct file_operations pipefifo_fops = {
 	.llseek		= no_llseek,
 	.read		= new_sync_read,
 	.read_iter	= pipe_read,
-	.write		= do_sync_write,
-	.aio_write	= pipe_write,
+	.write		= new_sync_write,
+	.write_iter	= pipe_write,
 	.poll		= pipe_poll,
 	.unlocked_ioctl	= pipe_ioctl,
 	.release	= pipe_release,
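Also for orientation (editorial addition, not part of the commit): with pipe_write() now a ->write_iter() method, the plain ->write() entry goes through new_sync_write(), which packages the caller's single flat buffer as a one-segment iterator and calls ->write_iter(). The small user-space sketch below models that wrapper/method split; model_sync_write() and model_write_iter() are invented names, and struct iter_model is again a simplified stand-in for struct iov_iter.

#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

/* simplified stand-in for struct iov_iter */
struct iter_model {
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
	size_t count;
};

/* the iterator-based method (plays the role pipe_write() takes on as
 * ->write_iter()): drains the iterator segment by segment */
static ssize_t model_write_iter(struct iter_model *from)
{
	size_t total = from->count;

	while (from->count) {
		size_t chunk = from->iov->iov_len - from->iov_offset;

		fwrite((const char *)from->iov->iov_base + from->iov_offset,
		       1, chunk, stdout);
		from->count -= chunk;
		from->iov++;
		from->nr_segs--;
		from->iov_offset = 0;
	}
	return (ssize_t)total;
}

/* the compatibility wrapper (plays the role of new_sync_write()): wraps one
 * flat buffer in a single-segment iterator and calls the real method */
static ssize_t model_sync_write(const char *buf, size_t len)
{
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct iter_model from = {
		.iov = &iov, .nr_segs = 1, .iov_offset = 0, .count = len,
	};

	return model_write_iter(&from);
}

int main(void)
{
	ssize_t n = model_sync_write("one buffer, one segment\n", 24);

	fprintf(stderr, "wrote %zd bytes\n", n);
	return 0;
}

Keeping only the iterator-based method and generating the flat-buffer entry from it is the design the fops change above moves pipe writes onto: one copy loop, fed either from a single buffer or from a full iovec.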

include/linux/uio.h

@@ -68,6 +68,8 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
 			unsigned long nr_segs, size_t count);

mm/iov_iter.c

@@ -82,6 +82,84 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, left, wanted;
+	const struct iovec *iov;
+	char __user *buf;
+	void *kaddr, *to;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	iov = i->iov;
+	skip = i->iov_offset;
+	buf = iov->iov_base + skip;
+	copy = min(bytes, iov->iov_len - skip);
+
+	if (!fault_in_pages_readable(buf, copy)) {
+		kaddr = kmap_atomic(page);
+		to = kaddr + offset;
+
+		/* first chunk, usually the only one */
+		left = __copy_from_user_inatomic(to, buf, copy);
+		copy -= left;
+		skip += copy;
+		to += copy;
+		bytes -= copy;
+
+		while (unlikely(!left && bytes)) {
+			iov++;
+			buf = iov->iov_base;
+			copy = min(bytes, iov->iov_len);
+			left = __copy_from_user_inatomic(to, buf, copy);
+			copy -= left;
+			skip = copy;
+			to += copy;
+			bytes -= copy;
+		}
+		if (likely(!bytes)) {
+			kunmap_atomic(kaddr);
+			goto done;
+		}
+		offset = to - kaddr;
+		buf += copy;
+		kunmap_atomic(kaddr);
+		copy = min(bytes, iov->iov_len - skip);
+	}
+	/* Too bad - revert to non-atomic kmap */
+	kaddr = kmap(page);
+	to = kaddr + offset;
+	left = __copy_from_user(to, buf, copy);
+	copy -= left;
+	skip += copy;
+	to += copy;
+	bytes -= copy;
+	while (unlikely(!left && bytes)) {
+		iov++;
+		buf = iov->iov_base;
+		copy = min(bytes, iov->iov_len);
+		left = __copy_from_user(to, buf, copy);
+		copy -= left;
+		skip = copy;
+		to += copy;
+		bytes -= copy;
+	}
+	kunmap(page);
+done:
+	i->count -= wanted - bytes;
+	i->nr_segs -= iov - i->iov;
+	i->iov = iov;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {