forked from mirrors/linux
		
	iov_iter: Handle compound highmem pages in copy_page_from_iter_atomic()
copy_page_from_iter_atomic() already handles !highmem compound pages correctly, but if we are passed a highmem compound page, each base page needs to be mapped & unmapped individually. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev> Reviewed-by: Darrick J. Wong <djwong@kernel.org> Tested-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
		
							parent
							
								
									f7f9a0c873
								
							
						
					
					
						commit
						908a1ad894
					
				
					 1 changed file with 23 additions and 10 deletions
				
			
		|  | @ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i) | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(iov_iter_zero); | EXPORT_SYMBOL(iov_iter_zero); | ||||||
| 
 | 
 | ||||||
| size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, | size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, | ||||||
| 				  struct iov_iter *i) | 		size_t bytes, struct iov_iter *i) | ||||||
| { | { | ||||||
| 	char *p; | 	size_t n, copied = 0; | ||||||
| 
 | 
 | ||||||
| 	if (!page_copy_sane(page, offset, bytes)) | 	if (!page_copy_sane(page, offset, bytes)) | ||||||
| 		return 0; | 		return 0; | ||||||
| 	if (WARN_ON_ONCE(!i->data_source)) | 	if (WARN_ON_ONCE(!i->data_source)) | ||||||
| 		return 0; | 		return 0; | ||||||
| 
 | 
 | ||||||
| 	p = kmap_atomic(page) + offset; | 	do { | ||||||
| 	iterate_and_advance(i, bytes, base, len, off, | 		char *p; | ||||||
| 		copyin(p + off, base, len), |  | ||||||
| 		memcpy_from_iter(i, p + off, base, len) |  | ||||||
| 	) |  | ||||||
| 	kunmap_atomic(p); |  | ||||||
| 
 | 
 | ||||||
| 	return bytes; | 		n = bytes - copied; | ||||||
|  | 		if (PageHighMem(page)) { | ||||||
|  | 			page += offset / PAGE_SIZE; | ||||||
|  | 			offset %= PAGE_SIZE; | ||||||
|  | 			n = min_t(size_t, n, PAGE_SIZE - offset); | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		p = kmap_atomic(page) + offset; | ||||||
|  | 		iterate_and_advance(i, n, base, len, off, | ||||||
|  | 			copyin(p + off, base, len), | ||||||
|  | 			memcpy_from_iter(i, p + off, base, len) | ||||||
|  | 		) | ||||||
|  | 		kunmap_atomic(p); | ||||||
|  | 		copied += n; | ||||||
|  | 		offset += n; | ||||||
|  | 	} while (PageHighMem(page) && copied != bytes && n > 0); | ||||||
|  | 
 | ||||||
|  | 	return copied; | ||||||
| } | } | ||||||
| EXPORT_SYMBOL(copy_page_from_iter_atomic); | EXPORT_SYMBOL(copy_page_from_iter_atomic); | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Matthew Wilcox (Oracle)