mirror of https://github.com/torvalds/linux.git
synced 2025-10-31 08:38:45 +02:00
	io_uring/zcrx: prepare fallback for larger pages
io_zcrx_copy_chunk() processes one page at a time, which won't be
sufficient when the net_iov size grows. Introduce a structure keeping the
target niov page and other parameters; it's more convenient and can be
reused later. Also add a helper function that can efficiently copy buffers
of arbitrary length. For 64-bit archs the loop inside should be compiled
out.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e84bc705a4e1edeb9aefff470d96558d8232388f.1751466461.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
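The diff further down adds io_copy_page(), which splits a copy at page boundaries whenever either side has to be kmapped one page at a time. As a rough, userspace-only illustration of that chunking arithmetic (not kernel code; copy_cache, copy_chunked, and the buffer sizes are invented for this sketch), each iteration is clamped to the room left before the next page boundary on both the source and the destination:

/*
 * Standalone userspace sketch of the page-bounded chunking done by
 * io_copy_page() in the patch below. Names (copy_cache, copy_chunked)
 * and sizes are invented for illustration; this is not kernel code.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct copy_cache {
	unsigned char *base;	/* destination buffer (stands in for the niov pages) */
	size_t offset;		/* next free byte in the destination */
	size_t size;		/* bytes still available in the destination */
};

static size_t copy_chunked(struct copy_cache *cc, const unsigned char *src,
			   size_t src_offset, size_t len)
{
	size_t copied = 0;

	len = MIN(len, cc->size);
	while (len) {
		/* Like the folio_test_partial_kmap() path: never cross a page
		 * boundary on either the source or the destination side. */
		size_t dst_room = PAGE_SIZE - cc->offset % PAGE_SIZE;
		size_t src_room = PAGE_SIZE - src_offset % PAGE_SIZE;
		size_t n = MIN(len, MIN(dst_room, src_room));

		memcpy(cc->base + cc->offset, src + src_offset, n);

		cc->offset += n;
		cc->size -= n;
		src_offset += n;
		len -= n;
		copied += n;
	}
	return copied;
}

int main(void)
{
	static unsigned char dst[4 * PAGE_SIZE], src[4 * PAGE_SIZE];
	struct copy_cache cc = { .base = dst, .offset = 100, .size = sizeof(dst) - 100 };

	/* An 8000-byte copy starting 50 bytes before a source page boundary is
	 * split into several page-bounded memcpy() calls. */
	size_t n = copy_chunked(&cc, src, PAGE_SIZE - 50, 8000);

	printf("copied %zu bytes, destination now at offset %zu\n", n, cc.offset);
	return 0;
}

On configurations without highmem the partial-kmap case never triggers, so the real helper copies the whole request in a single pass, which is what the commit message means by the loop being compiled out on 64-bit archs.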
This commit is contained in:

parent 1b4dc1ff0a
commit e67645bb7f

1 changed file with 56 additions and 27 deletions
io_uring/zcrx.c

@@ -929,6 +929,51 @@ static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area)
 	return niov;
 }
 
+struct io_copy_cache {
+	struct page		*page;
+	unsigned long		offset;
+	size_t			size;
+};
+
+static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
+			    unsigned int src_offset, size_t len)
+{
+	size_t copied = 0;
+
+	len = min(len, cc->size);
+
+	while (len) {
+		void *src_addr, *dst_addr;
+		struct page *dst_page = cc->page;
+		unsigned dst_offset = cc->offset;
+		size_t n = len;
+
+		if (folio_test_partial_kmap(page_folio(dst_page)) ||
+		    folio_test_partial_kmap(page_folio(src_page))) {
+			dst_page = nth_page(dst_page, dst_offset / PAGE_SIZE);
+			dst_offset = offset_in_page(dst_offset);
+			src_page = nth_page(src_page, src_offset / PAGE_SIZE);
+			src_offset = offset_in_page(src_offset);
+			n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
+			n = min(n, len);
+		}
+
+		dst_addr = kmap_local_page(dst_page) + dst_offset;
+		src_addr = kmap_local_page(src_page) + src_offset;
+
+		memcpy(dst_addr, src_addr, n);
+
+		kunmap_local(src_addr);
+		kunmap_local(dst_addr);
+
+		cc->size -= n;
+		cc->offset += n;
+		len -= n;
+		copied += n;
+	}
+	return copied;
+}
+
 static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 				  struct page *src_page, unsigned int src_offset,
 				  size_t len)
@@ -941,11 +986,9 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 		return -EFAULT;
 
 	while (len) {
-		size_t copy_size = min_t(size_t, PAGE_SIZE, len);
-		const int dst_off = 0;
+		struct io_copy_cache cc;
 		struct net_iov *niov;
-		struct page *dst_page;
-		void *dst_addr, *src_addr;
+		size_t n;
 
 		niov = io_zcrx_alloc_fallback(area);
 		if (!niov) {
@@ -953,25 +996,22 @@ static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 			break;
 		}
 
-		dst_page = io_zcrx_iov_page(niov);
-		dst_addr = kmap_local_page(dst_page);
-		src_addr = kmap_local_page(src_page);
-
-		memcpy(dst_addr, src_addr + src_offset, copy_size);
-
-		kunmap_local(src_addr);
-		kunmap_local(dst_addr);
-
-		if (!io_zcrx_queue_cqe(req, niov, ifq, dst_off, copy_size)) {
+		cc.page = io_zcrx_iov_page(niov);
+		cc.offset = 0;
+		cc.size = PAGE_SIZE;
+
+		n = io_copy_page(&cc, src_page, src_offset, len);
+
+		if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
 			io_zcrx_return_niov(niov);
 			ret = -ENOSPC;
 			break;
 		}
 
 		io_zcrx_get_niov_uref(niov);
-		src_offset += copy_size;
-		len -= copy_size;
-		copied += copy_size;
+		src_offset += n;
+		len -= n;
+		copied += n;
 	}
 
 	return copied ? copied : ret;
@@ -981,19 +1021,8 @@ static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 			     const skb_frag_t *frag, int off, int len)
 {
 	struct page *page = skb_frag_page(frag);
-	u32 p_off, p_len, t, copied = 0;
-	int ret = 0;
 
-	off += skb_frag_off(frag);
-
-	skb_frag_foreach_page(frag, off, len,
-			      page, p_off, p_len, t) {
-		ret = io_zcrx_copy_chunk(req, ifq, page, p_off, p_len);
-		if (ret < 0)
-			return copied ? copied : ret;
-		copied += ret;
-	}
-	return copied;
+	return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
 }
 
 static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,