io_uring/memmap: optimise single folio regions

We don't need to vmap if memory is already physically contiguous. There
are two important cases it covers: PAGE_SIZE regions and huge pages. Use
io_check_coalesce_buffer() to get the number of contiguous folios.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d5240af23064a824c29d14d2406f1ae764bf4505.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
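The core idea, restated outside the patch: a region whose pages all belong to
a single folio is physically contiguous, so the kernel's linear mapping of the
first page already gives a virtual address covering the whole region, and
vmap() can be skipped. Below is a minimal userspace sketch of that contiguity
test, using hypothetical stand-in types and names (the real code works on
struct page and uses io_check_coalesce_buffer(), not anything shown here):

/*
 * Hypothetical model of the "single folio" fast path: a region is directly
 * addressable when every page is physically adjacent to the previous one.
 * Not kernel code; struct fake_page stands in for struct page.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct fake_page {
	uintptr_t phys;	/* physical address of this page */
};

/* Roughly what a nr_folios == 1 result from the coalescing check implies. */
static bool pages_physically_contiguous(const struct fake_page *pages,
					size_t nr_pages)
{
	for (size_t i = 1; i < nr_pages; i++) {
		if (pages[i].phys != pages[i - 1].phys + PAGE_SIZE)
			return false;
	}
	return true;
}

int main(void)
{
	/* Four pages carved out of one 2MiB huge page: contiguous. */
	struct fake_page huge[4] = {
		{ 0x200000 }, { 0x201000 }, { 0x202000 }, { 0x203000 },
	};
	/* Two unrelated pages: would still have to go through vmap(). */
	struct fake_page sparse[2] = { { 0x10000 }, { 0x42000 } };

	printf("huge:   %s\n", pages_physically_contiguous(huge, 4) ?
	       "direct mapping ok" : "needs vmap");
	printf("sparse: %s\n", pages_physically_contiguous(sparse, 2) ?
	       "direct mapping ok" : "needs vmap");
	return 0;
}

The first array models the nr_folios == 1 case the patch optimises (pages
carved from one huge page); the second models unrelated pages for which the
vmap() fallback remains necessary.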
commit c4d0ac1c15
parent 226ae1b4d1
1 changed file with 22 additions and 7 deletions
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -226,12 +226,31 @@ void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
 	memset(mr, 0, sizeof(*mr));
 }
 
+static int io_region_init_ptr(struct io_mapped_region *mr)
+{
+	struct io_imu_folio_data ifd;
+	void *ptr;
+
+	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
+		if (ifd.nr_folios == 1) {
+			mr->ptr = page_address(mr->pages[0]);
+			return 0;
+		}
+	}
+	ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	mr->ptr = ptr;
+	mr->flags |= IO_REGION_F_VMAP;
+	return 0;
+}
+
 int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 		     struct io_uring_region_desc *reg)
 {
 	struct page **pages;
 	int nr_pages, ret;
-	void *vptr;
 	u64 end;
 
 	if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
@@ -267,13 +286,9 @@ int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
 	mr->pages = pages;
 	mr->flags |= IO_REGION_F_USER_PROVIDED;
 
-	vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-	if (!vptr) {
-		ret = -ENOMEM;
+	ret = io_region_init_ptr(mr);
+	if (ret)
 		goto out_free;
-	}
-	mr->ptr = vptr;
-	mr->flags |= IO_REGION_F_VMAP;
 	return 0;
 out_free:
 	io_free_region(ctx, mr);
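Note how IO_REGION_F_VMAP is only set on the vmap() fallback path after this
change: the single-folio fast path returns a direct-mapped address from
page_address(), and presumably io_free_region() keys off that flag to decide
whether a vunmap() is required, which is why the fast path must leave it
clear.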