mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	aio: fix race in ring buffer page lookup introduced by page migration support
Prior to the introduction of page migration support in "fs/aio: Add support
to aio ring pages migration" / 36bc08cc01,
mapping of the ring buffer pages was done via get_user_pages() while
retaining mmap_sem held for write.  This avoided possible races with userland
performing an munmap() or mremap().  The page migration patch, however, switched
to using mm_populate() to prime the page mapping.  mm_populate() cannot be
called with mmap_sem held.
Instead of dropping the mmap_sem, revert to the old behaviour and simply
drop the use of mm_populate() since get_user_pages() will cause the pages to
get mapped anyway.  Thanks to Al Viro for spotting this issue.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
			
			
This commit is contained in:
		
							parent
							
								
									77d30b14d2
								
							
						
					
					
						commit
						d6c355c7da
					
				
					 1 changed file with 12 additions and 3 deletions
				
			
		
							
								
								
									
										15
									
								
								fs/aio.c
									
									
									
									
									
								
							
							
						
						
									
										15
									
								
								fs/aio.c
									
									
									
									
									
								
							| 
						 | 
					@ -307,16 +307,25 @@ static int aio_setup_ring(struct kioctx *ctx)
 | 
				
			||||||
		aio_free_ring(ctx);
 | 
							aio_free_ring(ctx);
 | 
				
			||||||
		return -EAGAIN;
 | 
							return -EAGAIN;
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	up_write(&mm->mmap_sem);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	mm_populate(ctx->mmap_base, populate);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 | 
						pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* We must do this while still holding mmap_sem for write, as we
 | 
				
			||||||
 | 
						 * need to be protected against userspace attempting to mremap()
 | 
				
			||||||
 | 
						 * or munmap() the ring buffer.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
 | 
						ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
 | 
				
			||||||
				       1, 0, ctx->ring_pages, NULL);
 | 
									       1, 0, ctx->ring_pages, NULL);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Dropping the reference here is safe as the page cache will hold
 | 
				
			||||||
 | 
						 * onto the pages for us.  It is also required so that page migration
 | 
				
			||||||
 | 
						 * can unmap the pages and get the right reference count.
 | 
				
			||||||
 | 
						 */
 | 
				
			||||||
	for (i = 0; i < ctx->nr_pages; i++)
 | 
						for (i = 0; i < ctx->nr_pages; i++)
 | 
				
			||||||
		put_page(ctx->ring_pages[i]);
 | 
							put_page(ctx->ring_pages[i]);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						up_write(&mm->mmap_sem);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (unlikely(ctx->nr_pages != nr_pages)) {
 | 
						if (unlikely(ctx->nr_pages != nr_pages)) {
 | 
				
			||||||
		aio_free_ring(ctx);
 | 
							aio_free_ring(ctx);
 | 
				
			||||||
		return -EAGAIN;
 | 
							return -EAGAIN;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue