	mm: drop mmap_sem before calling balance_dirty_pages() in write fault
One of our services is observing hanging ps/top/etc under heavy write
IO, and the task states show this is an mmap_sem priority inversion:
A write fault is holding the mmap_sem in read-mode and waiting for
(heavily cgroup-limited) IO in balance_dirty_pages():
    balance_dirty_pages+0x724/0x905
    balance_dirty_pages_ratelimited+0x254/0x390
    fault_dirty_shared_page.isra.96+0x4a/0x90
    do_wp_page+0x33e/0x400
    __handle_mm_fault+0x6f0/0xfa0
    handle_mm_fault+0xe4/0x200
    __do_page_fault+0x22b/0x4a0
    page_fault+0x45/0x50
Somebody tries to change the address space, contending for the mmap_sem in
write-mode:
    call_rwsem_down_write_failed_killable+0x13/0x20
    do_mprotect_pkey+0xa8/0x330
    SyS_mprotect+0xf/0x20
    do_syscall_64+0x5b/0x100
    entry_SYSCALL_64_after_hwframe+0x3d/0xa2
The waiting writer locks out all subsequent readers to avoid lock
starvation, and several threads can be seen hanging like this:
    call_rwsem_down_read_failed+0x14/0x30
    proc_pid_cmdline_read+0xa0/0x480
    __vfs_read+0x23/0x140
    vfs_read+0x87/0x130
    SyS_read+0x42/0x90
    do_syscall_64+0x5b/0x100
    entry_SYSCALL_64_after_hwframe+0x3d/0xa2
To fix this, do what we do for cache read faults already: drop the
mmap_sem before calling into anything IO bound, in this case the
balance_dirty_pages() function, and return VM_FAULT_RETRY.
Link: http://lkml.kernel.org/r/20190924194238.GA29030@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
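For context on the mechanism the fix relies on: a fault handler that returns
VM_FAULT_RETRY must itself have dropped the mmap_sem, and the arch fault path
then retakes the lock and retries the fault once. A simplified sketch of that
loop, paraphrased from the x86 __do_page_fault() of this era (not part of this
patch; the real code has more cases and differs by architecture):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	vm_fault_t fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... find_vma() and access checks elided ... */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		/* handle_mm_fault() already dropped mmap_sem for us */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	up_read(&mm->mmap_sem);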
			
			
commit 89b15332af
parent 1603c8d1b1

3 changed files with 50 additions and 34 deletions:
mm/filemap.c (21 changed lines), mm/internal.h (21 changed lines), mm/memory.c (42 changed lines)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
 
 #ifdef CONFIG_MMU
 #define MMAP_LOTSAMISS  (100)
-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
-					     struct file *fpin)
-{
-	int flags = vmf->flags;
-
-	if (fpin)
-		return fpin;
-
-	/*
-	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
-	 * anything, so we only pin the file and drop the mmap_sem if only
-	 * FAULT_FLAG_ALLOW_RETRY is set.
-	 */
-	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
-	    FAULT_FLAG_ALLOW_RETRY) {
-		fpin = get_file(vmf->vma->vm_file);
-		up_read(&vmf->vma->vm_mm->mmap_sem);
-	}
-	return fpin;
-}
-
 /*
  * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
  * @vmf - the vm_fault for this fault.
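The helper removed here was introduced for the read-fault side, and its
read-side callers in mm/filemap.c all follow roughly this pattern (a sketch
for context, not part of this diff):

	/* pin the file and drop mmap_sem before blocking, if allowed */
	fpin = maybe_unlock_mmap_for_io(vmf, fpin);

	/* ... IO-bound work: lock_page(), readahead, etc. ... */

	if (fpin) {
		fput(fpin);
		/* we dropped mmap_sem; tell arch code to retry the fault */
		return ret | VM_FAULT_RETRY;
	}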
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	return max(start, vma->vm_start);
 }
 
+static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+						    struct file *fpin)
+{
+	int flags = vmf->flags;
+
+	if (fpin)
+		return fpin;
+
+	/*
+	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+	 * anything, so we only pin the file and drop the mmap_sem if only
+	 * FAULT_FLAG_ALLOW_RETRY is set.
+	 */
+	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+	    FAULT_FLAG_ALLOW_RETRY) {
+		fpin = get_file(vmf->vma->vm_file);
+		up_read(&vmf->vma->vm_mm->mmap_sem);
+	}
+	return fpin;
+}
+
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
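Note that the function body is unchanged apart from becoming static inline:
hoisting it from mm/filemap.c into mm/internal.h makes it available to the
write-fault path in mm/memory.c below, alongside the existing read-fault
callers.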
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2289,10 +2289,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
  *
  * The function expects the page to be locked and unlocks it.
  */
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
-				    struct page *page)
+static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping;
+	struct page *page = vmf->page;
 	bool dirtied;
 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
 
@@ -2307,16 +2308,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
 	mapping = page_rmapping(page);
 	unlock_page(page);
 
-	if ((dirtied || page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping
-		 * but still dirty their pages
-		 */
-		balance_dirty_pages_ratelimited(mapping);
-	}
-
 	if (!page_mkwrite)
 		file_update_time(vma->vm_file);
+
+	/*
+	 * Throttle page dirtying rate down to writeback speed.
+	 *
+	 * mapping may be NULL here because some device drivers do not
+	 * set page.mapping but still dirty their pages
+	 *
+	 * Drop the mmap_sem before waiting on IO, if we can. The file
+	 * is pinning the mapping, as per above.
+	 */
+	if ((dirtied || page_mkwrite) && mapping) {
+		struct file *fpin;
+
+		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
+		balance_dirty_pages_ratelimited(mapping);
+		if (fpin) {
+			fput(fpin);
+			return VM_FAULT_RETRY;
+		}
+	}
+
+	return 0;
 }
 
 /*
@@ -2571,6 +2586,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	vm_fault_t ret = VM_FAULT_WRITE;
 
 	get_page(vmf->page);
 
@@ -2594,10 +2610,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
 		wp_page_reuse(vmf);
 		lock_page(vmf->page);
 	}
-	fault_dirty_shared_page(vma, vmf->page);
+	ret |= fault_dirty_shared_page(vmf);
 	put_page(vmf->page);
 
-	return VM_FAULT_WRITE;
+	return ret;
 }
 
 /*
@@ -3641,7 +3657,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	fault_dirty_shared_page(vma, vmf->page);
+	ret |= fault_dirty_shared_page(vmf);
 	return ret;
 }
 
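One fallback worth noting: when the fault cannot be retried
(FAULT_FLAG_RETRY_NOWAIT set, or FAULT_FLAG_ALLOW_RETRY clear),
maybe_unlock_mmap_for_io() returns NULL without dropping the lock, so
balance_dirty_pages_ratelimited() still runs under the mmap_sem exactly as it
did before this patch; fault_dirty_shared_page() then returns 0 and the fault
completes normally.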