	mm/mmap: prevent pagefault handler from racing with mmu_notifier registration
Page fault handlers might need to fire MMU notifications while a new
notifier is being registered.  Modify mm_take_all_locks to write-lock all
VMAs and prevent this race with page fault handlers that would hold VMA
locks.  VMAs are locked before i_mmap_rwsem and anon_vma to keep the same
locking order as in page fault handlers.

Link: https://lkml.kernel.org/r/20230227173632.3292573-22-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent f2e13784c1
commit eeff9a5d47

1 changed file with 9 additions and 0 deletions
mm/mmap.c

@@ -3494,6 +3494,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * of mm/rmap.c:
  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
  *     hugetlb mapping);
+ *   - all vmas marked locked
  *   - all i_mmap_rwsem locks;
  *   - all anon_vma->rwseml
  *
@@ -3516,6 +3517,13 @@ int mm_take_all_locks(struct mm_struct *mm)
 
 	mutex_lock(&mm_all_locks_mutex);
 
+	mas_for_each(&mas, vma, ULONG_MAX) {
+		if (signal_pending(current))
+			goto out_unlock;
+		vma_start_write(vma);
+	}
+
+	mas_set(&mas, 0);
 	mas_for_each(&mas, vma, ULONG_MAX) {
 		if (signal_pending(current))
 			goto out_unlock;
@@ -3605,6 +3613,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
 		if (vma->vm_file && vma->vm_file->f_mapping)
 			vm_unlock_mapping(vma->vm_file->f_mapping);
 	}
+	vma_end_write_all(mm);
 
 	mutex_unlock(&mm_all_locks_mutex);
 }
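For context, below is a minimal, hypothetical sketch of the reader side this change serializes against: a per-VMA-lock page fault path of the kind introduced earlier in this series. Only lock_vma_under_rcu(), vma_end_read(), FAULT_FLAG_VMA_LOCK and handle_mm_fault() come from the series; the wrapper function, its name and its retry policy are illustrative assumptions, not code from this commit.

	/* Illustrative sketch only -- not part of this commit. */
	static vm_fault_t fault_with_vma_lock(struct mm_struct *mm,
					      unsigned long address,
					      unsigned int flags,
					      struct pt_regs *regs)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault;

		/* Try to find and read-lock the VMA without taking mmap_lock. */
		vma = lock_vma_under_rcu(mm, address);
		if (!vma)
			return VM_FAULT_RETRY;	/* caller falls back to the mmap_lock path */

		/*
		 * handle_mm_fault() may fire MMU notifications here.  With this
		 * commit, a concurrent mm_take_all_locks() cannot complete while
		 * the VMA read lock is held, because vma_start_write() waits
		 * until vma_end_read() below releases it.
		 */
		fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
		vma_end_read(vma);
		return fault;
	}

In this sketch, a notifier being registered via mm_take_all_locks() either waits for the in-flight fault to drop its VMA read lock, or the fault falls back to the mmap_lock path, which is the ordering the commit message describes.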