	userfaultfd/shmem: support UFFDIO_CONTINUE for shmem

With this change, userspace can resolve a minor fault within a
shmem-backed area with a UFFDIO_CONTINUE ioctl.  The semantics for this
match those for hugetlbfs - we look up the existing page in the page
cache, and install a PTE for it.

This commit introduces a new helper: mfill_atomic_install_pte.

Why handle UFFDIO_CONTINUE for shmem in mm/userfaultfd.c, instead of in
shmem.c?  The existing userfault implementation only relies on shmem.c
for VM_SHARED VMAs.  However, minor fault handling / CONTINUE work just
fine for !VM_SHARED VMAs as well.  We'd prefer to handle CONTINUE for
shmem in one place, regardless of shared/private (to reduce code
duplication).

Why add a new mfill_atomic_install_pte helper?  A problem we have with
continue is that shmem_mfill_atomic_pte() and mcopy_atomic_pte() are
*close* to what we want, but not exactly.  We do want to set up the PTEs
in a CONTINUE operation, but we don't want to e.g. allocate a new page,
charge it (e.g. to the shmem inode), manipulate various flags, etc.
Also, we have the problem stated above: shmem_mfill_atomic_pte() and
mcopy_atomic_pte() each handle only one-half of the cases (shared /
private) that continue cares about.  So, introduce mcontinue_atomic_pte()
to handle all of the shmem continue cases, and introduce the new
mfill_atomic_install_pte() helper so mcontinue_atomic_pte() doesn't
duplicate code with mcopy_atomic_pte().

In a future commit, shmem_mfill_atomic_pte() will also be modified to
use this new helper.  However, since this is a bigger refactor, it seems
most clear to do it as a separate change.

Link: https://lkml.kernel.org/r/20210503180737.2487560-5-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Upton <oupton@google.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Wang Qing <wangqing@vivo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
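
As a quick illustration of the flow this enables, here is a minimal
userspace sketch (not part of this patch; it assumes the uapi pieces
added elsewhere in this series - UFFDIO_REGISTER_MODE_MINOR,
UFFD_FEATURE_MINOR_SHMEM, UFFDIO_CONTINUE - are present in
linux/userfaultfd.h, and it elides the uffd_msg read loop and all error
handling):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);

	/*
	 * Two mappings of the same shmem page: 'guest' is registered for
	 * minor faults, 'staging' is used to reach the page cache.
	 */
	int memfd = memfd_create("guest-mem", 0);
	ftruncate(memfd, psz);
	char *guest = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			   MAP_SHARED, memfd, 0);
	char *staging = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			     MAP_SHARED, memfd, 0);

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = {
		.api = UFFD_API,
		/* Advertised for shmem by a later patch in this series. */
		.features = UFFD_FEATURE_MINOR_SHMEM,
	};
	ioctl(uffd, UFFDIO_API, &api);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)guest, .len = psz },
		.mode = UFFDIO_REGISTER_MODE_MINOR,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* Populate the page cache through the unregistered mapping. */
	memset(staging, 0xab, psz);

	/*
	 * A touch of 'guest' would now raise a minor fault: the page is
	 * in the cache, but no PTE is installed.  After read()ing the
	 * corresponding uffd_msg, a handler thread resolves the fault by
	 * asking the kernel to install a PTE for the existing page:
	 */
	struct uffdio_continue cont = {
		.range = { .start = (unsigned long)guest, .len = psz },
		.mode = 0,
	};
	ioctl(uffd, UFFDIO_CONTINUE, &cont);

	return 0;
}

Note that the final ioctl allocates and copies nothing: the kernel looks
up the page already in the shmem page cache and installs a PTE for it,
which is what the new mfill_atomic_install_pte() helper does in the diff
below.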
parent c949b097ef
commit 153132571f

1 changed file with 127 additions and 45 deletions
							
								
								
									
 mm/userfaultfd.c | 172 ++++++++++++++++++++++++++++++++++++++++++-------------
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -48,6 +48,83 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 	return dst_vma;
 }
 
+/*
+ * Install PTEs, to map dst_addr (within dst_vma) to page.
+ *
+ * This function handles MCOPY_ATOMIC_CONTINUE (which is always file-backed),
+ * whether or not dst_vma is VM_SHARED. It also handles the more general
+ * MCOPY_ATOMIC_NORMAL case, when dst_vma is *not* VM_SHARED (it may be file
+ * backed, or not).
+ *
+ * Note that MCOPY_ATOMIC_NORMAL for a VM_SHARED dst_vma is handled by
+ * shmem_mcopy_atomic_pte instead.
+ */
+static int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+				    struct vm_area_struct *dst_vma,
+				    unsigned long dst_addr, struct page *page,
+				    bool newly_allocated, bool wp_copy)
+{
+	int ret;
+	pte_t _dst_pte, *dst_pte;
+	bool writable = dst_vma->vm_flags & VM_WRITE;
+	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
+	bool page_in_cache = page->mapping;
+	spinlock_t *ptl;
+	struct inode *inode;
+	pgoff_t offset, max_off;
+
+	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+	if (page_in_cache && !vm_shared)
+		writable = false;
+	if (writable || !page_in_cache)
+		_dst_pte = pte_mkdirty(_dst_pte);
+	if (writable) {
+		if (wp_copy)
+			_dst_pte = pte_mkuffd_wp(_dst_pte);
+		else
+			_dst_pte = pte_mkwrite(_dst_pte);
+	}
+
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+	if (vma_is_shmem(dst_vma)) {
+		/* serialize against truncate with the page table lock */
+		inode = dst_vma->vm_file->f_inode;
+		offset = linear_page_index(dst_vma, dst_addr);
+		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+		ret = -EFAULT;
+		if (unlikely(offset >= max_off))
+			goto out_unlock;
+	}
+
+	ret = -EEXIST;
+	if (!pte_none(*dst_pte))
+		goto out_unlock;
+
+	if (page_in_cache)
+		page_add_file_rmap(page, false);
+	else
+		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
+
+	/*
+	 * Must happen after rmap, as mm_counter() checks mapping (via
+	 * PageAnon()), which is set by __page_set_anon_rmap().
+	 */
+	inc_mm_counter(dst_mm, mm_counter(page));
+
+	if (newly_allocated)
+		lru_cache_add_inactive_or_unevictable(page, dst_vma);
+
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+	ret = 0;
+out_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	return ret;
+}
+
 static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    pmd_t *dst_pmd,
 			    struct vm_area_struct *dst_vma,
@@ -56,13 +133,9 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    struct page **pagep,
 			    bool wp_copy)
 {
-	pte_t _dst_pte, *dst_pte;
-	spinlock_t *ptl;
 	void *page_kaddr;
 	int ret;
 	struct page *page;
-	pgoff_t offset, max_off;
-	struct inode *inode;
 
 	if (!*pagep) {
 		ret = -ENOMEM;
@@ -99,43 +172,12 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
 		goto out_release;
 
-	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
-	if (dst_vma->vm_flags & VM_WRITE) {
-		if (wp_copy)
-			_dst_pte = pte_mkuffd_wp(_dst_pte);
-		else
-			_dst_pte = pte_mkwrite(_dst_pte);
-	}
-
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
-	if (dst_vma->vm_file) {
-		/* the shmem MAP_PRIVATE case requires checking the i_size */
-		inode = dst_vma->vm_file->f_inode;
-		offset = linear_page_index(dst_vma, dst_addr);
-		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-		ret = -EFAULT;
-		if (unlikely(offset >= max_off))
-			goto out_release_uncharge_unlock;
-	}
-	ret = -EEXIST;
-	if (!pte_none(*dst_pte))
-		goto out_release_uncharge_unlock;
-
-	inc_mm_counter(dst_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
-	lru_cache_add_inactive_or_unevictable(page, dst_vma);
-
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(dst_vma, dst_addr, dst_pte);
-
-	pte_unmap_unlock(dst_pte, ptl);
-	ret = 0;
+	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, true, wp_copy);
+	if (ret)
+		goto out_release;
+
 out:
 	return ret;
-out_release_uncharge_unlock:
-	pte_unmap_unlock(dst_pte, ptl);
 out_release:
 	put_page(page);
 	goto out;
@@ -176,6 +218,41 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
 	return ret;
 }
 
+/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
+static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
+				pmd_t *dst_pmd,
+				struct vm_area_struct *dst_vma,
+				unsigned long dst_addr,
+				bool wp_copy)
+{
+	struct inode *inode = file_inode(dst_vma->vm_file);
+	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+	struct page *page;
+	int ret;
+
+	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
+	if (ret)
+		goto out;
+	if (!page) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, false, wp_copy);
+	if (ret)
+		goto out_release;
+
+	unlock_page(page);
+	ret = 0;
+out:
+	return ret;
+out_release:
+	unlock_page(page);
+	put_page(page);
+	goto out;
+}
+
 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 {
 	pgd_t *pgd;
@@ -367,11 +444,16 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 						unsigned long dst_addr,
 						unsigned long src_addr,
 						struct page **page,
-						bool zeropage,
+						enum mcopy_atomic_mode mode,
 						bool wp_copy)
 {
 	ssize_t err;
 
+	if (mode == MCOPY_ATOMIC_CONTINUE) {
+		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+					    wp_copy);
+	}
+
 	/*
 	 * The normal page fault path for a shmem will invoke the
 	 * fault, fill the hole in the file and COW it right away. The
@@ -383,7 +465,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	 * and not in the radix tree.
 	 */
 	if (!(dst_vma->vm_flags & VM_SHARED)) {
-		if (!zeropage)
+		if (mode == MCOPY_ATOMIC_NORMAL)
 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
 					       dst_addr, src_addr, page,
 					       wp_copy);
@@ -393,7 +475,8 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	} else {
 		VM_WARN_ON_ONCE(wp_copy);
 		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
-					     dst_addr, src_addr, zeropage,
+					     dst_addr, src_addr,
+					     mode != MCOPY_ATOMIC_NORMAL,
 					     page);
 	}
 
@@ -415,7 +498,6 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	long copied;
 	struct page *page;
 	bool wp_copy;
-	bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);
 
 	/*
 	 * Sanitize the command parameters:
@@ -478,7 +560,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 
 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
 		goto out_unlock;
-	if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
+	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
 		goto out_unlock;
 
 	/*
@@ -526,7 +608,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
-				       src_addr, &page, zeropage, wp_copy);
+				       src_addr, &page, mcopy_mode, wp_copy);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {