mm: remove unnecessary wrapper function do_mmap_pgoff()

The current split between do_mmap() and do_mmap_pgoff() was introduced in
commit 1fcfd8db7f ("mm, mpx: add "vm_flags_t vm_flags" arg to
do_mmap_pgoff()") to support MPX.

The wrapper function do_mmap_pgoff() always passed 0 as the value of the
vm_flags argument to do_mmap().  However, MPX support has subsequently
been removed from the kernel and there were no more direct callers of
do_mmap(); all calls were going via do_mmap_pgoff().

Simplify the code by removing do_mmap_pgoff() and changing all callers to
directly call do_mmap(), which now no longer takes a vm_flags argument.
Signed-off-by: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Link: http://lkml.kernel.org/r/20200727194109.1371462-1-pcc@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
			
			
parent d70cec8983
commit 45e55300f1

9 changed files with 21 additions and 31 deletions
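
The shape of the change at a typical call site, as a minimal illustrative
sketch (the argument names here are generic placeholders, not copied from
any one caller; the real call sites appear in the hunks below):

	/* Before: every caller went through the wrapper, which forwarded to
	 * do_mmap() with vm_flags hard-coded to 0. */
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &populate, uf);

	/* After: the wrapper is gone; callers invoke do_mmap() directly with
	 * the same argument list, and do_mmap() computes vm_flags itself. */
	addr = do_mmap(file, addr, len, prot, flags, pgoff, &populate, uf);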
				
			
		
							
								
								
									
fs/aio.c | 2
@@ -525,7 +525,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 		return -EINTR;
 	}
 
-	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
+	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
 				 PROT_READ | PROT_WRITE,
 				 MAP_SHARED, 0, &unused, NULL);
 	mmap_write_unlock(mm);

fs/hugetlbfs/inode.c
@@ -140,7 +140,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 * already been checked by prepare_hugepage_range.  If you add
 	 * any error returns here, do so after setting VM_HUGETLB, so
 	 * is_vm_hugetlb_page tests below unmap_region go the right
-	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * way when do_mmap unwinds (may be important on powerpc
 	 * and ia64).
 	 */
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;

include/linux/fs.h
@@ -528,7 +528,7 @@ static inline int mapping_mapped(struct address_space *mapping)
 
 /*
  * Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
  * marks vma as VM_SHARED if it is shared, and the file was opened for
  * writing i.e. vma may be mprotected writable even if now readonly.
  *

include/linux/mm.h
@@ -2546,23 +2546,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct list_head *uf);
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf);
+	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot, unsigned long flags,
-	unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf)
-{
-	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
-
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
 			 int ignore_errors);

ipc/shm.c
@@ -1558,7 +1558,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
 			goto invalid;
 	}
 
-	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
+	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
 	*raddr = addr;
 	err = 0;
 	if (IS_ERR_VALUE(addr))

mm/mmap.c | 16
@@ -1030,7 +1030,7 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
  *
  * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
+ * indices (16TB on ia32) because do_mmap() does not permit mmap's which
  * wrap, nor mmaps which cover the final page at index -1UL.
  */
 static int
@@ -1365,11 +1365,11 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
-			unsigned long flags, vm_flags_t vm_flags,
-			unsigned long pgoff, unsigned long *populate,
-			struct list_head *uf)
+			unsigned long flags, unsigned long pgoff,
+			unsigned long *populate, struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
+	vm_flags_t vm_flags;
 	int pkey = 0;
 
 	*populate = 0;
@@ -1431,7 +1431,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * to. we assume access permissions have been handled by the open
 	 * of the memory object, so we don't do any here.
 	 */
-	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
 	if (flags & MAP_LOCKED)
@@ -2230,7 +2230,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		/*
 		 * mmap_region() will call shmem_zero_setup() to create a file,
 		 * so use shmem's get_unmapped_area in case it can be huge.
-		 * do_mmap_pgoff() will clear pgoff, so match alignment.
+		 * do_mmap() will clear pgoff, so match alignment.
 		 */
 		pgoff = 0;
 		get_area = shmem_get_unmapped_area;
@@ -3003,7 +3003,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	}
 
 	file = get_file(vma->vm_file);
-	ret = do_mmap_pgoff(vma->vm_file, start, size,
+	ret = do_mmap(vma->vm_file, start, size,
 			prot, flags, pgoff, &populate, NULL);
 	fput(file);
 out:
@@ -3223,7 +3223,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 	 * By setting it to reflect the virtual start address of the
 	 * vma, merges and splits can happen in a seamless way, just
 	 * using the existing file pgoff checks and manipulations.
-	 * Similarly in do_mmap_pgoff and in do_brk.
+	 * Similarly in do_mmap and in do_brk.
 	 */
 	if (vma_is_anonymous(vma)) {
 		BUG_ON(vma->anon_vma);

mm/nommu.c
@@ -1078,7 +1078,6 @@ unsigned long do_mmap(struct file *file,
 			unsigned long len,
 			unsigned long prot,
 			unsigned long flags,
-			vm_flags_t vm_flags,
 			unsigned long pgoff,
 			unsigned long *populate,
 			struct list_head *uf)
@@ -1086,6 +1085,7 @@ unsigned long do_mmap(struct file *file,
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *rb;
+	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
 
@@ -1104,7 +1104,7 @@ unsigned long do_mmap(struct file *file,
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
-	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
+	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping */
 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ EXPORT_SYMBOL_GPL(access_process_vm);
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
  * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * vm_regions that extend that beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,

mm/shmem.c
@@ -4245,7 +4245,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
- * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
+ * @vma: the vma to be mmapped is prepared by do_mmap
  */
 int shmem_zero_setup(struct vm_area_struct *vma)
 {

mm/util.c
@@ -503,8 +503,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	if (!ret) {
 		if (mmap_write_lock_killable(mm))
 			return -EINTR;
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
-				    &populate, &uf);
+		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
+			      &uf);
 		mmap_write_unlock(mm);
 		userfaultfd_unmap_complete(mm, &uf);
 		if (populate)
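
Why dropping the vm_flags parameter is behavior-preserving, as a minimal
standalone sketch (plain C, not kernel code; "bits" stands in for the value
produced by calc_vm_prot_bits()/calc_vm_flag_bits() on MMU kernels, or by
determine_vm_flags() on nommu):

	/* Old shape: vm_flags arrived as a parameter that every remaining
	 * caller passed as 0, and the computed bits were OR-ed in. */
	unsigned long seeded(unsigned long bits)
	{
		unsigned long vm_flags = 0;	/* what every caller passed */

		vm_flags |= bits;
		return vm_flags;
	}

	/* New shape: vm_flags is a local variable assigned directly. */
	unsigned long direct(unsigned long bits)
	{
		unsigned long vm_flags = bits;

		return vm_flags;
	}

	/* seeded(x) == direct(x) for all x, since (0 | x) == x; this is why
	 * the "|=" in do_mmap() could safely become "=" once the parameter
	 * was removed. */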