	mm: replace get_user_pages() write/force parameters with gup_flags
This removes the 'write' and 'force' parameters from get_user_pages() and
replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in
callers, as use of this flag can result in surprising behaviour (and hence
bugs) within the mm subsystem.

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
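As a rough sketch of the caller-side conversion this implies (illustrative only, not taken verbatim from the diff below; the variables ret, start, nr_pages and pages are hypothetical):

	/* Old API: separate boolean write/force arguments. */
	ret = get_user_pages(start, nr_pages, 1 /* write */, 0 /* force */,
			     pages, NULL);

	/* New API: a single gup_flags argument; forcing access must now be
	 * requested explicitly, e.g. FOLL_WRITE | FOLL_FORCE. */
	ret = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);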
commit 768ae309a9
parent 7f23b3504a

22 changed files with 49 additions and 54 deletions
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 			     noinpages,
 			     0,  /* read access only for in data */
-			     0, /* no force */
 			     inpages,
 			     NULL);
 
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	if (oper.do_cipher){
 		err = get_user_pages((unsigned long int)oper.cipher_outdata,
 				     nooutpages,
-				     1, /* write access for out data */
-				     0, /* no force */
+				     FOLL_WRITE, /* write access for out data */
 				     outpages,
 				     NULL);
 		up_read(&current->mm->mmap_sem);
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
 {
 	long gup_ret;
 	int nr_pages = 1;
-	int force = 0;
 
-	gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
-			force, NULL, NULL);
+	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+			write ? FOLL_WRITE : 0,	NULL, NULL);
 	/*
 	 * get_user_pages() returns number of pages gotten.
 	 * 0 means we failed to fault in and get anything,
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);
 
-		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+		r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
-			     (vsg->direction == DMA_FROM_DEVICE),
-			     0, vsg->pages, NULL);
+			     (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+			     vsg->pages, NULL);
 
 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
 	int need_release = 0;
+	unsigned int gup_flags = FOLL_WRITE;
 
 	if (dmasync)
 		dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (ret)
 		goto out;
 
+	if (!umem->writable)
+		gup_flags |= FOLL_FORCE;
+
 	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;
 
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, vma_list);
+				     gup_flags, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
 
-	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
 	if (ret < 0)
 		goto out;
 
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	for (got = 0; got < num_pages; got += ret) {
 		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got, 1, 1,
+				     num_pages - got,
+				     FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int i;
 	int flags;
 	dma_addr_t pa;
+	unsigned int gup_flags;
 
 	if (!can_do_mlock())
 		return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 
 	flags = IOMMU_READ | IOMMU_CACHE;
 	flags |= (writable) ? IOMMU_WRITE : 0;
+	gup_flags = FOLL_WRITE;
+	gup_flags |= (writable) ? 0 : FOLL_FORCE;
 	cur_base = addr & PAGE_MASK;
 	ret = 0;
 
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		ret = get_user_pages(cur_base,
 					min_t(unsigned long, npages,
 					PAGE_SIZE / sizeof(struct page *)),
-					1, !writable, page_list, NULL);
+					gup_flags, page_list, NULL);
 
 		if (ret < 0)
 			goto out;
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 {
 	unsigned long first, last;
 	int err, rw = 0;
+	unsigned int flags = FOLL_FORCE;
 
 	dma->direction = direction;
 	switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	if (NULL == dma->pages)
 		return -ENOMEM;
 
+	if (rw == READ)
+		flags |= FOLL_WRITE;
+
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
 	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
-			     rw == READ, 1, /* force */
-			     dma->pages, NULL);
+			     flags, dma->pages, NULL);
 
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
@@ -1396,8 +1396,7 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
 		pinned_pages->nr_pages = get_user_pages(
 				(u64)addr,
 				nr_pages,
-				!!(prot & SCIF_PROT_WRITE),
-				0,
+				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
 				pinned_pages->pages,
 				NULL);
 		up_write(&mm->mmap_sem);
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
 		 * much memory to the process.
 		 */
 		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+		ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+				&page, NULL);
 		up_read(&current->mm->mmap_sem);
 		if (ret < 0)
 			break;
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 		down_read(&current->mm->mmap_sem);
 		pinned = get_user_pages(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, 0,
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 				page_list, NULL);
 		up_read(&current->mm->mmap_sem);
 
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		actual_pages = get_user_pages(task, task->mm,
 				          (unsigned long)buf & ~(PAGE_SIZE - 1),
 					  num_pages,
-					  (type == PAGELIST_READ) /*Write */ ,
-					  0 /*Force */ ,
+					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
 					  pages,
 					  NULL /*vmas */);
 		up_read(&task->mm->mmap_sem);
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 		current->mm,              /* mm */
 		(unsigned long)virt_addr, /* start */
 		num_pages,                /* len */
-		0,                        /* write */
-		0,                        /* force */
+		0,                        /* gup_flags */
 		pages,                    /* pages (array of page pointers) */
 		NULL);                    /* vmas */
 	up_read(&current->mm->mmap_sem);
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 	/* Get the physical addresses of the source buffer */
 	down_read(&current->mm->mmap_sem);
 	num_pinned = get_user_pages(param.local_vaddr - lb_offset,
-		num_pages, (param.source == -1) ? READ : WRITE,
-		0, pages, NULL);
+		num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+		pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
 	if (num_pinned != num_pages) {
@@ -1279,7 +1279,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    int write, int force, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);

mm/gup.c (12 changed lines)
@@ -987,18 +987,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
-	unsigned int flags = FOLL_TOUCH;
-
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       pages, vmas, NULL, false, flags);
+				       pages, vmas, NULL, false,
+				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
 	struct page *p;
 	int err;
 
-	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);

mm/nommu.c (18 changed lines)
@@ -160,18 +160,11 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages,
+		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
-	int flags = 0;
-
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_user_pages(current, current->mm, start, nr_pages, flags,
-				pages, vmas, NULL);
+	return __get_user_pages(current, current->mm, start, nr_pages,
+				gup_flags, pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -179,10 +172,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    int *locked)
 {
-	int write = gup_flags & FOLL_WRITE;
-	int force = gup_flags & FOLL_FORCE;
-
-	return get_user_pages(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 