mirror of https://github.com/torvalds/linux.git
	mm/gup: Switch all callers of get_user_pages() to not pass tsk/mm
We will soon modify the vanilla get_user_pages() so it can no longer be
used on mm/tasks other than 'current/current->mm', which is by far the
most common way it is called. For now, we allow the old-style calls,
but warn when they are used. (implemented in previous patch)

This patch switches all callers of:

	get_user_pages()
	get_user_pages_unlocked()
	get_user_pages_locked()

to stop passing tsk/mm so they will no longer see the warnings.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: jack@suse.cz
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210156.113E9407@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cde70140fe
commit d4edcf0d56

29 changed files with 44 additions and 64 deletions
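The mechanical shape of the conversion is identical at every call site
below: the leading tsk/mm pair is dropped and every other argument keeps
its position. A minimal sketch of one hypothetical caller, before and
after (the caller name is ours; the six-argument post-patch signature is
the one visible in the mm/gup.c hunk near the bottom of this diff):

	/* Hypothetical caller, for illustration only -- not part of the patch. */
	static int pin_one_page(unsigned long addr, struct page **page)
	{
		int npages;

		down_read(&current->mm->mmap_sem);
		/*
		 * Old convention, now warned about:
		 *   npages = get_user_pages(current, current->mm, addr, 1,
		 *			     1, 0, page, NULL);
		 */
		npages = get_user_pages(addr, 1, 1 /* write */, 0 /* force */,
					page, NULL);
		up_read(&current->mm->mmap_sem);

		return npages == 1 ? 0 : -EFAULT;
	}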
				
			
@@ -2719,9 +2719,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	/* Acquire the mm page semaphore. */
 	down_read(&current->mm->mmap_sem);
 
-	err = get_user_pages(current,
-			     current->mm,
-			     (unsigned long int)(oper.indata + prev_ix),
+	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 			     noinpages,
 			     0,  /* read access only for in data */
 			     0, /* no force */
@@ -2736,9 +2734,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	}
 	noinpages = err;
 	if (oper.do_cipher){
-		err = get_user_pages(current,
-				     current->mm,
-				     (unsigned long int)oper.cipher_outdata,
+		err = get_user_pages((unsigned long int)oper.cipher_outdata,
 				     nooutpages,
 				     1, /* write access for out data */
 				     0, /* no force */
@@ -142,8 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-        ret = get_user_pages(current, current->mm, virt_addr,
-                        1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
@@ -286,8 +286,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	start += nr << PAGE_SHIFT;
 	pages += nr;
 
-	ret = get_user_pages_unlocked(current, mm, start,
-				      (end - start) >> PAGE_SHIFT,
+	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
 				      write, 0, pages);
 
 	/* Have to be a bit careful with return values */
@@ -210,7 +210,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages)
 {
-	struct mm_struct *mm = current->mm;
 	int nr, ret;
 
 	might_sleep();
@@ -222,8 +221,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
 	pages += nr;
-	ret = get_user_pages_unlocked(current, mm, start,
-			     nr_pages - nr, write, 0, pages);
+	ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
 	/* Have to be a bit careful with return values */
 	if (nr > 0)
 		ret = (ret < 0) ? nr : ret + nr;
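The recurring "careful with return values" comment in these
get_user_pages_fast() fallbacks exists because the lockless fast path may
already have pinned nr pages before the slow path runs, so a slow-path
error must not hide that partial success. A sketch of the combining rule
the s390 hunk above uses inline (the helper name is ours, not the
kernel's):

	/*
	 * Fold a slow-path result into a fast-path partial success.
	 * nr:  pages already pinned by the lockless fast path (>= 0)
	 * ret: get_user_pages_unlocked() result for the remainder,
	 *      either a page count or a negative errno
	 */
	static int gup_fast_fold_result(int nr, int ret)
	{
		/*
		 * If the fast path pinned anything, report those pages even
		 * when the slow path failed: the caller owns the pins and
		 * must eventually release them.
		 */
		if (nr > 0)
			return ret < 0 ? nr : ret + nr;
		return ret;
	}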
@@ -257,7 +257,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
+		ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, write, 0, pages);
 
 		/* Have to be a bit careful with return values */
@@ -237,7 +237,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
+		ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, write, 0, pages);
 
 		/* Have to be a bit careful with return values */
@@ -422,7 +422,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(current, mm, start,
+		ret = get_user_pages_unlocked(start,
 					      (end - start) >> PAGE_SHIFT,
 					      write, 0, pages);
 
@@ -546,8 +546,8 @@ static int mpx_resolve_fault(long __user *addr, int write)
 	int nr_pages = 1;
 	int force = 0;
 
-	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
-				 nr_pages, write, force, NULL, NULL);
+	gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
+			force, NULL, NULL);
 	/*
 	 * get_user_pages() returns number of pages gotten.
 	 * 0 means we failed to fault in and get anything,
@@ -518,8 +518,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(current, current->mm, userptr, num_pages,
-				   write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
@@ -554,8 +554,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(current, current->mm, userptr, num_pages,
-				   write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
@@ -239,8 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
 	if (NULL == vsg->pages)
 		return -ENOMEM;
 	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm,
-			     (unsigned long)xfer->mem_addr,
+	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
 			     (vsg->direction == DMA_FROM_DEVICE),
 			     0, vsg->pages, NULL);
@@ -188,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	sg_list_start = umem->sg_head.sgl;
 
 	while (npages) {
-		ret = get_user_pages(current, current->mm, cur_base,
+		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
 				     1, !umem->writable, page_list, vma_list);
@@ -472,8 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
 
-	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-			     pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
 	if (ret < 0)
 		goto out;
 
@@ -66,8 +66,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 	}
 
 	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(current, current->mm,
-				     start_page + got * PAGE_SIZE,
+		ret = get_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got, 1, 1,
 				     p + got, NULL);
 		if (ret < 0)
@@ -144,7 +144,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;
 
 	while (npages) {
-		ret = get_user_pages(current, current->mm, cur_base,
+		ret = get_user_pages(cur_base,
 					min_t(unsigned long, npages,
 					PAGE_SIZE / sizeof(struct page *)),
 					1, !writable, page_list, NULL);
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
 	}
 
 	/* Get user pages for DMA Xfer */
-	err = get_user_pages_unlocked(current, current->mm,
-			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
+	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
+			1, dma->map);
 
 	if (user_dma.page_count != err) {
 		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
@@ -75,14 +75,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 	ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
 
 	/* Get user pages for DMA Xfer */
-	y_pages = get_user_pages_unlocked(current, current->mm,
-				y_dma.uaddr, y_dma.page_count, 0, 1,
-				&dma->map[0]);
+	y_pages = get_user_pages_unlocked(y_dma.uaddr,
+			y_dma.page_count, 0, 1, &dma->map[0]);
 	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
 	if (y_pages == y_dma.page_count) {
-		uv_pages = get_user_pages_unlocked(current, current->mm,
-					uv_dma.uaddr, uv_dma.page_count, 0, 1,
-					&dma->map[y_pages]);
+		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
+				uv_dma.page_count, 0, 1, &dma->map[y_pages]);
 	}
 
 	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
@@ -181,8 +181,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
-	err = get_user_pages(current, current->mm,
-			     data & PAGE_MASK, dma->nr_pages,
+	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
 			     rw == READ, 1, /* force */
 			     dma->pages, NULL);
 
@@ -1394,8 +1394,6 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
 		}
 
 		pinned_pages->nr_pages = get_user_pages(
-				current,
-				mm,
 				(u64)addr,
 				nr_pages,
 				!!(prot & SCIF_PROT_WRITE),
@@ -198,8 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages
-	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
@@ -4817,8 +4817,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
         /* Try to fault in all of the necessary pages */
         /* rw==READ means read from drive, write into memory area */
 	res = get_user_pages_unlocked(
-		current,
-		current->mm,
 		uaddr,
 		nr_pages,
 		rw == READ,
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
-				      nr_pages, WRITE, 0, pages);
+	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
+			0, pages);
 
 	if (ret < nr_pages) {
 		nr_pages = ret;
@@ -244,9 +244,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 
 	/* Get the physical addresses of the source buffer */
 	down_read(&current->mm->mmap_sem);
-	num_pinned = get_user_pages(current, current->mm,
-		param.local_vaddr - lb_offset, num_pages,
-		(param.source == -1) ? READ : WRITE,
+	num_pinned = get_user_pages(param.local_vaddr - lb_offset,
+		num_pages, (param.source == -1) ? READ : WRITE,
 		0, pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
@@ -58,7 +58,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
 		vec->got_ref = true;
 		vec->is_pfns = false;
-		ret = get_user_pages_locked(current, mm, start, nr_frames,
+		ret = get_user_pages_locked(start, nr_frames,
 			write, force, (struct page **)(vec->ptrs), &locked);
 		goto out;
 	}
mm/gup.c (6 changed lines)
@@ -936,8 +936,10 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(get_user_pages_remote);
 
 /*
- * This is the same as get_user_pages_remote() for the time
- * being.
+ * This is the same as get_user_pages_remote(), just with a
+ * less-flexible calling convention where we assume that the task
+ * and mm being operated on are the current task's.  We also
+ * obviously don't pass FOLL_REMOTE in here.
  */
 long get_user_pages6(unsigned long start, unsigned long nr_pages,
 		int write, int force, struct page **pages,
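For the minority of callers that genuinely operate on another process's
address space, the explicit form survives as get_user_pages_remote(),
whose tsk/mm parameters are visible in the hunk header above. A short
sketch of the resulting split (tsk, mm, addr and page are hypothetical
variables; the _remote argument order shown here follows the same
pattern as the old get_user_pages()):

	/* Another task's mm: name tsk and mm explicitly. */
	npages = get_user_pages_remote(tsk, mm, addr, 1, 1 /* write */,
				       0 /* force */, &page, NULL);

	/* current/current->mm: the short form introduced by this series. */
	npages = get_user_pages(addr, 1, 1 /* write */, 0 /* force */,
				&page, NULL);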
mm/ksm.c (2 changed lines)
@@ -352,7 +352,7 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
 /*
  * We use break_ksm to break COW on a ksm page: it's a stripped down
  *
- *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
+ *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
  *		put_page(page);
  *
  * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
@@ -844,12 +844,12 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 	}
 }
 
-static int lookup_node(struct mm_struct *mm, unsigned long addr)
+static int lookup_node(unsigned long addr)
 {
 	struct page *p;
 	int err;
 
-	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);
@@ -904,7 +904,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 	if (flags & MPOL_F_NODE) {
 		if (flags & MPOL_F_ADDR) {
-			err = lookup_node(mm, addr);
+			err = lookup_node(addr);
 			if (err < 0)
 				goto out;
 			*policy = err;
@@ -24,7 +24,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 		return ERR_PTR(-ENOMEM);
 
 	while (got < num_pages) {
-		rc = get_user_pages_unlocked(current, current->mm,
+		rc = get_user_pages_unlocked(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
 		    num_pages - got, write_page, 0, pages + got);
 		if (rc < 0)
@@ -1264,15 +1264,16 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int write, struct page **page)
+static int get_user_page_nowait(unsigned long start, int write,
+		struct page **page)
 {
 	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
 
 	if (write)
 		flags |= FOLL_WRITE;
 
-	return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+	return __get_user_pages(current, current->mm, start, 1, flags, page,
+			NULL, NULL);
 }
 
 static inline int check_user_page_hwpoison(unsigned long addr)
@@ -1334,8 +1335,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 
 	if (async) {
 		down_read(&current->mm->mmap_sem);
-		npages = get_user_page_nowait(current, current->mm,
-					      addr, write_fault, page);
+		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
 	} else
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,