mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
mm: use vmf->address instead of vmf->virtual_address
Every single user of vmf->virtual_address typed that entry to unsigned long before doing anything with it, so the type of virtual_address does not really provide us any additional safety. Just use the masked vmf->address, which already has the appropriate type.

Link: http://lkml.kernel.org/r/1479460644-25076-3-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
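To make the mechanical nature of the conversion clear, here is a minimal sketch of the pattern applied throughout the diff below. The handler name and the pfn calculation are invented for illustration and the error handling is simplified; only vm_insert_pfn(), struct vm_fault and its virtual_address/address fields come from the kernel code this commit touches.

/* Before: vmf->virtual_address is a void __user *, so every handler
 * casts it to unsigned long before using it. */
static int example_fault_old(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn = vma->vm_pgoff +
			((address - vma->vm_start) >> PAGE_SHIFT);

	/* Simplified: any vm_insert_pfn() failure is reported as SIGBUS. */
	return vm_insert_pfn(vma, address, pfn) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}

/* After: vmf->address is already an unsigned long, masked with PAGE_MASK
 * in __handle_mm_fault(), so the cast and the local copy go away. */
static int example_fault_new(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = vma->vm_pgoff +
			((vmf->address - vma->vm_start) >> PAGE_SHIFT);

	return vm_insert_pfn(vma, vmf->address, pfn) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}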
This commit is contained in:
parent 82b0f8c39a
commit 1a29d85eb0

27 changed files with 57 additions and 79 deletions
@@ -236,7 +236,6 @@ static int
 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
-	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
 	offset = vmf->pgoff << PAGE_SHIFT;

@@ -244,7 +243,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
-			address, offset);
+			vmf->address, offset);
 
 	if (spu_acquire(ctx))
 		return VM_FAULT_NOPAGE;

@@ -256,7 +255,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
 	}
-	vm_insert_pfn(vma, address, pfn);
+	vm_insert_pfn(vma, vmf->address, pfn);
 
 	spu_release(ctx);
 

@@ -355,8 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
-					(area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
 
@@ -109,7 +109,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		return VM_FAULT_SIGBUS;
 
 	if (sym_offset == image->sym_vvar_page) {
-		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+		ret = vm_insert_pfn(vma, vmf->address,
 				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =

@@ -117,7 +117,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
 			ret = vm_insert_pfn(
 				vma,
-				(unsigned long)vmf->virtual_address,
+				vmf->address,
 				__pa(pvti) >> PAGE_SHIFT);
 		}
 	}
@@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
 	unsigned long pa;
 	struct page *page;
 
-	dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
-					+ agp->aperture.bus_base;
+	dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
 
 	if (pa == (unsigned long)-EINVAL)
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * be because another thread has installed the pte first, so it
 	 * is no problem.
 	 */
-	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	vm_insert_pfn(vma, vmf->address, pfn);
 
 	return VM_FAULT_NOPAGE;
 }
@@ -328,7 +328,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
 static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 		struct vm_fault *vmf)
 {
-	unsigned long vaddr = (unsigned long) vmf->virtual_address;
 	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
 	int rc = VM_FAULT_SIGBUS;

@@ -353,7 +352,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vma, vaddr, pfn);
+	rc = vm_insert_mixed(vma, vmf->address, pfn);
 
 	if (rc == -ENOMEM)
 		return VM_FAULT_OOM;
@@ -17,12 +17,11 @@
 static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
-	unsigned long addr = (unsigned long)vmf->virtual_address;
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	int ret;
 
-	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
-	ret = vm_insert_pfn(vma, addr, pfn);
+	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 
 	switch (ret) {
 	case 0:
@@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * Using vm_pgoff as a selector forces us to use this unusual
 		 * addressing scheme.
 		 */
-		resource_size_t offset = (unsigned long)vmf->virtual_address -
-			vma->vm_start;
+		resource_size_t offset = vmf->address - vma->vm_start;
 		resource_size_t baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;

@@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!map)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	offset = vmf->address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = vmalloc_to_page((void *)i);
 	if (!page)

@@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!dma->pagelist)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	offset = vmf->address - vma->vm_start;
+					/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
 	page = virt_to_page((void *)dma->pagelist[page_nr]);
 

@@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!entry->pagelist)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	offset = vmf->address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
@@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	page = pages[pgoff];
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	ret = vm_insert_page(vma, vmf->address, page);
 
 out:
 	switch (ret) {
@@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	pgoff_t page_offset;
 	int ret;
 
-	page_offset = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
 		DRM_ERROR("invalid page offset\n");

@@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out:
 	switch (ret) {
@@ -125,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 				  psbfb->gtt->offset;
 
 	page_num = vma_pages(vma);
-	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
@@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Page relative to the VMA start - we must calculate this ourselves
 	   because vmf->pgoff is the fake GEM offset */
-	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
-				>> PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	/* CPU view of the page, don't go via the GART for CPU writes */
 	if (r->stolen)
 		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
 	else
 		pfn = page_to_pfn(r->pages[page_offset]);
-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 
 fail:
 	mutex_unlock(&dev_priv->mmap_mutex);
@@ -1796,8 +1796,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
-		PAGE_SHIFT;
+	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -398,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj,
 	pgoff_t pgoff;
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (omap_obj->pages) {
 		omap_gem_cpu_sync(obj, pgoff);

@@ -409,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj,
 		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
 	}
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */

@@ -427,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	struct page *pages[64];  /* XXX is this too much to have on stack? */
 	unsigned long pfn;
 	pgoff_t pgoff, base_pgoff;
-	void __user *vaddr;
+	unsigned long vaddr;
 	int i, ret, slots;
 
 	/*

@@ -447,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	/*
 	 * Actual address we start mapping at is rounded down to previous slot

@@ -459,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	/* figure out buffer width in slots */
 	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 
-	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 
 	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 

@@ -503,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj,
 
 	pfn = entry->paddr >> PAGE_SHIFT;
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
 	for (i = n; i > 0; i--) {
-		vm_insert_mixed(vma, (unsigned long)vaddr,
-				__pfn_to_pfn_t(pfn, PFN_DEV));
+		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 		pfn += priv->usergart[fmt].stride_pfn;
 		vaddr += PAGE_SIZE * m;
 	}
@@ -452,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!bo->pages)
 		return VM_FAULT_SIGBUS;
 
-	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 	page = bo->pages[offset];
 
-	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	err = vm_insert_page(vma, vmf->address, page);
 	switch (err) {
 	case -EAGAIN:
 	case 0:
@@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	int ret;
 	int i;
-	unsigned long address = (unsigned long)vmf->virtual_address;
+	unsigned long address = vmf->address;
 	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
@@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned int page_offset;
 	int ret = 0;
 
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (!obj->pages)
 		return VM_FAULT_SIGBUS;
 
 	page = obj->pages[page_offset];
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	ret = vm_insert_page(vma, vmf->address, page);
 	switch (ret) {
 	case -EAGAIN:
 	case 0:
@@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	struct page *page;
 
 	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
@@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 
 	dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
-		(unsigned long)vmf->virtual_address,
-		vma->vm_start, vma->vm_end);
+		vmf->address, vma->vm_start, vma->vm_end);
 
 	page = alloc_page(GFP_USER | __GFP_DMA32);
 	if (!page)
 		return VM_FAULT_OOM;
-	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+	clear_user_highpage(page, vmf->address);
 	vmf->page = page;
 
 	return 0;
@@ -117,13 +117,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct cxl_context *ctx = vma->vm_file->private_data;
-	unsigned long address = (unsigned long)vmf->virtual_address;
 	u64 area, offset;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
 
 	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
-			__func__, ctx->pe, address, offset);
+			__func__, ctx->pe, vmf->address, offset);
 
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;

@@ -155,7 +154,7 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+	vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 
 	mutex_unlock(&ctx->status_mutex);
 
@@ -932,7 +932,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned long paddr, vaddr;
 	unsigned long expires;
 
-	vaddr = (unsigned long)vmf->virtual_address;
+	vaddr = vmf->address;
 	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
 		vma, vaddr, GSEG_BASE(vaddr));
 	STAT(nopfn);
@@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
 	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 	mutex_unlock(&buffer->lock);
 	if (ret)
 		return VM_FAULT_ERROR;
@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
 		       vmf->page, vmf->page->mapping, vmf->page->index,
 		       (long)vmf->page->flags, page_count(vmf->page),
-		       page_private(vmf->page), vmf->virtual_address);
+		       page_private(vmf->page), (void *)vmf->address);
 		if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
 			lock_page(vmf->page);
 			cfio->ft_flags |= VM_FAULT_LOCKED;

@@ -1025,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 	}
 
 	if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
-		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
 		return -EFAULT;
 	}
 
 	if (cfio->ft_flags & VM_FAULT_OOM) {
-		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+		CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
 		return -ENOMEM;
 	}
 
@@ -602,7 +602,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
 	       vma, vma->vm_start, vma->vm_end,
-	       vmf->pgoff, vmf->virtual_address);
+	       vmf->pgoff, (void *)vmf->address);
 
 	return VM_FAULT_SIGBUS;
 }
fs/dax.c | 4
@@ -738,7 +738,7 @@ static int dax_insert_mapping(struct address_space *mapping,
 		struct block_device *bdev, sector_t sector, size_t size,
 		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	struct blk_dax_ctl dax = {
 		.sector = sector,
 		.size = size,

@@ -948,7 +948,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 {
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
 	sector_t sector;
 	struct iomap iomap = { 0 };
@@ -297,8 +297,6 @@ struct vm_fault {
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
 	unsigned long address;		/* Faulting virtual address */
-	void __user *virtual_address;	/* Faulting virtual address masked by
-					 * PAGE_MASK */
 	pmd_t *pmd;			/* Pointer to pmd entry matching
 					 * the 'address'
 					 */
@@ -2040,7 +2040,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	struct vm_fault vmf;
 	int ret;
 
-	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+	vmf.address = address & PAGE_MASK;
 	vmf.pgoff = page->index;
 	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 	vmf.gfp_mask = __get_fault_gfp_mask(vma);

@@ -2276,8 +2276,7 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte)
 		struct vm_fault vmf2 = {
 			.page = NULL,
 			.pgoff = linear_page_index(vma, vmf->address),
-			.virtual_address =
-				(void __user *)(vmf->address & PAGE_MASK),
+			.address = vmf->address,
 			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
 		};
 		int ret;

@@ -2852,7 +2851,7 @@ static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff,
 	struct vm_fault vmf2;
 	int ret;
 
-	vmf2.virtual_address = (void __user *)(vmf->address & PAGE_MASK);
+	vmf2.address = vmf->address;
 	vmf2.pgoff = pgoff;
 	vmf2.flags = vmf->flags;
 	vmf2.page = NULL;

@@ -3612,7 +3611,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 {
 	struct vm_fault vmf = {
 		.vma = vma,
-		.address = address,
+		.address = address & PAGE_MASK,
 		.flags = flags,
 	};
 	struct mm_struct *mm = vma->vm_mm;