forked from mirrors/linux
		
	mm: kill vma flag VM_RESERVED and mm->reserved_vm counter
A long time ago, in v2.4, VM_RESERVED kept swapout process off VMA, currently it lost original meaning but still has some effects: | effect | alternative flags -+------------------------+--------------------------------------------- 1| account as reserved_vm | VM_IO 2| skip in core dump | VM_IO, VM_DONTDUMP 3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP 4| do not mlock | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP This patch removes reserved_vm counter from mm_struct. Seems like nobody cares about it, it does not exported into userspace directly, it only reduces total_vm showed in proc. Thus VM_RESERVED can be replaced with VM_IO or pair VM_DONTEXPAND | VM_DONTDUMP. remap_pfn_range() and io_remap_pfn_range() set VM_IO|VM_DONTEXPAND|VM_DONTDUMP. remap_vmalloc_range() set VM_DONTEXPAND | VM_DONTDUMP. [akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup] Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Carsten Otte <cotte@de.ibm.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Eric Paris <eparis@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Morris <james.l.morris@oracle.com> Cc: Jason Baron <jbaron@redhat.com> Cc: Kentaro Takeda <takedakn@nttdata.co.jp> Cc: Matt Helsley <matthltc@us.ibm.com> Cc: Nick Piggin <npiggin@kernel.dk> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Robert Richter <robert.richter@amd.com> Cc: Suresh Siddha <suresh.b.siddha@intel.com> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: Venkatesh Pallipadi <venki@google.com> Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									0103bd16fb
								
							
						
					
					
						commit
						314e51b985
					
				
					 70 changed files with 77 additions and 105 deletions
				
			
		| 
						 | 
				
			
			@ -371,8 +371,8 @@ mlock_fixup() filters several classes of "special" VMAs:
 | 
			
		|||
   mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
 | 
			
		||||
   allocate the huge pages and populate the ptes.
 | 
			
		||||
 | 
			
		||||
3) VMAs with VM_DONTEXPAND or VM_RESERVED are generally userspace mappings of
 | 
			
		||||
   kernel pages, such as the VDSO page, relay channel pages, etc.  These pages
 | 
			
		||||
3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
 | 
			
		||||
   such as the VDSO page, relay channel pages, etc. These pages
 | 
			
		||||
   are inherently unevictable and are not managed on the LRU lists.
 | 
			
		||||
   mlock_fixup() treats these VMAs the same as hugetlbfs VMAs.  It calls
 | 
			
		||||
   make_pages_present() to populate the ptes.
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -26,7 +26,7 @@ static int hose_mmap_page_range(struct pci_controller *hose,
 | 
			
		|||
		base = sparse ? hose->sparse_io_base : hose->dense_io_base;
 | 
			
		||||
 | 
			
		||||
	vma->vm_pgoff += base >> PAGE_SHIFT;
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED);
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 | 
			
		||||
				  vma->vm_end - vma->vm_start,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2307,7 +2307,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 | 
			
		|||
	 */
 | 
			
		||||
	vma->vm_mm	     = mm;
 | 
			
		||||
	vma->vm_file	     = get_file(filp);
 | 
			
		||||
	vma->vm_flags	     = VM_READ| VM_MAYREAD |VM_RESERVED;
 | 
			
		||||
	vma->vm_flags	     = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
 | 
			
		||||
	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -138,7 +138,8 @@ ia64_init_addr_space (void)
 | 
			
		|||
			vma->vm_mm = current->mm;
 | 
			
		||||
			vma->vm_end = PAGE_SIZE;
 | 
			
		||||
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
 | 
			
		||||
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
 | 
			
		||||
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
 | 
			
		||||
					VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
			down_write(&current->mm->mmap_sem);
 | 
			
		||||
			if (insert_vm_struct(current->mm, vma)) {
 | 
			
		||||
				up_write(&current->mm->mmap_sem);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1183,7 +1183,7 @@ static const struct vm_operations_struct kvm_rma_vm_ops = {
 | 
			
		|||
 | 
			
		||||
static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		||||
{
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &kvm_rma_vm_ops;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -779,7 +779,7 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev,
 | 
			
		|||
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
 | 
			
		||||
					    enum pci_mmap_state mmap_state)
 | 
			
		||||
{
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED);
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -380,7 +380,7 @@ int vectors_user_mapping(void)
 | 
			
		|||
	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
 | 
			
		||||
				       VM_READ | VM_EXEC |
 | 
			
		||||
				       VM_MAYREAD | VM_MAYEXEC |
 | 
			
		||||
				       VM_RESERVED,
 | 
			
		||||
				       VM_DONTEXPAND | VM_DONTDUMP,
 | 
			
		||||
				       NULL);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2451,8 +2451,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 | 
			
		|||
 | 
			
		||||
	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 | 
			
		||||
 | 
			
		||||
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
 | 
			
		||||
				(VM_PFNMAP | VM_RESERVED | VM_IO)));
 | 
			
		||||
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 | 
			
		||||
 | 
			
		||||
	rmd.mfn = mfn;
 | 
			
		||||
	rmd.prot = prot;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -507,7 +507,7 @@ static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
 | 
			
		||||
	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
 | 
			
		||||
	/* Remap-pfn-range will mark the range VM_IO */
 | 
			
		||||
	if (remap_pfn_range(vma,
 | 
			
		||||
			    vma->vm_start,
 | 
			
		||||
			    __pa(soft->gscr_addr) >> PAGE_SHIFT,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -322,7 +322,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_ops = &mmap_mem_ops;
 | 
			
		||||
 | 
			
		||||
	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
 | 
			
		||||
	/* Remap-pfn-range will mark the range VM_IO */
 | 
			
		||||
	if (remap_pfn_range(vma,
 | 
			
		||||
			    vma->vm_start,
 | 
			
		||||
			    vma->vm_pgoff,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -286,7 +286,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
 | 
			
		|||
	atomic_set(&vdata->refcnt, 1);
 | 
			
		||||
	vma->vm_private_data = vdata;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
 | 
			
		||||
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
	vma->vm_ops = &mspec_vm_ops;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -706,7 +706,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
		goto out_unlock;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 | 
			
		||||
	vma->vm_private_data = map->handle;
 | 
			
		||||
	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -514,8 +514,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_ops = &drm_vm_dma_ops;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	drm_vm_open_locked(dev, vma);
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			@ -643,21 +642,16 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
	case _DRM_SHM:
 | 
			
		||||
		vma->vm_ops = &drm_vm_shm_ops;
 | 
			
		||||
		vma->vm_private_data = (void *)map;
 | 
			
		||||
		/* Don't let this area swap.  Change when
 | 
			
		||||
		   DRM_KERNEL advisory is supported. */
 | 
			
		||||
		vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
		break;
 | 
			
		||||
	case _DRM_SCATTER_GATHER:
 | 
			
		||||
		vma->vm_ops = &drm_vm_sg_ops;
 | 
			
		||||
		vma->vm_private_data = (void *)map;
 | 
			
		||||
		vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 | 
			
		||||
		break;
 | 
			
		||||
	default:
 | 
			
		||||
		return -EINVAL;	/* This should never happen. */
 | 
			
		||||
	}
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	drm_vm_open_locked(dev, vma);
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -500,7 +500,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
 | 
			
		|||
 | 
			
		||||
	DRM_DEBUG_KMS("%s\n", __FILE__);
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED);
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	update_vm_cache_attr(exynos_gem_obj, vma);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -178,8 +178,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
	 */
 | 
			
		||||
	vma->vm_ops = &psbfb_vm_ops;
 | 
			
		||||
	vma->vm_private_data = (void *)psbfb;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO |
 | 
			
		||||
					VM_MIXEDMAP | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -285,7 +285,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 | 
			
		|||
	 */
 | 
			
		||||
 | 
			
		||||
	vma->vm_private_data = bo;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	return 0;
 | 
			
		||||
out_unref:
 | 
			
		||||
	ttm_bo_unref(&bo);
 | 
			
		||||
| 
						 | 
				
			
			@ -300,7 +300,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 | 
			
		|||
 | 
			
		||||
	vma->vm_ops = &ttm_bo_vm_ops;
 | 
			
		||||
	vma->vm_private_data = ttm_bo_reference(bo);
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL(ttm_fbdev_mmap);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -243,7 +243,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
			size = 0;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -117,7 +117,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
 | 
			
		|||
	physical = galpas->user.fw_handle;
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
 | 
			
		||||
	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
 | 
			
		||||
			   vma->vm_page_prot);
 | 
			
		||||
	if (unlikely(ret)) {
 | 
			
		||||
| 
						 | 
				
			
			@ -139,7 +139,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
 | 
			
		|||
	u64 start, ofs;
 | 
			
		||||
	struct page *page;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	start = vma->vm_start;
 | 
			
		||||
	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
 | 
			
		||||
		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1225,7 +1225,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 | 
			
		|||
 | 
			
		||||
	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 | 
			
		||||
	vma->vm_ops = &ipath_file_vm_ops;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	ret = 1;
 | 
			
		||||
 | 
			
		||||
bail:
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -971,7 +971,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 | 
			
		|||
 | 
			
		||||
	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 | 
			
		||||
	vma->vm_ops = &qib_file_vm_ops;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	ret = 1;
 | 
			
		||||
 | 
			
		||||
bail:
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1647,7 +1647,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_ops = &meye_vm_ops;
 | 
			
		||||
	vma->vm_flags &= ~VM_IO;	/* not I/O memory */
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_private_data = (void *) (offset / gbufsize);
 | 
			
		||||
	meye_vm_open(vma);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -911,7 +911,7 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	q->bufs[i]->baddr = vma->vm_start;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 | 
			
		||||
	vma->vm_ops = &omap_vout_vm_ops;
 | 
			
		||||
	vma->vm_private_data = (void *) vout;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3950,7 +3950,7 @@ static int vino_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	fb->map_count = 1;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_flags &= ~VM_IO;
 | 
			
		||||
	vma->vm_private_data = fb;
 | 
			
		||||
	vma->vm_file = file;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2126,8 +2126,7 @@ static int sn9c102_mmap(struct file* filp, struct vm_area_struct *vma)
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_IO;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	pos = cam->frame[i].bufmem;
 | 
			
		||||
	while (size > 0) { /* size is page-aligned */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1108,8 +1108,7 @@ static int usbvision_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	/* VM_IO is eventually going to replace PageReserved altogether */
 | 
			
		||||
	vma->vm_flags |= VM_IO;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	pos = usbvision->frame[i].data;
 | 
			
		||||
	while (size > 0) {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -582,7 +582,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 | 
			
		|||
	map->count    = 1;
 | 
			
		||||
	map->q        = q;
 | 
			
		||||
	vma->vm_ops   = &videobuf_vm_ops;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
 | 
			
		||||
	vma->vm_private_data = map;
 | 
			
		||||
	dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -270,7 +270,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_ops          = &videobuf_vm_ops;
 | 
			
		||||
	vma->vm_flags       |= VM_DONTEXPAND | VM_RESERVED;
 | 
			
		||||
	vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_private_data = map;
 | 
			
		||||
 | 
			
		||||
	dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -163,7 +163,7 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
 | 
			
		|||
		return ret;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags		|= VM_DONTEXPAND | VM_RESERVED;
 | 
			
		||||
	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_private_data	= priv;
 | 
			
		||||
	vma->vm_ops		= vm_ops;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1243,8 +1243,6 @@ static int data_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	/* IO memory (stop cacheing) */
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
 | 
			
		||||
	return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -108,9 +108,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |=
 | 
			
		||||
	    (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
 | 
			
		||||
			VM_RESERVED);
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
 | 
			
		||||
			 VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_page_prot = PAGE_SHARED;
 | 
			
		||||
	vma->vm_ops = &gru_vm_ops;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1182,7 +1182,7 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
			return -EINVAL;
 | 
			
		||||
		if (set_vm_offset(vma, off) < 0)
 | 
			
		||||
			return -EINVAL;
 | 
			
		||||
		vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
#ifdef pgprot_noncached
 | 
			
		||||
		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1257,7 +1257,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	sfp->mmap_called = 1;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_private_data = sfp;
 | 
			
		||||
	vma->vm_ops = &sg_mmap_vm_ops;
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -160,7 +160,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 | 
			
		|||
		goto out_unlock;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 | 
			
		||||
	vma->vm_private_data = obj;
 | 
			
		||||
	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -261,7 +261,7 @@ static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
{
 | 
			
		||||
	u32 status;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO;
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
 | 
			
		||||
	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %ulx "
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -653,8 +653,6 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
 | 
			
		|||
	if (mi < 0)
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
 | 
			
		||||
	return remap_pfn_range(vma,
 | 
			
		||||
| 
						 | 
				
			
			@ -666,7 +664,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
static int uio_mmap_logical(struct vm_area_struct *vma)
 | 
			
		||||
{
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &uio_vm_ops;
 | 
			
		||||
	uio_vma_open(vma);
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1247,7 +1247,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
{
 | 
			
		||||
	/* don't do anything here: "fault" will set up page table entries */
 | 
			
		||||
	vma->vm_ops = &mon_bin_vm_ops;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_private_data = filp->private_data;
 | 
			
		||||
	mon_bin_vma_open(vma);
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -400,7 +400,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
#ifndef MMU
 | 
			
		||||
	/* this is uClinux (no MMU) specific code */
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_start = videomemory;
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1942,8 +1942,7 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
	off = vma->vm_pgoff << PAGE_SHIFT;
 | 
			
		||||
	size = vma->vm_end - vma->vm_start;
 | 
			
		||||
 | 
			
		||||
	/* To stop the swapper from even considering these pages. */
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED);
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
 | 
			
		||||
	if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
 | 
			
		||||
	    ((off == info->fix.smem_len) && (size == PAGE_SIZE)))
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -653,9 +653,8 @@ int unifb_mmap(struct fb_info *info,
 | 
			
		|||
				vma->vm_page_prot))
 | 
			
		||||
		return -EAGAIN;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static struct fb_ops unifb_ops = {
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -166,7 +166,7 @@ static const struct address_space_operations fb_deferred_io_aops = {
 | 
			
		|||
static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		||||
{
 | 
			
		||||
	vma->vm_ops = &fb_deferred_io_vm_ops;
 | 
			
		||||
	vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND );
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	if (!(info->flags & FBINFO_VIRTFB))
 | 
			
		||||
		vma->vm_flags |= VM_IO;
 | 
			
		||||
	vma->vm_private_data = info;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1410,8 +1410,7 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
	off += start;
 | 
			
		||||
	vma->vm_pgoff = off >> PAGE_SHIFT;
 | 
			
		||||
	/* This is an IO map - tell maydump to skip this VMA */
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
 | 
			
		||||
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 | 
			
		||||
	fb_pgprotect(file, vma, off);
 | 
			
		||||
	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1024,7 +1024,7 @@ static int gbefb_mmap(struct fb_info *info,
 | 
			
		|||
	pgprot_val(vma->vm_page_prot) =
 | 
			
		||||
		pgprot_fb(pgprot_val(vma->vm_page_prot));
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
 | 
			
		||||
	/* look for the starting tile */
 | 
			
		||||
	tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1128,7 +1128,7 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 | 
			
		|||
	DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
 | 
			
		||||
 | 
			
		||||
	vma->vm_pgoff = off >> PAGE_SHIFT;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED;
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 | 
			
		||||
	vma->vm_ops = &mmap_user_ops;
 | 
			
		||||
	vma->vm_private_data = rg;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -57,8 +57,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 | 
			
		|||
 | 
			
		||||
	off = vma->vm_pgoff << PAGE_SHIFT;
 | 
			
		||||
 | 
			
		||||
	/* To stop the swapper from even considering these pages */
 | 
			
		||||
	vma->vm_flags |= (VM_IO | VM_RESERVED);
 | 
			
		||||
	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 | 
			
		||||
 | 
			
		||||
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -803,7 +803,6 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
			size = 0;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -345,7 +345,6 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
			size = 0;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1018,7 +1018,6 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 | 
			
		|||
	offset += vinfo->vram_start;
 | 
			
		||||
	pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 | 
			
		||||
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO;
 | 
			
		||||
	if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
 | 
			
		||||
						size, vma->vm_page_prot))
 | 
			
		||||
		return -EAGAIN;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -439,7 +439,6 @@ static int vfb_mmap(struct fb_info *info,
 | 
			
		|||
			size = 0;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -535,7 +535,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_private_data = vm_priv;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	vma->vm_ops = &gntalloc_vmops;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -720,7 +720,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 | 
			
		|||
 | 
			
		||||
	vma->vm_ops = &gntdev_vmops;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	if (use_ptemod)
 | 
			
		||||
		vma->vm_flags |= VM_DONTCOPY;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -455,7 +455,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
{
 | 
			
		||||
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
 | 
			
		||||
	 * how to recreate these mappings */
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
 | 
			
		||||
			 VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &privcmd_vm_ops;
 | 
			
		||||
	vma->vm_private_data = NULL;
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1135,7 +1135,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	/* Do not dump I/O mapped devices or special mappings */
 | 
			
		||||
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
 | 
			
		||||
	if (vma->vm_flags & VM_IO)
 | 
			
		||||
		return 0;
 | 
			
		||||
 | 
			
		||||
	/* By default, dump shared memory if mapped from an anonymous file. */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1205,7 +1205,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
 | 
			
		|||
	int dump_ok;
 | 
			
		||||
 | 
			
		||||
	/* Do not dump I/O mapped devices or special mappings */
 | 
			
		||||
	if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
 | 
			
		||||
	if (vma->vm_flags & VM_IO) {
 | 
			
		||||
		kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
 | 
			
		||||
		return 0;
 | 
			
		||||
	}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
 | 
			
		||||
	 * and ia64).
 | 
			
		||||
	 */
 | 
			
		||||
	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &hugetlb_vm_ops;
 | 
			
		||||
 | 
			
		||||
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -54,7 +54,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 | 
			
		|||
		"VmPTE:\t%8lu kB\n"
 | 
			
		||||
		"VmSwap:\t%8lu kB\n",
 | 
			
		||||
		hiwater_vm << (PAGE_SHIFT-10),
 | 
			
		||||
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
 | 
			
		||||
		total_vm << (PAGE_SHIFT-10),
 | 
			
		||||
		mm->locked_vm << (PAGE_SHIFT-10),
 | 
			
		||||
		mm->pinned_vm << (PAGE_SHIFT-10),
 | 
			
		||||
		hiwater_rss << (PAGE_SHIFT-10),
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -239,7 +239,7 @@ extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
 | 
			
		|||
/* Check if a vma is migratable */
 | 
			
		||||
static inline int vma_migratable(struct vm_area_struct *vma)
 | 
			
		||||
{
 | 
			
		||||
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
 | 
			
		||||
	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
 | 
			
		||||
		return 0;
 | 
			
		||||
	/*
 | 
			
		||||
	 * Migration allocates pages in the highest zone. If we cannot
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -96,7 +96,6 @@ extern unsigned int kobjsize(const void *objp);
 | 
			
		|||
 | 
			
		||||
#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 | 
			
		||||
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 | 
			
		||||
#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
 | 
			
		||||
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 | 
			
		||||
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 | 
			
		||||
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 | 
			
		||||
| 
						 | 
				
			
			@ -148,7 +147,7 @@ extern unsigned int kobjsize(const void *objp);
 | 
			
		|||
 * Special vmas that are non-mergable, non-mlock()able.
 | 
			
		||||
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 | 
			
		||||
 */
 | 
			
		||||
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 | 
			
		||||
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * mapping from the currently active vm_flags protection bits (the
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -349,7 +349,6 @@ struct mm_struct {
 | 
			
		|||
	unsigned long shared_vm;	/* Shared pages (files) */
 | 
			
		||||
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
 | 
			
		||||
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
 | 
			
		||||
	unsigned long reserved_vm;	/* VM_RESERVED|VM_IO pages */
 | 
			
		||||
	unsigned long def_flags;
 | 
			
		||||
	unsigned long nr_ptes;		/* Page table pages */
 | 
			
		||||
	unsigned long start_code, end_code, start_data, end_data;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3671,7 +3671,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 | 
			
		|||
		atomic_inc(&event->mmap_count);
 | 
			
		||||
	mutex_unlock(&event->mmap_mutex);
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &perf_mmap_vmops;
 | 
			
		||||
 | 
			
		||||
	return ret;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										3
									
								
								mm/ksm.c
									
									
									
									
									
								
							
							
						
						
									
										3
									
								
								mm/ksm.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -1469,8 +1469,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 | 
			
		|||
		 */
 | 
			
		||||
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
 | 
			
		||||
				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
 | 
			
		||||
				 VM_RESERVED  | VM_HUGETLB |
 | 
			
		||||
				 VM_NONLINEAR | VM_MIXEDMAP))
 | 
			
		||||
				 VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
 | 
			
		||||
			return 0;		/* just ignore the advice */
 | 
			
		||||
 | 
			
		||||
#ifdef VM_SAO
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
							
								
								
									
										11
									
								
								mm/memory.c
									
									
									
									
									
								
							
							
						
						
									
										11
									
								
								mm/memory.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -2297,14 +2297,13 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 | 
			
		|||
	 * rest of the world about it:
 | 
			
		||||
	 *   VM_IO tells people not to look at these pages
 | 
			
		||||
	 *	(accesses can have side effects).
 | 
			
		||||
	 *   VM_RESERVED is specified all over the place, because
 | 
			
		||||
	 *	in 2.4 it kept swapout's vma scan off this vma; but
 | 
			
		||||
	 *	in 2.6 the LRU scan won't even find its pages, so this
 | 
			
		||||
	 *	flag means no more than count its pages in reserved_vm,
 | 
			
		||||
	 * 	and omit it from core dump, even when VM_IO turned off.
 | 
			
		||||
	 *   VM_PFNMAP tells the core MM that the base pages are just
 | 
			
		||||
	 *	raw PFN mappings, and do not have a "struct page" associated
 | 
			
		||||
	 *	with them.
 | 
			
		||||
	 *   VM_DONTEXPAND
 | 
			
		||||
	 *      Disable vma merging and expanding with mremap().
 | 
			
		||||
	 *   VM_DONTDUMP
 | 
			
		||||
	 *      Omit vma from core dump, even when VM_IO turned off.
 | 
			
		||||
	 *
 | 
			
		||||
	 * There's a horrible special case to handle copy-on-write
 | 
			
		||||
	 * behaviour that some programs depend on. We mark the "original"
 | 
			
		||||
| 
						 | 
				
			
			@ -2321,7 +2320,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 | 
			
		|||
	if (err)
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	BUG_ON(addr >= end);
 | 
			
		||||
	pfn -= addr >> PAGE_SHIFT;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -227,7 +227,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 | 
			
		|||
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 | 
			
		||||
		goto no_mlock;
 | 
			
		||||
 | 
			
		||||
	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 | 
			
		||||
	if (!((vma->vm_flags & VM_DONTEXPAND) ||
 | 
			
		||||
			is_vm_hugetlb_page(vma) ||
 | 
			
		||||
			vma == get_gate_vma(current->mm))) {
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -945,8 +945,6 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 | 
			
		|||
			mm->exec_vm += pages;
 | 
			
		||||
	} else if (flags & stack_flags)
 | 
			
		||||
		mm->stack_vm += pages;
 | 
			
		||||
	if (flags & (VM_RESERVED|VM_IO))
 | 
			
		||||
		mm->reserved_vm += pages;
 | 
			
		||||
}
 | 
			
		||||
#endif /* CONFIG_PROC_FS */
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1811,7 +1811,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 | 
			
		|||
	if (addr != (pfn << PAGE_SHIFT))
 | 
			
		||||
		return -EINVAL;
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 | 
			
		||||
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
EXPORT_SYMBOL(remap_pfn_range);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2163,8 +2163,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 | 
			
		|||
		usize -= PAGE_SIZE;
 | 
			
		||||
	} while (usize > 0);
 | 
			
		||||
 | 
			
		||||
	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -485,7 +485,7 @@ static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
 | 
			
		|||
			return -EACCES;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vma->vm_flags |= VM_RESERVED;
 | 
			
		||||
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	vma->vm_ops = &sel_mmap_policy_ops;
 | 
			
		||||
 | 
			
		||||
	return 0;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -3039,7 +3039,7 @@ static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
	area->vm_ops = &snd_pcm_vm_ops_status;
 | 
			
		||||
	area->vm_private_data = substream;
 | 
			
		||||
	area->vm_flags |= VM_RESERVED;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -3076,7 +3076,7 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
 | 
			
		|||
		return -EINVAL;
 | 
			
		||||
	area->vm_ops = &snd_pcm_vm_ops_control;
 | 
			
		||||
	area->vm_private_data = substream;
 | 
			
		||||
	area->vm_flags |= VM_RESERVED;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
#else /* ! coherent mmap */
 | 
			
		||||
| 
						 | 
				
			
			@ -3170,7 +3170,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
 | 
			
		|||
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
 | 
			
		||||
			     struct vm_area_struct *area)
 | 
			
		||||
{
 | 
			
		||||
	area->vm_flags |= VM_RESERVED;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
 | 
			
		||||
	if (!substream->ops->page &&
 | 
			
		||||
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -262,7 +262,7 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
 | 
			
		|||
	}
 | 
			
		||||
 | 
			
		||||
	area->vm_ops = &usb_stream_hwdep_vm_ops;
 | 
			
		||||
	area->vm_flags |= VM_RESERVED;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	area->vm_private_data = us122l;
 | 
			
		||||
	atomic_inc(&us122l->mmap_count);
 | 
			
		||||
out:
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -82,7 +82,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
 | 
			
		|||
		us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
 | 
			
		||||
	}
 | 
			
		||||
	area->vm_ops = &us428ctls_vm_ops;
 | 
			
		||||
	area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	area->vm_private_data = hw->private_data;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -723,7 +723,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st
 | 
			
		|||
		return -ENODEV;
 | 
			
		||||
	}
 | 
			
		||||
	area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
 | 
			
		||||
	area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
 | 
			
		||||
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 | 
			
		||||
	area->vm_private_data = hw->private_data;
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue