mirror of https://github.com/torvalds/linux.git
	mincore: apply page table walker on do_mincore()
This patch makes do_mincore() use walk_page_vma(), which reduces many
lines of code by using common page table walk code.

[daeseok.youn@gmail.com: remove unneeded variable 'err']

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Daeseok Youn <daeseok.youn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
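For orientation (context, not part of the patch): mincore(2) fills a byte
vector with one entry per page, bit 0 indicating residency; that vector is
the vec which do_mincore() populates in the diff below. A minimal userspace
sketch, with error handling omitted for brevity:

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		size_t len = 8 * page;
		unsigned char *vec = malloc(len / page);

		/* Anonymous mapping: pages become resident on first touch. */
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		buf[0] = 1;	/* fault in the first page only */

		if (mincore(buf, len, vec) == 0)
			for (size_t i = 0; i < len / page; i++)
				printf("page %zu: %s\n", i,
				       (vec[i] & 1) ? "resident" : "not resident");

		munmap(buf, len);
		free(vec);
		return 0;
	}

Typically only page 0 reports resident here; the untouched pages do not.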
parent 7d5b3bfaa2
commit 1e25a271c8

2 changed files with 62 additions and 128 deletions
mm/huge_memory.c (20 lines removed)

@@ -1412,26 +1412,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
-int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, unsigned long end,
-		unsigned char *vec)
-{
-	spinlock_t *ptl;
-	int ret = 0;
-
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		/*
-		 * All logical pages in the range are present
-		 * if backed by a huge page.
-		 */
-		spin_unlock(ptl);
-		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-		ret = 1;
-	}
-
-	return ret;
-}
-
 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		  unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
mm/mincore.c (170 lines changed)
@@ -19,38 +19,25 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				unsigned char *vec)
+static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+			unsigned long end, struct mm_walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	struct hstate *h;
+	unsigned char present;
+	unsigned char *vec = walk->private;
 
-	h = hstate_vma(vma);
-	while (1) {
-		unsigned char present;
-		pte_t *ptep;
-		/*
-		 * Huge pages are always in RAM for now, but
-		 * theoretically it needs to be checked.
-		 */
-		ptep = huge_pte_offset(current->mm,
-				       addr & huge_page_mask(h));
-		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
-		while (1) {
-			*vec = present;
-			vec++;
-			addr += PAGE_SIZE;
-			if (addr == end)
-				return;
-			/* check hugepage border */
-			if (!(addr & ~huge_page_mask(h)))
-				break;
-		}
-	}
+	/*
+	 * Hugepages under user process are always in RAM and never
+	 * swapped out, but theoretically it needs to be checked.
+	 */
+	present = pte && !huge_pte_none(huge_ptep_get(pte));
+	for (; addr != end; vec++, addr += PAGE_SIZE)
+		*vec = present;
+	walk->private = vec;
 #else
 	BUG();
 #endif
+	return 0;
 }
 
 /*
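A note on the new mincore_hugetlb() above: the generic walker invokes the
hugetlb_entry hook once per huge page, while the user-visible vector stays at
small-page granularity, so the loop replicates present into every PAGE_SIZE
slot the huge page covers (on x86, for example, one 2 MB hugepage fills 512
vec entries). Advancing walk->private keeps the output cursor in step for the
next callback.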
@@ -94,9 +81,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	return present;
 }
 
-static void mincore_unmapped_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				unsigned char *vec)
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+				struct vm_area_struct *vma, unsigned char *vec)
 {
 	unsigned long nr = (end - addr) >> PAGE_SHIFT;
 	int i;
@@ -111,23 +97,44 @@ static void mincore_unmapped_range(struct vm_area_struct *vma,
 		for (i = 0; i < nr; i++)
 			vec[i] = 0;
 	}
+	return nr;
 }
 
-static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+				   struct mm_walk *walk)
 {
-	unsigned long next;
-	spinlock_t *ptl;
-	pte_t *ptep;
+	walk->private += __mincore_unmapped_range(addr, end,
+						  walk->vma, walk->private);
+	return 0;
+}
 
-	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	do {
+static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			struct mm_walk *walk)
+{
+	spinlock_t *ptl;
+	struct vm_area_struct *vma = walk->vma;
+	pte_t *ptep;
+	unsigned char *vec = walk->private;
+	int nr = (end - addr) >> PAGE_SHIFT;
+
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+		memset(vec, 1, nr);
+		spin_unlock(ptl);
+		goto out;
+	}
+
+	if (pmd_trans_unstable(pmd)) {
+		__mincore_unmapped_range(addr, end, vma, vec);
+		goto out;
+	}
+
+	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		pte_t pte = *ptep;
 
-		next = addr + PAGE_SIZE;
 		if (pte_none(pte))
-			mincore_unmapped_range(vma, addr, next, vec);
+			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
+						 vma, vec);
 		else if (pte_present(pte))
 			*vec = 1;
 		else { /* pte is a swap entry */
@@ -150,69 +157,12 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			}
 		}
 		vec++;
-	} while (ptep++, addr = next, addr != end);
+	}
 	pte_unmap_unlock(ptep - 1, ptl);
-}
-
-static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pmd_t *pmd;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_trans_huge(*pmd)) {
-			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
-				vec += (next - addr) >> PAGE_SHIFT;
-				continue;
-			}
-			/* fall through */
-		}
-		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pte_range(vma, pmd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pmd++, addr = next, addr != end);
-}
-
-static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pud_t *pud;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pmd_range(vma, pud, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pud++, addr = next, addr != end);
-}
-
-static void mincore_page_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pgd_t *pgd;
-
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pud_range(vma, pgd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pgd++, addr = next, addr != end);
+out:
+	walk->private += nr;
+	cond_resched();
+	return 0;
 }
 
 /*
@@ -224,18 +174,22 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 {
 	struct vm_area_struct *vma;
 	unsigned long end;
+	int err;
+	struct mm_walk mincore_walk = {
+		.pmd_entry = mincore_pte_range,
+		.pte_hole = mincore_unmapped_range,
+		.hugetlb_entry = mincore_hugetlb,
+		.private = vec,
+	};
 
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
+	mincore_walk.mm = vma->vm_mm;
 	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
-	if (is_vm_hugetlb_page(vma))
-		mincore_hugetlb_page_range(vma, addr, end, vec);
-	else
-		mincore_page_range(vma, addr, end, vec);
-
+	err = walk_page_range(addr, end, &mincore_walk);
+	if (err < 0)
+		return err;
 	return (end - addr) >> PAGE_SHIFT;
 }
 
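Taken together: the THP shortcut deleted from mm/huge_memory.c has not
disappeared, it reappears inside mincore_pte_range() as the
pmd_trans_huge_lock()/memset() fast path, and the open-coded pgd/pud/pmd
descent is now owned by the generic walker. As a standalone illustration of
the pattern do_mincore() now relies on, here is a hedged sketch against the
same-era walker API; the function names and the page-counting task are
hypothetical, not from this patch:

	#include <linux/mm.h>
	#include <linux/sched.h>

	/*
	 * Hypothetical example: count resident small pages in [start, end).
	 * walk->private carries the counter; the walker supplies the
	 * pgd/pud/pmd descent. Caller must hold down_read(&mm->mmap_sem).
	 */
	static int count_present_pte(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;
		spinlock_t *ptl;
		pte_t *ptep;

		/*
		 * Skip huge, unstable or bad pmds; a real user would handle
		 * THP here, as mincore_pte_range() does above.
		 */
		if (pmd_trans_unstable(pmd))
			return 0;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		for (; addr != end; ptep++, addr += PAGE_SIZE)
			if (pte_present(*ptep))
				(*count)++;
		pte_unmap_unlock(ptep - 1, ptl);
		cond_resched();
		return 0;	/* nonzero would abort the walk */
	}

	static unsigned long count_resident(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.pmd_entry = count_present_pte,
			.mm = mm,
			.private = &count,
		};

		walk_page_range(start, end, &walk);
		return count;
	}

Each conversion like this one deletes a per-caller copy of the pgd/pud/pmd
loops and leaves only the leaf logic behind, which is where the diffstat of
128 deletions against 62 additions comes from.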