forked from mirrors/linux
		
	mm/pagewalk: remove pgd_entry() and pud_entry()
Currently no user of the page table walker sets ->pgd_entry() or ->pud_entry(), so checking their existence in each loop is just wasting CPU cycles. So let's remove them to reduce overhead. Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Cyrill Gorcunov <gorcunov@openvz.org> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Pavel Emelyanov <xemul@parallels.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									05fbf357d9
								
							
						
					
					
						commit
						0b1fbfe500
					
				
					 2 changed files with 2 additions and 13 deletions
				
			
		|  | @ -1164,8 +1164,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, | ||||||
| 
 | 
 | ||||||
| /**
 | /**
 | ||||||
|  * mm_walk - callbacks for walk_page_range |  * mm_walk - callbacks for walk_page_range | ||||||
|  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry |  | ||||||
|  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry |  | ||||||
|  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry |  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry | ||||||
|  *	       this handler is required to be able to handle |  *	       this handler is required to be able to handle | ||||||
|  *	       pmd_trans_huge() pmds.  They may simply choose to |  *	       pmd_trans_huge() pmds.  They may simply choose to | ||||||
|  | @ -1179,10 +1177,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, | ||||||
|  * (see walk_page_range for more details) |  * (see walk_page_range for more details) | ||||||
|  */ |  */ | ||||||
| struct mm_walk { | struct mm_walk { | ||||||
| 	int (*pgd_entry)(pgd_t *pgd, unsigned long addr, |  | ||||||
| 			 unsigned long next, struct mm_walk *walk); |  | ||||||
| 	int (*pud_entry)(pud_t *pud, unsigned long addr, |  | ||||||
| 	                 unsigned long next, struct mm_walk *walk); |  | ||||||
| 	int (*pmd_entry)(pmd_t *pmd, unsigned long addr, | 	int (*pmd_entry)(pmd_t *pmd, unsigned long addr, | ||||||
| 			 unsigned long next, struct mm_walk *walk); | 			 unsigned long next, struct mm_walk *walk); | ||||||
| 	int (*pte_entry)(pte_t *pte, unsigned long addr, | 	int (*pte_entry)(pte_t *pte, unsigned long addr, | ||||||
|  |  | ||||||
|  | @ -86,9 +86,7 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, | ||||||
| 				break; | 				break; | ||||||
| 			continue; | 			continue; | ||||||
| 		} | 		} | ||||||
| 		if (walk->pud_entry) | 		if (walk->pmd_entry || walk->pte_entry) | ||||||
| 			err = walk->pud_entry(pud, addr, next, walk); |  | ||||||
| 		if (!err && (walk->pmd_entry || walk->pte_entry)) |  | ||||||
| 			err = walk_pmd_range(pud, addr, next, walk); | 			err = walk_pmd_range(pud, addr, next, walk); | ||||||
| 		if (err) | 		if (err) | ||||||
| 			break; | 			break; | ||||||
|  | @ -237,10 +235,7 @@ int walk_page_range(unsigned long addr, unsigned long end, | ||||||
| 			pgd++; | 			pgd++; | ||||||
| 			continue; | 			continue; | ||||||
| 		} | 		} | ||||||
| 		if (walk->pgd_entry) | 		if (walk->pmd_entry || walk->pte_entry) | ||||||
| 			err = walk->pgd_entry(pgd, addr, next, walk); |  | ||||||
| 		if (!err && |  | ||||||
| 		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry)) |  | ||||||
| 			err = walk_pud_range(pgd, addr, next, walk); | 			err = walk_pud_range(pgd, addr, next, walk); | ||||||
| 		if (err) | 		if (err) | ||||||
| 			break; | 			break; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Naoya Horiguchi
						Naoya Horiguchi