perf,mm: Handle non-page-table-aligned hugetlbfs

A limited number of architectures support hugetlbfs sizes that do not
align with the page tables (ARM64, Power, Sparc64). Add support for this
to the generic perf_get_page_size() implementation, and also allow an
architecture to override this implementation. The latter is only needed
when an architecture uses non-page-table-aligned huge pages in its
kernel map.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 995f088efe
commit 51b646b2d9

2 changed files with 37 additions and 6 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1590,4 +1590,8 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
 					     struct perf_event_mmap_page *userpg,
 					     u64 now);
 
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */
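The extern __weak declaration above is the override hook: the generic walker
in kernel/events/core.c (below) is emitted as a weak symbol, so an
architecture that maps its kernel with non-page-table-aligned huge pages can
supply a strong definition that the linker prefers. A minimal sketch of such
an override, assuming a hypothetical arch/foo (the path and the function body
are illustrative only, not part of this commit):

	/* arch/foo/kernel/perf_event.c -- hypothetical override sketch. */
	#include <linux/mm.h>
	#include <linux/perf_event.h>

	u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
	{
		/*
		 * A real override would walk the architecture's own mapping
		 * format here; returning PAGE_SIZE is only a placeholder.
		 */
		return PAGE_SIZE;
	}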
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7011,10 +7011,18 @@ static u64 perf_virt_to_phys(u64 virt)
 #ifdef CONFIG_MMU
 
 /*
- * Return the MMU page size of a given virtual address
+ * Return the MMU page size of a given virtual address.
+ *
+ * This generic implementation handles page-table aligned huge pages, as well
+ * as non-page-table aligned hugetlbfs compound pages.
+ *
+ * If an architecture supports and uses non-page-table aligned pages in their
+ * kernel mapping it will need to provide it's own implementation of this
+ * function.
  */
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+__weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 {
+	struct page *page;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -7036,15 +7044,27 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 	if (!pud_present(*pud))
 		return 0;
 
-	if (pud_leaf(*pud))
+	if (pud_leaf(*pud)) {
+#ifdef pud_page
+		page = pud_page(*pud);
+		if (PageHuge(page))
+			return page_size(compound_head(page));
+#endif
 		return 1ULL << PUD_SHIFT;
+	}
 
 	pmd = pmd_offset(pud, addr);
 	if (!pmd_present(*pmd))
 		return 0;
 
-	if (pmd_leaf(*pmd))
+	if (pmd_leaf(*pmd)) {
+#ifdef pmd_page
+		page = pmd_page(*pmd);
+		if (PageHuge(page))
+			return page_size(compound_head(page));
+#endif
 		return 1ULL << PMD_SHIFT;
+	}
 
 	pte = pte_offset_map(pmd, addr);
 	if (!pte_present(*pte)) {
@@ -7052,13 +7072,20 @@ static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 		return 0;
 	}
 
+	page = pte_page(*pte);
+	if (PageHuge(page)) {
+		u64 size = page_size(compound_head(page));
+		pte_unmap(pte);
+		return size;
+	}
+
 	pte_unmap(pte);
 	return PAGE_SIZE;
 }
 
 #else
 
-static u64 __perf_get_page_size(struct mm_struct *mm, unsigned long addr)
+static u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
@@ -7089,7 +7116,7 @@ static u64 perf_get_page_size(unsigned long addr)
 		mm = &init_mm;
 	}
 
-	size = __perf_get_page_size(mm, addr);
+	size = arch_perf_get_page_size(mm, addr);
 
 	local_irq_restore(flags);
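For reference, the hugetlbfs special case above works because a
non-page-table-aligned huge page still maps at some page-table level
(possibly just pte level), while its struct page belongs to a compound page
whose head records the real allocation size. The
page_size(compound_head(page)) call therefore reduces to roughly the
following (a simplified paraphrase of the include/linux/mm.h helpers, not the
verbatim kernel code):

	/* Simplified paraphrase: size of the (possibly compound) page. */
	static inline unsigned long example_page_size(struct page *page)
	{
		/* compound_order() is 0 for a base page, giving PAGE_SIZE. */
		return PAGE_SIZE << compound_order(compound_head(page));
	}

So, for example, a 64KiB arm64 contiguous-PTE hugetlbfs page reports 64KiB
here, rather than the 4KiB the pte-level walk alone would suggest.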