mm: introduce page_shift()

Replace PAGE_SHIFT + compound_order(page) with the new page_shift()
function.  Minor improvements in readability.

[akpm@linux-foundation.org: fix build in tce_page_is_contained()]
  Link: http://lkml.kernel.org/r/201907241853.yNQTrJWd%25lkp@intel.com
Link: http://lkml.kernel.org/r/20190721104612.19120-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
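To illustrate the conversion pattern applied below (a minimal sketch; the local variable names are hypothetical, and compound_head() is only needed where the caller may hold a tail page):

	/* Before: open-coded shift of a (possibly compound) page */
	unsigned int old_shift = PAGE_SHIFT + compound_order(compound_head(page));

	/* After: the new page_shift() helper added to include/linux/mm.h */
	unsigned int new_shift = page_shift(compound_head(page));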
commit 94ad933810 (parent a50b854e07)

3 changed files with 12 additions and 9 deletions
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 		 * Allow to use larger than 64k IOMMU pages. Only do that
 		 * if we are backed by hugetlb.
 		 */
-		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
-			struct page *head = compound_head(page);
-
-			pageshift = compound_order(head) + PAGE_SHIFT;
-		}
+		if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+			pageshift = page_shift(compound_head(page));
 		mem->pageshift = min(mem->pageshift, pageshift);
 		/*
 		 * We don't need struct page reference any more, switch
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -176,13 +176,13 @@ static long tce_iommu_register_pages(struct tce_container *container,
 }
 
 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
-		unsigned int page_shift)
+		unsigned int it_page_shift)
 {
 	struct page *page;
 	unsigned long size = 0;
 
-	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
-		return size == (1UL << page_shift);
+	if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+		return size == (1UL << it_page_shift);
 
 	page = pfn_to_page(hpa >> PAGE_SHIFT);
 	/*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
 	 * a page we just found. Otherwise the hardware can get access to
 	 * a bigger memory chunk that it should.
 	 */
-	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+	return page_shift(compound_head(page)) >= it_page_shift;
 }
 
 static inline bool tce_groups_attached(struct tce_container *container)
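Note the parameter rename from page_shift to it_page_shift in tce_page_is_contained(): once the function page_shift() is in scope via mm.h, a parameter of the same name would shadow it and break the new call; this is the build fix credited to akpm in the changelog above.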
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -811,6 +811,12 @@ static inline unsigned long page_size(struct page *page)
 	return PAGE_SIZE << compound_order(page);
 }
 
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+	return PAGE_SHIFT + compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
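Since PAGE_SIZE is 1UL << PAGE_SHIFT, the new helper is the log2 counterpart of page_size() defined just above it. A sketch of the implied invariant (illustrative only, not part of this commit):

	/* Holds for base and compound pages alike, by the two definitions above */
	VM_BUG_ON_PAGE(page_size(page) != 1UL << page_shift(page), page);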