forked from mirrors/linux
		
	mm: support THPs in zero_user_segments
We can only kmap() one subpage of a THP at a time, so loop over all
relevant subpages, skipping the ones which don't need to be zeroed. This
is too large to inline when THPs are enabled and we actually need highmem,
so put it in highmem.c.

[willy@infradead.org: start1 was allowed to be less than start2]

Link: https://lkml.kernel.org/r/20201124041507.28996-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Naresh Kamboju <naresh.kamboju@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									5e5dda81a0
								
							
						
					
					
						commit
						0060ef3b4e
					
				
					 2 changed files with 67 additions and 4 deletions
				
			
		|  | @ -284,13 +284,22 @@ static inline void clear_highpage(struct page *page) | |||
| 	kunmap_atomic(kaddr); | ||||
| } | ||||
| 
 | ||||
| /*
 | ||||
|  * If we pass in a base or tail page, we can zero up to PAGE_SIZE. | ||||
|  * If we pass in a head page, we can zero up to the size of the compound page. | ||||
|  */ | ||||
| #if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | ||||
| void zero_user_segments(struct page *page, unsigned start1, unsigned end1, | ||||
| 		unsigned start2, unsigned end2); | ||||
| #else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ | ||||
| static inline void zero_user_segments(struct page *page, | ||||
| 	unsigned start1, unsigned end1, | ||||
| 	unsigned start2, unsigned end2) | ||||
| 		unsigned start1, unsigned end1, | ||||
| 		unsigned start2, unsigned end2) | ||||
| { | ||||
| 	void *kaddr = kmap_atomic(page); | ||||
| 	unsigned int i; | ||||
| 
 | ||||
| 	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); | ||||
| 	BUG_ON(end1 > page_size(page) || end2 > page_size(page)); | ||||
| 
 | ||||
| 	if (end1 > start1) | ||||
| 		memset(kaddr + start1, 0, end1 - start1); | ||||
|  | @ -299,8 +308,10 @@ static inline void zero_user_segments(struct page *page, | |||
| 		memset(kaddr + start2, 0, end2 - start2); | ||||
| 
 | ||||
| 	kunmap_atomic(kaddr); | ||||
| 	flush_dcache_page(page); | ||||
| 	for (i = 0; i < compound_nr(page); i++) | ||||
| 		flush_dcache_page(page + i); | ||||
| } | ||||
| #endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */ | ||||
| 
 | ||||
| static inline void zero_user_segment(struct page *page, | ||||
| 	unsigned start, unsigned end) | ||||
|  |  | |||
							
								
								
									
										52
									
								
								mm/highmem.c
									
									
									
									
									
								
							
							
						
						
									
										52
									
								
								mm/highmem.c
									
									
									
									
									
								
							|  | @ -369,6 +369,58 @@ void kunmap_high(struct page *page) | |||
| } | ||||
| 
 | ||||
| EXPORT_SYMBOL(kunmap_high); | ||||
| 
 | ||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * zero_user_segments - zero two byte ranges [start1, end1) and
 * [start2, end2) within a (possibly compound) @page.
 *
 * Only one subpage of a THP can be kmapped at a time, so walk the
 * subpages, mapping each one only if at least one range touches it,
 * and flushing the dcache of each subpage that was written.
 */
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	/*
	 * Callers (e.g. the block layer) may pass an empty range with
	 * start >= end.  Normalise such ranges to 0 here: otherwise, when
	 * the start lies beyond the first subpage, the "slide down by
	 * PAGE_SIZE" arithmetic below underflows the unsigned end value
	 * and the final sanity BUG_ON fires spuriously.
	 */
	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		/* Map this subpage only if a range begins within it. */
		if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
			kaddr = kmap_atomic(page + i);

		if (start1 >= PAGE_SIZE) {
			/* Range 1 starts past this subpage; slide it down. */
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			/* Zero the portion of range 1 inside this subpage. */
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1)
				memset(kaddr + start1, 0, this_end - start1);
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			/* Range 2 starts past this subpage; slide it down. */
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			/* Zero the portion of range 2 inside this subpage. */
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2)
				memset(kaddr + start2, 0, this_end - start2);
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		/* Both ranges fully consumed; no need to visit more subpages. */
		if (!end1 && !end2)
			break;
	}

	/* Every byte of both ranges must have been accounted for. */
	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
| #endif	/* CONFIG_HIGHMEM */ | ||||
| 
 | ||||
| #if defined(HASHED_PAGE_VIRTUAL) | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Matthew Wilcox (Oracle)
						Matthew Wilcox (Oracle)