mm: avoid zeroing user movable page twice with init_on_alloc=1
Commit 6471384af2 ("mm: security: introduce init_on_alloc=1 and
init_on_free=1 boot options") forces allocated pages to be zeroed in
post_alloc_hook() when init_on_alloc=1.
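
Both triggers funnel through the same allocator-side check, which is why
handing the zeroing to the allocator cannot zero twice.  As a sketch,
paraphrased from mainline's want_init_on_alloc() rather than quoted
verbatim:

	static inline bool want_init_on_alloc(gfp_t flags)
	{
		if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
					&init_on_alloc))
			return true;
		return flags & __GFP_ZERO;
	}

post_alloc_hook() zeroes the page exactly once when this returns true,
whether the trigger is init_on_alloc=1 or __GFP_ZERO.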
For order-0 folios, if the arch does not define
vma_alloc_zeroed_movable_folio(), the default implementation again zeros
the page returned from the buddy allocator, so the page is zeroed twice.
Fix it by passing __GFP_ZERO instead to avoid double page zeroing.  At the
moment, s390, arm64, x86, alpha, and m68k are not impacted since they
define their own vma_alloc_zeroed_movable_folio().
For >0 order folios (mTHP and PMD THP), folio_zero_user() is called to
zero the folio again.  Fix it by calling folio_zero_user() only when
init_on_alloc has not already zeroed the folio.  All arches are impacted.
Add alloc_zeroed() helper to encapsulate the init_on_alloc check.
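
For reference, the zero-on-allocation behavior optimized here comes from
the documented boot option or its Kconfig default:

	init_on_alloc=1				(kernel command line)
	CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y	(build-time default)

alloc_zeroed() reads that same static branch, so callers can tell whether
the allocator has already zeroed the folio.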
[ziy@nvidia.com: comment fixes, per David]
  Link: https://lkml.kernel.org/r/97DB52E1-C594-49B5-9736-89AC302FAB01@nvidia.com
Link: https://lkml.kernel.org/r/20241011150304.709590-1-ziy@nvidia.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
			
			
parent 773ee2cda5
commit 5708d96da2

4 changed files with 23 additions and 9 deletions
include/linux/highmem.h
@@ -224,13 +224,7 @@ static inline
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 				   unsigned long vaddr)
 {
-	struct folio *folio;
-
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
-	if (folio)
-		clear_user_highpage(&folio->page, vaddr);
-
-	return folio;
+	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr);
 }
 #endif
 
mm/huge_memory.c
@@ -1162,6 +1162,12 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
 	}
 	folio_throttle_swaprate(folio, gfp);
 
-	folio_zero_user(folio, addr);
+	/*
+	 * When a folio is not zeroed during allocation (__GFP_ZERO not used),
+	 * folio_zero_user() is used to make sure that the page corresponding
+	 * to the faulting address will be hot in the cache after zeroing.
+	 */
+	if (!alloc_zeroed())
+		folio_zero_user(folio, addr);
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
mm/internal.h
@@ -1276,6 +1276,12 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr,
 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
 	       pmd_t *pmd, bool write);
 
+static inline bool alloc_zeroed(void)
+{
+	return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+				   &init_on_alloc);
+}
+
 enum {
 	/* mark page accessed */
 	FOLL_TOUCH = 1 << 16,
mm/memory.c
@@ -4719,6 +4719,14 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 				goto next;
 			}
 			folio_throttle_swaprate(folio, gfp);
-			folio_zero_user(folio, vmf->address);
+			/*
+			 * When a folio is not zeroed during allocation
+			 * (__GFP_ZERO not used), folio_zero_user() is used
+			 * to make sure that the page corresponding to the
+			 * faulting address will be hot in the cache after
+			 * zeroing.
+			 */
+			if (!alloc_zeroed())
+				folio_zero_user(folio, vmf->address);
 			return folio;
 		}
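
The cache-hot wording in the new comments reflects how folio_zero_user()
orders its work.  A minimal sketch of the idea, using a hypothetical
helper rather than the mainline implementation (which additionally
staggers the zeroing order around the target subpage):

	static void zero_folio_fault_page_last(struct folio *folio,
					       unsigned long addr_hint)
	{
		unsigned long base = addr_hint & ~(folio_size(folio) - 1);
		unsigned long target = (addr_hint - base) >> PAGE_SHIFT;
		unsigned long i, nr = folio_nr_pages(folio);

		/* Zero every other subpage first... */
		for (i = 0; i < nr; i++) {
			if (i == target)
				continue;
			clear_user_highpage(folio_page(folio, i),
					    base + i * PAGE_SIZE);
		}
		/* ...and the faulting subpage last, so its cachelines are
		 * still hot when the fault returns to userspace. */
		clear_user_highpage(folio_page(folio, target), addr_hint);
	}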