	mm: introduce mm_forbids_zeropage function
Add a new function stub to allow architectures to disable, for an
mm_struct, the backing of non-present, anonymous pages with
read-only empty zero pages.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
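For illustration, a minimal sketch of how an architecture might provide the override from its <asm/pgtable.h>, as the comment added below suggests; the context.use_skey flag tested here is a hypothetical placeholder, not something introduced by this commit:

/* Hypothetical arch override in <asm/pgtable.h>: define the macro so it
 * is visible before common code falls back to the generic stub. An
 * architecture would test whatever per-mm state makes the shared empty
 * zero page unusable; the use_skey flag below is illustrative only.
 */
#define mm_forbids_zeropage(mm)	((mm)->context.use_skey)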
parent a13cff318c
commit 593befa6ab

3 changed files with 13 additions and 2 deletions
include/linux/mm.h
@@ -56,6 +56,17 @@ extern int sysctl_legacy_va_layout;
 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 #endif
 
+/*
+ * To prevent common memory management code establishing
+ * a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X)	(0)
+#endif
+
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
mm/huge_memory.c
@@ -805,7 +805,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
-	if (!(flags & FAULT_FLAG_WRITE) &&
+	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
 			transparent_hugepage_use_zero_page()) {
 		spinlock_t *ptl;
 		pgtable_t pgtable;
mm/memory.c
@@ -2640,7 +2640,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_SIGBUS;
 
 	/* Use the zero-page for reads */
-	if (!(flags & FAULT_FLAG_WRITE)) {
+	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
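To make the generic fallback concrete, a small standalone sketch (plain userspace C, not kernel code) showing that the stub evaluates to a compile-time constant, so the added !mm_forbids_zeropage(mm) term is free on architectures that do not override the macro:

#include <stdio.h>

/* Generic fallback stub, as added to include/linux/mm.h above. */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

int main(void)
{
	/* Expands to (0): !mm_forbids_zeropage(mm) folds to true at compile
	 * time, so both zero-page fast paths behave exactly as before. */
	printf("forbids zeropage: %d\n", mm_forbids_zeropage(NULL));
	return 0;
}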