mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	MIPS: make userspace mapping young by default
MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss + 2 TLB Invalid), but the second TLB Invalid exception is just triggered by __update_tlb from do_page_fault writing tlb without _PAGE_VALID set. With this patch, user space mapping prot is made young by default (with both _PAGE_VALID and _PAGE_YOUNG set), and it only takes 1 TLB Miss + 1 TLB Invalid exception. Remove pte_sw_mkyoung without polluting MM code and make page fault delay of MIPS on par with other architectures. Link: https://lkml.kernel.org/r/20210204013942.8398-1-huangpei@loongson.cn Signed-off-by: Huang Pei <huangpei@loongson.cn> Reviewed-by: Nicholas Piggin <npiggin@gmail.com> Acked-by: <huangpei@loongson.cn> Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: <ambrosehua@gmail.com> Cc: Bibo Mao <maobibo@loongson.cn> Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> Cc: Paul Burton <paulburton@kernel.org> Cc: Li Xuefeng <lixuefeng@loongson.cn> Cc: Yang Tiezhu <yangtiezhu@loongson.cn> Cc: Gao Juxin <gaojuxin@loongson.cn> Cc: Fuxin Zhang <zhangfx@lemote.com> Cc: Huacai Chen <chenhc@lemote.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									dd23e8098f
								
							
						
					
					
						commit
						f685a533a7
					
				
					 3 changed files with 16 additions and 26 deletions
				
			
		| 
						 | 
					@ -157,29 +157,31 @@ unsigned long _page_cachable_default;
 | 
				
			||||||
EXPORT_SYMBOL(_page_cachable_default);
 | 
					EXPORT_SYMBOL(_page_cachable_default);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define PM(p)	__pgprot(_page_cachable_default | (p))
 | 
					#define PM(p)	__pgprot(_page_cachable_default | (p))
 | 
				
			||||||
 | 
					#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static inline void setup_protection_map(void)
 | 
					static inline void setup_protection_map(void)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
						protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
				
			||||||
	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
						protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
				
			||||||
	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
						protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
				
			||||||
	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
						protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
				
			||||||
	protection_map[4]  = PM(_PAGE_PRESENT);
 | 
						protection_map[4]  = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[5]  = PM(_PAGE_PRESENT);
 | 
						protection_map[5]  = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[6]  = PM(_PAGE_PRESENT);
 | 
						protection_map[6]  = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[7]  = PM(_PAGE_PRESENT);
 | 
						protection_map[7]  = PVA(_PAGE_PRESENT);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
						protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
 | 
				
			||||||
	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
						protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
 | 
				
			||||||
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 | 
						protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 | 
				
			||||||
				_PAGE_NO_READ);
 | 
									_PAGE_NO_READ);
 | 
				
			||||||
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
 | 
						protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
 | 
				
			||||||
	protection_map[12] = PM(_PAGE_PRESENT);
 | 
						protection_map[12] = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[13] = PM(_PAGE_PRESENT);
 | 
						protection_map[13] = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 | 
						protection_map[14] = PVA(_PAGE_PRESENT);
 | 
				
			||||||
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 | 
						protection_map[15] = PVA(_PAGE_PRESENT);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#undef _PVA
 | 
				
			||||||
#undef PM
 | 
					#undef PM
 | 
				
			||||||
 | 
					
 | 
				
			||||||
void cpu_cache_init(void)
 | 
					void cpu_cache_init(void)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 | 
				
			||||||
 * To be differentiate with macro pte_mkyoung, this macro is used on platforms
 | 
					 * To be differentiate with macro pte_mkyoung, this macro is used on platforms
 | 
				
			||||||
 * where software maintains page access bit.
 | 
					 * where software maintains page access bit.
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
#ifndef pte_sw_mkyoung
 | 
					 | 
				
			||||||
static inline pte_t pte_sw_mkyoung(pte_t pte)
 | 
					 | 
				
			||||||
{
 | 
					 | 
				
			||||||
	return pte;
 | 
					 | 
				
			||||||
}
 | 
					 | 
				
			||||||
#define pte_sw_mkyoung	pte_sw_mkyoung
 | 
					 | 
				
			||||||
#endif
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#ifndef pte_savedwrite
 | 
					#ifndef pte_savedwrite
 | 
				
			||||||
#define pte_savedwrite pte_write
 | 
					#define pte_savedwrite pte_write
 | 
				
			||||||
#endif
 | 
					#endif
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 | 
							flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 | 
				
			||||||
		entry = mk_pte(new_page, vma->vm_page_prot);
 | 
							entry = mk_pte(new_page, vma->vm_page_prot);
 | 
				
			||||||
		entry = pte_sw_mkyoung(entry);
 | 
					 | 
				
			||||||
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 | 
							entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
| 
						 | 
					@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 | 
				
			||||||
	__SetPageUptodate(page);
 | 
						__SetPageUptodate(page);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	entry = mk_pte(page, vma->vm_page_prot);
 | 
						entry = mk_pte(page, vma->vm_page_prot);
 | 
				
			||||||
	entry = pte_sw_mkyoung(entry);
 | 
					 | 
				
			||||||
	if (vma->vm_flags & VM_WRITE)
 | 
						if (vma->vm_flags & VM_WRITE)
 | 
				
			||||||
		entry = pte_mkwrite(pte_mkdirty(entry));
 | 
							entry = pte_mkwrite(pte_mkdirty(entry));
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (prefault && arch_wants_old_prefaulted_pte())
 | 
						if (prefault && arch_wants_old_prefaulted_pte())
 | 
				
			||||||
		entry = pte_mkold(entry);
 | 
							entry = pte_mkold(entry);
 | 
				
			||||||
	else
 | 
					 | 
				
			||||||
		entry = pte_sw_mkyoung(entry);
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (write)
 | 
						if (write)
 | 
				
			||||||
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 | 
							entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue