	mm/thp: carry over dirty bit when thp splits on pmd
Carry over the dirty bit from pmd to pte when a huge pmd splits. It
shouldn't be a correctness issue, since when pmd_dirty() we'll have the
page marked dirty anyway; however, carrying the dirty bit over helps the
next initial writes to the split ptes on some archs like x86.

Link: https://lkml.kernel.org/r/20220811161331.37055-5-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Huang Ying <ying.huang@intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d206b5d2e
commit 0ccf7f168e
1 changed file with 7 additions and 2 deletions: mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2037,7 +2037,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
 	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
-	bool anon_exclusive = false;
+	bool anon_exclusive = false, dirty = false;
 	unsigned long addr;
 	int i;
 
@@ -2126,8 +2126,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
 	} else {
 		page = pmd_page(old_pmd);
-		if (pmd_dirty(old_pmd))
+		if (pmd_dirty(old_pmd)) {
+			dirty = true;
 			SetPageDirty(page);
+		}
 		write = pmd_write(old_pmd);
 		young = pmd_young(old_pmd);
 		soft_dirty = pmd_soft_dirty(old_pmd);
@@ -2195,6 +2197,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pte_wrprotect(entry);
 			if (!young)
 				entry = pte_mkold(entry);
+			/* NOTE: this may set soft-dirty too on some archs */
+			if (dirty)
+				entry = pte_mkdirty(entry);
 			if (soft_dirty)
 				entry = pte_mksoft_dirty(entry);
 			if (uffd_wp)
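The NOTE added in the last hunk is there because pte_mkdirty() can set more
than the hardware dirty bit on some architectures. On x86, for example, the
helper also sets the soft-dirty bit; a minimal sketch of roughly what it
looks like there (paraphrased from arch/x86/include/asm/pgtable.h, for
illustration only):

/*
 * Sketch of the x86 pte_mkdirty(): it sets the software soft-dirty bit
 * alongside the hardware dirty bit, which is why the split path above
 * notes that a carried-over dirty bit may imply soft-dirty.
 */
static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

This is harmless in the split path: soft-dirty tracking tolerates false
positives, and a pmd that really was soft-dirty has pte_mksoft_dirty()
applied to its ptes immediately afterwards anyway.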