Mirror of https://github.com/torvalds/linux.git (synced 2025-11-03 18:20:25 +02:00)

	hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry
There's a race between fork() and hugepage migration, as a result we try to
"dereference" a swap entry as a normal pte, causing kernel panic. The cause
of the problem is that copy_hugetlb_page_range() can't handle "swap entry"
family (migration entry and hwpoisoned entry) so let's fix it.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: <stable@vger.kernel.org> [2.6.37+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
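The per-PTE decision the patch introduces is easy to summarize before reading the diff below. The following is an illustrative user-space model only, not kernel code: the names fake_pte_kind and copy_action are invented for this example, and the real logic (using huge_pte_none(), is_hugetlb_entry_migration(), is_hugetlb_entry_hwpoisoned() and pte_page()) is in the mm/hugetlb.c hunks further down.

/*
 * Illustrative user-space model only -- not kernel code.  It mirrors the
 * shape of the per-PTE decision the patched copy_hugetlb_page_range()
 * makes: skip "none" entries, copy migration/hwpoison swap entries as
 * entries (never dereference them as pages), and take the old page-copy
 * path only for present PTEs.
 */
#include <stdio.h>

enum fake_pte_kind { PTE_NONE, PTE_PRESENT, PTE_MIGRATION, PTE_HWPOISON };

static const char *copy_action(enum fake_pte_kind kind, int cow)
{
        switch (kind) {
        case PTE_NONE:
                return "skip";                          /* nothing to copy */
        case PTE_MIGRATION:
                return cow ? "copy entry, downgrade write migration entry to read"
                           : "copy entry as-is";        /* never pte_page() */
        case PTE_HWPOISON:
                return "copy entry as-is";              /* never pte_page() */
        case PTE_PRESENT:
        default:
                return "wrprotect for COW, get_page + dup rmap";
        }
}

int main(void)
{
        static const char *names[] = { "none", "present", "migration", "hwpoison" };
        enum fake_pte_kind kinds[] = { PTE_NONE, PTE_PRESENT, PTE_MIGRATION, PTE_HWPOISON };

        for (int i = 0; i < 4; i++)
                printf("%-9s -> %s\n", names[i], copy_action(kinds[i], /*cow=*/1));
        return 0;
}

The point of the fix is visible in the migration/hwpoison branches: a non-present swap-style entry is copied as an entry (for COW mappings a writable migration entry is first downgraded to read), so pte_page() and page_dup_rmap() are never reached for something that is not a real page table entry.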
This commit is contained in:

parent 13ace4d0d9
commit 4a705fef98

1 changed file with 43 additions and 28 deletions
 mm/hugetlb.c | 71 +++++++++++++++++++++++++++++++++++++++++++----------------------------
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 		update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_migration_entry(swp))
+		return 1;
+	else
+		return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+		return 1;
+	else
+		return 0;
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			    struct vm_area_struct *vma)
@@ -2559,10 +2584,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
 		src_ptl = huge_pte_lockptr(h, src, src_pte);
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-		if (!huge_pte_none(huge_ptep_get(src_pte))) {
+		entry = huge_ptep_get(src_pte);
+		if (huge_pte_none(entry)) { /* skip none entry */
+			;
+		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
+				    is_hugetlb_entry_hwpoisoned(entry))) {
+			swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+			if (is_write_migration_entry(swp_entry) && cow) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&swp_entry);
+				entry = swp_entry_to_pte(swp_entry);
+				set_huge_pte_at(src, addr, src_pte, entry);
+			}
+			set_huge_pte_at(dst, addr, dst_pte, entry);
+		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
-			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
@@ -2578,32 +2619,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)