	mm: migration: add migrate_entry_wait_huge()
When we have a page fault for an address which is backed by a hugepage
under migration, the kernel can't wait correctly and busy-loops on the
hugepage fault until the migration finishes.  As a result, users who try
to kick off hugepage migration (via soft offlining, for example)
occasionally experience long delays or soft lockups.

This is because pte_offset_map_lock() can't get a correct migration entry
or a correct page table lock for a hugepage.  This patch introduces
migration_entry_wait_huge() to solve this.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: <stable@vger.kernel.org> [2.6.35+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27749f2ff0
commit 30dad30922

3 changed files with 22 additions and 6 deletions:

 include/linux/swapops.h |  3 +++
 mm/hugetlb.c            |  2 +-
 mm/migrate.c            | 23 ++++++++++++++++++-----
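For context on "kick hugepage migration (via soft offlining)": soft offlining can be driven from userspace with madvise(MADV_SOFT_OFFLINE). Below is a minimal reproducer-style sketch, not part of the patch; it assumes root, CONFIG_MEMORY_FAILURE, 2 MiB hugepages, and at least one reserved hugepage (vm.nr_hugepages >= 1). A concurrent fault on the mapping while the offline/migration is in flight is the window the changelog describes.

```c
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* since 2.6.33; may be missing from old headers */
#endif

#define HUGEPAGE_SZ (2UL << 20)	/* assumes 2 MiB hugepages */

int main(void)
{
	char *p = mmap(NULL, HUGEPAGE_SZ, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x5a, HUGEPAGE_SZ);	/* fault the hugepage in */

	/* Ask the kernel to migrate the backing hugepage away.  A fault
	 * on p during this window is what used to busy-loop. */
	if (madvise(p, HUGEPAGE_SZ, MADV_SOFT_OFFLINE))
		perror("madvise(MADV_SOFT_OFFLINE)");

	/* Touching the page again goes through the migration-wait path. */
	printf("first byte after offline: 0x%x\n", p[0]);
	return 0;
}
```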
				
			
include/linux/swapops.h

@@ -137,6 +137,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
 
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
+extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)

@@ -148,6 +149,8 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					 unsigned long address) { }
+static inline void migration_entry_wait_huge(struct mm_struct *mm,
+					pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;
mm/hugetlb.c

@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
-			migration_entry_wait(mm, (pmd_t *)ptep, address);
+			migration_entry_wait_huge(mm, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
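Why the old call site was broken: hugetlb_fault() cast a hugetlb ptep to pmd_t *, and migration_entry_wait() handed that to pte_offset_map_lock(), which derives both the pte and the page-table lock from what it believes is a pmd. Roughly, that macro looked like this at the time (paraphrased from include/linux/mm.h for illustration; the real expansions of pte_lockptr() and pte_offset_map() vary with CONFIG_SPLIT_PTLOCK_CPUS and CONFIG_HIGHPTE):

```c
/* Paraphrase of pte_offset_map_lock() circa v3.9 (illustrative only): */
#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})
```

With a huge pte standing in for a real pmd, pte_lockptr() and pte_offset_map() dereference the migration entry itself, so the waiter can take an unrelated lock and read an unrelated "pte" that fails the is_swap_pte() check; the handler returns, the fault is retried immediately, and the task busy-loops until migration completes. The new helper avoids this by taking the entry pointer directly and using mm->page_table_lock, which is what guards hugetlb page tables at this point in the kernel's history.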
							
								
								
									
mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+				spinlock_t *ptl)
 {
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
+	pte_t pte;
 	swp_entry_t entry;
 	struct page *page;
 
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	spin_lock(ptl);
 	pte = *ptep;
 	if (!is_swap_pte(pte))
 		goto out;
@@ -236,6 +235,20 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+				unsigned long address)
+{
+	spinlock_t *ptl = pte_lockptr(mm, pmd);
+	pte_t *ptep = pte_offset_map(pmd, address);
+	__migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl = &(mm)->page_table_lock;
+	__migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
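Taken together, the refactor leaves both fault paths funneling into the shared helper; a sketch of the resulting call graph (my annotation, not kernel source):

```c
/*
 * do_swap_page()                        hugetlb_fault()
 *      |                                     |
 *      v                                     v
 * migration_entry_wait(mm, pmd, addr)   migration_entry_wait_huge(mm, ptep)
 *      |  ptl  = pte_lockptr(mm, pmd)       |  ptl = &mm->page_table_lock
 *      |  ptep = pte_offset_map(pmd, addr)  |  (the lock that actually
 *      |                                    |   guards hugetlb ptes here)
 *      +-----------------+------------------+
 *                        v
 *        __migration_entry_wait(mm, ptep, ptl)
 *          - takes ptl and re-checks that *ptep is still a
 *            migration entry (migration may have finished)
 *          - pins the page under migration, drops the lock,
 *            and sleeps in wait_on_page_locked()
 *          - the fault is retried once migration completes
 */
```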