commit 24669e5847
parent 972dc4de13

hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages

Use a mmu_gather instead of a temporary linked list for accumulating pages
when we unmap a hugepage range.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
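For context, the patch below swaps the open-coded page_list batching for the kernel's mmu_gather machinery: pages are accumulated into a bounded batch (tlb_gather_mmu / __tlb_remove_page), flushed in bulk when the batch fills (tlb_flush_mmu), and released when the gather finishes (tlb_finish_mmu). The following is a minimal standalone userspace sketch of that lifecycle; the struct and function names are hypothetical analogues, not the kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_MAX 8		/* kernel batches hold roughly a page of pointers */

struct page;			/* opaque stand-in for the kernel's struct page */

struct gather {
	struct page *pages[BATCH_MAX];
	size_t nr;
};

/* tlb_gather_mmu() analogue: start with an empty batch. */
static void gather_init(struct gather *g)
{
	g->nr = 0;
}

/* __tlb_remove_page() analogue: returns 0 once the batch is full. */
static int gather_remove_page(struct gather *g, struct page *page)
{
	g->pages[g->nr++] = page;
	return g->nr < BATCH_MAX;
}

/* tlb_flush_mmu() analogue: invalidate TLBs (elided), then release the batch. */
static void gather_flush(struct gather *g)
{
	for (size_t i = 0; i < g->nr; i++)
		printf("put_page(%p)\n", (void *)g->pages[i]);
	g->nr = 0;
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	for (int i = 0; i < 20; i++) {
		struct page *page = (struct page *)(uintptr_t)(0x1000 + i);

		if (!gather_remove_page(&g, page))
			gather_flush(&g);	/* batch full: flush early */
	}
	gather_flush(&g);			/* tlb_finish_mmu() analogue */
	return 0;
}

The point of the batching is to amortize the TLB invalidate and per-page release over many pages, instead of paying once per hugepage via a per-page linked list.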
4 changed files with 59 additions and 33 deletions

--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -416,8 +416,8 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
 		else
 			v_offset = 0;
 
-		__unmap_hugepage_range(vma,
-			vma->vm_start + v_offset, vma->vm_end, NULL);
+		unmap_hugepage_range(vma, vma->vm_start + v_offset,
+				     vma->vm_end, NULL);
 	}
 }

--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -7,6 +7,7 @@
 
 struct ctl_table;
 struct user_struct;
+struct mmu_gather;
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -41,8 +42,9 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range(struct vm_area_struct *,
-			unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+				unsigned long start, unsigned long end,
+				struct page *ref_page);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 void hugetlb_report_meminfo(struct seq_file *);
 int hugetlb_report_node_meminfo(int, char *);
@@ -98,7 +100,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
-#define unmap_hugepage_range(vma, start, end, page)	BUG()
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
@@ -112,13 +113,24 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
-#define dequeue_hwpoisoned_huge_page(page)	0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+	return 0;
+}
+
 static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #define HUGETLB_ANON_FILE "anon_hugepage"
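(Aside on the !CONFIG_HUGETLB_PAGE hunk above: turning the dequeue_hwpoisoned_huge_page macro into a static inline, and adding a static inline __unmap_hugepage_range stub, preserves argument type-checking even in configs where the real code is compiled out. A minimal userspace illustration of the difference, using a hypothetical frob stub, not kernel code:)

#include <stdio.h>

struct page { int id; };

/* Macro stub: the argument vanishes unchecked at preprocessing time. */
#define frob_macro(page)	0

/* Inline stub: the compiler still type-checks the argument. */
static inline int frob_inline(struct page *page)
{
	(void)page;
	return 0;
}

int main(void)
{
	struct page p = { 1 };

	printf("%d\n", frob_macro(&p));   /* frob_macro("junk") would also compile */
	printf("%d\n", frob_inline(&p));  /* frob_inline("junk") would warn or error */
	return 0;
}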

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,8 +24,9 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <linux/io.h>
+#include <asm/tlb.h>
 
+#include <linux/io.h>
 #include <linux/hugetlb.h>
 #include <linux/node.h>
 #include "internal.h"
@@ -2310,30 +2311,26 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 		return 0;
 }
 
-void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end, struct page *ref_page)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			    unsigned long start, unsigned long end,
+			    struct page *ref_page)
 {
+	int force_flush = 0;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
 	pte_t *ptep;
 	pte_t pte;
 	struct page *page;
-	struct page *tmp;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 
-	/*
-	 * A page gathering list, protected by per file i_mmap_mutex. The
-	 * lock is used to avoid list corruption from multiple unmapping
-	 * of the same page since we are using page->lru.
-	 */
-	LIST_HEAD(page_list);
-
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
 	BUG_ON(end & ~huge_page_mask(h));
 
+	tlb_start_vma(tlb, vma);
 	mmu_notifier_invalidate_range_start(mm, start, end);
+again:
 	spin_lock(&mm->page_table_lock);
 	for (address = start; address < end; address += sz) {
 		ptep = huge_pte_offset(mm, address);
@@ -2372,30 +2369,45 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		}
 
 		pte = huge_ptep_get_and_clear(mm, address, ptep);
+		tlb_remove_tlb_entry(tlb, ptep, address);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
-		list_add(&page->lru, &page_list);
 
+		page_remove_rmap(page);
+		force_flush = !__tlb_remove_page(tlb, page);
+		if (force_flush)
+			break;
 		/* Bail out after unmapping reference page if supplied */
 		if (ref_page)
 			break;
 	}
-	flush_tlb_range(vma, start, end);
 	spin_unlock(&mm->page_table_lock);
-	mmu_notifier_invalidate_range_end(mm, start, end);
-	list_for_each_entry_safe(page, tmp, &page_list, lru) {
-		page_remove_rmap(page);
-		list_del(&page->lru);
-		put_page(page);
+	/*
+	 * mmu_gather ran out of room to batch pages, we break out of
+	 * the PTE lock to avoid doing the potential expensive TLB invalidate
+	 * and page-free while holding it.
+	 */
+	if (force_flush) {
+		force_flush = 0;
+		tlb_flush_mmu(tlb);
+		if (address < end && !ref_page)
+			goto again;
 	}
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_end_vma(tlb, vma);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-	__unmap_hugepage_range(vma, start, end, ref_page);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	struct mm_struct *mm;
+	struct mmu_gather tlb;
+
+	mm = vma->vm_mm;
+
+	tlb_gather_mmu(&tlb, mm, 0);
+	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
 /*
@@ -2440,9 +2452,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * from the time of fork. This would look like data corruption
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-			__unmap_hugepage_range(iter_vma,
-				address, address + huge_page_size(h),
-				page);
+			unmap_hugepage_range(iter_vma, address,
+					     address + huge_page_size(h), page);
 	}
 	mutex_unlock(&mapping->i_mmap_mutex);
 
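(Note on the rewritten __unmap_hugepage_range in the mm/hugetlb.c hunks above: it follows a common kernel pattern of batching work under a spinlock and, when the mmu_gather batch fills, dropping the lock to do the expensive TLB flush and page frees before resuming via goto again. A standalone sketch of that control flow, with a pthread mutex standing in for page_table_lock; all names here are hypothetical:)

#include <pthread.h>
#include <stdio.h>

#define BATCH_MAX 4
#define END 10

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* e.g. TLB invalidate + bulk page free; deliberately done without the lock */
static void expensive_flush(int *batch, int n)
{
	for (int i = 0; i < n; i++)
		printf("flush item %d\n", batch[i]);
}

int main(void)
{
	int batch[BATCH_MAX], n = 0, pos = 0;

again:
	pthread_mutex_lock(&lock);
	for (; pos < END; pos++) {
		batch[n++] = pos;		/* __tlb_remove_page() analogue */
		if (n == BATCH_MAX) {		/* batch full: force a flush */
			pos++;
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	if (n) {
		expensive_flush(batch, n);	/* tlb_flush_mmu() analogue */
		n = 0;
	}
	if (pos < END)
		goto again;			/* resume where we left off */
	return 0;
}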

--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1343,8 +1343,11 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 			 * Since no pte has actually been setup, it is
 			 * safe to do nothing in this case.
 			 */
-			if (vma->vm_file)
-				unmap_hugepage_range(vma, start, end, NULL);
+			if (vma->vm_file) {
+				mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+				__unmap_hugepage_range(tlb, vma, start, end, NULL);
+				mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+			}
 		} else
 			unmap_page_range(tlb, vma, start, end, details);
 	}