mm/huge_memory: Convert __split_huge_pmd() to take a folio

Convert split_huge_pmd_address() at the same time since it only passes
the folio through, and its two callers already have a folio on hand.
Removes numerous calls to compound_head() and removes an assumption
that a page cannot be larger than a PMD.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent b3ac04132c
commit af28a988b3

3 changed files with 31 additions and 29 deletions
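As a rough illustration of the interface change (a sketch for this note, not code from the commit): a caller that only has a struct page in hand now derives the folio once with page_folio() and passes that down, instead of handing the raw page to the splitting code. The helper name below is hypothetical; it mirrors what try_to_unmap_one() does after this patch.

/* Hypothetical caller, for illustration only. */
static void split_pmd_for_page(struct vm_area_struct *vma,
			       unsigned long address, struct page *page)
{
	/* One head-page lookup up front; the callee no longer does it. */
	struct folio *folio = page_folio(page);

	/* freeze == false: split the PMD without inserting migration entries. */
	split_huge_pmd_address(vma, address, false, folio);
}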
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -194,7 +194,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page);
+		unsigned long address, bool freeze, struct folio *folio);
 
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
@@ -207,7 +207,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
-		bool freeze, struct page *page);
+		bool freeze, struct folio *folio);
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long address);
@@ -406,9 +406,9 @@ static inline void deferred_split_huge_page(struct page *page) {}
 	do { } while (0)
 
 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page) {}
+		unsigned long address, bool freeze, struct folio *folio) {}
 static inline void split_huge_pmd_address(struct vm_area_struct *vma,
-		unsigned long address, bool freeze, struct page *page) {}
+		unsigned long address, bool freeze, struct folio *folio) {}
 
 #define split_huge_pud(__vma, __pmd, __address)	\
 	do { } while (0)
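The mm/huge_memory.c changes that follow swap each page-based helper for its folio counterpart. The compound_head() saving mentioned in the commit message comes from the fact that page-flag tests such as PageLocked() must first resolve a possible tail page to its head, while the folio variants already refer to the head. A simplified sketch (not the kernel's real definitions):

/* Illustration only -- greatly simplified, not the actual implementations. */
static inline bool example_page_locked(struct page *page)
{
	/* page may be a tail page; resolve the head first. */
	return test_bit(PG_locked, &compound_head(page)->flags);
}

static inline bool example_folio_locked(struct folio *folio)
{
	/* a folio is never a tail page, so no extra lookup is needed. */
	return test_bit(PG_locked, &folio->flags);
}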
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2113,11 +2113,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page)
+		unsigned long address, bool freeze, struct folio *folio)
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool do_unlock_page = false;
+	bool do_unlock_folio = false;
 	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2127,20 +2127,20 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	ptl = pmd_lock(vma->vm_mm, pmd);
 
 	/*
-	 * If caller asks to setup a migration entries, we need a page to check
-	 * pmd against. Otherwise we can end up replacing wrong page.
+	 * If caller asks to setup a migration entry, we need a folio to check
+	 * pmd against. Otherwise we can end up replacing wrong folio.
 	 */
-	VM_BUG_ON(freeze && !page);
-	if (page) {
-		VM_WARN_ON_ONCE(!PageLocked(page));
-		if (page != pmd_page(*pmd))
+	VM_BUG_ON(freeze && !folio);
+	if (folio) {
+		VM_WARN_ON_ONCE(!folio_test_locked(folio));
+		if (folio != page_folio(pmd_page(*pmd)))
 			goto out;
 	}
 
 repeat:
 	if (pmd_trans_huge(*pmd)) {
-		if (!page) {
-			page = pmd_page(*pmd);
+		if (!folio) {
+			folio = page_folio(pmd_page(*pmd));
 			/*
 			 * An anonymous page must be locked, to ensure that a
 			 * concurrent reuse_swap_page() sees stable mapcount;
@@ -2148,22 +2148,22 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 * and page lock must not be taken when zap_pmd_range()
 			 * calls __split_huge_pmd() while i_mmap_lock is held.
 			 */
-			if (PageAnon(page)) {
-				if (unlikely(!trylock_page(page))) {
-					get_page(page);
+			if (folio_test_anon(folio)) {
+				if (unlikely(!folio_trylock(folio))) {
+					folio_get(folio);
 					_pmd = *pmd;
 					spin_unlock(ptl);
-					lock_page(page);
+					folio_lock(folio);
 					spin_lock(ptl);
 					if (unlikely(!pmd_same(*pmd, _pmd))) {
-						unlock_page(page);
-						put_page(page);
-						page = NULL;
+						folio_unlock(folio);
+						folio_put(folio);
+						folio = NULL;
 						goto repeat;
 					}
-					put_page(page);
+					folio_put(folio);
 				}
-				do_unlock_page = true;
+				do_unlock_folio = true;
 			}
 		}
 	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
@@ -2171,8 +2171,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
 	spin_unlock(ptl);
-	if (do_unlock_page)
-		unlock_page(page);
+	if (do_unlock_folio)
+		folio_unlock(folio);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.
 	 * They are 3 cases to consider inside __split_huge_pmd_locked():
@@ -2190,7 +2190,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
-		bool freeze, struct page *page)
+		bool freeze, struct folio *folio)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -2211,7 +2211,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 
 	pmd = pmd_offset(pud, address);
 
-	__split_huge_pmd(vma, pmd, address, freeze, page);
+	__split_huge_pmd(vma, pmd, address, freeze, folio);
 }
 
 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1410,6 +1410,7 @@ void page_remove_rmap(struct page *page,
 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
 	pte_t pteval;
@@ -1428,7 +1429,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		pvmw.flags = PVMW_SYNC;
 
 	if (flags & TTU_SPLIT_HUGE_PMD)
-		split_huge_pmd_address(vma, address, false, page);
+		split_huge_pmd_address(vma, address, false, folio);
 
 	/*
 	 * For THP, we have to assume the worse case ie pmd for invalidation.
@@ -1700,6 +1701,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
 	pte_t pteval;
@@ -1722,7 +1724,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
 	 */
 	if (flags & TTU_SPLIT_HUGE_PMD)
-		split_huge_pmd_address(vma, address, true, page);
+		split_huge_pmd_address(vma, address, true, folio);
 
 	/*
 	 * For THP, we have to assume the worse case ie pmd for invalidation.