forked from mirrors/linux

mm/rmap: Convert rmap_walk() to take a folio

This ripples all the way through to every calling and called function
from rmap.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

parent e05b34539d
commit 2f031c6f04

9 changed files with 80 additions and 99 deletions
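To make the conversion concrete, here is a minimal sketch of how a call site changes (a hypothetical caller, not part of this commit): before this patch rmap_walk() took a struct page, so folio-based callers had to pass &folio->page; afterwards the folio is passed directly.

	/* Hypothetical caller, illustrating only the calling-convention change. */
	#include <linux/rmap.h>

	static void example_walk_mappings(struct folio *folio,
			struct rmap_walk_control *rwc)
	{
		/*
		 * Before this commit, rmap_walk() took a struct page:
		 *	rmap_walk(&folio->page, rwc);
		 * After this commit, the folio is passed directly:
		 */
		rmap_walk(folio, rwc);
	}

The per-file hunks below show the same substitution applied to every existing caller and callback.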
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
 struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
 
 #else  /* !CONFIG_KSM */

@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
 	return page;
 }
 
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
 			struct rmap_walk_control *rwc)
 {
 }
@@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

@@ -286,15 +285,15 @@ struct rmap_walk_control {
 	 * Return false if page table scanning in rmap_walk should be stopped.
 	 * Otherwise, return true.
 	 */
-	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
-	int (*done)(struct page *page);
-	struct anon_vma *(*anon_lock)(struct page *page);
+	int (*done)(struct folio *folio);
+	struct anon_vma *(*anon_lock)(struct folio *folio);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */
 
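As a usage sketch of the new interface (assumed, not taken from this commit; the example_* names are hypothetical): a rmap_walk_control now receives the folio in its callbacks, and folio_lock_anon_vma_read() replaces page_lock_anon_vma_read() as the anon_lock hook. This mirrors the pattern the DAMON and page-idle callers below follow.

	/* Hypothetical user of the folio-based callbacks; not from this commit. */
	#include <linux/rmap.h>

	static bool example_rmap_one(struct folio *folio, struct vm_area_struct *vma,
			unsigned long addr, void *arg)
	{
		DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

		/* Visit each place @vma maps part of @folio. */
		while (page_vma_mapped_walk(&pvmw)) {
			/* ... inspect or modify the mapping at pvmw.address ... */
		}
		return true;	/* true: keep scanning further VMAs */
	}

	static void example_walk(struct folio *folio)
	{
		struct rmap_walk_control rwc = {
			.rmap_one = example_rmap_one,
			.anon_lock = folio_lock_anon_vma_read,
		};

		rmap_walk(folio, &rwc);
	}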
@@ -16,10 +16,10 @@
 #include "../internal.h"
 #include "prmtv-common.h"
 
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;

@@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
 	struct page *page = damon_get_page(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 

@@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
 	if (need_lock && !folio_trylock(folio))
 		goto out;
 
-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);
 
 	if (need_lock)
 		folio_unlock(folio);

@@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
 	bool accessed;
 };
 
-static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	struct damon_pa_access_chk_result *result = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 

@@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	struct rmap_walk_control rwc = {
 		.arg = &result,
 		.rmap_one = __damon_pa_young,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 

@@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 		return NULL;
 	}
 
-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);
 
 	if (need_lock)
 		folio_unlock(folio);
@@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
-
-#ifdef CONFIG_MMU
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
-{
-	return folio_lock_anon_vma_read(page_folio(page));
-}
-#endif
@@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		 * The caller does not necessarily hold an mmap_lock that would
 		 * prevent the anon_vma disappearing so we first we take a
 		 * reference to it and then lock the anon_vma for write. This
-		 * is similar to page_lock_anon_vma_read except the write lock
+		 * is similar to folio_lock_anon_vma_read except the write lock
 		 * is taken to serialise against parallel split or collapse
 		 * operations.
 		 */

mm/ksm.c | 12
@@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
 	return new_page;
 }
 
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
 	struct rmap_item *rmap_item;
 	int search_new_forks = 0;
 
-	VM_BUG_ON_PAGE(!PageKsm(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
 
 	/*
 	 * Rely on the page lock to protect against concurrent modifications
 	 * to that page's node of the stable tree.
 	 */
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
-	stable_node = page_stable_node(page);
+	stable_node = folio_stable_node(folio);
 	if (!stable_node)
 		return;
 again:

@@ -2637,11 +2637,11 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 				continue;
 
-			if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
+			if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
 				anon_vma_unlock_read(anon_vma);
 				return;
 			}
-			if (rwc->done && rwc->done(page)) {
+			if (rwc->done && rwc->done(folio)) {
 				anon_vma_unlock_read(anon_vma);
 				return;
 			}

mm/migrate.c | 10
@@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
-				 unsigned long addr, void *old)
+static bool remove_migration_pte(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr, void *old)
 {
-	struct folio *folio = page_folio(page);
 	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
 	while (page_vma_mapped_walk(&pvmw)) {
 		pte_t pte;
 		swp_entry_t entry;

@@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
 	};
 
 	if (locked)
-		rmap_walk_locked(&dst->page, &rwc);
+		rmap_walk_locked(dst, &rwc);
 	else
-		rmap_walk(&dst->page, &rwc);
+		rmap_walk(dst, &rwc);
 }
 
 /*
@@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn)
 	return page;
 }
 
-static bool page_idle_clear_pte_refs_one(struct page *page,
+static bool page_idle_clear_pte_refs_one(struct folio *folio,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 	bool referenced = false;
 

@@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page)
 	 */
 	static const struct rmap_walk_control rwc = {
 		.rmap_one = page_idle_clear_pte_refs_one,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 

@@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page)
 	if (need_lock && !folio_trylock(folio))
 		return;
 
-	rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
+	rmap_walk(folio, (struct rmap_walk_control *)&rwc);
 
 	if (need_lock)
 		folio_unlock(folio);

mm/rmap.c | 111
@@ -107,15 +107,15 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	VM_BUG_ON(atomic_read(&anon_vma->refcount));
 
 	/*
-	 * Synchronize against page_lock_anon_vma_read() such that
+	 * Synchronize against folio_lock_anon_vma_read() such that
 	 * we can safely hold the lock without the anon_vma getting
 	 * freed.
 	 *
 	 * Relies on the full mb implied by the atomic_dec_and_test() from
 	 * put_anon_vma() against the acquire barrier implied by
-	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
+	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
 	 *
-	 * page_lock_anon_vma_read()	VS	put_anon_vma()
+	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
 	 *   down_read_trylock()		  atomic_dec_and_test()
 	 *   LOCK				  MB
 	 *   atomic_read()			  rwsem_is_locked()

@@ -168,7 +168,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
  * allocate a new one.
  *
  * Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma_read()
+ * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
  * and that may actually touch the rwsem even in the newly
  * allocated vma (it depends on RCU to make sure that the
  * anon_vma isn't actually destroyed).
@@ -799,10 +799,9 @@ struct folio_referenced_arg {
 /*
  * arg: folio_referenced_arg will be passed
  */
-static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma,
-			unsigned long address, void *arg)
+static bool folio_referenced_one(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long address, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	struct folio_referenced_arg *pra = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	int referenced = 0;

@@ -894,7 +893,7 @@ int folio_referenced(struct folio *folio, int is_locked,
 	struct rmap_walk_control rwc = {
 		.rmap_one = folio_referenced_one,
 		.arg = (void *)&pra,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 
 	*vm_flags = 0;

@@ -919,7 +918,7 @@ int folio_referenced(struct folio *folio, int is_locked,
 		rwc.invalid_vma = invalid_folio_referenced_vma;
 	}
 
-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);
 	*vm_flags = pra.vm_flags;
 
 	if (we_locked)
@@ -928,10 +927,9 @@ int folio_referenced(struct folio *folio, int is_locked,
 	return pra.referenced;
 }
 
-static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
 			    unsigned long address, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
 	struct mmu_notifier_range range;
 	int *cleaned = arg;

@@ -1025,7 +1023,7 @@ int folio_mkclean(struct folio *folio)
 	if (!mapping)
 		return 0;
 
-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);
 
 	return cleaned;
 }
@@ -1410,10 +1408,9 @@ void page_remove_rmap(struct page *page,
 /*
  * @arg: enum ttu_flags will be passed to this argument
  */
-static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	pte_t pteval;

@@ -1667,9 +1664,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 	return vma_is_temporary_stack(vma);
 }
 
-static int page_not_mapped(struct page *page)
+static int page_not_mapped(struct folio *folio)
 {
-	return !page_mapped(page);
+	return !folio_mapped(folio);
 }
 
 /**
@@ -1689,13 +1686,13 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 		.rmap_one = try_to_unmap_one,
 		.arg = (void *)flags,
 		.done = page_not_mapped,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 
 	if (flags & TTU_RMAP_LOCKED)
-		rmap_walk_locked(&folio->page, &rwc);
+		rmap_walk_locked(folio, &rwc);
 	else
-		rmap_walk(&folio->page, &rwc);
+		rmap_walk(folio, &rwc);
 }
 
 /*

@@ -1704,10 +1701,9 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
-static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
+static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	pte_t pteval;
@@ -1951,7 +1947,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 		.rmap_one = try_to_migrate_one,
 		.arg = (void *)flags,
 		.done = page_not_mapped,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 
 	/*

@@ -1977,9 +1973,9 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 		rwc.invalid_vma = invalid_migration_vma;
 
 	if (flags & TTU_RMAP_LOCKED)
-		rmap_walk_locked(&folio->page, &rwc);
+		rmap_walk_locked(folio, &rwc);
 	else
-		rmap_walk(&folio->page, &rwc);
+		rmap_walk(folio, &rwc);
 }
 
 #ifdef CONFIG_DEVICE_PRIVATE
@@ -1990,10 +1986,9 @@ struct make_exclusive_args {
 	bool valid;
 };
 
-static bool page_make_device_exclusive_one(struct page *page,
+static bool page_make_device_exclusive_one(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address, void *priv)
 {
-	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	struct make_exclusive_args *args = priv;

@@ -2098,7 +2093,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
 	struct rmap_walk_control rwc = {
 		.rmap_one = page_make_device_exclusive_one,
 		.done = page_not_mapped,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 		.arg = &args,
 	};
 

@@ -2109,7 +2104,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
 	if (!folio_test_anon(folio))
 		return false;
 
-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);
 
 	return args.valid && !folio_mapcount(folio);
 }
@@ -2177,17 +2172,16 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 		anon_vma_free(root);
 }
 
-static struct anon_vma *rmap_walk_anon_lock(struct page *page,
+static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
 					struct rmap_walk_control *rwc)
 {
-	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma;
 
 	if (rwc->anon_lock)
-		return rwc->anon_lock(page);
+		return rwc->anon_lock(folio);
 
 	/*
-	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
+	 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
 	 * because that depends on page_mapped(); but not all its usages
 	 * are holding mmap_lock. Users without mmap_lock are required to
 	 * take a reference count to prevent the anon_vma disappearing

@@ -2209,10 +2203,9 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 */
-static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
 		bool locked)
 {
-	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma;
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
@@ -2222,17 +2215,17 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		/* anon_vma disappear under us? */
 		VM_BUG_ON_FOLIO(!anon_vma, folio);
 	} else {
-		anon_vma = rmap_walk_anon_lock(page, rwc);
+		anon_vma = rmap_walk_anon_lock(folio, rwc);
 	}
 	if (!anon_vma)
 		return;
 
-	pgoff_start = page_to_pgoff(page);
-	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+	pgoff_start = folio_pgoff(folio);
+	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(page, vma);
+		unsigned long address = vma_address(&folio->page, vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();

@@ -2240,9 +2233,9 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (!rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
 			break;
-		if (rwc->done && rwc->done(page))
+		if (rwc->done && rwc->done(folio))
 			break;
 	}
 
@@ -2258,10 +2251,10 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 */
-static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
+static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
 		bool locked)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = folio_mapping(folio);
 	pgoff_t pgoff_start, pgoff_end;
 	struct vm_area_struct *vma;
 

@@ -2271,18 +2264,18 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	 * structure at mapping cannot be freed and reused yet,
 	 * so we can safely take mapping->i_mmap_rwsem.
 	 */
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
 	if (!mapping)
 		return;
 
-	pgoff_start = page_to_pgoff(page);
-	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
+	pgoff_start = folio_pgoff(folio);
+	pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
 	if (!locked)
 		i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_address(page, vma);
+		unsigned long address = vma_address(&folio->page, vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2290,9 +2283,9 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 			continue;
 
-		if (!rwc->rmap_one(page, vma, address, rwc->arg))
+		if (!rwc->rmap_one(folio, vma, address, rwc->arg))
 			goto done;
-		if (rwc->done && rwc->done(page))
+		if (rwc->done && rwc->done(folio))
 			goto done;
 	}
 

@@ -2301,25 +2294,25 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 		i_mmap_unlock_read(mapping);
 }
 
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
 {
-	if (unlikely(PageKsm(page)))
-		rmap_walk_ksm(page, rwc);
-	else if (PageAnon(page))
-		rmap_walk_anon(page, rwc, false);
+	if (unlikely(folio_test_ksm(folio)))
+		rmap_walk_ksm(folio, rwc);
+	else if (folio_test_anon(folio))
+		rmap_walk_anon(folio, rwc, false);
 	else
-		rmap_walk_file(page, rwc, false);
+		rmap_walk_file(folio, rwc, false);
 }
 
 /* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 {
 	/* no ksm support for now */
-	VM_BUG_ON_PAGE(PageKsm(page), page);
-	if (PageAnon(page))
-		rmap_walk_anon(page, rwc, true);
+	VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
+	if (folio_test_anon(folio))
+		rmap_walk_anon(folio, rwc, true);
 	else
-		rmap_walk_file(page, rwc, true);
+		rmap_walk_file(folio, rwc, true);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE