mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	mm/migrate: __unmap_and_move() push good newpage to LRU
Compaction, NUMA page movement, THP collapse/split, and memory failure do isolate unevictable pages from their "LRU", losing the record of mlock_count in doing so (isolators are likely to use page->lru for their own private lists, so mlock_count has to be presumed lost). That's unfortunate, and we should put in some work to correct that: one can imagine a function to build up the mlock_count again - but it would require i_mmap_rwsem for read, so be careful where it's called. Or page_referenced_one() and try_to_unmap_one() might do that extra work. But one place that can very easily be improved is page migration's __unmap_and_move(): a small adjustment to where the successful new page is put back on LRU, and its mlock_count (if any) is built back up by remove_migration_ptes(). Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
This commit is contained in:
		
							parent
							
								
									34b6792380
								
							
						
					
					
						commit
						c3096e6782
					
				
					 1 changed file with 19 additions and 12 deletions
				
			
		
							
								
								
									
										29
									
								
								mm/migrate.c
									
									
									
									
									
								
							
							
						
						
									
										29
									
								
								mm/migrate.c
									
									
									
									
									
								
							| 
						 | 
				
			
			@ -1032,6 +1032,21 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 | 
			
		|||
	if (!page_mapped(page))
 | 
			
		||||
		rc = move_to_new_page(newpage, page, mode);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * When successful, push newpage to LRU immediately: so that if it
 | 
			
		||||
	 * turns out to be an mlocked page, remove_migration_ptes() will
 | 
			
		||||
	 * automatically build up the correct newpage->mlock_count for it.
 | 
			
		||||
	 *
 | 
			
		||||
	 * We would like to do something similar for the old page, when
 | 
			
		||||
	 * unsuccessful, and other cases when a page has been temporarily
 | 
			
		||||
	 * isolated from the unevictable LRU: but this case is the easiest.
 | 
			
		||||
	 */
 | 
			
		||||
	if (rc == MIGRATEPAGE_SUCCESS) {
 | 
			
		||||
		lru_cache_add(newpage);
 | 
			
		||||
		if (page_was_mapped)
 | 
			
		||||
			lru_add_drain();
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (page_was_mapped)
 | 
			
		||||
		remove_migration_ptes(page,
 | 
			
		||||
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
 | 
			
		||||
| 
						 | 
				
			
			@ -1045,20 +1060,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 | 
			
		|||
	unlock_page(page);
 | 
			
		||||
out:
 | 
			
		||||
	/*
 | 
			
		||||
	 * If migration is successful, decrease refcount of the newpage
 | 
			
		||||
	 * If migration is successful, decrease refcount of the newpage,
 | 
			
		||||
	 * which will not free the page because new page owner increased
 | 
			
		||||
	 * refcounter. As well, if it is LRU page, add the page to LRU
 | 
			
		||||
	 * list in here. Use the old state of the isolated source page to
 | 
			
		||||
	 * determine if we migrated a LRU page. newpage was already unlocked
 | 
			
		||||
	 * and possibly modified by its owner - don't rely on the page
 | 
			
		||||
	 * state.
 | 
			
		||||
	 * refcounter.
 | 
			
		||||
	 */
 | 
			
		||||
	if (rc == MIGRATEPAGE_SUCCESS) {
 | 
			
		||||
		if (unlikely(!is_lru))
 | 
			
		||||
	if (rc == MIGRATEPAGE_SUCCESS)
 | 
			
		||||
		put_page(newpage);
 | 
			
		||||
		else
 | 
			
		||||
			putback_lru_page(newpage);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return rc;
 | 
			
		||||
}
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue