Mirror of https://github.com/torvalds/linux.git, synced 2025-10-31 16:48:26 +02:00

	mm: migrate high-order folios in swap cache correctly
Large folios occupy N consecutive entries in the swap cache instead of
using multi-index entries like the page cache.  However, if a large folio
is re-added to the LRU list, it can be migrated.  The migration code was
not aware of the difference between the swap cache and the page cache and
assumed that a single xas_store() would be sufficient.
This leaves potentially many stale pointers to the now-migrated folio in
the swap cache, which can lead to almost arbitrary data corruption in the
future.  This can also manifest as infinite loops with the RCU read lock
held.
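
To make the failure mode concrete, here is a minimal userspace sketch, not kernel code: the plain array, the tiny struct folio, and the helper names (add_to_swap_cache_model, migrate_single_store, migrate_all_entries) are stand-ins for the XArray-backed swap cache and the real helpers. It shows that a folio occupying nr consecutive slots, "migrated" with a single store, leaves nr - 1 stale pointers behind, while rewriting every slot does not.

#include <assert.h>
#include <stdio.h>

#define NR 8	/* model: slots occupied by one order-3 folio */

struct folio { int id; };

/* Model: the swap cache keeps the same folio pointer in N consecutive slots. */
static struct folio *swap_cache[NR];

static void add_to_swap_cache_model(struct folio *folio, long nr)
{
	for (long i = 0; i < nr; i++)
		swap_cache[i] = folio;
}

/* Old behaviour: one store, as if the cache used a single multi-index entry. */
static void migrate_single_store(struct folio *newfolio)
{
	swap_cache[0] = newfolio;
}

/* Fixed behaviour: rewrite every entry the old folio occupied. */
static void migrate_all_entries(struct folio *newfolio, long entries)
{
	for (long i = 0; i < entries; i++)
		swap_cache[i] = newfolio;
}

int main(void)
{
	struct folio old = { .id = 1 }, newf = { .id = 2 };
	long stale = 0;

	add_to_swap_cache_model(&old, NR);
	migrate_single_store(&newf);
	for (long i = 0; i < NR; i++)
		if (swap_cache[i] == &old)
			stale++;
	printf("single store: %ld of %d slots still point at the old folio\n",
	       stale, NR);

	add_to_swap_cache_model(&old, NR);
	migrate_all_entries(&newf, NR);
	for (long i = 0; i < NR; i++)
		assert(swap_cache[i] == &newf);
	printf("per-entry store: all %d slots updated\n", NR);
	return 0;
}

The fix below does the same thing on the real XArray with xas_store()/xas_next(), bounded by entries, which is nr for swap-cache folios and 1 for everything else.
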
[willy@infradead.org: modifications to the changelog & tweaked the fix]
Fixes: 3417013e0d ("mm/migrate: Add folio_migrate_mapping()")
Link: https://lkml.kernel.org/r/20231214045841.961776-1-willy@infradead.org
Signed-off-by: Charan Teja Kalla <quic_charante@quicinc.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reported-by: Charan Teja Kalla <quic_charante@quicinc.com>
Closes: https://lkml.kernel.org/r/1700569840-17327-1-git-send-email-quic_charante@quicinc.com
Cc: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
			
			
parent 4249f13c11
commit fc346d0a70

1 changed file with 8 additions and 1 deletion
mm/migrate.c
@@ -405,6 +405,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	int dirty;
 	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
+	long entries, i;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
@@ -442,8 +443,10 @@ int folio_migrate_mapping(struct address_space *mapping,
 			folio_set_swapcache(newfolio);
 			newfolio->private = folio_get_private(folio);
 		}
+		entries = nr;
 	} else {
 		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+		entries = 1;
 	}
 
 	/* Move dirty while page refs frozen and newpage not yet exposed */
@@ -453,7 +456,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 		folio_set_dirty(newfolio);
 	}
 
-	xas_store(&xas, newfolio);
+	/* Swap cache still stores N entries instead of a high-order entry */
+	for (i = 0; i < entries; i++) {
+		xas_store(&xas, newfolio);
+		xas_next(&xas);
+	}
 
 	/*
 	 * Drop cache reference from old page by unfreezing