	mm/migrate: make isolate_movable_page() skip slab pages
In the next commit we want to rearrange struct slab fields to allow a larger
rcu_head. Afterwards, the page->mapping field will overlap with SLUB's
"struct list_head slab_list", where the value of the prev pointer can become
LIST_POISON2, which is 0x122 + POISON_POINTER_DELTA. Unfortunately, with bit 1
set, that value makes PageMovable() report a false positive and causes a GPF,
as reported by lkp [1].

To fix this, make isolate_movable_page() skip pages with the PageSlab flag
set. This is a bit tricky, as we need to add memory barriers to SLAB and
SLUB's page allocation and freeing, and their counterparts to
isolate_movable_page().

Based on my RFC from [2]. Added a comment update from Matthew's variant in [3]
and, as done there, moved the PageSlab checks to happen before trying to take
the page lock.

[1] https://lore.kernel.org/all/208c1757-5edd-fd42-67d4-1940cc43b50f@intel.com/
[2] https://lore.kernel.org/all/aec59f53-0e53-1736-5932-25407125d4d4@suse.cz/
[3] https://lore.kernel.org/all/YzsVM8eToHUeTP75@casper.infradead.org/

Reported-by: kernel test robot <yujie.liu@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
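Why the overlap bites: __PageMovable() (used by PageMovable()) only inspects the low bits of page->mapping, and LIST_POISON2 happens to have the "movable" bit set. Below is a minimal userspace sketch of that flag test, not kernel code, with the numeric values taken from include/linux/poison.h and include/linux/page-flags.h and POISON_POINTER_DELTA assumed to be 0 (the default when CONFIG_ILLEGAL_POINTER_VALUE is not set):

/* false_positive.c - userspace illustration only, not kernel code */
#include <stdio.h>

/* Values from include/linux/poison.h; POISON_POINTER_DELTA assumed 0 here
 * (CONFIG_ILLEGAL_POINTER_VALUE unset). */
#define POISON_POINTER_DELTA	0x0UL
#define LIST_POISON2		(0x122UL + POISON_POINTER_DELTA)

/* Values from include/linux/page-flags.h */
#define PAGE_MAPPING_ANON	0x1UL
#define PAGE_MAPPING_MOVABLE	0x2UL
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/* The flag test __PageMovable() applies to page->mapping */
static int looks_movable(unsigned long mapping)
{
	return (mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
}

int main(void)
{
	/*
	 * After the struct slab rework, slab_list.prev aliases page->mapping,
	 * and list_del() poisons prev with LIST_POISON2, whose bit 1 is set.
	 */
	unsigned long aliased = LIST_POISON2;

	printf("mapping=%#lx looks movable: %d\n", aliased, looks_movable(aliased));
	return 0;
}

With bit 1 set, the poison value passes the movable test even though the page is a slab page, which is why the fix gates the movable check with PageSlab().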
parent bc29d5bd2b
commit 8b8817630a
3 changed files with 22 additions and 5 deletions

mm/migrate.c
@@ -74,13 +74,22 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	if (unlikely(!get_page_unless_zero(page)))
 		goto out;
 
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
+	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
+	smp_rmb();
 	/*
-	 * Check PageMovable before holding a PG_lock because page's owner
-	 * assumes anybody doesn't touch PG_lock of newly allocated page
-	 * so unconditionally grabbing the lock ruins page's owner side.
+	 * Check movable flag before taking the page lock because
+	 * we use non-atomic bitops on newly allocated page flags so
+	 * unconditionally grabbing the lock ruins page's owner side.
 	 */
 	if (unlikely(!__PageMovable(page)))
 		goto out_putpage;
+	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
+	smp_rmb();
+	if (unlikely(PageSlab(page)))
+		goto out_putpage;
 
 	/*
 	 * As movable pages are not isolated from LRU lists, concurrent
 	 * compaction threads can race against page migration functions

mm/slab.c
@@ -1370,6 +1370,8 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	account_slab(slab, cachep->gfporder, cachep, flags);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
@@ -1387,9 +1389,11 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
 	BUG_ON(!folio_test_slab(folio));
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	page_mapcount_reset(folio_page(folio, 0));
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;

mm/slub.c
@@ -1800,6 +1800,8 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 
 	slab = folio_slab(folio);
 	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
 	if (page_is_pfmemalloc(folio_page(folio, 0)))
 		slab_set_pfmemalloc(slab);
 
@@ -2000,8 +2002,10 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int pages = 1 << order;
 
 	__slab_clear_pfmemalloc(slab);
-	__folio_clear_slab(folio);
 	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	unaccount_slab(slab, order, s);
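Taken together, the hunks above implement a small publish/observe protocol: the allocation paths make the slab flag visible (smp_wmb()) before anything can reuse folio->mapping, the freeing paths make the mapping reset visible (smp_wmb()) before clearing the flag, and isolate_movable_page() re-reads the flag on both sides of the movable test with smp_rmb() in between. Below is a minimal single-threaded userspace model of that ordering, using C11 fences as rough stand-ins for the kernel barriers; every name in it (slab_flag, mapping, POISONED, try_isolate) is illustrative, not kernel API.

/* barrier_pairing.c - userspace model of the publish/observe protocol */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POISONED	0x122UL		/* stands in for LIST_POISON2 */

static _Atomic bool slab_flag;		/* stands in for PG_slab */
static _Atomic unsigned long mapping;	/* stands in for page->mapping */

/* Slab allocation side (cf. alloc_slab_page() / kmem_getpages()) */
static void slab_alloc_side(void)
{
	atomic_store_explicit(&slab_flag, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	/* only after the flag is visible may the mapping word be reused */
	atomic_store_explicit(&mapping, POISONED, memory_order_relaxed);
}

/* Slab freeing side (cf. __free_slab() / kmem_freepages()) */
static void slab_free_side(void)
{
	atomic_store_explicit(&mapping, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&slab_flag, false, memory_order_relaxed);
}

/* Isolation side (cf. isolate_movable_page()) */
static bool try_isolate(void)
{
	unsigned long m;

	if (atomic_load_explicit(&slab_flag, memory_order_relaxed))
		return false;				/* still a slab page */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	m = atomic_load_explicit(&mapping, memory_order_relaxed);
	if ((m & 0x3) != 0x2)				/* __PageMovable()-style test */
		return false;				/* not movable anyway */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (atomic_load_explicit(&slab_flag, memory_order_relaxed))
		return false;		/* became (or still is) a slab page */
	return true;
}

int main(void)
{
	slab_alloc_side();
	printf("isolate while slab: %d\n", try_isolate());	/* 0 */
	slab_free_side();
	printf("isolate after free: %d\n", try_isolate());	/* 0: mapping is 0 */
	atomic_store_explicit(&mapping, 0x2, memory_order_relaxed);
	printf("isolate movable page: %d\n", try_isolate());	/* 1 */
	return 0;
}

The point of the second flag check is that a reader which observed a stale, poisoned mapping is guaranteed by the rmb/wmb pairing to also observe the slab flag still set, so the false positive can never survive to the page-lock stage.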