forked from mirrors/linux
		
	XArray: move index forward correctly in xas_pause()
After xas_load(), xas->index could point into the middle of a found multi-index entry, so the bits of xas->index below node->shift may be non-zero. A subsequent xas_pause() will then advance xas->index by xa->node->shift with those low bits left unmasked, and thus unexpectedly skip some indices. Consider the following case: Assume XA_CHUNK_SHIFT is 4. xa_store_range(xa, 16, 31, ...) xa_store(xa, 32, ...) XA_STATE(xas, xa, 17); xas_for_each(&xas,...) xas_load(&xas) /* xas->index = 17, xas->xa_offset = 1, xas->xa_node->xa_shift = 4 */ xas_pause() /* xas->index = 33, xas->xa_offset = 2, xas->xa_node->xa_shift = 4 */ As we can see, index 32 is skipped unexpectedly. Fix this by masking off the bits below node->shift when moving the index forward in xas_pause(). For now, this does not cause serious problems; only minor issues, such as cachestat returning fewer page statuses, could occur. Link: https://lkml.kernel.org/r/20241213122523.12764-3-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									7e060df04f
								
							
						
					
					
						commit
						c9ba5249ef
					
				
					 2 changed files with 36 additions and 0 deletions
				
			
		|  | @ -1511,6 +1511,41 @@ static noinline void check_pause(struct kunit *test) | ||||||
| 	XA_BUG_ON(xa, count != order_limit); | 	XA_BUG_ON(xa, count != order_limit); | ||||||
| 
 | 
 | ||||||
| 	xa_destroy(xa); | 	xa_destroy(xa); | ||||||
|  | 
 | ||||||
|  | 	index = 0; | ||||||
|  | 	for (order = XA_CHUNK_SHIFT; order > 0; order--) { | ||||||
|  | 		XA_BUG_ON(xa, xa_store_order(xa, index, order, | ||||||
|  | 					xa_mk_index(index), GFP_KERNEL)); | ||||||
|  | 		index += 1UL << order; | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	index = 0; | ||||||
|  | 	count = 0; | ||||||
|  | 	xas_set(&xas, 0); | ||||||
|  | 	rcu_read_lock(); | ||||||
|  | 	xas_for_each(&xas, entry, ULONG_MAX) { | ||||||
|  | 		XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||||||
|  | 		index += 1UL << (XA_CHUNK_SHIFT - count); | ||||||
|  | 		count++; | ||||||
|  | 	} | ||||||
|  | 	rcu_read_unlock(); | ||||||
|  | 	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); | ||||||
|  | 
 | ||||||
|  | 	index = 0; | ||||||
|  | 	count = 0; | ||||||
|  | 	xas_set(&xas, XA_CHUNK_SIZE / 2 + 1); | ||||||
|  | 	rcu_read_lock(); | ||||||
|  | 	xas_for_each(&xas, entry, ULONG_MAX) { | ||||||
|  | 		XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||||||
|  | 		index += 1UL << (XA_CHUNK_SHIFT - count); | ||||||
|  | 		count++; | ||||||
|  | 		xas_pause(&xas); | ||||||
|  | 	} | ||||||
|  | 	rcu_read_unlock(); | ||||||
|  | 	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); | ||||||
|  | 
 | ||||||
|  | 	xa_destroy(xa); | ||||||
|  | 
 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| static noinline void check_move_tiny(struct kunit *test) | static noinline void check_move_tiny(struct kunit *test) | ||||||
|  |  | ||||||
|  | @ -1152,6 +1152,7 @@ void xas_pause(struct xa_state *xas) | ||||||
| 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) | 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) | ||||||
| 				break; | 				break; | ||||||
| 		} | 		} | ||||||
|  | 		xas->xa_index &= ~0UL << node->shift; | ||||||
| 		xas->xa_index += (offset - xas->xa_offset) << node->shift; | 		xas->xa_index += (offset - xas->xa_offset) << node->shift; | ||||||
| 		if (xas->xa_index == 0) | 		if (xas->xa_index == 0) | ||||||
| 			xas->xa_node = XAS_BOUNDS; | 			xas->xa_node = XAS_BOUNDS; | ||||||
|  |  | ||||||
		Loading…
	
		Reference in a new issue
	
	 Kemeng Shi
						Kemeng Shi