mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 02:30:34 +02:00)
	lib/stackdepot: store free stack records in a freelist
Instead of using the global pool_offset variable to find a free slot
when storing a new stack record, maintain a freelist of free slots
within the allocated stack pools.

A global next_stack variable is used as the head of the freelist, and
the next field in the stack_record struct is reused as the freelist
link (when the record is not in the freelist, this field is used as a
link in the hash table).

This is a preparatory patch for implementing the eviction of stack
records from the stack depot.

Link: https://lkml.kernel.org/r/b9e4c79955c2121b69301778643b203d3fb09ccc.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent a5d21f7171
commit b29d318858
1 changed file with 82 additions and 49 deletions
 lib/stackdepot.c | 131 +++++++++++++++++++++++++++++---------------
@@ -54,8 +54,8 @@ union handle_parts {
 };
 
 struct stack_record {
-	struct stack_record *next;	/* Link in the hash table */
-	u32 hash;			/* Hash in the hash table */
+	struct stack_record *next;	/* Link in hash table or freelist */
+	u32 hash;			/* Hash in hash table */
 	u32 size;			/* Number of stored frames */
 	union handle_parts handle;
 	unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];	/* Frames */
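The idea this hunk encodes is that a record is only ever on one list at a time: the freelist while unused, a hash-bucket chain once it holds a trace, so a single link field can serve both roles. Below is a minimal userspace sketch of such a dual-use intrusive link; the names (record, freelist, free_push, free_pop) are hypothetical and not part of the patch:

#include <stdio.h>
#include <stddef.h>

struct record {
	struct record *next;	/* link in hash chain *or* freelist */
	unsigned int hash;
};

static struct record *freelist;	/* plays the role of next_stack */

/* Push an unused record onto the freelist through its link field. */
static void free_push(struct record *r)
{
	r->next = freelist;
	freelist = r;
}

/* Pop a record; its link is then free to join a hash chain. */
static struct record *free_pop(void)
{
	struct record *r = freelist;

	if (r) {
		freelist = r->next;
		r->next = NULL;
	}
	return r;
}

int main(void)
{
	struct record pool[4] = { { 0 } };
	size_t i;

	for (i = 0; i < 4; i++)
		free_push(&pool[i]);
	printf("popped record with hash %u\n", free_pop()->hash);
	return 0;
}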
@@ -87,10 +87,10 @@ static unsigned int stack_hash_mask;
 static void *stack_pools[DEPOT_MAX_POOLS];
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Next stack in the freelist of stack records within stack_pools. */
+static struct stack_record *next_stack;
 /* Lock that protects the variables above. */
 static DEFINE_RAW_SPINLOCK(pool_lock);
 /*
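This bookkeeping swap is the whole scheme in miniature: pool_index plus pool_offset implemented a bump allocator that can only move forward, while pools_num plus next_stack allow records to be handed back. A compressed sketch of the two allocation styles, with illustrative names (RECORD_SIZE, alloc_bump, alloc_freelist) that do not appear in the patch:

#include <stddef.h>

#define RECORD_SIZE 544	/* illustrative, aligned record footprint */

struct rec { struct rec *next; };

/* Old scheme: bump allocation; slots can never be handed back. */
static struct rec *alloc_bump(char *pool_base, size_t *pool_offset)
{
	struct rec *slot = (struct rec *)(pool_base + *pool_offset);

	*pool_offset += RECORD_SIZE;
	return slot;
}

/* New scheme: pop the freelist head; a later put can push it back. */
static struct rec *alloc_freelist(struct rec **head)
{
	struct rec *slot = *head;

	if (slot)
		*head = slot->next;
	return slot;
}

int main(void)
{
	static char pool[RECORD_SIZE * 4];
	size_t offset = 0;
	struct rec *head = NULL, *slot;

	/* Carve two records by bumping, then recycle them via the list. */
	slot = alloc_bump(pool, &offset);
	slot->next = head;
	head = slot;
	slot = alloc_bump(pool, &offset);
	slot->next = head;
	head = slot;

	return alloc_freelist(&head) == slot ? 0 : 1;
}

Only the freelist form makes eviction possible, which is what the rest of the series builds on.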
@@ -226,6 +226,42 @@ int stack_depot_init(void)
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
+{
+	const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
+	int i, offset;
+
+	/* Initialize handles and link stack records to each other. */
+	for (i = 0, offset = 0;
+	     offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+	     i++, offset += DEPOT_STACK_RECORD_SIZE) {
+		struct stack_record *stack = pool + offset;
+
+		stack->handle.pool_index = pools_num;
+		stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+		stack->handle.extra = 0;
+
+		if (i < records_in_pool - 1)
+			stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
+		else
+			stack->next = NULL;
+	}
+
+	/* Link stack records into the freelist. */
+	WARN_ON(next_stack);
+	next_stack = pool;
+
+	/* Save reference to the pool to be used by depot_fetch_stack(). */
+	stack_pools[pools_num] = pool;
+
+	/*
+	 * WRITE_ONCE() pairs with potential concurrent read in
+	 * depot_fetch_stack().
+	 */
+	WRITE_ONCE(pools_num, pools_num + 1);
+}
+
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
 {
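To put numbers on the loop bounds: with 4 KiB pages and the depot's order-2 pools, DEPOT_POOL_SIZE is 16 KiB, and with the default CONFIG_STACKDEPOT_MAX_FRAMES of 64 an aligned stack_record comes to roughly 544 bytes, so about 30 records get threaded into the freelist per pool. The figures below are back-of-the-envelope assumptions, not values stated by the patch:

#include <stdio.h>

int main(void)
{
	/* Assumed values; the real ones depend on PAGE_SIZE and kconfig. */
	const int pool_size = 16 * 1024;	/* order-2 pool, 4 KiB pages */
	const int record_size = 544;		/* aligned sizeof(struct stack_record) */
	int i, offset;

	/* Same loop bounds as depot_init_pool(). */
	for (i = 0, offset = 0;
	     offset <= pool_size - record_size;
	     i++, offset += record_size)
		;

	printf("%d records per pool, %d bytes unused\n",
	       i, pool_size - i * record_size);
	return 0;
}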
@@ -242,7 +278,7 @@ static void depot_keep_new_pool(void **prealloc)
 	 * Use the preallocated memory for the new pool
 	 * as long as we do not exceed the maximum number of pools.
 	 */
-	if (pool_index + 1 < DEPOT_MAX_POOLS) {
+	if (pools_num < DEPOT_MAX_POOLS) {
 		new_pool = *prealloc;
 		*prealloc = NULL;
 	}
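The rewritten condition changes shape but not meaning: pool_index named the current pool while pools_num counts initialized pools, so after each pool is brought up pools_num == pool_index + 1. A throwaway check of that invariant (plain C, purely illustrative; MAX_POOLS stands in for DEPOT_MAX_POOLS):

#include <assert.h>

#define MAX_POOLS 8	/* stand-in for DEPOT_MAX_POOLS */

/* Hypothetical mirror of the two bookkeeping styles. */
static int pool_index = -1;	/* old: index of the current pool */
static int pools_num;		/* new: number of pools */

static void init_one_pool(void)
{
	pool_index++;		/* old-style advance */
	pools_num++;		/* new-style count */
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_POOLS; i++) {
		init_one_pool();
		/* The invariant behind the rewritten check. */
		assert(pools_num == pool_index + 1);
		assert((pool_index + 1 < MAX_POOLS) == (pools_num < MAX_POOLS));
	}
	return 0;
}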
@@ -258,45 +294,42 @@ static void depot_keep_new_pool(void **prealloc)
 }
 
 /* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(size_t required_size, void **prealloc)
+static bool depot_update_pools(void **prealloc)
 {
-	/* Check if there is not enough space in the current pool. */
-	if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
-		/* Bail out if we reached the pool limit. */
-		if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
-			WARN_ONCE(1, "Stack depot reached limit capacity");
-			return false;
-		}
+	/* Check if we still have objects in the freelist. */
+	if (next_stack)
+		goto out_keep_prealloc;
 
-		/*
-		 * Move on to the new pool.
-		 * WRITE_ONCE() pairs with potential concurrent read in
-		 * stack_depot_fetch().
-		 */
-		WRITE_ONCE(pool_index, pool_index + 1);
-		stack_pools[pool_index] = new_pool;
+	/* Check if we have a new pool saved and use it. */
+	if (new_pool) {
+		depot_init_pool(new_pool);
 		new_pool = NULL;
-		pool_offset = 0;
 
-		/*
-		 * If the maximum number of pools is not reached, take note
-		 * that yet another new pool needs to be allocated.
-		 * smp_store_release() pairs with smp_load_acquire() in
-		 * stack_depot_save().
-		 */
-		if (pool_index + 1 < DEPOT_MAX_POOLS)
+		/* Take note that we might need a new new_pool. */
+		if (pools_num < DEPOT_MAX_POOLS)
 			smp_store_release(&new_pool_required, 1);
 
 		/* Try keeping the preallocated memory for new_pool. */
 		goto out_keep_prealloc;
 	}
 
-	/* Check if the current pool is not yet allocated. */
-	if (*prealloc && stack_pools[pool_index] == NULL) {
-		/* Use the preallocated memory for the current pool. */
-		stack_pools[pool_index] = *prealloc;
+	/* Bail out if we reached the pool limit. */
+	if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+		WARN_ONCE(1, "Stack depot reached limit capacity");
+		return false;
+	}
+
+	/* Check if we have preallocated memory and use it. */
+	if (*prealloc) {
+		depot_init_pool(*prealloc);
 		*prealloc = NULL;
 		return true;
 	}
 
 	/* Otherwise, try using the preallocated memory for a new pool. */
 	return false;
 
 out_keep_prealloc:
 	/* Keep the preallocated memory for a new pool if required. */
 	if (*prealloc)
 		depot_keep_new_pool(prealloc);
 	return true;
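Read as a whole, the new depot_update_pools() is a short decision ladder. A condensed outline for orientation (commentary only, not code from the patch):

/*
 * depot_update_pools(prealloc), condensed:
 *
 *   1. Freelist still has records?   keep *prealloc for later, succeed.
 *   2. A saved new_pool exists?      initialize it (this refills the
 *                                    freelist), note that yet another
 *                                    pool may be needed, keep *prealloc,
 *                                    succeed.
 *   3. Pool limit reached?           WARN once, fail.
 *   4. Caller brought *prealloc?     initialize it as a pool, succeed.
 *   5. Otherwise                     fail, so the caller preallocates
 *                                    and retries.
 */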
@@ -307,35 +340,35 @@ static struct stack_record *
 depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
 	struct stack_record *stack;
-	size_t required_size = DEPOT_STACK_RECORD_SIZE;
 
 	/* Update current and new pools if required and possible. */
-	if (!depot_update_pools(required_size, prealloc))
+	if (!depot_update_pools(prealloc))
 		return NULL;
 
-	/* Check if we have a pool to save the stack trace. */
-	if (stack_pools[pool_index] == NULL)
+	/* Check if we have a stack record to save the stack trace. */
+	stack = next_stack;
+	if (!stack)
 		return NULL;
 
+	/* Advance the freelist. */
+	next_stack = stack->next;
+
 	/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
 	if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
 		size = CONFIG_STACKDEPOT_MAX_FRAMES;
 
 	/* Save the stack trace. */
-	stack = stack_pools[pool_index] + pool_offset;
+	stack->next = NULL;
 	stack->hash = hash;
 	stack->size = size;
-	stack->handle.pool_index = pool_index;
-	stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
-	stack->handle.extra = 0;
+	/* stack->handle is already filled in by depot_init_pool(). */
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
-	pool_offset += required_size;
 
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
 	 */
-	kmsan_unpoison_memory(stack, required_size);
+	kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
 	return stack;
 }
@@ -345,16 +378,16 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	union handle_parts parts = { .handle = handle };
 	/*
 	 * READ_ONCE() pairs with potential concurrent write in
-	 * depot_update_pools().
+	 * depot_init_pool().
 	 */
-	int pool_index_cached = READ_ONCE(pool_index);
+	int pools_num_cached = READ_ONCE(pools_num);
 	void *pool;
 	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
 
-	if (parts.pool_index > pool_index_cached) {
+	if (parts.pool_index > pools_num_cached) {
 		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-		     parts.pool_index, pool_index_cached, handle);
+		     parts.pool_index, pools_num_cached, handle);
 		return NULL;
 	}
 
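None of this changes the depot's external contract: a caller saves a trace, receives an opaque depot_stack_handle_t, and fetches the entries back later. A sketch of typical in-kernel usage (assumes the depot has been initialized; error handling trimmed):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Capture the current call chain, skipping this helper's frame. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Deduplicate and store; returns 0 if nothing could be stored. */
	return stack_depot_save(entries, nr, gfp);
}

static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	/* Resolves the handle to the record stored in a depot pool. */
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}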