mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	sbitmap: optimise sbitmap_deferred_clear()
Because of spinlocks and atomics sbitmap_deferred_clear() has to reload
&sb->map[index] on each access even though the map address won't change.
Pass in sbitmap_word instead of {sb, index}, so it's cached in a
variable. It also improves code generation of
sbitmap_find_bit_in_index().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: John Garry <john.garry@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
			
			
This commit is contained in:
		
							parent
							
								
									f6f371f7db
								
							
						
					
					
						commit
						b78beea038
					
				
					 1 changed file with 12 additions and 12 deletions
				
			
		| 
						 | 
				
			
			@ -12,32 +12,32 @@
 | 
			
		|||
/*
 | 
			
		||||
 * See if we have deferred clears that we can batch move
 | 
			
		||||
 */
 | 
			
		||||
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
 | 
			
		||||
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
 | 
			
		||||
{
 | 
			
		||||
	unsigned long mask, val;
 | 
			
		||||
	bool ret = false;
 | 
			
		||||
	unsigned long flags;
 | 
			
		||||
 | 
			
		||||
	spin_lock_irqsave(&sb->map[index].swap_lock, flags);
 | 
			
		||||
	spin_lock_irqsave(&map->swap_lock, flags);
 | 
			
		||||
 | 
			
		||||
	if (!sb->map[index].cleared)
 | 
			
		||||
	if (!map->cleared)
 | 
			
		||||
		goto out_unlock;
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * First get a stable cleared mask, setting the old mask to 0.
 | 
			
		||||
	 */
 | 
			
		||||
	mask = xchg(&sb->map[index].cleared, 0);
 | 
			
		||||
	mask = xchg(&map->cleared, 0);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	 * Now clear the masked bits in our free word
 | 
			
		||||
	 */
 | 
			
		||||
	do {
 | 
			
		||||
		val = sb->map[index].word;
 | 
			
		||||
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
 | 
			
		||||
		val = map->word;
 | 
			
		||||
	} while (cmpxchg(&map->word, val, val & ~mask) != val);
 | 
			
		||||
 | 
			
		||||
	ret = true;
 | 
			
		||||
out_unlock:
 | 
			
		||||
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
 | 
			
		||||
	spin_unlock_irqrestore(&map->swap_lock, flags);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -92,7 +92,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 | 
			
		|||
	unsigned int i;
 | 
			
		||||
 | 
			
		||||
	for (i = 0; i < sb->map_nr; i++)
 | 
			
		||||
		sbitmap_deferred_clear(sb, i);
 | 
			
		||||
		sbitmap_deferred_clear(&sb->map[i]);
 | 
			
		||||
 | 
			
		||||
	sb->depth = depth;
 | 
			
		||||
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
 | 
			
		||||
| 
						 | 
				
			
			@ -139,15 +139,15 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
 | 
			
		|||
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
 | 
			
		||||
				     unsigned int alloc_hint, bool round_robin)
 | 
			
		||||
{
 | 
			
		||||
	struct sbitmap_word *map = &sb->map[index];
 | 
			
		||||
	int nr;
 | 
			
		||||
 | 
			
		||||
	do {
 | 
			
		||||
		nr = __sbitmap_get_word(&sb->map[index].word,
 | 
			
		||||
					sb->map[index].depth, alloc_hint,
 | 
			
		||||
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
 | 
			
		||||
					!round_robin);
 | 
			
		||||
		if (nr != -1)
 | 
			
		||||
			break;
 | 
			
		||||
		if (!sbitmap_deferred_clear(sb, index))
 | 
			
		||||
		if (!sbitmap_deferred_clear(map))
 | 
			
		||||
			break;
 | 
			
		||||
	} while (1);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -207,7 +207,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
 | 
			
		|||
			break;
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if (sbitmap_deferred_clear(sb, index))
 | 
			
		||||
		if (sbitmap_deferred_clear(&sb->map[index]))
 | 
			
		||||
			goto again;
 | 
			
		||||
 | 
			
		||||
		/* Jump to next index. */
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue