Inside sbitmap_queue_clear(), once the cleared bit is set, it becomes
visible to the allocation path immediately. Meanwhile, READs/WRITEs on
the old associated instance (such as a request in the blk-mq case) may
be reordered against the setting of the cleared bit, so a race with
re-allocation may be triggered.

Add a memory barrier that orders READs/WRITEs on the freed associated
instance before the setting of the cleared bit, avoiding the race with
re-allocation.
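
To make the ordering concrete, here is a userspace analogy of the fix
using C11 atomics. The tag_free()/tag_alloc() names, the slots[] array,
and the single-word free bitmap are illustrative only, not kernel API:
the release ordering on the freeing side stands in for the
smp_mb__before_atomic() added before sbitmap_deferred_clear_bit(), and
the acquire ordering on the allocating side stands in for the barrier
implied by test_and_set_bit_lock() in __sbitmap_get_word().

#include <stdatomic.h>
#include <stdbool.h>

struct instance { int payload; };

static _Atomic unsigned long free_bits;	/* analogy of the 'cleared' word */
static struct instance slots[64];

/* Freeing side: all prior stores to the instance must be visible before
 * the bit is published as free. */
static void tag_free(unsigned int nr)
{
	slots[nr].payload = 0;	/* last touch of the old instance */
	atomic_fetch_or_explicit(&free_bits, 1UL << nr,
				 memory_order_release);
}

/* Allocating side: claiming the bit must be ordered before any access
 * to the re-allocated instance. */
static bool tag_alloc(unsigned int nr)
{
	unsigned long old;

	old = atomic_fetch_and_explicit(&free_bits, ~(1UL << nr),
					memory_order_acquire);
	if (!(old & (1UL << nr)))
		return false;	/* bit was not free */
	slots[nr].payload = 42;	/* safe: ordered after the acquire */
	return true;
}

Without the release/acquire pairing, the allocator could observe the bit
as free while the freeing side's final stores to the instance are still
in flight, which is exactly the re-allocation race described above.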
The following kernel oops, triggered by block/006 on aarch64, may be fixed by this change:
[  142.330954] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000330
[  142.338794] Mem abort info:
[  142.341554]   ESR = 0x96000005
[  142.344632]   Exception class = DABT (current EL), IL = 32 bits
[  142.350500]   SET = 0, FnV = 0
[  142.353544]   EA = 0, S1PTW = 0
[  142.356678] Data abort info:
[  142.359528]   ISV = 0, ISS = 0x00000005
[  142.363343]   CM = 0, WnR = 0
[  142.366305] user pgtable: 64k pages, 48-bit VAs, pgdp = 000000002a3c51c0
[  142.372983] [0000000000000330] pgd=0000000000000000, pud=0000000000000000
[  142.379777] Internal error: Oops: 96000005 [#1] SMP
[  142.384613] Modules linked in: null_blk ib_isert iscsi_target_mod ib_srpt target_core_mod ib_srp scsi_transport_srp vfat fat rpcrdma sunrpc rdma_ucm ib_iser rdma_cm iw_cm libiscsi ib_umad scsi_transport_iscsi ib_ipoib ib_cm mlx5_ib ib_uverbs ib_core sbsa_gwdt crct10dif_ce ghash_ce ipmi_ssif sha2_ce ipmi_devintf sha256_arm64 sg sha1_ce ipmi_msghandler ip_tables xfs libcrc32c mlx5_core sdhci_acpi mlxfw ahci_platform at803x sdhci libahci_platform qcom_emac mmc_core hdma hdma_mgmt i2c_dev [last unloaded: null_blk]
[  142.429753] CPU: 7 PID: 1983 Comm: fio Not tainted 5.0.0.cki #2
[  142.449458] pstate: 00400005 (nzcv daif +PAN -UAO)
[  142.454239] pc : __blk_mq_free_request+0x4c/0xa8
[  142.458830] lr : blk_mq_free_request+0xec/0x118
[  142.463344] sp : ffff00003360f6a0
[  142.466646] x29: ffff00003360f6a0 x28: ffff000010e70000
[  142.471941] x27: ffff801729a50048 x26: 0000000000010000
[  142.477232] x25: ffff00003360f954 x24: ffff7bdfff021440
[  142.482529] x23: 0000000000000000 x22: 00000000ffffffff
[  142.487830] x21: ffff801729810000 x20: 0000000000000000
[  142.493123] x19: ffff801729a50000 x18: 0000000000000000
[  142.498413] x17: 0000000000000000 x16: 0000000000000001
[  142.503709] x15: 00000000000000ff x14: ffff7fe000000000
[  142.509003] x13: ffff8017dcde09a0 x12: 0000000000000000
[  142.514308] x11: 0000000000000001 x10: 0000000000000008
[  142.519597] x9 : ffff8017dcde09a0 x8 : 0000000000002000
[  142.524889] x7 : ffff8017dcde0a00 x6 : 000000015388f9be
[  142.530187] x5 : 0000000000000001 x4 : 0000000000000000
[  142.535478] x3 : 0000000000000000 x2 : 0000000000000000
[  142.540777] x1 : 0000000000000001 x0 : ffff00001041b194
[  142.546071] Process fio (pid: 1983, stack limit = 0x000000006460a0ea)
[  142.552500] Call trace:
[  142.554926]  __blk_mq_free_request+0x4c/0xa8
[  142.559181]  blk_mq_free_request+0xec/0x118
[  142.563352]  blk_mq_end_request+0xfc/0x120
[  142.567444]  end_cmd+0x3c/0xa8 [null_blk]
[  142.571434]  null_complete_rq+0x20/0x30 [null_blk]
[  142.576194]  blk_mq_complete_request+0x108/0x148
[  142.580797]  null_handle_cmd+0x1d4/0x718 [null_blk]
[  142.585662]  null_queue_rq+0x60/0xa8 [null_blk]
[  142.590171]  blk_mq_try_issue_directly+0x148/0x280
[  142.594949]  blk_mq_try_issue_list_directly+0x9c/0x108
[  142.600064]  blk_mq_sched_insert_requests+0xb0/0xd0
[  142.604926]  blk_mq_flush_plug_list+0x16c/0x2a0
[  142.609441]  blk_flush_plug_list+0xec/0x118
[  142.613608]  blk_finish_plug+0x3c/0x4c
[  142.617348]  blkdev_direct_IO+0x3b4/0x428
[  142.621336]  generic_file_read_iter+0x84/0x180
[  142.625761]  blkdev_read_iter+0x50/0x78
[  142.629579]  aio_read.isra.6+0xf8/0x190
[  142.633409]  __io_submit_one.isra.8+0x148/0x738
[  142.637912]  io_submit_one.isra.9+0x88/0xb8
[  142.642078]  __arm64_sys_io_submit+0xe0/0x238
[  142.646428]  el0_svc_handler+0xa0/0x128
[  142.650238]  el0_svc+0x8/0xc
[  142.653104] Code: b9402a63 f9000a7f 3100047f 540000a0 (f9419a81)
[  142.659202] ---[ end trace 467586bc175eb09d ]---
Fixes: ea86ea2cdc ("sbitmap: ammortize cost of clearing bits")
Reported-and-bisected-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
Cc: Yi Zhang <yi.zhang@redhat.com>
Cc: "jianchao.wang" <jianchao.w.wang@oracle.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
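
The patched sbitmap_queue_clear() appears in the full lib/sbitmap.c
listing below. For context, here is a minimal sketch of the alloc/free
cycle a tag user might drive against this API. It is a hypothetical
fragment, not code from the tree; it assumes process context and uses
the kernel helpers sbitmap_queue_free() and raw_smp_processor_id(),
which live outside this file.

#include <linux/sbitmap.h>
#include <linux/smp.h>

static struct sbitmap_queue example_tags;

static int example_init(void)
{
	/* 128 tags, default word shift (-1), no round-robin hints */
	return sbitmap_queue_init_node(&example_tags, 128, -1, false,
				       GFP_KERNEL, NUMA_NO_NODE);
}

static void example_cycle(void)
{
	int nr = __sbitmap_queue_get(&example_tags);

	if (nr < 0)
		return;	/* map full; a real caller would wait on a ws */

	/* ... use tag 'nr', e.g. as an index into a request array ... */

	/* Free via the deferred 'cleared' path; the barrier inside orders
	 * our last accesses to the tagged object before re-allocation. */
	sbitmap_queue_clear(&example_tags, nr, raw_smp_processor_id());
}

static void example_exit(void)
{
	sbitmap_queue_free(&example_tags);
}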
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
						round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);