	blk-mq: abstract out queue map
This is in preparation for allowing multiple sets of maps per queue, if so
desired.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a8908939af
commit ed76e329d7

15 changed files with 64 additions and 50 deletions
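For illustration only (not part of this commit), a minimal sketch of what a driver's
blk_mq_ops->map_queues callback looks like against the new interface, passing the
per-type map (set->map[0]) instead of the whole tag_set; the my_driver and
my_map_queues names and the pdev field are hypothetical:

	#include <linux/blk-mq.h>
	#include <linux/blk-mq-pci.h>

	/* hypothetical driver-private data stored in set->driver_data */
	struct my_driver {
		struct pci_dev *pdev;
	};

	static int my_map_queues(struct blk_mq_tag_set *set)
	{
		struct my_driver *drv = set->driver_data;

		/* only one map exists for now (HCTX_MAX_TYPES == 1) */
		return blk_mq_pci_map_queues(&set->map[0], drv->pdev, 0);
	}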
@@ -30,10 +30,10 @@ static int get_first_sibling(unsigned int cpu)
 	return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-	unsigned int *map = set->mq_map;
-	unsigned int nr_queues = set->nr_hw_queues;
+	unsigned int *map = qmap->mq_map;
+	unsigned int nr_queues = qmap->nr_queues;
 	unsigned int cpu, first_sibling;
 
 	for_each_possible_cpu(cpu) {
@@ -62,12 +62,12 @@ EXPORT_SYMBOL_GPL(blk_mq_map_queues);
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 {
 	int i;
 
 	for_each_possible_cpu(i) {
-		if (index == mq_map[i])
+		if (index == qmap->mq_map[i])
 			return local_memory_node(cpu_to_node(i));
 	}
 
@@ -31,26 +31,26 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
 			    int offset)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue + offset);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			qmap->mq_map[cpu] = queue;
 	}
 
 	return 0;
 
 fallback:
-	WARN_ON_ONCE(set->nr_hw_queues > 1);
-	blk_mq_clear_mq_map(set);
+	WARN_ON_ONCE(qmap->nr_queues > 1);
+	blk_mq_clear_mq_map(qmap);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
@@ -41,12 +41,12 @@ int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			set->map[0].mq_map[cpu] = queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(&set->map[0]);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
@@ -29,7 +29,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		struct virtio_device *vdev, int first_vec)
 {
 	const struct cpumask *mask;
@@ -38,17 +38,17 @@ int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
 	if (!vdev->config->get_vq_affinity)
 		goto fallback;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
 		mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->mq_map[cpu] = queue;
+			qmap->mq_map[cpu] = queue;
 	}
 
 	return 0;
 fallback:
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
@@ -1975,7 +1975,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	struct blk_mq_tags *tags;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2031,7 +2031,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	size_t rq_size, left;
 	int node;
 
-	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+	node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = set->mq_map[i];
+		hctx_idx = set->map[0].mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
 		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			 * case, remap the current ctx to hctx[0] which
 			 * is guaranteed to always have tags allocated
 			 */
-			set->mq_map[i] = 0;
+			set->map[0].mq_map[i] = 0;
 		}
 
 		ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		int node;
 		struct blk_mq_hw_ctx *hctx;
 
-		node = blk_mq_hw_queue_to_node(set->mq_map, i);
+		node = blk_mq_hw_queue_to_node(&set->map[0], i);
 		/*
 		 * If the hw queue has been mapped to another numa node,
 		 * we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		 * 	mask = get_cpu_mask(queue)
 		 * 	for_each_cpu(cpu, mask)
-		 * 		set->mq_map[cpu] = queue;
+		 * 		set->map.mq_map[cpu] = queue;
 		 * }
 		 *
 		 * When we need to remap, the table has to be cleared for
 		 * killing stale mapping since one CPU may not be mapped
 		 * to any hw queue.
 		 */
-		blk_mq_clear_mq_map(set);
+		blk_mq_clear_mq_map(&set->map[0]);
 
 		return set->ops->map_queues(set);
 	} else
-		return blk_mq_map_queues(set);
+		return blk_mq_map_queues(&set->map[0]);
 }
 
 /*
@@ -2857,10 +2857,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		return -ENOMEM;
 
 	ret = -ENOMEM;
-	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
-				  GFP_KERNEL, set->numa_node);
-	if (!set->mq_map)
+	set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+					  sizeof(*set->map[0].mq_map),
+					  GFP_KERNEL, set->numa_node);
+	if (!set->map[0].mq_map)
 		goto out_free_tags;
+	set->map[0].nr_queues = set->nr_hw_queues;
 
 	ret = blk_mq_update_queue_map(set);
 	if (ret)
@@ -2876,8 +2878,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	return 0;
 
 out_free_mq_map:
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 out_free_tags:
 	kfree(set->tags);
 	set->tags = NULL;
@@ -2892,8 +2894,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	for (i = 0; i < nr_cpu_ids; i++)
 		blk_mq_free_map_and_requests(set, i);
 
-	kfree(set->mq_map);
-	set->mq_map = NULL;
+	kfree(set->map[0].mq_map);
+	set->map[0].mq_map = NULL;
 
 	kfree(set->tags);
 	set->tags = NULL;
@@ -3054,7 +3056,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
 					nr_hw_queues, prev_nr_hw_queues);
 			set->nr_hw_queues = prev_nr_hw_queues;
-			blk_mq_map_queues(set);
+			blk_mq_map_queues(&set->map[0]);
 			goto fallback;
 		}
 		blk_mq_map_swqueue(q);
@@ -70,14 +70,14 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 /*
  * CPU -> queue mappings
  */
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		int cpu)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	return q->queue_hw_ctx[set->mq_map[cpu]];
+	return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
 }
 
 /*
@@ -206,12 +206,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
 	__blk_mq_put_driver_tag(hctx, rq);
 }
 
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		set->mq_map[cpu] = 0;
+		qmap->mq_map[cpu] = 0;
 }
 
 #endif
@@ -624,7 +624,7 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 {
 	struct virtio_blk *vblk = set->driver_data;
 
-	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+	return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
 }
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
@@ -435,7 +435,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 
-	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+	return blk_mq_pci_map_queues(&set->map[0], to_pci_dev(dev->dev),
 			dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
 }
 
@@ -6934,11 +6934,12 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
 	int rc;
 	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
 	if (USER_CTRL_IRQ(vha->hw))
-		rc = blk_mq_map_queues(&shost->tag_set);
+		rc = blk_mq_map_queues(qmap);
 	else
-		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+		rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
 	return rc;
 }
 
@@ -1812,7 +1812,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 
 	if (shost->hostt->map_queues)
 		return shost->hostt->map_queues(shost);
-	return blk_mq_map_queues(set);
+	return blk_mq_map_queues(&set->map[0]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -5319,7 +5319,8 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+	return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+					ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
@@ -719,8 +719,9 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
-	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
 
 /*
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_PCI_H
 #define _LINUX_BLK_MQ_PCI_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
 			  int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_VIRTIO_H
 #define _LINUX_BLK_MQ_VIRTIO_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct virtio_device;
 
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		struct virtio_device *vdev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_VIRTIO_H */
@@ -74,10 +74,19 @@ struct blk_mq_hw_ctx {
 	struct srcu_struct	srcu[0];
 };
 
-struct blk_mq_tag_set {
-	unsigned int		*mq_map;
+struct blk_mq_queue_map {
+	unsigned int *mq_map;
+	unsigned int nr_queues;
+};
+
+enum {
+	HCTX_MAX_TYPES = 1,
+};
+
+struct blk_mq_tag_set {
+	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
 	const struct blk_mq_ops	*ops;
-	unsigned int		nr_hw_queues;
+	unsigned int		nr_hw_queues;	/* nr hw queues across maps */
 	unsigned int		queue_depth;	/* max hw supported */
 	unsigned int		reserved_tags;
 	unsigned int		cmd_size;	/* per-request extra data */
@@ -295,7 +304,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);