	nvme-pci: Separate IO and admin queue IRQ vectors
The admin and first IO queues shared the first irq vector, which has an affinity mask including cpu0. If a system allows cpu0 to be offlined, the admin queue may not be usable if no other CPUs in the affinity mask are online.

This is a problem since, unlike IO queues, there is only one admin queue, and it always needs to be usable. To fix this, the patch allocates one pre_vector for the admin queue that is assigned all CPUs, so it will always be accessible. The IO queues are assigned the remaining managed vectors.

In case a controller has only one interrupt vector available, the admin and IO queues will share the pre_vector, with all CPUs assigned.

Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
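The mechanism behind this is the IRQ affinity descriptor passed to the PCI vector allocator: a struct irq_affinity with .pre_vectors = 1 keeps one vector out of the managed affinity spread, and that vector retains the default affinity covering all CPUs. A minimal sketch of the pattern follows; the function name and error handling are illustrative, not taken from the driver.

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Sketch: reserve vector 0 for an "admin" interrupt outside the managed
 * affinity spread, and let the remaining vectors be spread across the
 * CPUs for the IO queues.  Illustrative helper, not the driver's code.
 */
static int example_alloc_vectors(struct pci_dev *pdev, int nr_io_queues)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,	/* vector 0: admin queue, all CPUs */
	};
	int nr_vecs;

	/* One vector per IO queue plus the admin pre_vector. */
	nr_vecs = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
	if (nr_vecs <= 0)
		return -EIO;

	/*
	 * nr_vecs == 1 means admin and IO must share the sole vector;
	 * otherwise vectors 1..nr_vecs-1 are the managed IO vectors.
	 */
	return nr_vecs;
}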
parent  a6ff7262c2
commit  22b5560195

1 changed file with 17 additions and 6 deletions
drivers/nvme/host/pci.c

@@ -84,6 +84,7 @@ struct nvme_dev {
 	struct dma_pool *prp_small_pool;
 	unsigned online_queues;
 	unsigned max_qid;
+	unsigned int num_vecs;
 	int q_depth;
 	u32 db_stride;
 	void __iomem *bar;
@@ -414,7 +415,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 
-	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
+	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+			dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
 }
 
 /**
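For context on the hunk above: the last argument to blk_mq_pci_map_queues() is a vector offset added to each hardware queue index when looking up the PCI IRQ affinity mask, so passing 1 skips the admin pre_vector and IO queue i follows the affinity of vector i + 1. A hedged sketch of such a .map_queues callback, with placeholder driver names and the three-argument signature this patch relies on:

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/pci.h>

/* Placeholder per-device structure, not the driver's. */
struct example_dev {
	struct device *dev;
	unsigned int num_vecs;
};

/*
 * blk-mq .map_queues callback for a driver that reserves vector 0 for
 * its admin queue.  With offset 1, hardware queue i is mapped from the
 * affinity of IRQ vector i + 1; with a single shared vector the offset
 * falls back to 0.
 */
static int example_map_queues(struct blk_mq_tag_set *set)
{
	struct example_dev *edev = set->driver_data;

	return blk_mq_pci_map_queues(set, to_pci_dev(edev->dev),
			edev->num_vecs > 1 ? 1 : 0);
}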
@@ -1456,7 +1458,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 		nvmeq->sq_cmds_io = dev->cmb + offset;
 	}
 
-	nvmeq->cq_vector = qid - 1;
+	/*
+	 * A queue's vector matches the queue identifier unless the controller
+	 * has only one vector available.
+	 */
+	nvmeq->cq_vector = dev->num_vecs == 1 ? 0 : qid;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
 		goto release_vector;
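To make the numbering concrete: under the pre_vector scheme, IO queue qid uses interrupt vector qid (vector 0 being the admin queue's), and when only a single vector could be allocated every queue falls back to vector 0. A small illustration of that mapping; the helper is hypothetical and only restates the expression in the hunk above.

/*
 * Vector chosen for a queue's completions under this patch's scheme.
 * num_vecs is the value returned by pci_alloc_irq_vectors_affinity();
 * qid 0 is the admin queue.  Hypothetical helper, for illustration.
 */
static inline unsigned int example_cq_vector(unsigned int qid,
					     unsigned int num_vecs)
{
	if (num_vecs == 1)
		return 0;	/* admin and all IO queues share vector 0 */

	return qid;		/* vector 0: admin; vector qid: IO queue qid */
}

/*
 * Example with num_vecs == 4 (one pre_vector + three managed IO vectors):
 *   qid 0 (admin) -> vector 0, affinity: all CPUs
 *   qid 1         -> vector 1, managed affinity
 *   qid 2         -> vector 2, managed affinity
 *   qid 3         -> vector 3, managed affinity
 */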
@@ -1910,6 +1916,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
+	struct irq_affinity affd = {
+		.pre_vectors = 1
+	};
+
 	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
@@ -1945,11 +1955,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * setting up the full range we need.
 	 */
 	pci_free_irq_vectors(pdev);
-	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
-			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
-	if (nr_io_queues <= 0)
+	result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
+			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (result <= 0)
 		return -EIO;
-	dev->max_qid = nr_io_queues;
+	dev->num_vecs = result;
+	dev->max_qid = max(result - 1, 1);
 
 	/*
 	 * Should investigate if there's a performance win from allocating
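The bookkeeping at the end of the last hunk can be read as a short worked example: the allocation result includes the admin pre_vector, so result - 1 vectors remain for IO queues, clamped to at least one IO queue for the single-vector case where admin and IO share vector 0. A sketch of that arithmetic with an illustrative helper, not part of the driver:

#include <linux/kernel.h>	/* max() */

/*
 * Worked example of the num_vecs / max_qid accounting.  'vecs' is the
 * return value of pci_alloc_irq_vectors_affinity().  Hypothetical
 * helper, for illustration only.
 */
static void example_account_vectors(unsigned int vecs,
				    unsigned int *num_vecs,
				    unsigned int *max_qid)
{
	*num_vecs = vecs;
	/* One vector is the admin pre_vector; the rest drive IO queues. */
	*max_qid = max(vecs - 1, 1u);
}

/*
 * vecs == 33 on a 32-CPU machine -> num_vecs = 33, max_qid = 32.
 * vecs == 1  (single MSI vector) -> num_vecs = 1,  max_qid = 1: the
 * admin queue and the one IO queue both use vector 0 with all CPUs
 * assigned, matching the fallback described in the commit message.
 */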