	iommu/vt-d: Don't queue_iova() if there is no flush queue
The Intel VT-d driver was reworked to use the common deferred flushing
implementation. Previously there was one global per-cpu flush queue;
now there is one per domain.
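For reference, the per-domain deferred-flush state lives in struct iova_domain.
An abridged sketch of the relevant fields, trimmed from include/linux/iova.h of
this era (unrelated members elided):

/* Abridged: deferred-flush fields only. */
struct iova_fq_entry {
	unsigned long iova_pfn;	/* base pfn of the deferred range */
	unsigned long pages;	/* number of pages in the range */
	unsigned long data;	/* driver cookie (VT-d: page freelist) */
	u64 counter;		/* flush counter when the entry was queued */
};

struct iova_fq {		/* one instance per cpu */
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;	/* ring indices */
	spinlock_t lock;	/* the lock whose bad magic shows in the oopses below */
};

struct iova_domain {
	/* ... rbtree of allocated ranges, granule, etc. ... */
	struct iova_fq __percpu *fq;	/* NULL until init_iova_flush_queue() runs */
	struct timer_list fq_timer;	/* flushes the queues after a timeout */
	atomic_t fq_timer_on;
	atomic64_t fq_flush_start_cnt;	/* global flush counters */
	atomic64_t fq_flush_finish_cnt;
};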
Before a flush can be deferred, the queue must be allocated and initialized.
Currently only domains of type IOMMU_DOMAIN_DMA initialize their flush
queue. It is probably worth initializing it for static or unmanaged domains
too, but that may be arguable - I'm leaving it to the iommu folks.
Prevent queuing an iova flush if the domain doesn't have a queue.
The defensive check seems worth keeping even if the queue were
initialized for all kinds of domains, and it is easy to backport.
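Condensed, the fixed tail of intel_unmap() (shown in full in the diff below)
behaves as follows; free_iova_fast(), dma_free_pagelist() and dma_to_mm_pfn()
are the driver's existing helpers:

if (intel_iommu_strict || (pdev && pdev->untrusted) ||
    !has_iova_flush_queue(&domain->iovad)) {
	/* No queue (or strict mode): flush the IOTLB synchronously ... */
	iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0);
	/* ... and free the iova range and page freelist right away. */
	free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
	dma_free_pagelist(freelist);
} else {
	/* Queue exists: defer; queue_iova() may take fq->lock safely. */
	queue_iova(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages),
		   (unsigned long)freelist);
}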
On the 4.19.43 stable kernel this has a user-visible effect: previously
there were crashes for devices in the si (static identity) domain. On SATA
devices:
 BUG: spinlock bad magic on CPU#6, swapper/0/1
  lock: 0xffff88844f582008, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
 CPU: 6 PID: 1 Comm: swapper/0 Not tainted 4.19.43 #1
 Call Trace:
  <IRQ>
  dump_stack+0x61/0x7e
  spin_bug+0x9d/0xa3
  do_raw_spin_lock+0x22/0x8e
  _raw_spin_lock_irqsave+0x32/0x3a
  queue_iova+0x45/0x115
  intel_unmap+0x107/0x113
  intel_unmap_sg+0x6b/0x76
  __ata_qc_complete+0x7f/0x103
  ata_qc_complete+0x9b/0x26a
  ata_qc_complete_multiple+0xd0/0xe3
  ahci_handle_port_interrupt+0x3ee/0x48a
  ahci_handle_port_intr+0x73/0xa9
  ahci_single_level_irq_intr+0x40/0x60
  __handle_irq_event_percpu+0x7f/0x19a
  handle_irq_event_percpu+0x32/0x72
  handle_irq_event+0x38/0x56
  handle_edge_irq+0x102/0x121
  handle_irq+0x147/0x15c
  do_IRQ+0x66/0xf2
  common_interrupt+0xf/0xf
 RIP: 0010:__do_softirq+0x8c/0x2df
The same happens for USB devices that use ehci-pci:
 BUG: spinlock bad magic on CPU#0, swapper/0/1
  lock: 0xffff88844f402008, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
 CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.43 #4
 Call Trace:
  <IRQ>
  dump_stack+0x61/0x7e
  spin_bug+0x9d/0xa3
  do_raw_spin_lock+0x22/0x8e
  _raw_spin_lock_irqsave+0x32/0x3a
  queue_iova+0x77/0x145
  intel_unmap+0x107/0x113
  intel_unmap_page+0xe/0x10
  usb_hcd_unmap_urb_setup_for_dma+0x53/0x9d
  usb_hcd_unmap_urb_for_dma+0x17/0x100
  unmap_urb_for_dma+0x22/0x24
  __usb_hcd_giveback_urb+0x51/0xc3
  usb_giveback_urb_bh+0x97/0xde
  tasklet_action_common.isra.4+0x5f/0xa1
  tasklet_action+0x2d/0x30
  __do_softirq+0x138/0x2df
  irq_exit+0x7d/0x8b
  smp_apic_timer_interrupt+0x10f/0x151
  apic_timer_interrupt+0xf/0x20
  </IRQ>
 RIP: 0010:_raw_spin_unlock_irqrestore+0x17/0x39
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: <stable@vger.kernel.org> # 4.14+
Fixes: 13cf017446 ("iommu/vt-d: Make use of iova deferred flushing")
Signed-off-by: Dmitry Safonov <dima@arista.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
			
			
commit effa467870
parent 557529494d
					 3 changed files with 22 additions and 5 deletions
				
			
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3561,7 +3561,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-	if (intel_iommu_strict || (pdev && pdev->untrusted)) {
+	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
+	    !has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      nrpages, !freelist, 0);
 		/* free iova */
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return !!iovad->fq;
+}
+
 static void free_iova_flush_queue(struct iova_domain *iovad)
 {
-	if (!iovad->fq)
+	if (!has_iova_flush_queue(iovad))
 		return;
 
 	if (timer_pending(&iovad->fq_timer))
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
 {
+	struct iova_fq __percpu *queue;
 	int cpu;
 
 	atomic64_set(&iovad->fq_flush_start_cnt,  0);
 	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
 
-	iovad->fq = alloc_percpu(struct iova_fq);
-	if (!iovad->fq)
+	queue = alloc_percpu(struct iova_fq);
+	if (!queue)
 		return -ENOMEM;
 
 	iovad->flush_cb   = flush_cb;
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;
 
-		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq = per_cpu_ptr(queue, cpu);
 		fq->head = 0;
 		fq->tail = 0;
 
 		spin_lock_init(&fq->lock);
 	}
 
+	smp_wmb();
+
+	iovad->fq = queue;
+
 	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
 	atomic_set(&iovad->fq_timer_on, 0);
 
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
 {
 }
 
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return false;
+}
+
 static inline int init_iova_flush_queue(struct iova_domain *iovad,
 					iova_flush_cb flush_cb,
 					iova_entry_dtor entry_dtor)
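A note on the smp_wmb() added to init_iova_flush_queue(): the per-cpu queues
are now fully initialized into a local pointer and only published to iovad->fq
afterwards, with the write barrier ordering the initialization before the
store. A reader that observes a non-NULL fq via has_iova_flush_queue() thus
finds initialized spinlocks behind it. A minimal sketch of the publish/consume
pattern; flush_sync_and_free() is a hypothetical stand-in for the synchronous
path:

/* Writer (init_iova_flush_queue): initialize first, publish last. */
queue = alloc_percpu(struct iova_fq);
for_each_possible_cpu(cpu)
	spin_lock_init(&per_cpu_ptr(queue, cpu)->lock);
smp_wmb();		/* order the initialization before the pointer store */
iovad->fq = queue;	/* publication: readers may now defer flushes */

/* Reader (the intel_unmap() path): defer only if published. */
if (has_iova_flush_queue(&domain->iovad))
	queue_iova(&domain->iovad, iova_pfn, nrpages, data);	/* fq->lock is valid */
else
	flush_sync_and_free(domain, start_pfn, nrpages);	/* hypothetical helper */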