	iommu: Introduce Interface for IOMMU TLB Flushing
With the current IOMMU-API the hardware TLBs have to be flushed in every
iommu_ops->unmap() call-back. For unmapping large amounts of address space,
as happens when a KVM domain with assigned devices is destroyed, this causes
thousands of unnecessary TLB flushes in the IOMMU hardware because the unmap
call-back runs for every unmapped physical page.

With the TLB Flush Interface and the new iommu_unmap_fast() function
introduced here the need to clean the hardware TLBs is removed from the
unmapping code-path. Users of iommu_unmap_fast() have to explicitly call the
TLB-Flush functions to sync the page-table changes to the hardware.

Three functions for TLB-Flushes are introduced:

* iommu_flush_tlb_all() - Flushes all TLB entries associated with that
  domain. TLB entries are flushed when this function returns.

* iommu_tlb_range_add() - This will add a given range to the flush queue
  for this domain.

* iommu_tlb_sync() - Flushes all queued ranges from the hardware TLBs.
  Returns when the flush is finished.

The semantics of this interface are intentionally similar to the
iommu_gather_ops from the io-pgtable code.

Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
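As a usage sketch (not part of this commit's diff): a caller such as VFIO
could batch the teardown of a large IOVA range as below. The helper name
teardown_range() is hypothetical; only iommu_unmap_fast(),
iommu_tlb_range_add() and iommu_tlb_sync() are introduced here.

	/* Unmap a whole range with one IOTLB flush instead of one per page. */
	static void teardown_range(struct iommu_domain *domain,
				   unsigned long iova, size_t size)
	{
		/* Unmap without the implicit per-call TLB flush. */
		size_t unmapped = iommu_unmap_fast(domain, iova, size);

		/* Queue the now-unmapped range for invalidation... */
		iommu_tlb_range_add(domain, iova, unmapped);

		/* ...and flush the hardware TLBs once, after everything is queued. */
		iommu_tlb_sync(domain);
	}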
This commit is contained in:

parent 0688a09990
commit add02cfdc9

2 changed files with 77 additions and 5 deletions
drivers/iommu/iommu.c

@@ -527,6 +527,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
 
 	}
 
+	iommu_flush_tlb_all(domain);
+
 out:
 	iommu_put_resv_regions(dev, &mappings);
 
@@ -1547,13 +1549,16 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t __iommu_unmap(struct iommu_domain *domain,
+			    unsigned long iova, size_t size,
+			    bool sync)
 {
+	const struct iommu_ops *ops = domain->ops;
 	size_t unmapped_page, unmapped = 0;
-	unsigned int min_pagesz;
 	unsigned long orig_iova = iova;
+	unsigned int min_pagesz;
 
-	if (unlikely(domain->ops->unmap == NULL ||
+	if (unlikely(ops->unmap == NULL ||
 		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
@@ -1583,10 +1588,13 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 	while (unmapped < size) {
 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+		unmapped_page = ops->unmap(domain, iova, pgsize);
 		if (!unmapped_page)
 			break;
 
+		if (sync && ops->iotlb_range_add)
+			ops->iotlb_range_add(domain, iova, pgsize);
+
 		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
 			 iova, unmapped_page);
 
@@ -1594,11 +1602,27 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		unmapped += unmapped_page;
 	}
 
+	if (sync && ops->iotlb_sync)
+		ops->iotlb_sync(domain);
+
 	trace_unmap(orig_iova, size, unmapped);
 	return unmapped;
 }
+
+size_t iommu_unmap(struct iommu_domain *domain,
+		   unsigned long iova, size_t size)
+{
+	return __iommu_unmap(domain, iova, size, true);
+}
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t iommu_unmap_fast(struct iommu_domain *domain,
+			unsigned long iova, size_t size)
+{
+	return __iommu_unmap(domain, iova, size, false);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap_fast);
+
 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			 struct scatterlist *sg, unsigned int nents, int prot)
 {
include/linux/iommu.h

@@ -167,6 +167,10 @@ struct iommu_resv_region {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
+ * @tlb_range_add: Add a given iova range to the flush queue for this domain
+ * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ *            queue
  * to an iommu domain
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
@@ -199,6 +203,10 @@ struct iommu_ops {
 		     size_t size);
 	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
 			 struct scatterlist *sg, unsigned int nents, int prot);
+	void (*flush_iotlb_all)(struct iommu_domain *domain);
+	void (*iotlb_range_add)(struct iommu_domain *domain,
+				unsigned long iova, size_t size);
+	void (*iotlb_sync)(struct iommu_domain *domain);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
@@ -286,7 +294,9 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-		       size_t size);
+			  size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+			       unsigned long iova, size_t size);
 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 				struct scatterlist *sg,unsigned int nents,
 				int prot);
@@ -343,6 +353,25 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
 			      unsigned long iova, int flags);
 
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+	if (domain->ops->flush_iotlb_all)
+		domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+				       unsigned long iova, size_t size)
+{
+	if (domain->ops->iotlb_range_add)
+		domain->ops->iotlb_range_add(domain, iova, size);
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+	if (domain->ops->iotlb_sync)
+		domain->ops->iotlb_sync(domain);
+}
+
 static inline size_t iommu_map_sg(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
@@ -436,6 +465,12 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return -ENODEV;
 }
 
+static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
+				   int gfp_order)
+{
+	return -ENODEV;
+}
+
 static inline size_t iommu_map_sg(struct iommu_domain *domain,
 				  unsigned long iova, struct scatterlist *sg,
 				  unsigned int nents, int prot)
@@ -443,6 +478,19 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
 	return -ENODEV;
 }
 
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+				       unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+}
+
 static inline int iommu_domain_window_enable(struct iommu_domain *domain,
 					     u32 wnd_nr, phys_addr_t paddr,
 					     u64 size, int prot)
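For the driver side, a hedged sketch of how an IOMMU driver could wire up
the three new callbacks; the "foo" helpers (to_foo_domain(),
foo_invalidate_all(), foo_queue_range(), foo_drain_queue()) are assumed for
illustration and are not part of this commit:

	static void foo_flush_iotlb_all(struct iommu_domain *domain)
	{
		/* Invalidate everything; must be complete on return. */
		foo_invalidate_all(to_foo_domain(domain));
	}

	static void foo_iotlb_range_add(struct iommu_domain *domain,
					unsigned long iova, size_t size)
	{
		/* Record the range; the actual flush may be deferred. */
		foo_queue_range(to_foo_domain(domain), iova, size);
	}

	static void foo_iotlb_sync(struct iommu_domain *domain)
	{
		/* Drain all queued ranges; return only when flushed. */
		foo_drain_queue(to_foo_domain(domain));
	}

	static const struct iommu_ops foo_iommu_ops = {
		/* ->map, ->unmap and the other callbacks omitted */
		.flush_iotlb_all = foo_flush_iotlb_all,
		.iotlb_range_add = foo_iotlb_range_add,
		.iotlb_sync	 = foo_iotlb_sync,
	};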