Merge tag 'dma-mapping-5.19-2022-05-25' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
 - don't over-decrypt memory (Robin Murphy)
 - take min align mask into account for the swiotlb max mapping size
   (Tianyu Lan)
 - use GFP_ATOMIC in dma-debug (Mikulas Patocka)
 - fix DMA_ATTR_NO_KERNEL_MAPPING on xen/arm (me)
 - don't fail on highmem CMA pages in dma_direct_alloc_pages (me)
 - cleanup swiotlb initialization and share more code with swiotlb-xen
   (me, Stefano Stabellini)
* tag 'dma-mapping-5.19-2022-05-25' of git://git.infradead.org/users/hch/dma-mapping: (23 commits)
  dma-direct: don't over-decrypt memory
  swiotlb: max mapping size takes min align mask into account
  swiotlb: use the right nslabs-derived sizes in swiotlb_init_late
  swiotlb: use the right nslabs value in swiotlb_init_remap
  swiotlb: don't panic when the swiotlb buffer can't be allocated
  dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATOMIC
  dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages
  swiotlb-xen: fix DMA_ATTR_NO_KERNEL_MAPPING on arm
  x86: remove cruft from <asm/dma-mapping.h>
  swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl
  swiotlb: merge swiotlb-xen initialization into swiotlb
  swiotlb: provide swiotlb_init variants that remap the buffer
  swiotlb: pass a gfp_mask argument to swiotlb_init_late
  swiotlb: add a SWIOTLB_ANY flag to lift the low memory restriction
  swiotlb: make the swiotlb_init interface more useful
  x86: centralize setting SWIOTLB_FORCE when guest memory encryption is enabled
  x86: remove the IOMMU table infrastructure
  MIPS/octeon: use swiotlb_init instead of open coding it
  arm/xen: don't check for xen_initial_domain() in xen_create_contiguous_region
  swiotlb: rename swiotlb_late_init_with_default_size
  ...
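
The recurring pattern in the diff below is the new swiotlb_init() calling
convention: instead of setting the global swiotlb_force and then calling
swiotlb_init(int verbose), each architecture now makes a single call that
passes its addressing-limit decision as a bool plus a flags word
(SWIOTLB_VERBOSE, SWIOTLB_FORCE, SWIOTLB_ANY). A minimal sketch of the
before/after, where arch_dma_pfn_limit and arch_mem_init() are illustrative
stand-ins for the per-arch symbols touched below (arm_dma_pfn_limit,
arm64_dma_phys_limit, dma32_phys_limit, ...):

	#include <linux/swiotlb.h>

	static unsigned long arch_dma_pfn_limit;	/* placeholder limit */

	void __init arch_mem_init(void)
	{
		/*
		 * Old pattern, removed by this series:
		 *
		 *	if (swiotlb_force == SWIOTLB_FORCE ||
		 *	    max_pfn > arch_dma_pfn_limit)
		 *		swiotlb_init(1);
		 *	else
		 *		swiotlb_force = SWIOTLB_NO_FORCE;
		 */

		/* New pattern: need-it decision first, behaviour flags second. */
		swiotlb_init(max_pfn > arch_dma_pfn_limit, SWIOTLB_VERBOSE);
	}

For callers that can only set up bounce buffering after early boot, the
series also adds swiotlb_init_late(), which takes a size, a gfp_t and an
optional remap callback; the Xen paths below use it to fall back to the
default swiotlb. A sketch of that pattern, assuming the xen_swiotlb_fixup()
hook declared in this diff (example_init_late() is an illustrative name):

	#include <linux/swiotlb.h>

	int example_init_late(void)
	{
		/* Nothing to do if an early swiotlb buffer already exists. */
		if (io_tlb_default_mem.nslabs)
			return 0;

		/*
		 * Allocate the buffer now that the page allocator is up; the
		 * remap callback (here Xen's) may hand it to the hypervisor.
		 */
		return swiotlb_init_late(swiotlb_size_or_default(),
					 GFP_KERNEL, xen_swiotlb_fixup);
	}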
			
			
This commit is contained in commit 3f306ea2e1.

56 changed files with 318 additions and 982 deletions
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <xen/arm/page-coherent.h>
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -271,11 +271,7 @@ static void __init free_highpages(void)
 void __init mem_init(void)
 {
 #ifdef CONFIG_ARM_LPAE
-	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > arm_dma_pfn_limit)
-		swiotlb_init(1);
-	else
-		swiotlb_force = SWIOTLB_NO_FORCE;
+	swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
 #endif
 
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -23,22 +23,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
-unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+static gfp_t xen_swiotlb_gfp(void)
 {
 	phys_addr_t base;
-	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 	u64 i;
 
 	for_each_mem_range(i, &base, NULL) {
 		if (base < (phys_addr_t)0xffffffff) {
 			if (IS_ENABLED(CONFIG_ZONE_DMA32))
-				flags |= __GFP_DMA32;
-			else
-				flags |= __GFP_DMA;
-			break;
+				return __GFP_DMA32;
+			return __GFP_DMA;
 		}
 	}
-	return __get_free_pages(flags, order);
+
+	return GFP_KERNEL;
 }
 
 static bool hypercall_cflush = false;
@@ -118,23 +116,6 @@ bool xen_arch_need_swiotlb(struct device *dev,
 		!dev_is_dma_coherent(dev));
 }
 
-int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
-				 unsigned int address_bits,
-				 dma_addr_t *dma_handle)
-{
-	if (!xen_initial_domain())
-		return -EINVAL;
-
-	/* we assume that dom0 is mapped 1:1 for now */
-	*dma_handle = pstart;
-	return 0;
-}
-
-void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
-{
-	return;
-}
-
 static int __init xen_mm_init(void)
 {
 	struct gnttab_cache_flush cflush;
@@ -143,10 +124,13 @@ static int __init xen_mm_init(void)
 	if (!xen_swiotlb_detect())
 		return 0;
 
-	rc = xen_swiotlb_init();
 	/* we can work with the default swiotlb */
-	if (rc < 0 && rc != -EEXIST)
-		return rc;
+	if (!io_tlb_default_mem.nslabs) {
+		rc = swiotlb_init_late(swiotlb_size_or_default(),
+				       xen_swiotlb_gfp(), NULL);
+		if (rc < 0)
+			return rc;
+	}
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <xen/arm/page-coherent.h>
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -451,11 +451,7 @@ void __init bootmem_init(void)
  */
 void __init mem_init(void)
 {
-	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
-		swiotlb_init(1);
-	else if (!xen_swiotlb_detect())
-		swiotlb_force = SWIOTLB_NO_FORCE;
+	swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
 
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
--- a/arch/ia64/include/asm/iommu_table.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_IOMMU_TABLE_H
-#define _ASM_IA64_IOMMU_TABLE_H
-
-#define IOMMU_INIT_POST(_detect)
-
-#endif /* _ASM_IA64_IOMMU_TABLE_H */
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -437,9 +437,7 @@ mem_init (void)
 		if (iommu_detected)
 			break;
 #endif
-#ifdef CONFIG_SWIOTLB
-		swiotlb_init(1);
-#endif
+		swiotlb_init(true, SWIOTLB_VERBOSE);
 	} while (0);
 
 #ifdef CONFIG_FLATMEM
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -186,15 +186,12 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
-char *octeon_swiotlb;
-
 void __init plat_swiotlb_setup(void)
 {
 	phys_addr_t start, end;
 	phys_addr_t max_addr;
 	phys_addr_t addr_size;
 	size_t swiotlbsize;
-	unsigned long swiotlb_nslabs;
 	u64 i;
 
 	max_addr = 0;
@@ -236,15 +233,7 @@ void __init plat_swiotlb_setup(void)
 	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
 		swiotlbsize = 64 * (1<<20);
 #endif
-	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
-	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
-	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
 
-	octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
-	if (!octeon_swiotlb)
-		panic("%s: Failed to allocate %zu bytes align=%lx\n",
-		      __func__, swiotlbsize, PAGE_SIZE);
-
-	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
-		panic("Cannot allocate SWIOTLB buffer");
+	swiotlb_adjust_size(swiotlbsize);
+	swiotlb_init(true, SWIOTLB_VERBOSE);
 }
--- a/arch/mips/loongson64/dma.c
+++ b/arch/mips/loongson64/dma.c
@@ -24,5 +24,5 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 void __init plat_swiotlb_setup(void)
 {
-	swiotlb_init(1);
+	swiotlb_init(true, SWIOTLB_VERBOSE);
 }
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -664,7 +664,7 @@ static int __init octeon_pci_setup(void)
 
 		/* BAR1 movable regions contiguous to cover the swiotlb */
 		octeon_bar1_pci_phys =
-			virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+			io_tlb_default_mem.start & ~((1ull << 22) - 1);
 
 		for (index = 0; index < 32; index++) {
 			union cvmx_pci_bar1_indexx bar1_index;
--- a/arch/mips/sibyte/common/dma.c
+++ b/arch/mips/sibyte/common/dma.c
@@ -10,5 +10,5 @@
 
 void __init plat_swiotlb_setup(void)
 {
-	swiotlb_init(1);
+	swiotlb_init(true, SWIOTLB_VERBOSE);
 }
--- a/arch/powerpc/include/asm/svm.h
+++ b/arch/powerpc/include/asm/svm.h
@@ -15,8 +15,6 @@ static inline bool is_secure_guest(void)
 	return mfmsr() & MSR_S;
 }
 
-void __init svm_swiotlb_init(void);
-
 void dtl_cache_ctor(void *addr);
 #define get_dtl_cache_ctor()	(is_secure_guest() ? dtl_cache_ctor : NULL)
 
@@ -27,8 +25,6 @@ static inline bool is_secure_guest(void)
 	return false;
 }
 
-static inline void svm_swiotlb_init(void) {}
-
 #define get_dtl_cache_ctor() NULL
 
 #endif /* CONFIG_PPC_SVM */
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -9,6 +9,7 @@
 #include <linux/swiotlb.h>
 
 extern unsigned int ppc_swiotlb_enable;
+extern unsigned int ppc_swiotlb_flags;
 
 #ifdef CONFIG_SWIOTLB
 void swiotlb_detect_4g(void);
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -10,6 +10,7 @@
 #include <asm/swiotlb.h>
 
 unsigned int ppc_swiotlb_enable;
+unsigned int ppc_swiotlb_flags;
 
 void __init swiotlb_detect_4g(void)
 {
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -17,6 +17,7 @@
 #include <linux/suspend.h>
 #include <linux/dma-direct.h>
 
+#include <asm/swiotlb.h>
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/kasan.h>
@@ -248,10 +249,7 @@ void __init mem_init(void)
 	 * back to to-down.
 	 */
 	memblock_set_bottom_up(true);
-	if (is_secure_guest())
-		svm_swiotlb_init();
-	else
-		swiotlb_init(0);
+	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
 #endif
 
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -849,9 +849,6 @@ static void __init pSeries_setup_arch(void)
 	}
 
 	ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
-	if (swiotlb_force == SWIOTLB_FORCE)
-		ppc_swiotlb_enable = 1;
 }
 
 static void pseries_panic(char *str)
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -28,7 +28,7 @@ static int __init init_svm(void)
 	 * need to use the SWIOTLB buffer for DMA even if dma_capable() says
 	 * otherwise.
 	 */
-	swiotlb_force = SWIOTLB_FORCE;
+	ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
 
 	/* Share the SWIOTLB buffer with the host. */
 	swiotlb_update_mem_attributes();
@@ -37,30 +37,6 @@ static int __init init_svm(void)
 }
 machine_early_initcall(pseries, init_svm);
 
-/*
- * Initialize SWIOTLB. Essentially the same as swiotlb_init(), except that it
- * can allocate the buffer anywhere in memory. Since the hypervisor doesn't have
- * any addressing limitation, we don't need to allocate it in low addresses.
- */
-void __init svm_swiotlb_init(void)
-{
-	unsigned char *vstart;
-	unsigned long bytes, io_tlb_nslabs;
-
-	io_tlb_nslabs = (swiotlb_size_or_default() >> IO_TLB_SHIFT);
-	io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-	vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
-	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
-		return;
-
-
-	memblock_free(vstart, PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-	panic("SVM: Cannot allocate SWIOTLB buffer");
-}
-
 int set_memory_encrypted(unsigned long addr, int numpages)
 {
 	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -120,13 +120,7 @@ void __init mem_init(void)
 	BUG_ON(!mem_map);
 #endif /* CONFIG_FLATMEM */
 
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > PFN_DOWN(dma32_phys_limit))
-		swiotlb_init(1);
-	else
-		swiotlb_force = SWIOTLB_NO_FORCE;
-#endif
+	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
 	memblock_free_all();
 
 	print_vm_layout();
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -185,8 +185,7 @@ static void pv_init(void)
 		return;
 
 	/* make sure bounce buffers are shared */
-	swiotlb_force = SWIOTLB_FORCE;
-	swiotlb_init(1);
+	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
 	swiotlb_update_mem_attributes();
 }
 
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -2,18 +2,6 @@
 #ifndef _ASM_X86_DMA_MAPPING_H
 #define _ASM_X86_DMA_MAPPING_H
 
-/*
- * IOMMU interface. See Documentation/core-api/dma-api-howto.rst and
- * Documentation/core-api/dma-api.rst for documentation.
- */
-
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <asm/swiotlb.h>
-
-extern int iommu_merge;
-extern int panic_on_overflow;
-
 extern const struct dma_map_ops *dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -38,7 +38,7 @@ extern int gart_iommu_aperture_disabled;
 extern void early_gart_iommu_check(void);
 extern int gart_iommu_init(void);
 extern void __init gart_parse_options(char *);
-extern int gart_iommu_hole_init(void);
+void gart_iommu_hole_init(void);
 
 #else
 #define gart_iommu_aperture            0
@@ -51,9 +51,8 @@ static inline void early_gart_iommu_check(void)
 static inline void gart_parse_options(char *options)
 {
 }
-static inline int gart_iommu_hole_init(void)
+static inline void gart_iommu_hole_init(void)
 {
-	return -ENODEV;
 }
 #endif
 
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -8,6 +8,14 @@
 
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int iommu_merge;
+extern int panic_on_overflow;
+
+#ifdef CONFIG_SWIOTLB
+extern bool x86_swiotlb_enable;
+#else
+#define x86_swiotlb_enable false
+#endif
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
--- a/arch/x86/include/asm/iommu_table.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_IOMMU_TABLE_H
-#define _ASM_X86_IOMMU_TABLE_H
-
-#include <asm/swiotlb.h>
-
-/*
- * History lesson:
- * The execution chain of IOMMUs in 2.6.36 looks as so:
- *
- *            [xen-swiotlb]
- *                 |
- *         +----[swiotlb *]--+
- *        /         |         \
- *       /          |          \
- *    [GART]     [Calgary]  [Intel VT-d]
- *     /
- *    /
- * [AMD-Vi]
- *
- * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
- * over the rest of IOMMUs and unconditionally initialize the SWIOTLB.
- * Also it would surreptitiously initialize set the swiotlb=1 if there were
- * more than 4GB and if the user did not pass in 'iommu=off'. The swiotlb
- * flag would be turned off by all IOMMUs except the Calgary one.
- *
- * The IOMMU_INIT* macros allow a similar tree (or more complex if desired)
- * to be built by defining who we depend on.
- *
- * And all that needs to be done is to use one of the macros in the IOMMU
- * and the pci-dma.c will take care of the rest.
- */
-
-struct iommu_table_entry {
-	initcall_t	detect;
-	initcall_t	depend;
-	void		(*early_init)(void); /* No memory allocate available. */
-	void		(*late_init)(void); /* Yes, can allocate memory. */
-#define IOMMU_FINISH_IF_DETECTED (1<<0)
-#define IOMMU_DETECTED		 (1<<1)
-	int		flags;
-};
-/*
- * Macro fills out an entry in the .iommu_table that is equivalent
- * to the fields that 'struct iommu_table_entry' has. The entries
- * that are put in the .iommu_table section are not put in any order
- * hence during boot-time we will have to resort them based on
- * dependency. */
-
-
-#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
-	static const struct iommu_table_entry				\
-		__iommu_entry_##_detect __used				\
-	__attribute__ ((unused, __section__(".iommu_table"),		\
-			aligned((sizeof(void *)))))	\
-	= {_detect, _depend, _early_init, _late_init,			\
-	   _finish ? IOMMU_FINISH_IF_DETECTED : 0}
-/*
- * The simplest IOMMU definition. Provide the detection routine
- * and it will be run after the SWIOTLB and the other IOMMUs
- * that utilize this macro. If the IOMMU is detected (ie, the
- * detect routine returns a positive value), the other IOMMUs
- * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer
- * to stop detecting the other IOMMUs after yours has been detected.
- */
-#define IOMMU_INIT_POST(_detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 0)
-
-#define IOMMU_INIT_POST_FINISH(detect)					\
-	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 1)
-
-/*
- * A more sophisticated version of IOMMU_INIT. This variant requires:
- *  a). A detection routine function.
- *  b). The name of the detection routine we depend on to get called
- *      before us.
- *  c). The init routine which gets called if the detection routine
- *      returns a positive value from the pci_iommu_alloc. This means
- *      no presence of a memory allocator.
- *  d). Similar to the 'init', except that this gets called from pci_iommu_init
- *      where we do have a memory allocator.
- *
- * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
- * in that the former will continue detecting other IOMMUs in the call
- * list after the detection routine returns a positive number, while the
- * latter will stop the execution chain upon first successful detection.
- * Both variants will still call the 'init' and 'late_init' functions if
- * they are set.
- */
-#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)		\
-	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
-
-#define IOMMU_INIT(_detect, _depend, _init, _late_init)			\
-	__IOMMU_INIT(_detect, _depend, _init, _late_init, 0)
-
-void sort_iommu_table(struct iommu_table_entry *start,
-		      struct iommu_table_entry *finish);
-
-void check_iommu_entries(struct iommu_table_entry *start,
-			 struct iommu_table_entry *finish);
-
-#endif /* _ASM_X86_IOMMU_TABLE_H */
--- a/arch/x86/include/asm/swiotlb.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_SWIOTLB_H
-#define _ASM_X86_SWIOTLB_H
-
-#include <linux/swiotlb.h>
-
-#ifdef CONFIG_SWIOTLB
-extern int swiotlb;
-extern int __init pci_swiotlb_detect_override(void);
-extern int __init pci_swiotlb_detect_4gb(void);
-extern void __init pci_swiotlb_init(void);
-extern void __init pci_swiotlb_late_init(void);
-#else
-#define swiotlb 0
-static inline int pci_swiotlb_detect_override(void)
-{
-	return 0;
-}
-static inline int pci_swiotlb_detect_4gb(void)
-{
-	return 0;
-}
-static inline void pci_swiotlb_init(void)
-{
-}
-static inline void pci_swiotlb_late_init(void)
-{
-}
-#endif
-#endif /* _ASM_X86_SWIOTLB_H */
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
-#define _ASM_X86_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags,
-		unsigned long attrs)
-{
-	void *vstart = (void*)__get_free_pages(flags, get_order(size));
-	*dma_handle = virt_to_phys(vstart);
-	return vstart;
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle,
-		unsigned long attrs)
-{
-	free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
 	return false;
 }
 
-static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
-{
-	return __get_free_pages(__GFP_NOWARN, order);
-}
-
 #endif /* _ASM_X86_XEN_PAGE_H */
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -3,11 +3,15 @@
 #define _ASM_X86_SWIOTLB_XEN_H
 
 #ifdef CONFIG_SWIOTLB_XEN
-extern int __init pci_xen_swiotlb_detect(void);
 extern int pci_xen_swiotlb_init_late(void);
 #else
-#define pci_xen_swiotlb_detect NULL
 static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
 #endif
 
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+				unsigned int address_bits,
+				dma_addr_t *dma_handle);
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+
 #endif /* _ASM_X86_SWIOTLB_XEN_H */
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -66,7 +66,6 @@ obj-y			+= bootflag.o e820.o
 obj-y			+= pci-dma.o quirks.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o hw_breakpoint.o
 obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
-obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
 obj-y			+= irqflags.o
 obj-y			+= static_call.o
@@ -132,7 +131,6 @@ obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
 
 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 
-obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o
 obj-$(CONFIG_OF)			+= devicetree.o
 obj-$(CONFIG_UPROBES)			+= uprobes.o
 
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -38,11 +38,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/set_memory.h>
-#include <asm/swiotlb.h>
 #include <asm/dma.h>
 #include <asm/amd_nb.h>
 #include <asm/x86_init.h>
-#include <asm/iommu_table.h>
 
 static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
 static unsigned long iommu_size;	/* size of remapping area bytes */
@@ -808,7 +806,7 @@ int __init gart_iommu_init(void)
 	flush_gart();
 	dma_ops = &gart_dma_ops;
 	x86_platform.iommu_shutdown = gart_iommu_shutdown;
-	swiotlb = 0;
+	x86_swiotlb_enable = false;
 
 	return 0;
 }
@@ -842,4 +840,3 @@ void __init gart_parse_options(char *p)
 		}
 	}
 }
-IOMMU_INIT_POST(gart_iommu_hole_init);
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -392,7 +392,7 @@ void __init early_gart_iommu_check(void)
 
 static int __initdata printed_gart_size_msg;
 
-int __init gart_iommu_hole_init(void)
+void __init gart_iommu_hole_init(void)
 {
 	u32 agp_aper_base = 0, agp_aper_order = 0;
 	u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
@@ -401,11 +401,11 @@ int __init gart_iommu_hole_init(void)
 	int i, node;
 
 	if (!amd_gart_present())
-		return -ENODEV;
+		return;
 
 	if (gart_iommu_aperture_disabled || !fix_aperture ||
 	    !early_pci_allowed())
-		return -ENODEV;
+		return;
 
 	pr_info("Checking aperture...\n");
 
@@ -491,10 +491,8 @@ int __init gart_iommu_hole_init(void)
 			 * and fixed up the northbridge
 			 */
 			exclude_from_core(last_aper_base, last_aper_order);
-
-			return 1;
 		}
-		return 0;
+		return;
 	}
 
 	if (!fallback_aper_force) {
@@ -527,7 +525,7 @@ int __init gart_iommu_hole_init(void)
 			panic("Not enough memory for aperture");
 		}
 	} else {
-		return 0;
+		return;
 	}
 
 	/*
@@ -561,6 +559,4 @@ int __init gart_iommu_hole_init(void)
 	}
 
 	set_up_gart_resume(aper_order, aper_alloc);
-
-	return 1;
 }
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -337,14 +337,6 @@ static void __init ms_hyperv_init_platform(void)
 			swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;
 #endif
 		}
-
-#ifdef CONFIG_SWIOTLB
-		/*
-		 * Enable swiotlb force mode in Isolation VM to
-		 * use swiotlb bounce buffer for dma transaction.
-		 */
-		swiotlb_force = SWIOTLB_FORCE;
-#endif
 		/* Isolation VMs are unenlightened SEV-based VMs, thus this check: */
 		if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
 			if (hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE)
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -7,13 +7,16 @@
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
+#include <linux/amd-iommu.h>
 
 #include <asm/proto.h>
 #include <asm/dma.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/x86_init.h>
-#include <asm/iommu_table.h>
+
+#include <xen/xen.h>
+#include <xen/swiotlb-xen.h>
 
 static bool disable_dac_quirk __read_mostly;
 
@@ -34,24 +37,90 @@ int no_iommu __read_mostly;
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly = 0;
 
-extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
+#ifdef CONFIG_SWIOTLB
+bool x86_swiotlb_enable;
+static unsigned int x86_swiotlb_flags;
+
+static void __init pci_swiotlb_detect(void)
+{
+	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
+	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
+		x86_swiotlb_enable = true;
+
+	/*
+	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
+	 * devices that can't support DMA to encrypted memory.
+	 */
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+		x86_swiotlb_enable = true;
+
+	/*
+	 * Guest with guest memory encryption currently perform all DMA through
+	 * bounce buffers as the hypervisor can't access arbitrary VM memory
+	 * that is not explicitly shared with it.
+	 */
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+		x86_swiotlb_enable = true;
+		x86_swiotlb_flags |= SWIOTLB_FORCE;
+	}
+}
+#else
+static inline void __init pci_swiotlb_detect(void)
+{
+}
+#define x86_swiotlb_flags 0
+#endif /* CONFIG_SWIOTLB */
+
+#ifdef CONFIG_SWIOTLB_XEN
+static void __init pci_xen_swiotlb_init(void)
+{
+	if (!xen_initial_domain() && !x86_swiotlb_enable)
+		return;
+	x86_swiotlb_enable = true;
+	x86_swiotlb_flags |= SWIOTLB_ANY;
+	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
+	dma_ops = &xen_swiotlb_dma_ops;
+	if (IS_ENABLED(CONFIG_PCI))
+		pci_request_acs();
+}
+
+int pci_xen_swiotlb_init_late(void)
+{
+	if (dma_ops == &xen_swiotlb_dma_ops)
+		return 0;
+
+	/* we can work with the default swiotlb */
+	if (!io_tlb_default_mem.nslabs) {
+		int rc = swiotlb_init_late(swiotlb_size_or_default(),
+					   GFP_KERNEL, xen_swiotlb_fixup);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* XXX: this switches the dma ops under live devices! */
+	dma_ops = &xen_swiotlb_dma_ops;
+	if (IS_ENABLED(CONFIG_PCI))
+		pci_request_acs();
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
+#else
+static inline void __init pci_xen_swiotlb_init(void)
+{
+}
+#endif /* CONFIG_SWIOTLB_XEN */
 
 void __init pci_iommu_alloc(void)
 {
-	struct iommu_table_entry *p;
-
-	sort_iommu_table(__iommu_table, __iommu_table_end);
-	check_iommu_entries(__iommu_table, __iommu_table_end);
-
-	for (p = __iommu_table; p < __iommu_table_end; p++) {
-		if (p && p->detect && p->detect() > 0) {
-			p->flags |= IOMMU_DETECTED;
-			if (p->early_init)
-				p->early_init();
-			if (p->flags & IOMMU_FINISH_IF_DETECTED)
-				break;
-		}
+	if (xen_pv_domain()) {
+		pci_xen_swiotlb_init();
+		return;
 	}
+	pci_swiotlb_detect();
+	gart_iommu_hole_init();
+	amd_iommu_detect();
+	detect_intel_iommu();
+	swiotlb_init(x86_swiotlb_enable, x86_swiotlb_flags);
 }
 
 /*
@@ -102,7 +171,7 @@ static __init int iommu_setup(char *p)
 		}
 #ifdef CONFIG_SWIOTLB
 		if (!strncmp(p, "soft", 4))
-			swiotlb = 1;
+			x86_swiotlb_enable = true;
 #endif
 		if (!strncmp(p, "pt", 2))
 			iommu_set_default_passthrough(true);
@@ -121,14 +190,17 @@ early_param("iommu", iommu_setup);
 
 static int __init pci_iommu_init(void)
 {
-	struct iommu_table_entry *p;
-
 	x86_init.iommu.iommu_init();
 
-	for (p = __iommu_table; p < __iommu_table_end; p++) {
-		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
-			p->late_init();
+#ifdef CONFIG_SWIOTLB
+	/* An IOMMU turned us off. */
+	if (x86_swiotlb_enable) {
+		pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+		swiotlb_print_info();
+	} else {
+		swiotlb_exit();
 	}
+#endif
 
 	return 0;
 }
--- a/arch/x86/kernel/pci-iommu_table.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
-#include <asm/iommu_table.h>
-#include <linux/string.h>
-#include <linux/kallsyms.h>
-
-static struct iommu_table_entry * __init
-find_dependents_of(struct iommu_table_entry *start,
-		   struct iommu_table_entry *finish,
-		   struct iommu_table_entry *q)
-{
-	struct iommu_table_entry *p;
-
-	if (!q)
-		return NULL;
-
-	for (p = start; p < finish; p++)
-		if (p->detect == q->depend)
-			return p;
-
-	return NULL;
-}
-
-
-void __init sort_iommu_table(struct iommu_table_entry *start,
-			     struct iommu_table_entry *finish) {
-
-	struct iommu_table_entry *p, *q, tmp;
-
-	for (p = start; p < finish; p++) {
-again:
-		q = find_dependents_of(start, finish, p);
-		/* We are bit sneaky here. We use the memory address to figure
-		 * out if the node we depend on is past our point, if so, swap.
-		 */
-		if (q > p) {
-			tmp = *p;
-			memmove(p, q, sizeof(*p));
-			*q = tmp;
-			goto again;
-		}
-	}
-
-}
-
-#ifdef DEBUG
-void __init check_iommu_entries(struct iommu_table_entry *start,
-				struct iommu_table_entry *finish)
-{
-	struct iommu_table_entry *p, *q, *x;
-
-	/* Simple cyclic dependency checker. */
-	for (p = start; p < finish; p++) {
-		q = find_dependents_of(start, finish, p);
-		x = find_dependents_of(start, finish, q);
-		if (p == x) {
-			printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
-			       p->detect, q->detect);
-			/* Heavy handed way..*/
-			x->depend = NULL;
-		}
-	}
-
-	for (p = start; p < finish; p++) {
-		q = find_dependents_of(p, finish, p);
-		if (q && q > p) {
-			printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
-			       p->detect, q->detect);
-		}
-	}
-}
-#else
-void __init check_iommu_entries(struct iommu_table_entry *start,
-				       struct iommu_table_entry *finish)
-{
-}
-#endif
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/pci.h>
-#include <linux/cache.h>
-#include <linux/init.h>
-#include <linux/swiotlb.h>
-#include <linux/memblock.h>
-#include <linux/dma-direct.h>
-#include <linux/cc_platform.h>
-
-#include <asm/iommu.h>
-#include <asm/swiotlb.h>
-#include <asm/dma.h>
-#include <asm/xen/swiotlb-xen.h>
-#include <asm/iommu_table.h>
-
-int swiotlb __read_mostly;
-
-/*
- * pci_swiotlb_detect_override - set swiotlb to 1 if necessary
- *
- * This returns non-zero if we are forced to use swiotlb (by the boot
- * option).
- */
-int __init pci_swiotlb_detect_override(void)
-{
-	if (swiotlb_force == SWIOTLB_FORCE)
-		swiotlb = 1;
-
-	return swiotlb;
-}
-IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
-		  pci_xen_swiotlb_detect,
-		  pci_swiotlb_init,
-		  pci_swiotlb_late_init);
-
-/*
- * If 4GB or more detected (and iommu=off not set) or if SME is active
- * then set swiotlb to 1 and return 1.
- */
-int __init pci_swiotlb_detect_4gb(void)
-{
-	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
-		swiotlb = 1;
-
-	/*
-	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
-	 * devices that can't support DMA to encrypted memory.
-	 */
-	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
-		swiotlb = 1;
-
-	return swiotlb;
-}
-IOMMU_INIT(pci_swiotlb_detect_4gb,
-	   pci_swiotlb_detect_override,
-	   pci_swiotlb_init,
-	   pci_swiotlb_late_init);
-
-void __init pci_swiotlb_init(void)
-{
-	if (swiotlb)
-		swiotlb_init(0);
-}
-
-void __init pci_swiotlb_late_init(void)
-{
-	/* An IOMMU turned us off. */
-	if (!swiotlb)
-		swiotlb_exit();
-	else {
-		printk(KERN_INFO "PCI-DMA: "
-		       "Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_print_info();
-	}
-}

@@ -24,7 +24,6 @@
 #include <asm/processor.h>
 #include <asm/bootparam.h>
 #include <asm/pgalloc.h>
-#include <asm/swiotlb.h>
 #include <asm/fixmap.h>
 #include <asm/proto.h>
 #include <asm/setup.h>

@@ -315,18 +315,6 @@ SECTIONS
 		*(.altinstr_replacement)
 	}
 
-	/*
-	 * struct iommu_table_entry entries are injected in this section.
-	 * It is an array of IOMMUs which during run time gets sorted depending
-	 * on its dependency order. After rootfs_initcall is complete
-	 * this section can be safely removed.
-	 */
-	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
-		__iommu_table = .;
-		*(.iommu_table)
-		__iommu_table_end = .;
-	}
-
 	. = ALIGN(8);
 	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
 		__apicdrivers = .;

@@ -495,9 +495,6 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-		swiotlb_force = SWIOTLB_FORCE;
-
 	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
 	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
 	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;

@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
 		int size = STA2X11_SWIOTLB_SIZE;
 		/* First instance: register your own swiotlb area */
 		dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
-		if (swiotlb_late_init_with_default_size(size))
+		if (swiotlb_init_late(size, GFP_DMA, NULL))
 			dev_emerg(&pdev->dev, "init swiotlb failed\n");
 	}
 	list_add(&instance->list, &sta2x11_instance_list);

@@ -47,6 +47,4 @@ obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
 
 obj-$(CONFIG_XEN_PV_DOM0)	+= vga.o
 
-obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
-
 obj-$(CONFIG_XEN_EFI)		+= efi.o

@@ -80,6 +80,7 @@
 #include <xen/interface/version.h>
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
+#include <xen/swiotlb-xen.h>
 
 #include "multicalls.h"
 #include "mmu.h"

@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-/* Glue code to lib/swiotlb-xen.c */
-
-#include <linux/dma-map-ops.h>
-#include <linux/pci.h>
-#include <xen/swiotlb-xen.h>
-
-#include <asm/xen/hypervisor.h>
-#include <xen/xen.h>
-#include <asm/iommu_table.h>
-
-
-#include <asm/xen/swiotlb-xen.h>
-#ifdef CONFIG_X86_64
-#include <asm/iommu.h>
-#include <asm/dma.h>
-#endif
-#include <linux/export.h>
-
-static int xen_swiotlb __read_mostly;
-
-/*
- * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
- *
- * This returns non-zero if we are forced to use xen_swiotlb (by the boot
- * option).
- */
-int __init pci_xen_swiotlb_detect(void)
-{
-
-	if (!xen_pv_domain())
-		return 0;
-
-	/* If running as PV guest, either iommu=soft, or swiotlb=force will
-	 * activate this IOMMU. If running as PV privileged, activate it
-	 * irregardless.
-	 */
-	if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
-		xen_swiotlb = 1;
-
-	/* If we are running under Xen, we MUST disable the native SWIOTLB.
-	 * Don't worry about swiotlb_force flag activating the native, as
-	 * the 'swiotlb' flag is the only one turning it on. */
-	swiotlb = 0;
-
-#ifdef CONFIG_X86_64
-	/* pci_swiotlb_detect_4gb turns on native SWIOTLB if no_iommu == 0
-	 * (so no iommu=X command line over-writes).
-	 * Considering that PV guests do not want the *native SWIOTLB* but
-	 * only Xen SWIOTLB it is not useful to us so set no_iommu=1 here.
-	 */
-	if (max_pfn > MAX_DMA32_PFN)
-		no_iommu = 1;
-#endif
-	return xen_swiotlb;
-}
-
-static void __init pci_xen_swiotlb_init(void)
-{
-	if (xen_swiotlb) {
-		xen_swiotlb_init_early();
-		dma_ops = &xen_swiotlb_dma_ops;
-
-#ifdef CONFIG_PCI
-		/* Make sure ACS will be enabled */
-		pci_request_acs();
-#endif
-	}
-}
-
-int pci_xen_swiotlb_init_late(void)
-{
-	int rc;
-
-	if (xen_swiotlb)
-		return 0;
-
-	rc = xen_swiotlb_init();
-	if (rc)
-		return rc;
-
-	dma_ops = &xen_swiotlb_dma_ops;
-#ifdef CONFIG_PCI
-	/* Make sure ACS will be enabled */
-	pci_request_acs();
-#endif
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
-
-IOMMU_INIT_FINISH(pci_xen_swiotlb_detect,
-		  NULL,
-		  pci_xen_swiotlb_init,
-		  NULL);

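The boot-time half of this glue moves into arch/x86/kernel/pci-dma.c; the late path survives there as pci_xen_swiotlb_init_late(), now built on swiotlb_init_late() with xen_swiotlb_fixup as the remap callback. A sketch of its shape, reconstructed for illustration (the merged body may differ in detail):

	int pci_xen_swiotlb_init_late(void)
	{
		if (dma_ops == &xen_swiotlb_dma_ops)
			return 0;	/* already set up at boot */

		if (!io_tlb_default_mem.nslabs) {
			int rc = swiotlb_init_late(swiotlb_size_or_default(),
						   GFP_KERNEL, xen_swiotlb_fixup);
			if (rc < 0)
				return rc;
		}

		dma_ops = &xen_swiotlb_dma_ops;
		if (IS_ENABLED(CONFIG_PCI))
			pci_request_acs();
		return 0;
	}
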
@@ -27,7 +27,6 @@
 #include <asm/apic.h>
 #include <asm/gart.h>
 #include <asm/x86_init.h>
-#include <asm/iommu_table.h>
 #include <asm/io_apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/set_memory.h>

@@ -3257,11 +3256,6 @@ __setup("ivrs_ioapic",		parse_ivrs_ioapic);
 __setup("ivrs_hpet",		parse_ivrs_hpet);
 __setup("ivrs_acpihid",		parse_ivrs_acpihid);
 
-IOMMU_INIT_FINISH(amd_iommu_detect,
-		  gart_iommu_hole_init,
-		  NULL,
-		  NULL);
-
 bool amd_iommu_v2_supported(void)
 {
 	return amd_iommu_v2_present;

@@ -1840,7 +1840,10 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 
 static void __init amd_iommu_init_dma_ops(void)
 {
-	swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
+	if (iommu_default_passthrough() || sme_me_mask)
+		x86_swiotlb_enable = true;
+	else
+		x86_swiotlb_enable = false;
 }
 
 int __init amd_iommu_init_api(void)

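Functionally the two branches collapse to a single boolean assignment; spelled out here only to show the semantics, not what was merged:

	x86_swiotlb_enable = iommu_default_passthrough() || sme_me_mask;
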
@@ -30,7 +30,6 @@
 #include <linux/numa.h>
 #include <linux/limits.h>
 #include <asm/irq_remapping.h>
-#include <asm/iommu_table.h>
 #include <trace/events/intel_iommu.h>
 
 #include "../irq_remapping.h"

@@ -912,7 +911,7 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
 	return 0;
 }
 
-int __init detect_intel_iommu(void)
+void __init detect_intel_iommu(void)
 {
 	int ret;
 	struct dmar_res_callback validate_drhd_cb = {

@@ -945,8 +944,6 @@ int __init detect_intel_iommu(void)
 		dmar_tbl = NULL;
 	}
 	up_write(&dmar_global_lock);
-
-	return ret ? ret : 1;
 }
 
 static void unmap_iommu(struct intel_iommu *iommu)

@@ -2164,7 +2161,6 @@ static int __init dmar_free_unused_resources(void)
 }
 
 late_initcall(dmar_free_unused_resources);
-IOMMU_INIT_POST(detect_intel_iommu);
 
 /*
  * DMAR Hotplug Support

@@ -36,7 +36,6 @@
 #include <xen/hvc-console.h>
 
 #include <asm/dma-mapping.h>
-#include <asm/xen/page-coherent.h>
 
 #include <trace/events/swiotlb.h>
 #define MAX_DMA_BITS 32

@@ -104,7 +103,8 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }
 
-static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+#ifdef CONFIG_X86
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);

@@ -130,223 +130,59 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 	return 0;
 }
 
-enum xen_swiotlb_err {
-	XEN_SWIOTLB_UNKNOWN = 0,
-	XEN_SWIOTLB_ENOMEM,
-	XEN_SWIOTLB_EFIXUP
-};
-
-static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
-{
-	switch (err) {
-	case XEN_SWIOTLB_ENOMEM:
-		return "Cannot allocate Xen-SWIOTLB buffer\n";
-	case XEN_SWIOTLB_EFIXUP:
-		return "Failed to get contiguous memory for DMA from Xen!\n"\
-		    "You either: don't have the permissions, do not have"\
-		    " enough free memory under 4GB, or the hypervisor memory"\
-		    " is too fragmented!";
-	default:
-		break;
-	}
-	return "";
-}
-
-int xen_swiotlb_init(void)
-{
-	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int order, repeat = 3;
-	int rc = -ENOMEM;
-	char *start;
-
-	if (io_tlb_default_mem.nslabs) {
-		pr_warn("swiotlb buffer already initialized\n");
-		return -EEXIST;
-	}
-
-retry:
-	m_ret = XEN_SWIOTLB_ENOMEM;
-	order = get_order(bytes);
-
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		start = (void *)xen_get_swiotlb_free_pages(order);
-		if (start)
-			break;
-		order--;
-	}
-	if (!start)
-		goto exit;
-	if (order != get_order(bytes)) {
-		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-		bytes = nslabs << IO_TLB_SHIFT;
-	}
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		free_pages((unsigned long)start, order);
-		m_ret = XEN_SWIOTLB_EFIXUP;
-		goto error;
-	}
-	rc = swiotlb_late_init_with_tbl(start, nslabs);
-	if (rc)
-		return rc;
-	swiotlb_set_max_segment(PAGE_SIZE);
-	return 0;
-error:
-	if (nslabs > 1024 && repeat--) {
-		/* Min is 2MB */
-		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-		bytes = nslabs << IO_TLB_SHIFT;
-		pr_info("Lowering to %luMB\n", bytes >> 20);
-		goto retry;
-	}
-exit:
-	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-	return rc;
-}
-
-#ifdef CONFIG_X86
-void __init xen_swiotlb_init_early(void)
-{
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int repeat = 3;
-	char *start;
-	int rc;
-
-retry:
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-	start = memblock_alloc(PAGE_ALIGN(bytes),
-			       IO_TLB_SEGSIZE << IO_TLB_SHIFT);
-	if (!start)
-		panic("%s: Failed to allocate %lu bytes\n",
-		      __func__, PAGE_ALIGN(bytes));
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		memblock_free(start, PAGE_ALIGN(bytes));
-		if (nslabs > 1024 && repeat--) {
-			/* Min is 2MB */
-			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-			bytes = nslabs << IO_TLB_SHIFT;
-			pr_info("Lowering to %luMB\n", bytes >> 20);
-			goto retry;
-		}
-		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
-	}
-
-	if (swiotlb_init_with_tbl(start, nslabs, true))
-		panic("Cannot allocate SWIOTLB buffer");
-	swiotlb_set_max_segment(PAGE_SIZE);
-}
-#endif /* CONFIG_X86 */
-
 static void *
-xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags,
-			   unsigned long attrs)
+xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-	void *ret;
+	u64 dma_mask = dev->coherent_dma_mask;
 	int order = get_order(size);
-	u64 dma_mask = DMA_BIT_MASK(32);
 	phys_addr_t phys;
-	dma_addr_t dev_addr;
+	void *ret;
 
-	/*
-	* Ignore region specifiers - the kernel's ideas of
-	* pseudo-phys memory layout has nothing to do with the
-	* machine physical layout.  We can't allocate highmem
-	* because we can't return a pointer to it.
-	*/
-	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	/* Convert the size to actually allocated. */
+	/* Align the allocation to the Xen page size */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	/* On ARM this function returns an ioremap'ped virtual address for
-	 * which virt_to_phys doesn't return the corresponding physical
-	 * address. In fact on ARM virt_to_phys only works for kernel direct
-	 * mapped RAM memory. Also see comment below.
-	 */
-	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
+	ret = (void *)__get_free_pages(flags, get_order(size));
 	if (!ret)
 		return ret;
+	phys = virt_to_phys(ret);
 
-	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = hwdev->coherent_dma_mask;
-
-	/* At this point dma_handle is the dma address, next we are
-	 * going to set it to the machine address.
-	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
-	 * to *dma_handle. */
-	phys = dma_to_phys(hwdev, *dma_handle);
-	dev_addr = xen_phys_to_dma(hwdev, phys);
-	if (((dev_addr + size - 1 <= dma_mask)) &&
-	    !range_straddles_page_boundary(phys, size))
-		*dma_handle = dev_addr;
-	else {
-		if (xen_create_contiguous_region(phys, order,
-						 fls64(dma_mask), dma_handle) != 0) {
-			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
-			return NULL;
-		}
-		*dma_handle = phys_to_dma(hwdev, *dma_handle);
+	*dma_handle = xen_phys_to_dma(dev, phys);
+	if (*dma_handle + size - 1 > dma_mask ||
+	    range_straddles_page_boundary(phys, size)) {
+		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
+				dma_handle) != 0)
+			goto out_free_pages;
 		SetPageXenRemapped(virt_to_page(ret));
 	}
 
 	memset(ret, 0, size);
 	return ret;
+
+out_free_pages:
+	free_pages((unsigned long)ret, get_order(size));
+	return NULL;
 }
 
 static void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			  dma_addr_t dev_addr, unsigned long attrs)
+xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
 {
+	phys_addr_t phys = virt_to_phys(vaddr);
 	int order = get_order(size);
-	phys_addr_t phys;
-	u64 dma_mask = DMA_BIT_MASK(32);
-	struct page *page;
-
-	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = hwdev->coherent_dma_mask;
-
-	/* do not use virt_to_phys because on ARM it doesn't return you the
-	 * physical address */
-	phys = xen_dma_to_phys(hwdev, dev_addr);
 
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
 
-	if (is_vmalloc_addr(vaddr))
-		page = vmalloc_to_page(vaddr);
-	else
-		page = virt_to_page(vaddr);
+	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
+	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+	    	return;
 
-	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
-		     range_straddles_page_boundary(phys, size)) &&
-	    TestClearPageXenRemapped(page))
+	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
 		xen_destroy_contiguous_region(phys, order);
 
-	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
-				attrs);
+	free_pages((unsigned long)vaddr, get_order(size));
 }
+#endif /* CONFIG_X86 */
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.  The

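All of the allocate/shrink/retry boilerplate deleted above now lives once in the generic swiotlb code; Xen only contributes the remap step. The boot-time call, sketched under the assumption that it sits in arch/x86/kernel/pci-dma.c as merged:

	/* Xen PV: buffer may start anywhere, xen_swiotlb_fixup() pulls it under 4GB */
	x86_swiotlb_flags |= SWIOTLB_ANY;
	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
	dma_ops = &xen_swiotlb_dma_ops;
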
@@ -378,7 +214,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+	trace_swiotlb_bounced(dev, dev_addr, size);
 
 	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)

@@ -549,8 +385,13 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
+#ifdef CONFIG_X86
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
+#else
+	.alloc = dma_direct_alloc,
+	.free = dma_direct_free,
+#endif
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,

@@ -121,7 +121,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
 				 u16 segment, struct dmar_dev_scope *devices,
 				 int count);
 /* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
+void detect_intel_iommu(void);
 extern int enable_drhd_fault_handling(void);
 extern int dmar_device_add(acpi_handle handle);
 extern int dmar_device_remove(acpi_handle handle);

@@ -197,6 +197,10 @@ static inline bool dmar_platform_optin(void)
 	return false;
 }
 
+static inline void detect_intel_iommu(void)
+{
+}
+
 #endif /* CONFIG_DMAR_TABLE */
 
 struct irte {

@@ -13,11 +13,9 @@ struct device;
 struct page;
 struct scatterlist;
 
-enum swiotlb_force {
-	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
-	SWIOTLB_FORCE,		/* swiotlb=force */
-	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
-};
+#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
+#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
+#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */
 
 /*
  * Maximum allowable number of contiguous slabs to map,

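Unlike the old enum values, the new flags compose. A hedged usage sketch (cc_platform_has() is the real predicate used elsewhere in this series; the second condition is a hypothetical placeholder):

	unsigned int flags = SWIOTLB_VERBOSE;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		flags |= SWIOTLB_FORCE;		/* every mapping must bounce */
	if (buffer_need_not_be_low)		/* hypothetical condition */
		flags |= SWIOTLB_ANY;		/* pool may live above 4GB */

	swiotlb_init(true, flags);
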
@@ -36,11 +34,11 @@ enum swiotlb_force {
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 
-extern void swiotlb_init(int verbose);
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-extern int swiotlb_late_init_with_default_size(size_t default_size);
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+	int (*remap)(void *tlb, unsigned long nslabs));
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+	int (*remap)(void *tlb, unsigned long nslabs));
 extern void __init swiotlb_update_mem_attributes(void);
 
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,

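Both initializers take an optional remap callback: the core allocates the buffer, the callback may exchange it for DMA-reachable memory, and a nonzero return makes the core free the buffer and retry with fewer slabs. A minimal sketch with hypothetical names (xen_swiotlb_fixup is the in-tree user; example_remap and my_platform_remap are made up):

	static int example_remap(void *tlb, unsigned long nslabs)
	{
		/* make the nslabs << IO_TLB_SHIFT bytes at tlb DMA-reachable;
		 * return 0 on success, nonzero to have the core retry smaller */
		return my_platform_remap(tlb, nslabs << IO_TLB_SHIFT) ? -ENOMEM : 0;
	}

	swiotlb_init_remap(true, SWIOTLB_VERBOSE, example_remap);
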
@@ -126,13 +124,16 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
 	return mem && mem->force_bounce;
 }
 
+void swiotlb_init(bool addressing_limited, unsigned int flags);
 void __init swiotlb_exit(void);
 unsigned int swiotlb_max_segment(void);
 size_t swiotlb_max_mapping_size(struct device *dev);
 bool is_swiotlb_active(struct device *dev);
 void __init swiotlb_adjust_size(unsigned long size);
 #else
-#define swiotlb_force SWIOTLB_NO_FORCE
+static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
+{
+}
 static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
 {
 	return false;

@@ -164,7 +165,6 @@ static inline void swiotlb_adjust_size(unsigned long size)
 #endif /* CONFIG_SWIOTLB */
 
 extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
 
 #ifdef CONFIG_DMA_RESTRICTED_POOL
 struct page *swiotlb_alloc(struct device *dev, size_t size);

@@ -8,20 +8,15 @@
 #include <linux/tracepoint.h>
 
 TRACE_EVENT(swiotlb_bounced,
-	TP_PROTO(struct device *dev,
-		 dma_addr_t dev_addr,
-		 size_t size,
-		 enum swiotlb_force swiotlb_force),
-
-	TP_ARGS(dev, dev_addr, size, swiotlb_force),
+	TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
+	TP_ARGS(dev, dev_addr, size),
 
 	TP_STRUCT__entry(
 		__string(dev_name, dev_name(dev))
 		__field(u64, dma_mask)
 		__field(dma_addr_t, dev_addr)
 		__field(size_t, size)
-		__field(	enum swiotlb_force,	swiotlb_force	)
+		__field(bool, force)
 	),
 
 	TP_fast_assign(

@@ -29,19 +24,15 @@ TRACE_EVENT(swiotlb_bounced,
 		__entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0);
 		__entry->dev_addr = dev_addr;
 		__entry->size = size;
-		__entry->swiotlb_force = swiotlb_force;
+		__entry->force = is_swiotlb_force_bounce(dev);
 	),
 
-	TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx "
-		"size=%zu %s",
+	TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx size=%zu %s",
 		__get_str(dev_name),
 		__entry->dma_mask,
 		(unsigned long long)__entry->dev_addr,
 		__entry->size,
-		__print_symbolic(__entry->swiotlb_force,
-			{ SWIOTLB_NORMAL,	"NORMAL" },
-			{ SWIOTLB_FORCE,	"FORCE" },
-			{ SWIOTLB_NO_FORCE,	"NO_FORCE" }))
+		__entry->force ? "FORCE" : "NORMAL")
 );
 
 #endif /*  _TRACE_SWIOTLB_H */

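The printed line keeps its shape; only the trailing keyword now comes from is_swiotlb_force_bounce(). An illustrative event with made-up values:

	swiotlb_bounced: dev_name: 0000:00:1d.0 dma_mask=ffffffff dev_addr=11fe00000 size=65536 NORMAL
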
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XEN_ARM_PAGE_COHERENT_H
-#define _XEN_ARM_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-#endif /* _XEN_ARM_PAGE_COHERENT_H */

@@ -115,6 +115,5 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */

| 
						 | 
					@ -10,8 +10,6 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
 | 
				
			||||||
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 | 
					void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 | 
				
			||||||
			     size_t size, enum dma_data_direction dir);
 | 
								     size_t size, enum dma_data_direction dir);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
int xen_swiotlb_init(void);
 | 
					 | 
				
			||||||
void __init xen_swiotlb_init_early(void);
 | 
					 | 
				
			||||||
extern const struct dma_map_ops xen_swiotlb_dma_ops;
 | 
					extern const struct dma_map_ops xen_swiotlb_dma_ops;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#endif /* __LINUX_SWIOTLB_XEN_H */
 | 
					#endif /* __LINUX_SWIOTLB_XEN_H */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -42,13 +42,6 @@ int xen_setup_shutdown_event(void);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
extern unsigned long *xen_contiguous_bitmap;
 | 
					extern unsigned long *xen_contiguous_bitmap;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 | 
					 | 
				
			||||||
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 | 
					 | 
				
			||||||
				unsigned int address_bits,
 | 
					 | 
				
			||||||
				dma_addr_t *dma_handle);
 | 
					 | 
				
			||||||
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 | 
					 | 
				
			||||||
#endif
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
#if defined(CONFIG_XEN_PV)
 | 
					#if defined(CONFIG_XEN_PV)
 | 
				
			||||||
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
 | 
					int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
 | 
				
			||||||
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
 | 
							  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -448,7 +448,7 @@ void debug_dma_dump_mappings(struct device *dev)
 | 
				
			||||||
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 | 
					 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 | 
				
			||||||
 * entries into the tree.
 | 
					 * entries into the tree.
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
 | 
					static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
 | 
				
			||||||
static DEFINE_SPINLOCK(radix_lock);
 | 
					static DEFINE_SPINLOCK(radix_lock);
 | 
				
			||||||
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
 | 
					#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
 | 
				
			||||||
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
 | 
					#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -79,7 +79,7 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 {
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
 }
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)

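The old expression rounded the page count up to the next power of two, decrypting (and later re-encrypting) more memory than the allocation covers. A worked example with 4 KiB pages and size = 20 KiB (5 pages):

	1 << get_order(20 * SZ_1K)	/* get_order() rounds to order 3: 8 pages */
	PFN_UP(20 * SZ_1K)		/* 5 pages - exactly the buffer */
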
@@ -88,7 +88,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 
 	if (!force_dma_unencrypted(dev))
 		return 0;
-	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 	if (ret)
 		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
 	return ret;

@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp)
+		gfp_t gfp, bool allow_highmem)
 {
 	int node = dev_to_node(dev);
 	struct page *page = NULL;

@@ -129,10 +129,13 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, size);
-		page = NULL;
-	}
+	if (page) {
+		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+		    (!allow_highmem && PageHighMem(page))) {
+			dma_free_contiguous(dev, page, size);
+			page = NULL;
+		}
+	}
 again:
 	if (!page)
 		page = alloc_pages_node(node, gfp, get_order(size));

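For reference, the allow_highmem value each caller passes in the rest of this series (all three hunks appear below):

	dma_direct_alloc_no_mapping()	-> true	 (no kernel mapping is needed)
	dma_direct_alloc()		-> true	 (highmem can be remapped)
	dma_direct_alloc_pages()	-> false (page_address() is used directly)
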
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
 	struct page *page;
 
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 

@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 

@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-	page = __dma_direct_alloc_pages(dev, size, gfp);
+	page = __dma_direct_alloc_pages(dev, size, gfp, false);
 	if (!page)
 		return NULL;
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
-	}
 
 	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))

					@ -91,7 +91,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 | 
				
			||||||
		return swiotlb_map(dev, phys, size, dir, attrs);
 | 
							return swiotlb_map(dev, phys, size, dir, attrs);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
 | 
						if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
 | 
				
			||||||
		if (swiotlb_force != SWIOTLB_NO_FORCE)
 | 
							if (is_swiotlb_active(dev))
 | 
				
			||||||
			return swiotlb_map(dev, phys, size, dir, attrs);
 | 
								return swiotlb_map(dev, phys, size, dir, attrs);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		dev_WARN_ONCE(dev, 1,
 | 
							dev_WARN_ONCE(dev, 1,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
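
The per-device is_swiotlb_active() test replaces the global swiotlb_force enum here, which is what allows the rest of the series to delete that enum. Roughly what the check amounts to after this series (see the declaration at the end of this diff; dev->dma_io_tlb_mem points at io_tlb_default_mem unless a restricted DMA pool overrides it):

	bool is_swiotlb_active(struct device *dev)
	{
		struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

		return mem && mem->nslabs;
	}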

--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -62,18 +62,13 @@
 
 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 
-enum swiotlb_force swiotlb_force;
+static bool swiotlb_force_bounce;
+static bool swiotlb_force_disable;
 
 struct io_tlb_mem io_tlb_default_mem;
 
 phys_addr_t swiotlb_unencrypted_base;
 
-/*
- * Max segment that we can provide which (if pages are contingous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-static unsigned int max_segment;
-
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 
 static int __init
@@ -87,9 +82,9 @@ setup_io_tlb_npages(char *str)
 	if (*str == ',')
 		++str;
 	if (!strcmp(str, "force"))
-		swiotlb_force = SWIOTLB_FORCE;
+		swiotlb_force_bounce = true;
 	else if (!strcmp(str, "noforce"))
-		swiotlb_force = SWIOTLB_NO_FORCE;
+		swiotlb_force_disable = true;
 
 	return 0;
 }
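
The accepted kernel command line syntax is unchanged by this hunk; only the variables behind it are. Illustrative values (slab count chosen arbitrarily):

	swiotlb=32768          reserve 32768 slabs (32768 << IO_TLB_SHIFT bytes)
	swiotlb=32768,force    same size, and bounce every DMA mapping
	swiotlb=noforce        disable the bounce buffer entirely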
@@ -97,18 +92,12 @@ early_param("swiotlb", setup_io_tlb_npages);
 
 unsigned int swiotlb_max_segment(void)
 {
-	return io_tlb_default_mem.nslabs ? max_segment : 0;
+	if (!io_tlb_default_mem.nslabs)
+		return 0;
+	return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
 }
 EXPORT_SYMBOL_GPL(swiotlb_max_segment);
 
-void swiotlb_set_max_segment(unsigned int val)
-{
-	if (swiotlb_force == SWIOTLB_FORCE)
-		max_segment = 1;
-	else
-		max_segment = rounddown(val, PAGE_SIZE);
-}
-
 unsigned long swiotlb_size_or_default(void)
 {
 	return default_nslabs << IO_TLB_SHIFT;
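
With the cached max_segment variable gone, the helper recomputes its answer from the live buffer size on every call. A standalone check of the arithmetic, assuming the mainline constants IO_TLB_SHIFT = 11 (2 KiB slabs) and 4 KiB pages:

	#include <stdio.h>

	#define IO_TLB_SHIFT	11		/* assumed: 2 KiB per slab */
	#define PAGE_SIZE	4096UL		/* assumed: 4 KiB pages */
	#define rounddown(x, y)	(((x) / (y)) * (y))

	int main(void)
	{
		unsigned long nslabs = 32768;	/* the 64 MiB default buffer */

		/* swiotlb_max_segment() now reports the whole buffer,
		 * rounded down to a page boundary. */
		printf("%lu\n", rounddown(nslabs << IO_TLB_SHIFT, PAGE_SIZE));
		return 0;			/* prints 67108864 (64 MiB) */
	}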
@@ -214,7 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	mem->index = 0;
 	mem->late_alloc = late_alloc;
 
-	if (swiotlb_force == SWIOTLB_FORCE)
+	if (swiotlb_force_bounce)
 		mem->force_bounce = true;
 
 	spin_lock_init(&mem->lock);
@@ -236,17 +225,49 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	return;
 }
 
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	unsigned long nslabs = default_nslabs;
 	size_t alloc_size;
+	size_t bytes;
+	void *tlb;
 
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		return 0;
+	if (!addressing_limit && !swiotlb_force_bounce)
+		return;
+	if (swiotlb_force_disable)
+		return;
 
-	/* protect against double initialization */
-	if (WARN_ON_ONCE(mem->nslabs))
-		return -ENOMEM;
+	/*
+	 * By default allocate the bounce buffer memory from low memory, but
+	 * allow to pick a location everywhere for hypervisors with guest
+	 * memory encryption.
+	 */
+retry:
+	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+	if (flags & SWIOTLB_ANY)
+		tlb = memblock_alloc(bytes, PAGE_SIZE);
+	else
+		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+	if (!tlb) {
+		pr_warn("%s: failed to allocate tlb structure\n", __func__);
+		return;
+	}
+
+	if (remap && remap(tlb, nslabs) < 0) {
+		memblock_free(tlb, PAGE_ALIGN(bytes));
+
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			panic("%s: Failed to remap %zu bytes\n",
+			      __func__, bytes);
+		goto retry;
+	}
 
 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
@@ -255,38 +276,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		      __func__, alloc_size, PAGE_SIZE);
 
 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
+	mem->force_bounce = flags & SWIOTLB_FORCE;
 
-	if (verbose)
+	if (flags & SWIOTLB_VERBOSE)
 		swiotlb_print_info();
-	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
-	return 0;
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void  __init
-swiotlb_init(int verbose)
+void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 {
-	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
-	void *tlb;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		return;
-
-	/* Get IO TLB memory from the low pages */
-	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
-	if (!tlb)
-		goto fail;
-	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
-		goto fail_free_mem;
-	return;
-
-fail_free_mem:
-	memblock_free(tlb, bytes);
-fail:
-	pr_warn("Cannot allocate buffer");
+	return swiotlb_init_remap(addressing_limit, flags, NULL);
 }
 
 /*
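
The remap callback is the hook that lets swiotlb-xen and encrypted-guest setups fold their buffer fixups into the common code: if it fails, swiotlb_init_remap() halves nslabs and retries until IO_TLB_MIN_SLABS. A hypothetical caller (swiotlb_init_remap() and the SWIOTLB_VERBOSE flag are from this series; the callback body and function names are invented for illustration):

	/* Hypothetical architecture setup using the new interface. */
	static int __init my_remap(void *tlb, unsigned long nslabs)
	{
		/* e.g. exchange these pages with the hypervisor; returning a
		 * negative value makes the core retry with half the slabs. */
		return 0;
	}

	void __init my_arch_mem_init(void)
	{
		/* addressing_limit=true: this platform has devices that
		 * cannot reach all of memory, so a bounce buffer is needed. */
		swiotlb_init_remap(true, SWIOTLB_VERBOSE, my_remap);
	}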
@@ -294,72 +292,65 @@ swiotlb_init(int verbose)
  * initialize the swiotlb later using the slab allocator if needed.
  * This should be just like above, but with some error catching.
  */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
+int swiotlb_init_late(size_t size, gfp_t gfp_mask,
+		int (*remap)(void *tlb, unsigned long nslabs))
 {
-	unsigned long nslabs =
-		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-	unsigned long bytes;
+	struct io_tlb_mem *mem = &io_tlb_default_mem;
+	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 	unsigned char *vstart = NULL;
 	unsigned int order;
+	bool retried = false;
 	int rc = 0;
 
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
+	if (swiotlb_force_disable)
 		return 0;
 
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
+retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
-	bytes = nslabs << IO_TLB_SHIFT;
 
 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
 						  order);
 		if (vstart)
 			break;
 		order--;
+		nslabs = SLABS_PER_PAGE << order;
+		retried = true;
 	}
 
 	if (!vstart)
 		return -ENOMEM;
 
-	if (order != get_order(bytes)) {
-		pr_warn("only able to allocate %ld MB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-	}
-	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
-	if (rc)
+	if (remap)
+		rc = remap(vstart, nslabs);
+	if (rc) {
 		free_pages((unsigned long)vstart, order);
 
-	return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-	struct io_tlb_mem *mem = &io_tlb_default_mem;
-	unsigned long bytes = nslabs << IO_TLB_SHIFT;
-
-	if (swiotlb_force == SWIOTLB_NO_FORCE)
-		return 0;
-
-	/* protect against double initialization */
-	if (WARN_ON_ONCE(mem->nslabs))
-		return -ENOMEM;
+		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		if (nslabs < IO_TLB_MIN_SLABS)
+			return rc;
+		retried = true;
+		goto retry;
+	}
+
+	if (retried) {
+		pr_warn("only able to allocate %ld MB\n",
+			(PAGE_SIZE << order) >> 20);
+	}
 
 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 		get_order(array_size(sizeof(*mem->slots), nslabs)));
-	if (!mem->slots)
+	if (!mem->slots) {
+		free_pages((unsigned long)vstart, order);
 		return -ENOMEM;
+	}
 
-	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
+	set_memory_decrypted((unsigned long)vstart,
+			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
 	swiotlb_print_info();
-	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
 	return 0;
 }
 
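
Late initialization now takes the caller's gfp_mask rather than hard-coding GFP_DMA, and applies the same halve-and-retry policy around the remap hook. An illustrative post-boot call (the 64 MiB size and warning text are arbitrary examples, not from this patch):

	/* Hypothetical late caller: a 64 MiB bounce buffer from
	 * DMA-capable memory, with no remap step. */
	if (swiotlb_init_late(64UL << 20, GFP_DMA, NULL))
		pr_warn("swiotlb: late initialization failed\n");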
@@ -369,6 +360,9 @@ void __init swiotlb_exit(void)
 	unsigned long tbl_vaddr;
 	size_t tbl_size, slots_size;
 
+	if (swiotlb_force_bounce)
+		return;
+
 	if (!mem->nslabs)
 		return;
 
@@ -717,8 +711,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	phys_addr_t swiotlb_addr;
 	dma_addr_t dma_addr;
 
-	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
-			      swiotlb_force);
+	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
 
 	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
 			attrs);
@@ -743,7 +736,18 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 
 size_t swiotlb_max_mapping_size(struct device *dev)
 {
-	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
+	int min_align_mask = dma_get_min_align_mask(dev);
+	int min_align = 0;
+
+	/*
+	 * swiotlb_find_slots() skips slots according to
+	 * min align mask. This affects max mapping size.
+	 * Take it into acount here.
+	 */
+	if (min_align_mask)
+		min_align = roundup(min_align_mask, IO_TLB_SIZE);
+
+	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
 }
 
 bool is_swiotlb_active(struct device *dev)
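
For a device that sets a min_align_mask, for example a 4 KiB - 1 NVMe-style mask, the maximum mapping shrinks by the mask rounded up to a slot. A standalone check of the new arithmetic, assuming the mainline values IO_TLB_SIZE = 2048 and IO_TLB_SEGSIZE = 128:

	#include <stdio.h>

	#define IO_TLB_SIZE	2048	/* assumed, as in mainline */
	#define IO_TLB_SEGSIZE	128	/* assumed, as in mainline */
	#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		int min_align_mask = 4096 - 1;	/* e.g. an NVMe-style mask */
		int min_align = 0;

		if (min_align_mask)
			min_align = roundup(min_align_mask, IO_TLB_SIZE);

		/* 128 slots * 2 KiB = 256 KiB, minus 4 KiB alignment slack */
		printf("%zu\n", (size_t)IO_TLB_SIZE * IO_TLB_SEGSIZE - min_align);
		return 0;	/* prints 258048 */
	}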