Merge tag 'dma-mapping-6.4-2023-04-28' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
 - fix a PageHighMem check in dma-coherent initialization (Doug Berger)
 - clean up the coherency default initialization (Jiaxun Yang)
 - add cacheline to user/kernel dma-debug space dump messages (Desnes
   Nunes, Geert Uytterhoeven)
 - swiotlb statistics improvements (Michael Kelley)
 - misc cleanups (Petr Tesarik)
* tag 'dma-mapping-6.4-2023-04-28' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: Omit total_used and used_hiwater if !CONFIG_DEBUG_FS
  swiotlb: track and report io_tlb_used high water marks in debugfs
  swiotlb: fix debugfs reporting of reserved memory pools
  swiotlb: relocate PageHighMem test away from rmem_swiotlb_setup
  of: address: always use dma_default_coherent for default coherency
  dma-mapping: provide CONFIG_ARCH_DMA_DEFAULT_COHERENT
  dma-mapping: provide a fallback dma_default_coherent
  dma-debug: Use %pa to format phys_addr_t
  dma-debug: add cacheline to user/kernel space dump messages
  dma-debug: small dma_debug_entry's comment and variable name updates
  dma-direct: cleanup parameters to dma_direct_optimal_gfp_mask
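
The swiotlb statistics commits above expose the pool's usage counters through debugfs. As a minimal userspace sketch, the counters could be polled like this; it assumes debugfs is mounted at /sys/kernel/debug and that the counter files are named io_tlb_used and io_tlb_used_hiwater, as inferred from the commit subjects (both assumptions, not confirmed by this log):

/*
 * Sketch: poll the swiotlb usage counters exposed via debugfs.
 * Assumes debugfs at /sys/kernel/debug and the file names io_tlb_used /
 * io_tlb_used_hiwater inferred from the commit subjects above.
 */
#include <stdio.h>

static long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long used = read_counter("/sys/kernel/debug/swiotlb/io_tlb_used");
	long hiwater = read_counter("/sys/kernel/debug/swiotlb/io_tlb_used_hiwater");

	if (used < 0 || hiwater < 0) {
		fprintf(stderr, "swiotlb debugfs counters not available\n");
		return 1;
	}
	printf("swiotlb slots used: %ld (high water mark: %ld)\n", used, hiwater);
	return 0;
}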

include/linux/swiotlb.h · 192 lines · 5.7 KiB · C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
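/*
 * Worked example: with IO_TLB_SHIFT = 11 each slab is 2 KiB, so the
 * 64 MiB default corresponds to (64 << 20) >> 11 = 32768 slabs.
 */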

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
		unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and stores the
 *		virtual address for bounce buffer operations.
 * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
 *		@end. For default swiotlb, this is command line adjustable via
 *		setup_io_tlb_npages.
 * @used:	The number of used IO TLB blocks.
 * @list:	The free list describing the number of free entries available
 *		from each index.
 * @orig_addr:	The original address corresponding to a mapped entry.
 * @alloc_size:	Size of the allocated buffer.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:  %true if the pool is used for memory allocation
 * @nareas:	The number of areas in the pool.
 * @area_nslabs: The number of slots in each area.
 * @total_used:	The total number of slots in the pool that are currently used
 *		across all areas. Used only for calculating used_hiwater in
 *		debugfs.
 * @used_hiwater: The high water mark for total_used.  Used only for reporting
 *		in debugfs.
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
#endif
};
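/*
 * Note: total_used and used_hiwater back the io_tlb_used and
 * io_tlb_used_hiwater debugfs entries from this merge; the commit
 * "swiotlb: Omit total_used and used_hiwater if !CONFIG_DEBUG_FS"
 * is why they sit behind the #ifdef above.
 */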
extern struct io_tlb_mem io_tlb_default_mem;

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */

#endif /* __LINUX_SWIOTLB_H */
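
For orientation, here is how the helpers in this header are typically consumed on the DMA mapping path: a sync operation only needs to bounce through swiotlb when the physical address falls inside the pool. A simplified, illustrative sketch, assuming a kernel context that includes this header (the function name example_sync_for_device is hypothetical; the real logic lives in the DMA mapping core and differs in detail):

#include <linux/swiotlb.h>

/*
 * Hypothetical helper, for illustration only: sync a streaming mapping
 * for the device, bouncing through the swiotlb pool when the physical
 * address was allocated from it. Loosely modeled on the DMA mapping
 * core, not the exact upstream code.
 */
static inline void example_sync_for_device(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	if (is_swiotlb_buffer(dev, paddr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);
}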