mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 10:40:15 +02:00 
			
		
		
		
	swiotlb: add support for non-coherent DMA
Handle architectures that are not cache coherent directly in the main
swiotlb code by calling arch_sync_dma_for_{device,cpu} in all the right
places from the various dma_map/unmap/sync methods when the device is
non-coherent.
Because swiotlb now uses dma_direct_alloc for the coherent allocation
that side is already taken care of by the dma-direct code calling into
arch_dma_{alloc,free} for devices that are non-coherent.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
			
			
This commit is contained in:
		
							parent
							
								
									fafadcd165
								
							
						
					
					
						commit
						a4a4330db4
					
				
					 1 changed file with 23 additions and 10 deletions
				
			
		| 
						 | 
					@ -21,6 +21,7 @@
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#include <linux/cache.h>
 | 
					#include <linux/cache.h>
 | 
				
			||||||
#include <linux/dma-direct.h>
 | 
					#include <linux/dma-direct.h>
 | 
				
			||||||
 | 
					#include <linux/dma-noncoherent.h>
 | 
				
			||||||
#include <linux/mm.h>
 | 
					#include <linux/mm.h>
 | 
				
			||||||
#include <linux/export.h>
 | 
					#include <linux/export.h>
 | 
				
			||||||
#include <linux/spinlock.h>
 | 
					#include <linux/spinlock.h>
 | 
				
			||||||
| 
						 | 
					@ -671,11 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 | 
				
			||||||
	 * we can safely return the device addr and not worry about bounce
 | 
						 * we can safely return the device addr and not worry about bounce
 | 
				
			||||||
	 * buffering it.
 | 
						 * buffering it.
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
 | 
						if (!dma_capable(dev, dev_addr, size) ||
 | 
				
			||||||
		return dev_addr;
 | 
						    swiotlb_force == SWIOTLB_FORCE) {
 | 
				
			||||||
 | 
					 | 
				
			||||||
		trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 | 
							trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 | 
				
			||||||
	return swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 | 
							dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!dev_is_dma_coherent(dev) &&
 | 
				
			||||||
 | 
						    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 | 
				
			||||||
 | 
							arch_sync_dma_for_device(dev, phys, size, dir);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return dev_addr;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/*
 | 
					/*
 | 
				
			||||||
| 
						 | 
					@ -694,6 +701,10 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	BUG_ON(dir == DMA_NONE);
 | 
						BUG_ON(dir == DMA_NONE);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!dev_is_dma_coherent(hwdev) &&
 | 
				
			||||||
 | 
						    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 | 
				
			||||||
 | 
							arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (is_swiotlb_buffer(paddr)) {
 | 
						if (is_swiotlb_buffer(paddr)) {
 | 
				
			||||||
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 | 
							swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 | 
				
			||||||
		return;
 | 
							return;
 | 
				
			||||||
| 
						 | 
					@ -730,14 +741,16 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	BUG_ON(dir == DMA_NONE);
 | 
						BUG_ON(dir == DMA_NONE);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (is_swiotlb_buffer(paddr)) {
 | 
						if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
 | 
				
			||||||
 | 
							arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (is_swiotlb_buffer(paddr))
 | 
				
			||||||
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 | 
							swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 | 
				
			||||||
		return;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (dir != DMA_FROM_DEVICE)
 | 
						if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
 | 
				
			||||||
		return;
 | 
							arch_sync_dma_for_device(hwdev, paddr, size, dir);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
 | 
				
			||||||
		dma_mark_clean(phys_to_virt(paddr), size);
 | 
							dma_mark_clean(phys_to_virt(paddr), size);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue