dma-buf: remove kmap_atomic interface

Neither used nor correctly implemented anywhere. Just completely remove
the interface.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/226645/

parent a19741e5e5
commit f664a52695

17 changed files with 2 additions and 165 deletions
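
Not part of the patch: for anyone porting an importer off the removed
dma_buf_kmap_atomic()/dma_buf_kunmap_atomic() calls, here is a minimal sketch
of CPU access through the interface that survives. The helper name, the DMA
direction, and the error handling are illustrative assumptions, not taken
from this commit.

#include <linux/dma-buf.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical importer helper -- for illustration only. */
static int example_copy_first_page(struct dma_buf *dmabuf, void *out)
{
	void *vaddr;
	int ret;

	/* Bracket CPU access: kmap outside this range is undefined. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);	/* page index in PAGE_SIZE units */
	if (vaddr) {
		memcpy(out, vaddr, PAGE_SIZE);
		dma_buf_kunmap(dmabuf, 0, vaddr);
	}

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}

Note the NULL check: several exporters wire .map to a stub that simply
returns NULL (drm_gem_dmabuf_kmap in the drm_prime.c hunks below, for
example), so importers cannot assume the mapping succeeds.
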
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 			  || !exp_info->ops->map_dma_buf
 			  || !exp_info->ops->unmap_dma_buf
 			  || !exp_info->ops->release
-			  || !exp_info->ops->map_atomic
 			  || !exp_info->ops->map
 			  || !exp_info->ops->mmap)) {
 		return ERR_PTR(-EINVAL);
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
  *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
  *
- *   There are also atomic variants of these interfaces. Like for kmap they
- *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
- *   (in the callback) is allowed to block when using these.
- *
- *   Interfaces::
- *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- *   For importers all the restrictions of using kmap apply, like the limited
- *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
- *   max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *   Implementing the functions is optional for exporters and for importers all
+ *   the restrictions of using kmap apply.
  *
  *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
  *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
  *   the partial chunks at the beginning and end but may return stale or bogus
  *   data outside of the range (in these partial chunks).
  *
- *   Note that these calls need to always succeed. The exporter needs to
- *   complete any preparations that might fail in begin_cpu_access.
- *
  *   For some cases the overhead of kmap can be too high, a vmap interface
  *   is introduced. This interface should be used very carefully, as vmalloc
  *   space is a limited resources on many architectures.
@@ -859,43 +846,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
-/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf:	[in]	buffer to map page from.
- * @page_num:	[in]	page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
-	WARN_ON(!dmabuf);
-
-	if (!dmabuf->ops->map_atomic)
-		return NULL;
-	return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf:	[in]	buffer to unmap page from.
- * @page_num:	[in]	page in PAGE_SIZE units to unmap.
- * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
-			   void *vaddr)
-{
-	WARN_ON(!dmabuf);
-
-	if (dmabuf->ops->unmap_atomic)
-		dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
 /**
  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
  * same restrictions as for kmap and friends apply.
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -238,9 +238,7 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
 	.map = drm_gem_dmabuf_kmap,
-	.map_atomic = drm_gem_dmabuf_kmap_atomic,
 	.unmap = drm_gem_dmabuf_kunmap,
-	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -490,8 +490,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
 	.map_dma_buf	= armada_gem_prime_map_dma_buf,
 	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
 	.release	= drm_gem_dmabuf_release,
-	.map_atomic	= armada_gem_dmabuf_no_kmap,
-	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
 	.map		= armada_gem_dmabuf_no_kmap,
 	.unmap		= armada_gem_dmabuf_no_kunmap,
 	.mmap		= armada_gem_dmabuf_mmap,
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -433,35 +433,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
-/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-				 unsigned long page_num)
-{
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-				  unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
 /**
  * drm_gem_dmabuf_kmap - map implementation for GEM
  * @dma_buf: buffer to be mapped
@@ -519,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
 	.map = drm_gem_dmabuf_kmap,
-	.map_atomic = drm_gem_dmabuf_kmap_atomic,
 	.unmap = drm_gem_dmabuf_kunmap,
-	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	i915_gem_object_unpin_map(obj);
 }
 
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
 	.map = i915_gem_dmabuf_kmap,
-	.map_atomic = i915_gem_dmabuf_kmap_atomic,
 	.unmap = i915_gem_dmabuf_kunmap,
-	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -130,9 +130,7 @@ static const struct dma_buf_ops mock_dmabuf_ops =  {
 	.unmap_dma_buf = mock_unmap_dma_buf,
 	.release = mock_dmabuf_release,
 	.map = mock_dmabuf_kmap,
-	.map_atomic = mock_dmabuf_kmap_atomic,
 	.unmap = mock_dmabuf_kunmap,
-	.unmap_atomic = mock_dmabuf_kunmap_atomic,
 	.mmap = mock_dmabuf_mmap,
 	.vmap = mock_dmabuf_vmap,
 	.vunmap = mock_dmabuf_vunmap,
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -148,8 +148,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
 	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-	.map_atomic = omap_gem_dmabuf_kmap_atomic,
-	.unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
 	.map = omap_gem_dmabuf_kmap,
 	.unmap = omap_gem_dmabuf_kunmap,
 	.mmap = omap_gem_dmabuf_mmap,
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -596,18 +596,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
 	return 0;
 }
 
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
-					 unsigned long page)
-{
-	return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
-					  unsigned long page,
-					  void *addr)
-{
-}
-
 static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
 {
 	return NULL;
@@ -648,8 +636,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
 	.release = tegra_gem_prime_release,
 	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
 	.end_cpu_access = tegra_gem_prime_end_cpu_access,
-	.map_atomic = tegra_gem_prime_kmap_atomic,
-	.unmap_atomic = tegra_gem_prime_kunmap_atomic,
 	.map = tegra_gem_prime_kmap,
 	.unmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -157,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 	return NULL;
 }
 
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-				    unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
 static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
 			      unsigned long page_num, void *addr)
 {
 	/* TODO */
 }
 
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-				     unsigned long page_num,
-				     void *addr)
-{
-	/* TODO */
-}
-
 static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
 			   struct vm_area_struct *vma)
 {
@@ -192,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
 	.map_dma_buf		= udl_map_dma_buf,
 	.unmap_dma_buf		= udl_unmap_dma_buf,
 	.map			= udl_dmabuf_kmap,
-	.map_atomic		= udl_dmabuf_kmap_atomic,
 	.unmap			= udl_dmabuf_kunmap,
-	.unmap_atomic		= udl_dmabuf_kunmap_atomic,
 	.mmap			= udl_dmabuf_mmap,
 	.release		= drm_gem_dmabuf_release,
 };
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -71,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 }
 
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-		unsigned long page_num)
-{
-	return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-		unsigned long page_num, void *addr)
-{
-
-}
 static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
 		unsigned long page_num)
 {
@@ -108,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
 	.unmap_dma_buf = vmw_prime_unmap_dma_buf,
 	.release = NULL,
 	.map = vmw_prime_dmabuf_kmap,
-	.map_atomic = vmw_prime_dmabuf_kmap_atomic,
 	.unmap = vmw_prime_dmabuf_kunmap,
-	.unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
 	.mmap = vmw_prime_dmabuf_mmap,
 	.vmap = vmw_prime_dmabuf_vmap,
 	.vunmap = vmw_prime_dmabuf_vunmap,
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -358,7 +358,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
 	.map = vb2_dc_dmabuf_ops_kmap,
-	.map_atomic = vb2_dc_dmabuf_ops_kmap,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -507,7 +507,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
 	.map = vb2_dma_sg_dmabuf_ops_kmap,
-	.map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -346,7 +346,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
 	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
 	.map = vb2_vmalloc_dmabuf_ops_kmap,
-	.map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
 	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
 	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
 	.release = vb2_vmalloc_dmabuf_ops_release,
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -369,8 +369,6 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.detach = ion_dma_buf_detatch,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
-	.map_atomic = ion_dma_buf_kmap,
-	.unmap_atomic = ion_dma_buf_kunmap,
 	.map = ion_dma_buf_kmap,
 	.unmap = ion_dma_buf_kunmap,
 };
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -80,11 +80,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
 	tee_shm_release(shm);
 }
 
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	return NULL;
-}
-
 static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
 {
 	return NULL;
@@ -107,7 +102,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.map_dma_buf = tee_shm_op_map_dma_buf,
 	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
 	.release = tee_shm_op_release,
-	.map_atomic = tee_shm_op_map_atomic,
 	.map = tee_shm_op_map,
 	.mmap = tee_shm_op_mmap,
 };
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 			   enum dma_data_direction dir);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-				 unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-				  unsigned long page_num, void *addr);
 void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
 void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
 			   void *addr);
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -205,8 +205,6 @@ struct dma_buf_ops {
 	 * to be restarted.
 	 */
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-	void *(*map_atomic)(struct dma_buf *, unsigned long);
-	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*map)(struct dma_buf *, unsigned long);
 	void (*unmap)(struct dma_buf *, unsigned long, void *);
 
@@ -394,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 
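
Also not part of the patch: a sketch of the smallest set of callbacks
dma_buf_export() still insists on after this change, per the validation in
the first hunk above (.map_atomic and .unmap_atomic are gone; .unmap is shown
for symmetry but is not part of that check). All stub names are hypothetical.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Hypothetical no-op stubs -- illustration only. */
static struct sg_table *ex_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	return ERR_PTR(-EINVAL);	/* a real exporter builds an sg_table */
}

static void ex_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
}

static void ex_release(struct dma_buf *buf)
{
}

static void *ex_kmap(struct dma_buf *buf, unsigned long page_num)
{
	return NULL;	/* permitted: see drm_gem_dmabuf_kmap above */
}

static void ex_kunmap(struct dma_buf *buf, unsigned long page_num, void *addr)
{
}

static int ex_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops example_exporter_ops = {
	.map_dma_buf	= ex_map_dma_buf,
	.unmap_dma_buf	= ex_unmap_dma_buf,
	.release	= ex_release,
	.map		= ex_kmap,
	.unmap		= ex_kunmap,
	.mmap		= ex_mmap,
};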