Mirror of https://github.com/torvalds/linux.git (synced 2025-11-04 10:40:15 +02:00)
drm/amdgpu: use drm_exec for GEM and CSA handling v2

Start using the new component here as well.

v2: ignore duplicates to allow per VM BO mappings

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-5-christian.koenig@amd.com
parent 8abc1eb298
commit 8a206685d3

2 changed files with 71 additions and 86 deletions
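The diff below is mechanical: each open-coded ttm_eu_reserve_buffers() / ttm_eu_backoff_reservation() sequence becomes a drm_exec locking loop. As a reading aid, here is a minimal sketch of that pattern, built only from the drm_exec calls that appear in the diff and the two-argument drm_exec_init() as it exists at this commit; the helper function and its two GEM object parameters are illustrative, not part of this commit:

	#include <drm/drm_exec.h>

	/* Illustrative only: lock two GEM objects the way this commit does. */
	static int example_lock_two(struct drm_gem_object *a, struct drm_gem_object *b)
	{
		struct drm_exec exec;
		int r;

		/* IGNORE_DUPLICATES lets the same BO be locked twice; the v2
		 * of this patch relies on it for per-VM BO mappings. */
		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
			      DRM_EXEC_IGNORE_DUPLICATES);
		drm_exec_until_all_locked(&exec) {
			r = drm_exec_lock_obj(&exec, a);
			/* On ww_mutex contention: drop all locks, restart loop. */
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;

			/* Lock and additionally reserve one dma-resv fence slot. */
			r = drm_exec_prepare_obj(&exec, b, 1);
			drm_exec_retry_on_contention(&exec);
			if (unlikely(r))
				goto error;
		}

		/* Both objects are locked here; do the real work. */

	error:
		drm_exec_fini(&exec);	/* releases every lock taken above */
		return r;
	}

Because contention is handled by restarting the loop and drm_exec_fini() releases whatever is still locked, the explicit backoff calls on every error path of the old code collapse into a single error label.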
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -22,6 +22,8 @@
  * * Author: Monk.liu@amd.com
  */
 
+#include <drm/drm_exec.h>
+
 #include "amdgpu.h"
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
@@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
 			  uint64_t csa_addr, uint32_t size)
 {
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
+	struct drm_exec exec;
 	int r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.num_shared = 1;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+			goto error;
+		}
 	}
 
 	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
-		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto error;
 	}
 
 	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
@@ -99,48 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r) {
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
 		amdgpu_vm_bo_del(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
+		goto error;
 	}
 
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
+error:
+	drm_exec_fini(&exec);
+	return r;
 }
 
 int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			    struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
 			    uint64_t csa_addr)
 {
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
+	struct drm_exec exec;
 	int r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.num_shared = 1;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+			goto error;
+		}
 	}
 
 	r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
 	if (r) {
 		DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
+		goto error;
 	}
 
 	amdgpu_vm_bo_del(adev, bo_va);
-
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
+error:
+	drm_exec_fini(&exec);
+	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -33,6 +33,7 @@
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_tt.h>
 
@@ -198,29 +199,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
-	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list, duplicates;
 	struct dma_fence *fence = NULL;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
+	struct drm_exec exec;
 	long r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo->tbo;
-	tv.num_shared = 2;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-	if (r) {
-		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%ld)\n", r);
-		return;
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 	}
+
 	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (!bo_va || --bo_va->ref_count)
 		goto out_unlock;
@@ -230,6 +226,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		goto out_unlock;
 
 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+	if (unlikely(r < 0))
+		dev_err(adev->dev, "failed to clear page "
+			"tables on GEM object close (%ld)\n", r);
 	if (r || !fence)
 		goto out_unlock;
 
@@ -237,10 +236,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	dma_fence_put(fence);
 
 out_unlock:
-	if (unlikely(r < 0))
-		dev_err(adev->dev, "failed to clear page "
-			"tables on GEM object close (%ld)\n", r);
-	ttm_eu_backoff_reservation(&ticket, &list);
+	if (r)
+		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+	drm_exec_fini(&exec);
 }
 
 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
@@ -675,10 +673,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct drm_exec exec;
 	uint64_t va_flags;
 	uint64_t vm_size;
 	int r = 0;
@@ -728,36 +723,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 		gobj = drm_gem_object_lookup(filp, args->handle);
 		if (gobj == NULL)
 			return -ENOENT;
 		abo = gem_to_amdgpu_bo(gobj);
-		tv.bo = &abo->tbo;
-		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-			tv.num_shared = 1;
-		else
-			tv.num_shared = 0;
-		list_add(&tv.head, &list);
 	} else {
 		gobj = NULL;
 		abo = NULL;
 	}
 
-	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		if (gobj) {
+			r = drm_exec_lock_obj(&exec, gobj);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(r))
+				goto error;
+		}
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_unref;
+		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error;
+	}
 
 	if (abo) {
 		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 		if (!bo_va) {
 			r = -ENOENT;
-			goto error_backoff;
+			goto error;
 		}
 	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
 		bo_va = fpriv->prt_va;
@@ -794,10 +791,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
 					args->operation);
 
-error_backoff:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }