		
drm/amdgpu: Add amdgpu_bo_is_vm_bo helper

Help code readability by replacing a bunch of:

    bo->tbo.base.resv == vm->root.bo->tbo.base.resv

with:

    amdgpu_vm_is_bo_always_valid(vm, bo)

No functional changes.

v2:
 * Rename helper and move to amdgpu_vm. (Christian)

v3:
 * Use Christian's kerneldoc.

v4:
 * Fixed logic inversion in amdgpu_vm_bo_get_memory.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent: e060c7ba7e
commit: 26e20235ce

3 changed files with 29 additions and 16 deletions
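As context for the diff below, here is a minimal standalone sketch of the invariant the new helper captures: a BO whose reservation object is that of the VM's root page directory is protected by the same lock and is therefore "always valid" inside that VM. The simplified types and the function name vm_is_bo_always_valid are illustrative stand-ins, not the real amdgpu structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct resv { int id; };             /* stand-in for struct dma_resv */
struct bo   { struct resv *resv; };  /* stand-in for struct amdgpu_bo */
struct vm   { struct bo *root_bo; }; /* root page-directory BO of the VM */

/* Mirrors the helper's logic: NULL-safe check, then resv pointer compare. */
static bool vm_is_bo_always_valid(const struct vm *vm, const struct bo *bo)
{
	return bo && bo->resv == vm->root_bo->resv;
}

int main(void)
{
	struct resv shared = { 1 }, own = { 2 };
	struct bo root = { &shared };
	struct bo per_vm = { &shared }; /* shares the root PD resv */
	struct bo exported = { &own };  /* carries its own resv */
	struct vm vm = { &root };

	printf("per_vm:   %d\n", vm_is_bo_always_valid(&vm, &per_vm));   /* 1 */
	printf("exported: %d\n", vm_is_bo_always_valid(&vm, &exported)); /* 0 */
	printf("NULL bo:  %d\n", vm_is_bo_always_valid(&vm, NULL));      /* 0 */
	return 0;
}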
				
			
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -174,7 +174,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	    !amdgpu_vm_is_bo_always_valid(vm, abo))
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -333,7 +333,7 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
 
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
 		return;
 
 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
@@ -1101,12 +1101,12 @@ static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
 	 * For now ignore BOs which are currently locked and potentially
 	 * changing their location.
 	 */
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
 	    !dma_resv_trylock(bo->tbo.base.resv))
 		return;
 
 	amdgpu_bo_get_memory(bo, stats);
-	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
+	if (!amdgpu_vm_is_bo_always_valid(vm, bo))
 		dma_resv_unlock(bo->tbo.base.resv);
 }
 
@@ -1203,8 +1203,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		uncached = false;
 	}
 
-	if (clear || (bo && bo->tbo.base.resv ==
-		      vm->root.bo->tbo.base.resv))
+	if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1246,7 +1245,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+	if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
 		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
@@ -1640,10 +1639,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
 		amdgpu_vm_prt_get(adev);
 
-	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
-	    !bo_va->base.moved) {
+	if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
 		amdgpu_vm_bo_moved(&bo_va->base);
-	}
 
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
@@ -1942,7 +1940,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
-		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
 		    !before->bo_va->base.moved)
 			amdgpu_vm_bo_moved(&before->bo_va->base);
 	} else {
@@ -1957,7 +1955,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 		if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
 			amdgpu_vm_prt_get(adev);
 
-		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
+		if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
 		    !after->bo_va->base.moved)
 			amdgpu_vm_bo_moved(&after->bo_va->base);
 	} else {
@@ -2037,7 +2035,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
 
 	if (bo) {
 		dma_resv_assert_held(bo->tbo.base.resv);
-		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+		if (amdgpu_vm_is_bo_always_valid(vm, bo))
 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
 
 		for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2131,7 +2129,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
 
-		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
+		if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
 			amdgpu_vm_bo_evicted(bo_base);
 			continue;
 		}
@@ -2142,7 +2140,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 
 		if (bo->tbo.type == ttm_bo_type_kernel)
 			amdgpu_vm_bo_relocated(bo_base);
-		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
+		else if (amdgpu_vm_is_bo_always_valid(vm, bo))
 			amdgpu_vm_bo_moved(bo_base);
 		else
 			amdgpu_vm_bo_invalidated(bo_base);
@@ -3006,3 +3004,16 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
 }
 
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
+ *
+ * @vm: VM to test against.
+ * @abo: BO to be tested.
+ *
+ * Returns true if the BO shares the dma_resv object with the root PD and is
+ * always guaranteed to be valid inside the VM.
+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+	return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
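A note on the helper added above: the "bo &&" short-circuit makes it NULL-tolerant, which is what lets call sites such as amdgpu_vm_bo_update() and amdgpu_vm_bo_insert_map() fold their previous explicit "bo &&" guard into the single helper call without changing behaviour for a NULL BO.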
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -580,6 +580,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
 
 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+
 /**
  * amdgpu_vm_tlb_seq - return tlb flush sequence number
  * @vm: the amdgpu_vm structure to query