Mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00

	dma-buf/drivers: make reserving a shared slot mandatory v4
Audit all the users of dma_resv_add_excl_fence() and make sure they
reserve a shared slot also when only trying to add an exclusive fence.

This is the next step towards handling the exclusive fence like a
shared one.

v2: fix missed case in amdgpu
v3: and two more radeon, rename function
v4: add one more case to TTM, fix i915 after rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220406075132.3263-2-christian.koenig@amd.com
This commit is contained in:

    parent 20b734c112
    commit c8d4c18bfb

30 changed files with 184 additions and 122 deletions
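The pattern enforced across every hunk below is the same: reserve a fence slot with dma_resv_reserve_fences() before adding any fence, exclusive or shared. As an illustration only (the function and variable names here are invented for the example and are not part of the patch), the driver-side calling convention after this change looks roughly like this:

#include <linux/dma-resv.h>

/* Illustrative sketch, not part of the patch: the calling convention
 * this series makes mandatory for users of dma_resv_add_excl_fence().
 */
static int example_attach_fence(struct dma_resv *resv,
				struct dma_fence *fence, bool shared)
{
	int ret;

	dma_resv_assert_held(resv);	/* caller holds the reservation lock */

	/* Reserve a slot first -- now required even for an exclusive fence. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;	/* typically -ENOMEM; callers must handle it */

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);

	return 0;
}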
@@ -152,7 +152,7 @@ static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
 }
 
 /**
- * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * dma_resv_reserve_fences - Reserve space to add shared fences to
  * a dma_resv.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
@@ -167,7 +167,7 @@ static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
  * RETURNS
  * Zero for success, or -errno
  */
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
 {
 	struct dma_resv_list *old, *new;
 	unsigned int i, j, k, max;
@@ -230,7 +230,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_resv_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_fences);
 
 #ifdef CONFIG_DEBUG_MUTEXES
 /**
@@ -238,7 +238,7 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
  * @obj: the dma_resv object to reset
  *
  * Reset the number of pre-reserved shared slots to test that drivers do
- * correct slot allocation using dma_resv_reserve_shared(). See also
+ * correct slot allocation using dma_resv_reserve_fences(). See also
  * &dma_resv_list.shared_max.
  */
 void dma_resv_reset_shared_max(struct dma_resv *obj)
@@ -260,7 +260,7 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
  * @fence: the shared fence to add
  *
 * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
- * dma_resv_reserve_shared() has been called.
+ * dma_resv_reserve_fences() has been called.
  *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
@@ -75,17 +75,16 @@ static int test_signaling(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			goto err_unlock;
-		}
+	r = dma_resv_reserve_fences(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		goto err_unlock;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 
 	if (dma_resv_test_signaled(&resv, shared)) {
 		pr_err("Resv unexpectedly signaled\n");
@@ -134,17 +133,16 @@ static int test_for_each(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			goto err_unlock;
-		}
+	r = dma_resv_reserve_fences(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		goto err_unlock;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 
 	r = -ENOENT;
 	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
@@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			dma_resv_unlock(&resv);
-			goto err_free;
-		}
+	r = dma_resv_reserve_fences(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		dma_resv_unlock(&resv);
+		goto err_free;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
@@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared)
 		goto err_resv;
 	}
 
-	if (shared) {
-		r = dma_resv_reserve_shared(&resv, 1);
-		if (r) {
-			pr_err("Resv shared slot allocation failed\n");
-			dma_resv_unlock(&resv);
-			goto err_resv;
-		}
+	r = dma_resv_reserve_fences(&resv, 1);
+	if (r) {
+		pr_err("Resv shared slot allocation failed\n");
+		dma_resv_unlock(&resv);
+		goto err_resv;
+	}
 
+	if (shared)
 		dma_resv_add_shared_fence(&resv, f);
-	} else {
+	else
 		dma_resv_add_excl_fence(&resv, f);
-	}
 	dma_resv_unlock(&resv);
 
 	r = dma_resv_get_fences(&resv, shared, &i, &fences);
@@ -1233,7 +1233,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.bo,
@@ -2571,7 +2571,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@ -1388,6 +1388,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
 	struct dma_resv *resv = bo->tbo.base.resv;
+	int r;
+
+	r = dma_resv_reserve_fences(resv, 1);
+	if (r) {
+		/* As last resort on OOM we block for the fence */
+		dma_fence_wait(fence, false);
+		return;
+	}
 
 	if (shared)
 		dma_resv_add_shared_fence(resv, fence);
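Note on the hunk above: amdgpu_bo_fence() returns void, so it cannot pass a reservation failure back to its caller; instead, on OOM it falls back to waiting for the fence, the idea being that a fence which has already signaled no longer needs a slot in the reservation object. radeon_bo_fence() and vmw_bo_fence_single() further down use the same last-resort fallback. A condensed sketch of that pattern, with invented names purely for illustration:

/* Illustrative sketch of the void-function OOM fallback used above. */
static void example_bo_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	if (dma_resv_reserve_fences(resv, 1)) {
		/* As last resort on OOM we block for the fence */
		dma_fence_wait(fence, false);
		return;
	}

	dma_resv_add_excl_fence(resv, fence);
}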
@@ -2926,7 +2926,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (r)
 		goto error_free_root;
 
-	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
@@ -3369,7 +3369,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 		value = 0;
 	}
 
-	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
 	if (r) {
 		pr_debug("failed %d to reserve fence slot\n", r);
 		goto error_unlock;
@@ -548,7 +548,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 		goto reserve_bo_failed;
 	}
 
-	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
 	if (r) {
 		pr_debug("failed %d to reserve bo\n", r);
 		amdgpu_bo_unreserve(bo);
@@ -179,11 +179,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
 		struct dma_resv *robj = bo->obj->base.resv;
 
-		if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-			ret = dma_resv_reserve_shared(robj, 1);
-			if (ret)
-				return ret;
-		}
+		ret = dma_resv_reserve_fences(robj, 1);
+		if (ret)
+			return ret;
 
 		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
 			continue;
@@ -108,7 +108,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	trace_i915_gem_object_clflush(obj);
 
 	clflush = NULL;
-	if (!(flags & I915_CLFLUSH_SYNC))
+	if (!(flags & I915_CLFLUSH_SYNC) &&
+	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
 		clflush = clflush_work_create(obj);
 	if (clflush) {
 		i915_sw_fence_await_reservation(&clflush->base.chain,
@@ -998,11 +998,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
 			}
 		}
 
-		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
-			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
-			if (err)
-				return err;
-		}
+		err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+		if (err)
+			return err;
 
 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
 			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
@@ -2303,7 +2301,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
-	err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
+	err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
 	if (err)
 		return err;
 
@@ -611,7 +611,11 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
 	assert_object_held(src);
 	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 
-	ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
+	if (ret)
+		return ret;
+
+	ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
 	if (ret)
 		return ret;
 
@@ -216,7 +216,10 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
 					  i915_gem_object_is_lmem(obj),
 					  0xdeadbeaf, &rq);
 		if (rq) {
-			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+			err = dma_resv_reserve_fences(obj->base.resv, 1);
+			if (!err)
+				dma_resv_add_excl_fence(obj->base.resv,
+							&rq->fence);
 			i915_gem_object_set_moving_fence(obj, &rq->fence);
 			i915_request_put(rq);
 		}
@@ -1819,6 +1819,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
 			intel_frontbuffer_put(front);
 		}
 
+		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+			if (unlikely(err))
+				return err;
+		}
+
 		if (fence) {
 			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
 			obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -1826,7 +1832,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
 		}
 	} else {
 		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
+			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
 			if (unlikely(err))
 				return err;
 		}
@@ -2044,7 +2050,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
 	if (!obj->mm.rsgt)
 		return -EBUSY;
 
-	err = dma_resv_reserve_shared(obj->base.resv, 1);
+	err = dma_resv_reserve_fences(obj->base.resv, 1);
 	if (err)
 		return -EBUSY;
 
@@ -1043,6 +1043,13 @@ static int igt_lmem_write_cpu(void *arg)
 	}
 
 	i915_gem_object_lock(obj, NULL);
+
+	err = dma_resv_reserve_fences(obj->base.resv, 1);
+	if (err) {
+		i915_gem_object_unlock(obj);
+		goto out_put;
+	}
+
 	/* Put the pages into a known state -- from the gpu for added fun */
 	intel_engine_pm_get(engine);
 	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
@@ -257,13 +257,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
 static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
 			    bool write, bool explicit)
 {
-	int err = 0;
+	int err;
 
-	if (!write) {
-		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
-		if (err)
-			return err;
-	}
+	err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
+	if (err)
+		return err;
 
 	/* explicit sync use user passed dep fence */
 	if (explicit)
@@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
-		if (!write) {
-			/* NOTE: _reserve_shared() must happen before
-			 * _add_shared_fence(), which makes this a slightly
-			 * strange place to call it.  OTOH this is a
-			 * convenient can-fail point to hook it in.
-			 */
-			ret = dma_resv_reserve_shared(obj->resv, 1);
-			if (ret)
-				return ret;
-		}
+		/* NOTE: _reserve_shared() must happen before
+		 * _add_shared_fence(), which makes this a slightly
+		 * strange place to call it.  OTOH this is a
+		 * convenient can-fail point to hook it in.
+		 */
+		ret = dma_resv_reserve_fences(obj->resv, 1);
+		if (ret)
+			return ret;
 
 		/* exclusive fences must be ordered */
 		if (no_implicit && !write)
						 | 
					@ -346,11 +346,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 | 
				
			||||||
	struct dma_resv *resv = nvbo->bo.base.resv;
 | 
						struct dma_resv *resv = nvbo->bo.base.resv;
 | 
				
			||||||
	int i, ret;
 | 
						int i, ret;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (!exclusive) {
 | 
						ret = dma_resv_reserve_fences(resv, 1);
 | 
				
			||||||
		ret = dma_resv_reserve_shared(resv, 1);
 | 
					 | 
				
			||||||
	if (ret)
 | 
						if (ret)
 | 
				
			||||||
		return ret;
 | 
							return ret;
 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Waiting for the exclusive fence first causes performance regressions
 | 
						/* Waiting for the exclusive fence first causes performance regressions
 | 
				
			||||||
	 * under some circumstances. So manually wait for the shared ones first.
 | 
						 * under some circumstances. So manually wait for the shared ones first.
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -247,6 +247,10 @@ static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i, ret;
 
 	for (i = 0; i < bo_count; i++) {
+		ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+		if (ret)
+			return ret;
+
 		/* panfrost always uses write mode in its current uapi */
 		ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
 							      true);
@@ -200,7 +200,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
 			return ret;
 	}
 
-	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
 	if (ret)
 		return ret;
 
@@ -535,6 +535,10 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 			return r;
 
 		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
+
+		r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+		if (r)
+			return r;
 	}
 
 	return radeon_vm_clear_invalids(rdev, vm);
@@ -782,6 +782,14 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 		     bool shared)
 {
 	struct dma_resv *resv = bo->tbo.base.resv;
+	int r;
+
+	r = dma_resv_reserve_fences(resv, 1);
+	if (r) {
+		/* As last resort on OOM we block for the fence */
+		dma_fence_wait(&fence->base, false);
+		return;
+	}
 
 	if (shared)
 		dma_resv_add_shared_fence(resv, &fence->base);
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
 		int r;
 
 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
+		r = dma_resv_reserve_fences(pt->tbo.base.resv, 1);
 		if (r)
 			return r;
 
@@ -151,6 +151,10 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		}
 	}
 
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
+	if (ret)
+		goto out_err;
+
 	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
 	if (ret) {
 		if (ret == -EMULTIHOP)
@@ -735,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 
 	dma_resv_add_shared_fence(bo->base.resv, fence);
 
-	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (unlikely(ret)) {
 		dma_fence_put(fence);
 		return ret;
@@ -794,7 +798,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	bool type_found = false;
 	int i, ret;
 
-	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
 	if (unlikely(ret))
 		return ret;
 
@@ -221,9 +221,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 	fbo->base = *bo;
 
-	ttm_bo_get(bo);
-	fbo->bo = bo;
-
 	/**
 	 * Fix up members that we shouldn't copy directly:
 	 * TODO: Explicit member copy would probably be better here.
@@ -250,6 +247,15 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	ret = dma_resv_trylock(&fbo->base.base._resv);
 	WARN_ON(!ret);
 
+	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+	if (ret) {
+		kfree(fbo);
+		return ret;
+	}
+
+	ttm_bo_get(bo);
+	fbo->bo = bo;
+
 	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
 
 	*new_obj = &fbo->base;
@@ -90,6 +90,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
+		unsigned int num_fences;
 
 		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (ret == -EALREADY && dups) {
@@ -100,12 +101,10 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			continue;
 		}
 
+		num_fences = min(entry->num_shared, 1u);
 		if (!ret) {
-			if (!entry->num_shared)
-				continue;
-
-			ret = dma_resv_reserve_shared(bo->base.resv,
-						entry->num_shared);
+			ret = dma_resv_reserve_fences(bo->base.resv,
+						      num_fences);
 			if (!ret)
 				continue;
 		}
@@ -120,9 +119,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
 		}
 
-		if (!ret && entry->num_shared)
-			ret = dma_resv_reserve_shared(bo->base.resv,
-						entry->num_shared);
+		if (!ret)
+			ret = dma_resv_reserve_fences(bo->base.resv,
+						      num_fences);
 
 		if (unlikely(ret != 0)) {
 			if (ticket) {
@@ -259,16 +259,21 @@ v3d_lock_bo_reservations(struct v3d_job *job,
 		return ret;
 
 	for (i = 0; i < job->bo_count; i++) {
+		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+		if (ret)
+			goto fail;
+
 		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 							      job->bo[i], true);
-		if (ret) {
-			drm_gem_unlock_reservations(job->bo, job->bo_count,
-						    acquire_ctx);
-			return ret;
-		}
+		if (ret)
+			goto fail;
 	}
 
 	return 0;
+
+fail:
+	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+	return ret;
 }
 
 /**
@@ -644,7 +644,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = &exec->bo[i]->base;
 
-		ret = dma_resv_reserve_shared(bo->resv, 1);
+		ret = dma_resv_reserve_fences(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
@@ -157,12 +157,14 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
 	}
 
 	/* Expose the fence via the dma-buf */
-	ret = 0;
 	dma_resv_lock(resv, NULL);
-	if (arg->flags & VGEM_FENCE_WRITE)
-		dma_resv_add_excl_fence(resv, fence);
-	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
-		dma_resv_add_shared_fence(resv, fence);
+	ret = dma_resv_reserve_fences(resv, 1);
+	if (!ret) {
+		if (arg->flags & VGEM_FENCE_WRITE)
+			dma_resv_add_excl_fence(resv, fence);
+		else
+			dma_resv_add_shared_fence(resv, fence);
+	}
 	dma_resv_unlock(resv);
 
 	/* Record the fence in our idr for later signaling */
@@ -214,6 +214,7 @@ void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
 
 int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 {
+	unsigned int i;
 	int ret;
 
 	if (objs->nents == 1) {
@@ -222,6 +223,14 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
 		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
 						&objs->ticket);
 	}
+	if (ret)
+		return ret;
+
+	for (i = 0; i < objs->nents; ++i) {
+		ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
+		if (ret)
+			return ret;
+	}
 	return ret;
 }
 
@@ -747,16 +747,22 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 			 struct vmw_fence_obj *fence)
 {
 	struct ttm_device *bdev = bo->bdev;
-
 	struct vmw_private *dev_priv =
 		container_of(bdev, struct vmw_private, bdev);
+	int ret;
 
-	if (fence == NULL) {
+	if (fence == NULL)
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
-		dma_fence_put(&fence->base);
-	} else
+	else
+		dma_fence_get(&fence->base);
+
+	ret = dma_resv_reserve_fences(bo->base.resv, 1);
+	if (!ret)
 		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+	else
+		/* Last resort fallback when we are OOM */
+		dma_fence_wait(&fence->base, false);
+	dma_fence_put(&fence->base);
 }
 
 
@@ -117,7 +117,7 @@ struct dma_resv {
 	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
 	 * this often needs to be done past the point of no return in command
 	 * submission it cannot fail, and therefore sufficient slots need to be
-	 * reserved by calling dma_resv_reserve_shared().
+	 * reserved by calling dma_resv_reserve_fences().
 	 *
 	 * Note that actual semantics of what an exclusive or shared fence mean
 	 * is defined by the user, for reservation objects shared across drivers
@@ -413,7 +413,7 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
 
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
 void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
 			     struct dma_fence *fence);