drm: msm: Switch to use drm_gem_object reservation_object

Now that the base struct drm_gem_object has a reservation_object, use it
and remove the private BO one.

We can't use the drm_gem_reservation_object_wait() helper for MSM because
(in theory) msm_gem_cpu_prep() will also do some cache maintenance on the
GEM object.

Cc: David Airlie <airlied@linux.ie>
Cc: linux-arm-msm@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: freedreno@lists.freedesktop.org
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190202154158.10443-4-robh@kernel.org
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
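As a hedged illustration of the pattern this patch moves to (not code from the patch itself): fence bookkeeping now goes through the reservation object embedded in the base struct drm_gem_object, reachable as obj->resv (or msm_obj->base.resv), instead of a driver-private msm_obj->resv pointer. A minimal sketch, where example_attach_fence() is a made-up name:

#include <linux/dma-fence.h>
#include <linux/reservation.h>
#include <drm/drm_gem.h>

/*
 * Illustrative only: attach a fence to the reservation object that the
 * base GEM object now carries.  Callers are assumed to hold obj->resv's
 * ww_mutex and to have reserved a shared slot beforehand, as the driver
 * code in the hunks below does.
 */
static void example_attach_fence(struct drm_gem_object *obj,
				 struct dma_fence *fence, bool exclusive)
{
	/* Before this patch the driver would have passed msm_obj->resv here. */
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
}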
parent fa238ea166
commit dd55cf6929

5 changed files with 13 additions and 31 deletions

@@ -1085,7 +1085,6 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
-	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,

@@ -299,7 +299,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);

@@ -668,14 +668,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = reservation_object_get_list(msm_obj->resv);
+	fobj = reservation_object_get_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(msm_obj->resv);
+		fence = reservation_object_get_excl(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
 		if (fence && (fence->context != fctx->context)) {
 			ret = dma_fence_wait(fence, true);

@@ -689,7 +688,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 
 	for (i = 0; i < fobj->shared_count; i++) {
 		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(msm_obj->resv));
+						reservation_object_held(obj->resv));
 		if (fence->context != fctx->context) {
 			ret = dma_fence_wait(fence, true);
 			if (ret)

@@ -707,9 +706,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	msm_obj->gpu = gpu;
 	if (exclusive)
-		reservation_object_add_excl_fence(msm_obj->resv, fence);
+		reservation_object_add_excl_fence(obj->resv, fence);
 	else
-		reservation_object_add_shared_fence(msm_obj->resv, fence);
+		reservation_object_add_shared_fence(obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }

@@ -729,13 +728,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	bool write = !!(op & MSM_PREP_WRITE);
 	unsigned long remain =
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
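This hunk is where the commit message's caveat about drm_gem_reservation_object_wait() applies: the wait itself is the generic reservation-object wait, now done on obj->resv, but it stays in driver code so that cache maintenance can (in theory) be added around it. A rough sketch of that shape, where example_cpu_prep() and the cache-maintenance hook are hypothetical and not part of the patch:

/*
 * Hedged sketch of why MSM keeps its own cpu_prep instead of the generic
 * drm_gem_reservation_object_wait() helper: the wait is the same, but a
 * driver-specific step may need to follow it.
 */
static int example_cpu_prep(struct drm_gem_object *obj, bool write,
			    unsigned long remain)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write, true,
						  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* hypothetical hook point: cache maintenance would go here */
	return 0;
}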

@@ -767,7 +765,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = msm_obj->resv;
+	struct reservation_object *robj = obj->resv;
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;

@@ -879,9 +877,6 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 		put_pages(obj);
 	}
 
-	if (msm_obj->resv == &msm_obj->_resv)
-		reservation_object_fini(msm_obj->resv);
-
 	drm_gem_object_release(obj);
 
 	mutex_unlock(&msm_obj->lock);

@@ -941,12 +936,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	msm_obj->flags = flags;
 	msm_obj->madv = MSM_MADV_WILLNEED;
 
-	if (resv) {
-		msm_obj->resv = resv;
-	} else {
-		msm_obj->resv = &msm_obj->_resv;
-		reservation_object_init(msm_obj->resv);
-	}
+	if (resv)
+		msm_obj->base.resv = resv;
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
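The simplification above works because, with this series, the base GEM object owns a reservation object: when msm_gem_new_impl() is not handed an external resv (the dma-buf import case), it no longer has to initialize one of its own, and the explicit fini in msm_gem_free_object() can go as well. The core object init is assumed to behave roughly like this (a paraphrase of drm_gem_private_object_init() behaviour as of this series, not the exact core code):

/* Assumption: the DRM core points obj->resv at the embedded _resv unless
 * the driver already set it (e.g. to a dma-buf's reservation object). */
static void example_gem_object_init_resv(struct drm_gem_object *obj)
{
	reservation_object_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;
}

Presumably the matching reservation_object_fini(&obj->_resv) lives in drm_gem_object_release(), which is why the driver-side fini dropped earlier in this diff is not needed.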

@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	return msm_obj->resv;
-}
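With the reservation object living in the base object, the removed msm_gem_prime_res_obj() above has nothing driver-specific left to return; presumably the core prime export path now picks up obj->resv by default, which is why both this function and the .gem_prime_res_obj hook dropped in the first hunk become redundant. Illustratively, the callback would have reduced to:

/* What the deleted callback would amount to after this change
 * (illustrative only; the patch removes it rather than keeping this). */
static struct reservation_object *example_prime_res_obj(struct drm_gem_object *obj)
{
	return obj->resv;
}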

@@ -173,7 +173,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
-		ww_mutex_unlock(&msm_obj->resv->lock);
+		ww_mutex_unlock(&msm_obj->base.resv->lock);
 
 	if (backoff && !(submit->bos[i].flags & BO_VALID))
 		submit->bos[i].iova = 0;

@@ -196,7 +196,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 		contended = i;
 
 		if (!(submit->bos[i].flags & BO_LOCKED)) {
-			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
 					&submit->ticket);
 			if (ret)
 				goto fail;

@@ -218,7 +218,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
 	if (ret == -EDEADLK) {
 		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
 		/* we lost out in a seqno race, lock and retry.. */
-		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
 				&submit->ticket);
 		if (!ret) {
 			submit->bos[contended].flags |= BO_LOCKED;

@@ -244,7 +244,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 			 * strange place to call it.  OTOH this is a
 			 * convenient can-fail point to hook it in.
 			 */
-			ret = reservation_object_reserve_shared(msm_obj->resv,
+			ret = reservation_object_reserve_shared(msm_obj->base.resv,
 								1);
 			if (ret)
 				return ret;