drm/i915: Acquire the backing storage outside of struct_mutex in set-domain

As we can locklessly (well, struct_mutex-lessly) acquire the backing storage, do so in set-domain-ioctl to reduce the contention on the struct_mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-18-chris@chris-wilson.co.uk
Parent: fe115628d5
Commit: 40e62d5d6b

1 changed file with 61 additions and 38 deletions
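The change below follows a simple contention-reduction pattern: do the expensive, self-contained acquisition (here, pinning the object's backing pages) before taking the heavily contended struct_mutex, so the critical section only covers the bookkeeping that actually needs the lock. A minimal userspace sketch of that pattern, using pthreads and purely illustrative names (acquire_backing, set_domain, struct object) rather than anything from the driver, looks like this:

	/* Sketch only: slow acquisition outside the lock, short critical section. */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for struct_mutex */

	struct object {
		int pages_pinned;
		int domain;
	};

	/* Slow, self-contained work: touches only this object, so no lock is taken. */
	static int acquire_backing(struct object *obj)
	{
		usleep(1000);			/* stand-in for allocating/pinning backing pages */
		obj->pages_pinned = 1;
		return 0;
	}

	static int set_domain(struct object *obj, int domain)
	{
		int err;

		err = acquire_backing(obj);	/* done before taking the mutex */
		if (err)
			return err;

		pthread_mutex_lock(&big_lock);
		obj->domain = domain;		/* shared bookkeeping: keep this section short */
		pthread_mutex_unlock(&big_lock);
		return 0;
	}

	int main(void)
	{
		struct object obj = { 0, 0 };

		if (set_domain(&obj, 1) == 0)
			printf("domain=%d pinned=%d\n", obj.domain, obj.pages_pinned);
		return 0;
	}

The patch applies the same ordering in i915_gem_set_domain_ioctl: wait for the object, pin its pages, and only then take struct_mutex for the domain update, unwinding through the out_pages/out_unlocked labels on error.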
				
			
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1452,6 +1452,30 @@ write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
 }
 
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *i915;
+	struct list_head *list;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!i915_vma_is_ggtt(vma))
+			continue;
+
+		if (i915_vma_is_active(vma))
+			continue;
+
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
+		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	}
+
+	i915 = to_i915(obj->base.dev);
+	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
+	list_move_tail(&obj->global_list, list);
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1467,7 +1491,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
 	uint32_t write_domain = args->write_domain;
-	int ret;
+	int err;
 
 	/* Only handle setting domains to types used by the CPU. */
 	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
@@ -1487,33 +1511,48 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = i915_gem_object_wait(obj,
+	err = i915_gem_object_wait(obj,
 				   I915_WAIT_INTERRUPTIBLE |
 				   (write_domain ? I915_WAIT_ALL : 0),
 				   MAX_SCHEDULE_TIMEOUT,
 				   to_rps_client(file));
-	if (ret)
-		goto err;
+	if (err)
+		goto out_unlocked;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto err;
+	/* Flush and acquire obj->pages so that we are coherent through
+	 * direct access in memory with previous cached writes through
+	 * shmemfs and that our cache domain tracking remains valid.
+	 * For example, if the obj->filp was moved to swap without us
+	 * being notified and releasing the pages, we would mistakenly
+	 * continue to assume that the obj remained out of the CPU cached
+	 * domain.
+	 */
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto out_unlocked;
+
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
+		goto out_pages;
 
 	if (read_domains & I915_GEM_DOMAIN_GTT)
-		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 	else
-		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+
+	/* And bump the LRU for this access */
+	i915_gem_object_bump_inactive_ggtt(obj);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	if (write_domain != 0)
 		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
-	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
-err:
+out_pages:
+	i915_gem_object_unpin_pages(obj);
+out_unlocked:
 	i915_gem_object_put_unlocked(obj);
-	return ret;
+	return err;
 }
 
 /**
@@ -1734,6 +1773,10 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	if (ret)
 		goto err;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err;
+
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1816,6 +1859,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
 	intel_runtime_pm_put(dev_priv);
+	i915_gem_object_unpin_pages(obj);
 err:
 	switch (ret) {
 	case -EIO:
@@ -3269,24 +3313,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 					    I915_GEM_DOMAIN_CPU);
 }
 
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (!i915_vma_is_ggtt(vma))
-			continue;
-
-		if (i915_vma_is_active(vma))
-			continue;
-
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
-
-		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	}
-}
-
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  * @obj: object to act on
@@ -3342,7 +3368,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
@@ -3354,10 +3380,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_read_domains,
 					    old_write_domain);
 
-	/* And bump the LRU for this access */
-	i915_gem_object_bump_inactive_ggtt(obj);
 	i915_gem_object_unpin_pages(obj);
-
 	return 0;
 }
 
@@ -3713,7 +3736,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.