forked from mirrors/linux
		
	drm/gem: Drop struct_mutex requirement from drm_gem_mmap_obj
Since
commit 131e663bd6
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Thu Jul 9 23:32:33 2015 +0200
    drm/gem: rip out drm vma accounting for gem mmaps
there is no need for this any more.
v2: Fixup compile noise spotted by 0-day build.
Link: http://mid.gmane.org/1444894601-5200-9-git-send-email-daniel.vetter@ffwll.ch
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
			
			
This commit is contained in:
		
							parent
							
								
									70d994704e
								
							
						
					
					
						commit
						4e270f0880
					
				
					 6 changed files with 0 additions and 19 deletions
				
			
		|  | @ -810,8 +810,6 @@ EXPORT_SYMBOL(drm_gem_vm_close); | |||
|  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So | ||||
|  * callers must verify access restrictions before calling this helper. | ||||
|  * | ||||
|  * NOTE: This function has to be protected with dev->struct_mutex | ||||
|  * | ||||
|  * Return 0 on success or -EINVAL if the object size is smaller than the VMA | ||||
|  * size, or if no gem_vm_ops are provided. | ||||
|  */ | ||||
|  | @ -820,8 +818,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, | |||
| { | ||||
| 	struct drm_device *dev = obj->dev; | ||||
| 
 | ||||
| 	lockdep_assert_held(&dev->struct_mutex); | ||||
| 
 | ||||
| 	/* Check for valid size. */ | ||||
| 	if (obj_size < vma->vm_end - vma->vm_start) | ||||
| 		return -EINVAL; | ||||
|  |  | |||
|  | @ -484,9 +484,7 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, | |||
| 	struct drm_device *dev = obj->dev; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	ret = drm_gem_mmap_obj(obj, obj->size, vma); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 	if (ret < 0) | ||||
| 		return ret; | ||||
| 
 | ||||
|  |  | |||
|  | @ -68,12 +68,7 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) | |||
| 	if (drm_device_is_unplugged(dev)) | ||||
| 		return -ENODEV; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma); | ||||
| 
 | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 
 | ||||
| 	if (ret) { | ||||
| 		pr_err("%s:drm_gem_mmap_obj fail\n", __func__); | ||||
| 		return ret; | ||||
|  |  | |||
|  | @ -45,9 +45,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) | |||
| { | ||||
| 	int ret; | ||||
| 
 | ||||
| 	mutex_lock(&obj->dev->struct_mutex); | ||||
| 	ret = drm_gem_mmap_obj(obj, obj->size, vma); | ||||
| 	mutex_unlock(&obj->dev->struct_mutex); | ||||
| 	if (ret < 0) | ||||
| 		return ret; | ||||
| 
 | ||||
|  |  | |||
|  | @ -140,15 +140,12 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, | |||
| 		struct vm_area_struct *vma) | ||||
| { | ||||
| 	struct drm_gem_object *obj = buffer->priv; | ||||
| 	struct drm_device *dev = obj->dev; | ||||
| 	int ret = 0; | ||||
| 
 | ||||
| 	if (WARN_ON(!obj->filp)) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	mutex_lock(&dev->struct_mutex); | ||||
| 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); | ||||
| 	mutex_unlock(&dev->struct_mutex); | ||||
| 	if (ret < 0) | ||||
| 		return ret; | ||||
| 
 | ||||
|  |  | |||
|  | @ -79,12 +79,9 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | |||
| int rockchip_gem_mmap_buf(struct drm_gem_object *obj, | ||||
| 			  struct vm_area_struct *vma) | ||||
| { | ||||
| 	struct drm_device *drm = obj->dev; | ||||
| 	int ret; | ||||
| 
 | ||||
| 	mutex_lock(&drm->struct_mutex); | ||||
| 	ret = drm_gem_mmap_obj(obj, obj->size, vma); | ||||
| 	mutex_unlock(&drm->struct_mutex); | ||||
| 	if (ret) | ||||
| 		return ret; | ||||
| 
 | ||||
|  |  | |||
		Loading…
	
		Reference in a new issue
	
	 Daniel Vetter
						Daniel Vetter