forked from mirrors/linux
		
drm: allow limiting the scatter list size.

Add drm_device argument to drm_prime_pages_to_sg(), so we can call
dma_max_mapping_size() to figure the segment size limit and call into
__sg_alloc_table_from_pages() with the correct limit.

This fixes virtio-gpu with SEV.  Possibly it'll fix other bugs too,
given that drm seems to totally ignore segment size limits so far ...

v2: place max_segment in drm driver, not gem object.
v3: move max_segment next to the other gem fields.
v4: just use dma_max_mapping_size().

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20200907112425.15610-2-kraxel@redhat.com
parent 04e89ff364
commit 707d561f77

14 changed files with 29 additions and 17 deletions
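
Before the per-file hunks, here is the core idea in isolation: clamp the
scatterlist segment size to what the DMA layer says the device can map.
A minimal sketch, assuming the 5.9-era kernel API; the helper name
drm_sg_segment_limit() is ours for illustration, while dma_max_mapping_size()
and SCATTERLIST_MAX_SEGMENT are the symbols the patch actually uses:

	/* Pick the largest scatterlist segment the device can map.
	 * Illustrative helper; the patch open-codes this logic inside
	 * drm_prime_pages_to_sg(), see the drm_prime.c hunks below. */
	static size_t drm_sg_segment_limit(struct drm_device *dev)
	{
		size_t max_segment = 0;

		/* dev may be NULL for callers without a device. */
		if (dev)
			max_segment = dma_max_mapping_size(dev->dev);
		if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
			max_segment = SCATTERLIST_MAX_SEGMENT;
		return max_segment;
	}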

drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -302,7 +302,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
-		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+		sgt = drm_prime_pages_to_sg(obj->dev,
+					    bo->tbo.ttm->pages,
 					    bo->tbo.num_pages);
 		if (IS_ERR(sgt))
 			return sgt;

drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -656,7 +656,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
 
 	WARN_ON(shmem->base.import_attach);
 
-	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
 

drivers/gpu/drm/drm_prime.c
@@ -802,9 +802,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
  *
  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
  */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+				       struct page **pages, unsigned int nr_pages)
 {
 	struct sg_table *sg = NULL;
+	size_t max_segment = 0;
 	int ret;
 
 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -813,8 +815,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
 		goto out;
 	}
 
-	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-				nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (dev)
+		max_segment = dma_max_mapping_size(dev->dev);
+	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
 	if (ret)
 		goto out;
 
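
Why the allocator switch above: the sg_alloc_table_from_pages() wrapper used
by the old code always allows the largest possible segment, so page
coalescing could produce segments bigger than the device can map. As we
recall the 5.9-era lib/scatterlist.c, the wrapper is roughly:

	/* Sketch of the wrapper the old code called: it forwards
	 * SCATTERLIST_MAX_SEGMENT unconditionally, which is exactly
	 * what the SEV + virtio-gpu case could not tolerate. */
	int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				      unsigned int n_pages, unsigned int offset,
				      unsigned long size, gfp_t gfp_mask)
	{
		return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset,
						   size, SCATTERLIST_MAX_SEGMENT,
						   gfp_mask);
	}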

drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 		struct sg_table *sgt;
 
-		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+					    etnaviv_obj->pages, npages);
 		if (IS_ERR(sgt)) {
 			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
 				PTR_ERR(sgt));

drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	if (WARN_ON(!etnaviv_obj->pages))  /* should have already pinned! */
 		return ERR_PTR(-EINVAL);
 
-	return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
 }
 
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)

drivers/gpu/drm/msm/msm_gem.c
@@ -126,7 +126,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 
 		msm_obj->pages = p;
 
-		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 		if (IS_ERR(msm_obj->sgt)) {
 			void *ptr = ERR_CAST(msm_obj->sgt);
 

drivers/gpu/drm/msm/msm_gem_prime.c
@@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	if (WARN_ON(!msm_obj->pages))  /* should have already pinned! */
 		return NULL;
 
-	return drm_prime_pages_to_sg(msm_obj->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)

drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int npages = nvbo->bo.num_pages;
 
-	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
 }
 
 void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)

drivers/gpu/drm/radeon/radeon_prime.c
@@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	struct radeon_bo *bo = gem_to_radeon_bo(obj);
 	int npages = bo->tbo.num_pages;
 
-	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
 }
 
 void *radeon_gem_prime_vmap(struct drm_gem_object *obj)

drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
 
 	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
 
-	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
+					    rk_obj->pages, rk_obj->num_pages);
 	if (IS_ERR(rk_obj->sgt)) {
 		ret = PTR_ERR(rk_obj->sgt);
 		goto err_put_pages;
@@ -442,7 +443,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	int ret;
 
 	if (rk_obj->pages)
-		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
+		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
 
 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 	if (!sgt)

drivers/gpu/drm/tegra/gem.c
@@ -284,7 +284,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 
 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
 	if (IS_ERR(bo->sgt)) {
 		err = PTR_ERR(bo->sgt);
 		goto put_pages;

drivers/gpu/drm/vgem/vgem_drv.c
@@ -321,7 +321,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
 
-	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+	return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
 }
 
 static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,

drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -179,7 +179,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
 	if (!xen_obj->pages)
 		return ERR_PTR(-ENOMEM);
 
-	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
+	return drm_prime_pages_to_sg(gem_obj->dev,
+				     xen_obj->pages, xen_obj->num_pages);
 }
 
 struct drm_gem_object *

include/drm/drm_prime.h
@@ -88,7 +88,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
 
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+				       struct page **pages, unsigned int nr_pages);
 struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
 				     int flags);
 
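
For driver writers following this API change: an exporter's get_sg_table
hook now just forwards its drm_device, exactly as the converted drivers
above do. A minimal sketch for a hypothetical pages-backed object; struct
my_bo, to_my_bo() and its pages field are illustrative, while the
drm_prime_pages_to_sg() call matches the new signature:

	static struct sg_table *my_gem_prime_get_sg_table(struct drm_gem_object *obj)
	{
		struct my_bo *bo = to_my_bo(obj);	/* hypothetical object */

		/* Passing obj->dev lets the helper query dma_max_mapping_size(). */
		return drm_prime_pages_to_sg(obj->dev, bo->pages,
					     obj->size >> PAGE_SHIFT);
	}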