	drm/nouveau: Initialize GEM object before TTM object
TTM assumes that drivers initialize the embedded GEM object before
calling the ttm_bo_init() function. This is not currently the case
in the Nouveau driver. Fix this by splitting up nouveau_bo_new()
into nouveau_bo_alloc() and nouveau_bo_init() so that the GEM can
be initialized before TTM BO initialization when necessary.
Fixes: b96f3e7c80 ("drm/ttm: use gem vma_node")
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190814093524.GA31345@ulmo
			
			
This commit is contained in:

    parent 343bbb1a7f
    commit 019cbd4a4f

4 changed files with 77 additions and 41 deletions
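For context, a minimal sketch of the allocation pattern this split enables, modeled on the nouveau_gem_new() hunk below; error handling is trimmed and the surrounding driver state (cli, drm, size, align, flags, tile_mode, tile_flags) is assumed:

	/* Allocate the BO, initialize the embedded GEM object, and only then
	 * let TTM initialize the BO. Previously, nouveau_bo_new() called
	 * ttm_bo_init() before the GEM object existed, violating TTM's
	 * assumption (since b96f3e7c80, "drm/ttm: use gem vma_node") that
	 * the embedded GEM object is already set up. */
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* GEM first: TTM now relies on the GEM object's vma_node. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* TTM second. */
	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}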
				
			
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -185,31 +185,24 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 	*size = roundup_64(*size, PAGE_SIZE);
 }
 
-int
-nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg, struct dma_resv *robj,
-	       struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
+		 u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	struct nvif_mmu *mmu = &cli->mmu;
 	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
-	size_t acc_size;
-	int type = ttm_bo_type_device;
-	int ret, i, pi = -1;
+	int i, pi = -1;
 
 	if (!size) {
 		NV_WARN(drm, "skipped size %016llx\n", size);
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
-	if (sg)
-		type = ttm_bo_type_sg;
-
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
@@ -231,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}
 
 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
@@ -241,7 +234,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 		nvbo->comp = (tile_flags & 0x00030000) >> 16;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}
 	} else {
 		nvbo->zeta = (tile_flags & 0x00000007);
@@ -278,7 +271,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	}
 
 	if (WARN_ON(pi < 0))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	/* Disable compression if suitable settings couldn't be found. */
 	if (nvbo->comp && !vmm->page[pi].comp) {
@@ -288,23 +281,51 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	}
 	nvbo->page = vmm->page[pi].shift;
 
+	return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+		struct sg_table *sg, struct dma_resv *robj)
+{
+	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+	size_t acc_size;
+	int ret;
+
+	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
+
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
-	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
-				       sizeof(struct nouveau_bo));
-
-	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
-			  type, &nvbo->placement,
-			  align >> PAGE_SHIFT, false, acc_size, sg,
-			  robj, nouveau_bo_del_ttm);
+	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
+			  &nvbo->placement, align >> PAGE_SHIFT, false,
+			  acc_size, sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
 
+	return 0;
+}
+
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg, struct dma_resv *robj,
+	       struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
+
+	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+	if (ret)
+		return ret;
+
 	*pnvbo = nvbo;
 	return 0;
 }
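Note the error-return convention change above: nouveau_bo_alloc() returns the buffer object directly, so failures travel inside the pointer via the kernel's ERR_PTR() machinery rather than through a separate int. A condensed illustration (not patch code):

	struct nouveau_bo *nvbo = nouveau_bo_alloc(cli, size, flags, 0, 0);
	if (IS_ERR(nvbo))		/* detects an errno encoded in the pointer value */
		return PTR_ERR(nvbo);	/* decodes it back to a negative errno, e.g. -EINVAL */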
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -71,6 +71,10 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 extern struct ttm_bo_driver nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
+struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 size, u32 flags,
+				    u32 tile_mode, u32 tile_flags);
+int  nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+		     struct sg_table *sg, struct dma_resv *robj);
 int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
 		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
 		    struct dma_resv *robj,
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -188,11 +188,23 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
 		flags |= TTM_PL_FLAG_UNCACHED;
 
-	ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
-			     tile_flags, NULL, NULL, pnvbo);
-	if (ret)
-		return ret;
-	nvbo = *pnvbo;
+	nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
+	if (ret) {
+		nouveau_bo_ref(NULL, &nvbo);
+		return ret;
+	}
+
+	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+	if (ret) {
+		nouveau_bo_ref(NULL, &nvbo);
+		return ret;
+	}
 
 	/* we restrict allowed domains on nv50+ to only the types
 	 * that were requested at creation time.  not possibly on
@@ -203,15 +215,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		nvbo->valid_domains &= domain;
 
-	/* Initialize the embedded gem-object. We return a single gem-reference
-	 * to the caller, instead of a normal nouveau_bo ttm reference. */
-	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);
-	if (ret) {
-		nouveau_bo_ref(NULL, pnvbo);
-		return -ENOMEM;
-	}
-
 	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
+	*pnvbo = nvbo;
 	return 0;
 }
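One detail in the hunks above: drm_gem_object_init() now runs before TTM has populated nvbo->bo.mem, so the GEM size comes from the caller's size argument rather than nvbo->bo.mem.size. An illustrative contrast (not patch code):

	/* Before: GEM size read back from the TTM-initialized BO. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);

	/* After: TTM has not run yet, so the caller-supplied size is used;
	 * nouveau_bo_init() later page-aligns its own copy of size via
	 * nouveau_bo_fixup_align(). */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);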
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -63,28 +63,34 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	struct dma_resv *robj = attach->dmabuf->resv;
+	size_t size = attach->dmabuf->size;
 	u32 flags = 0;
 	int ret;
 
 	flags = TTM_PL_FLAG_TT;
 
 	dma_resv_lock(robj, NULL);
-	ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
-			     sg, robj, &nvbo);
+	nvbo = nouveau_bo_alloc(&drm->client, size, flags, 0, 0);
 	dma_resv_unlock(robj);
-	if (ret)
-		return ERR_PTR(ret);
+	if (IS_ERR(nvbo))
+		return ERR_CAST(nvbo);
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 
 	/* Initialize the embedded gem-object. We return a single gem-reference
 	 * to the caller, instead of a normal nouveau_bo ttm reference. */
-	ret = drm_gem_object_init(dev, &nvbo->bo.base, nvbo->bo.mem.size);
+	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
 	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = nouveau_bo_init(nvbo, size, 0, flags, sg, robj);
+	if (ret) {
+		nouveau_bo_ref(NULL, &nvbo);
+		return ERR_PTR(ret);
+	}
+
 	return &nvbo->bo.base;
 }
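The prime import path above propagates an allocation failure with ERR_CAST() because the function must return a struct drm_gem_object *, not a struct nouveau_bo *. A condensed illustration (not patch code):

	nvbo = nouveau_bo_alloc(&drm->client, size, flags, 0, 0);
	if (IS_ERR(nvbo))
		return ERR_CAST(nvbo);	/* re-types the error pointer; equivalent
					 * to ERR_PTR(PTR_ERR(nvbo)) */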