drm/amdgpu: Add mapping info option for GEM_OP ioctl

Add a new GEM_OP ioctl option, GET_MAPPING_INFO, which
returns the list of mappings associated with a given bo, along with
their addresses, sizes, offsets, and flags.

Userspace for this and the previous change can be found at:
https://github.com/checkpoint-restore/criu/pull/2613

Signed-off-by: David Francis <David.Francis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 4d82724f7f (parent f9db1fc52c)
Authored by David Francis on 2025-06-16 09:49:33 -04:00, committed by Alex Deucher
3 changed files with 101 additions and 15 deletions
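For context, here is a minimal userspace sketch of the query/retry protocol the new op implements. This is not part of the patch: it assumes libdrm's drmCommandWriteRead() and a uapi amdgpu_drm.h that already carries the definitions added below (header include paths depend on the libdrm install); fd and bo_handle are caller-supplied placeholders.

/*
 * Hedged sketch, not part of the patch: dump every VM mapping of a GEM
 * handle via the new GET_MAPPING_INFO op.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int dump_bo_mappings(int fd, uint32_t bo_handle)
{
	struct drm_amdgpu_gem_vm_entry *entries = NULL;
	struct drm_amdgpu_gem_op args;
	uint32_t i, n = 0;
	int r;

	for (;;) {
		memset(&args, 0, sizeof(args));		/* padding must stay zero */
		args.handle = bo_handle;
		args.op = AMDGPU_GEM_OP_GET_MAPPING_INFO;
		args.value = (uintptr_t)entries;	/* user array, NULL on the first pass */
		args.num_entries = n;			/* in: array size, out: mapping count */

		r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_OP, &args, sizeof(args));
		if (r)
			goto out;

		if (args.num_entries <= n)
			break;				/* everything fit, entries[] is valid */

		/* Array too small (or first, size-query pass): grow and retry. */
		n = args.num_entries;
		free(entries);
		entries = calloc(n, sizeof(*entries));
		if (!entries)
			return -ENOMEM;
	}

	for (i = 0; i < args.num_entries; i++)
		printf("va=0x%llx size=0x%llx offset=0x%llx flags=0x%llx\n",
		       (unsigned long long)entries[i].addr,
		       (unsigned long long)entries[i].size,
		       (unsigned long long)entries[i].offset,
		       (unsigned long long)entries[i].flags);
out:
	free(entries);
	return r;
}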

drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -955,17 +955,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_vm_bo_base *base;
 	struct amdgpu_bo *robj;
+	struct drm_exec exec;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	int r;
 
+	if (args->padding)
+		return -EINVAL;
+
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (!gobj)
 		return -ENOENT;
 
 	robj = gem_to_amdgpu_bo(gobj);
 
-	r = amdgpu_bo_reserve(robj, false);
-	if (unlikely(r))
-		goto out;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec, gobj);
+		drm_exec_retry_on_contention(&exec);
+		if (r)
+			goto out_exec;
+
+		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+			drm_exec_retry_on_contention(&exec);
+			if (r)
+				goto out_exec;
+		}
+	}
 
 	switch (args->op) {
 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -976,7 +993,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@ -985,20 +1002,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (drm_gem_is_imported(&robj->tbo.base) && if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) { args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL; r = -EINVAL;
amdgpu_bo_unreserve(robj); goto out_exec;
break;
} }
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM; r = -EPERM;
amdgpu_bo_unreserve(robj); goto out_exec;
break;
} }
for (base = robj->vm_bo; base; base = base->next) for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL; r = -EINVAL;
amdgpu_bo_unreserve(robj); goto out_exec;
goto out;
} }
@@ -1011,15 +1025,63 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 			amdgpu_vm_bo_invalidate(robj, true);
+		drm_exec_fini(&exec);
 
-		amdgpu_bo_unreserve(robj);
 		break;
+	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+		struct drm_amdgpu_gem_vm_entry *vm_entries;
+		struct amdgpu_bo_va_mapping *mapping;
+		int num_mappings = 0;
+		/*
+		 * num_entries is set as an input to the size of the user-allocated array of
+		 * drm_amdgpu_gem_vm_entry stored at args->value.
+		 * num_entries is sent back as output as the number of mappings the bo has.
+		 * If that number is larger than the size of the array, the ioctl must
+		 * be retried.
+		 */
+		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+		if (!vm_entries)
+			return -ENOMEM;
+
+		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
+
+		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
+
+		drm_exec_fini(&exec);
+
+		if (num_mappings > 0 && num_mappings <= args->num_entries)
+			r = copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries));
+
+		args->num_entries = num_mappings;
+
+		kvfree(vm_entries);
+		break;
+	}
 	default:
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		r = -EINVAL;
 	}
 
-out:
+	drm_gem_object_put(gobj);
+	return r;
+
+out_exec:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -670,4 +670,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
 void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
 			       struct amdgpu_task_info *task_info);
 
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+	list_for_each_entry(mapping, &(bo_va)->valids, list)
+
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+	list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
 #endif
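As an illustration only (a hypothetical helper, not part of this patch), the two new macros let callers walk both mapping lists of a bo_va; the caller must hold the VM reservations, as amdgpu_gem_op_ioctl() does via drm_exec:

/* Hypothetical helper, illustration only: count every mapping of a bo_va
 * using the iteration macros added above. */
static unsigned int amdgpu_bo_va_num_mappings(struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned int count = 0;

	amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
		count++;

	amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping)
		count++;

	return count;
}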

include/uapi/drm/amdgpu_drm.h

@@ -802,6 +802,21 @@ union drm_amdgpu_wait_fences {
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
+#define AMDGPU_GEM_OP_GET_MAPPING_INFO		2
+
+struct drm_amdgpu_gem_vm_entry {
+	/* Start of mapping (in bytes) */
+	__u64	addr;
+
+	/* Size of mapping (in bytes) */
+	__u64	size;
+
+	/* Mapping offset */
+	__u64	offset;
+
+	/* flags needed to recreate mapping */
+	__u64	flags;
+};
 
 /* Sets or returns a value associated with a buffer. */
 struct drm_amdgpu_gem_op {
@@ -809,8 +824,12 @@ struct drm_amdgpu_gem_op {
 	__u32	handle;
 	/** AMDGPU_GEM_OP_* */
 	__u32	op;
-	/** Input or return value */
+	/** Input or return value. For MAPPING_INFO op: pointer to array of struct drm_amdgpu_gem_vm_entry */
 	__u64	value;
+	/** For MAPPING_INFO op: number of mappings (in/out) */
+	__u32	num_entries;
+
+	__u32	padding;
 };
 
 #define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT	(1 << 0)