drm/amdgpu: Add ioctl to get all gem handles for a process

Add new ioctl DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES.

This ioctl returns a list of bos with their handles, sizes,
flags, and domains.

This ioctl is meant to be used during CRIU checkpoint to
provide the information needed to reconstruct the bos
during CRIU restore.

Userspace for this and the next change can be found at
https://github.com/checkpoint-restore/criu/pull/2613

Signed-off-by: David Francis <David.Francis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
David Francis 2025-06-16 09:47:42 -04:00 committed by Alex Deucher
parent 0317e0e224
commit f9db1fc52c
4 changed files with 116 additions and 0 deletions

View file

@ -3051,6 +3051,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
}; };
static const struct drm_driver amdgpu_kms_driver = { static const struct drm_driver amdgpu_kms_driver = {

View file

@ -1024,6 +1024,85 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
return r; return r;
} }
/**
* drm_amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
*
* @dev: drm device pointer
* @data: drm_amdgpu_gem_list_handles
* @filp: drm file pointer
*
* num_entries is set as an input to the size of the entries array.
* num_entries is sent back as output as the number of bos in the process.
* If that number is larger than the size of the array, the ioctl must
* be retried.
*
* Returns:
* 0 for success, -errno for errors.
*/
int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_amdgpu_gem_list_handles *args = data;
struct drm_amdgpu_gem_list_handles_entry *bo_entries;
struct drm_gem_object *gobj;
int id, ret = 0;
int bo_index = 0;
int num_bos = 0;
spin_lock(&filp->table_lock);
idr_for_each_entry(&filp->object_idr, gobj, id)
num_bos += 1;
spin_unlock(&filp->table_lock);
if (args->num_entries < num_bos) {
args->num_entries = num_bos;
return 0;
}
if (num_bos == 0) {
args->num_entries = 0;
return 0;
}
bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
if (!bo_entries)
return -ENOMEM;
spin_lock(&filp->table_lock);
idr_for_each_entry(&filp->object_idr, gobj, id) {
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct drm_amdgpu_gem_list_handles_entry *bo_entry;
if (bo_index >= num_bos) {
ret = -EAGAIN;
break;
}
bo_entry = &bo_entries[bo_index];
bo_entry->size = amdgpu_bo_size(bo);
bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
bo_entry->preferred_domains = bo->preferred_domains;
bo_entry->gem_handle = id;
bo_entry->alignment = bo->tbo.page_alignment;
if (bo->tbo.base.import_attach)
bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
bo_index += 1;
}
spin_unlock(&filp->table_lock);
args->num_entries = bo_index;
if (!ret)
ret = copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries));
kvfree(bo_entries);
return ret;
}
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
int width, int width,
int cpp, int cpp,

View file

@ -67,6 +67,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);
/* List all GEM handles owned by @filp (for CRIU checkpoint). */
int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp); struct drm_file *filp);

View file

@ -57,6 +57,7 @@ extern "C" {
#define DRM_AMDGPU_USERQ 0x16 #define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17 #define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18 #define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@ -77,6 +78,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq) #define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal) #define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait) #define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
/** /**
* DOC: memory domains * DOC: memory domains
@ -811,6 +813,38 @@ struct drm_amdgpu_gem_op {
__u64 value; __u64 value;
}; };
/* Set in drm_amdgpu_gem_list_handles_entry.flags for imported (prime) bos */
#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)

/* In/out argument for DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES */
struct drm_amdgpu_gem_list_handles {
	/* User pointer to array of struct drm_amdgpu_gem_list_handles_entry */
	__u64 entries;
	/* Size of entries buffer / Number of handles in process (if larger than size of buffer, must retry) */
	__u32 num_entries;
	/* Explicit padding; keeps the struct size a multiple of 8 bytes */
	__u32 padding;
};

/* One buffer object as reported by DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES */
struct drm_amdgpu_gem_list_handles_entry {
	/* gem handle of buffer object */
	__u32 gem_handle;
	/* Currently just one flag: IS_IMPORT */
	__u32 flags;
	/* Size of bo */
	__u64 size;
	/* Preferred domains for GEM_CREATE */
	__u64 preferred_domains;
	/* GEM_CREATE flags for re-creation of buffer */
	__u64 alloc_flags;
	/* physical start_addr alignment in bytes for some HW requirements */
	__u64 alignment;
};
#define AMDGPU_VA_OP_MAP 1 #define AMDGPU_VA_OP_MAP 1
#define AMDGPU_VA_OP_UNMAP 2 #define AMDGPU_VA_OP_UNMAP 2
#define AMDGPU_VA_OP_CLEAR 3 #define AMDGPU_VA_OP_CLEAR 3