drm/amdgpu: add amdgpu_gfx_sched_mask and amdgpu_compute_sched_mask debugfs

compute/gfx may have multiple rings on some hardware. In some cases,
userspace wants to run jobs on a specific ring for validation purposes.
This debugfs entry helps to disable or enable submitting jobs to a
specific ring. The entry is created only if the gfx/compute IP block
has at least two rings.

Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
Suggested-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Tim Huang <tim.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b78612939d
commit c5c63d9cb5

3 changed files with 145 additions and 0 deletions
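The new entries behave like any other debugfs attribute, so they can be
driven from ordinary userspace file I/O. Below is a minimal, hypothetical
C sketch (not part of the patch) that pins compute submissions to ring 0;
the debugfs mount point and the DRM minor number (dri/0) are assumptions
that vary per system. The same pattern applies to amdgpu_gfx_sched_mask.

/*
 * Hypothetical userspace sketch: read the current compute scheduling
 * mask, then keep only ring 0 enabled. Assumes debugfs is mounted at
 * /sys/kernel/debug and the amdgpu card is DRM minor 0; requires root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MASK_PATH "/sys/kernel/debug/dri/0/amdgpu_compute_sched_mask"

int main(void)
{
	char buf[32] = {0};
	ssize_t n;
	int fd;

	/* Read the current mask: one bit per compute ring, printed in hex. */
	fd = open(MASK_PATH, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0)
		printf("current mask: %s", buf);
	close(fd);

	/* Keep only bit 0 set: jobs may now reach compute ring 0 only.
	 * The kernel rejects a value that selects no existing ring
	 * with -EINVAL. */
	fd = open(MASK_PATH, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "0x1", strlen("0x1")) < 0)
		perror("write");
	close(fd);
	return 0;
}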
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2096,6 +2096,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
 
 	amdgpu_debugfs_jpeg_sched_mask_init(adev);
+	amdgpu_debugfs_gfx_sched_mask_init(adev);
+	amdgpu_debugfs_compute_sched_mask_init(adev);
 
 	amdgpu_ras_debugfs_create_all(adev);
 	amdgpu_rap_debugfs_init(adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1917,3 +1917,144 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
 }
+
+/*
+ * debugfs to enable/disable gfx job submission to a specific ring.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+
+	mask = (1 << adev->gfx.num_gfx_rings) - 1;
+	if ((val & mask) == 0)
+		return -EINVAL;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+		ring = &adev->gfx.gfx_ring[i];
+		if (val & (1 << i))
+			ring->sched.ready = true;
+		else
+			ring->sched.ready = false;
+	}
+	/* publish the sched.ready flag updates to other CPUs */
+	smp_wmb();
+	return 0;
+}
+
+static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+	for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+		ring = &adev->gfx.gfx_ring[i];
+		if (ring->sched.ready)
+			mask |= 1 << i;
+	}
+
+	*val = mask;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
+			 amdgpu_debugfs_gfx_sched_mask_get,
+			 amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	if (!(adev->gfx.num_gfx_rings > 1))
+		return;
+	sprintf(name, "amdgpu_gfx_sched_mask");
+	debugfs_create_file(name, 0600, root, adev,
+			    &amdgpu_debugfs_gfx_sched_mask_fops);
+#endif
+}
+
+/*
+ * debugfs to enable/disable compute job submission to a specific ring.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+
+	mask = (1 << adev->gfx.num_compute_rings) - 1;
+	if ((val & mask) == 0)
+		return -EINVAL;
+
+	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+		ring = &adev->gfx.compute_ring[i];
+		if (val & (1 << i))
+			ring->sched.ready = true;
+		else
+			ring->sched.ready = false;
+	}
+
+	/* publish the sched.ready flag updates to other CPUs */
+	smp_wmb();
+	return 0;
+}
+
+static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+	u32 i;
+	u64 mask = 0;
+	struct amdgpu_ring *ring;
+
+	if (!adev)
+		return -ENODEV;
+	for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+		ring = &adev->gfx.compute_ring[i];
+		if (ring->sched.ready)
+			mask |= 1 << i;
+	}
+
+	*val = mask;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
+			 amdgpu_debugfs_compute_sched_mask_get,
+			 amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	struct drm_minor *minor = adev_to_drm(adev)->primary;
+	struct dentry *root = minor->debugfs_root;
+	char name[32];
+
+	if (!(adev->gfx.num_compute_rings > 1))
+		return;
+	sprintf(name, "amdgpu_compute_sched_mask");
+	debugfs_create_file(name, 0600, root, adev,
+			    &amdgpu_debugfs_compute_sched_mask_fops);
+#endif
+}
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -584,6 +584,8 @@ void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
 void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
 void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
 
 static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
 {
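A worked example of the mask semantics, assuming a part with two gfx rings: amdgpu_debugfs_gfx_sched_mask_set() computes the valid mask as (1 << 2) - 1 = 0x3, so writing 0x2 leaves only gfx_ring[1] schedulable, writing 0x3 re-enables both rings, and writing 0x4 is rejected with -EINVAL because it selects no existing ring. Reads return the current mask in hex ("%llx\n"), one bit per ready ring.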