drm/amd/display: fix dmub access race condition

Accessing DC from amdgpu_dm is usually preceded by acquisition of the
dc_lock mutex, and most of the DC API calls that DM makes are covered by
it. However, a few are not. Some DC APIs called from interrupt context
end up sending DMUB commands while other threads are already using DMUB.
This was apparent from a race between the calls that enable/disable idle
optimizations and the DC API that sets vmin/vmax.

Offload the call to dc_stream_adjust_vmin_vmax() to a worker instead of
calling it directly from the interrupt handler, so that the call can wait
on dc_lock in process context.

Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c210b757b4 (parent fd20627c74)
Aurabindo Pillai, 2025-07-21 11:03:39 -04:00; committed by Alex Deucher
2 changed files with 63 additions and 6 deletions
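
The change follows the standard deferred-work pattern: the interrupt handler
only allocates and queues a work item, and the work handler, which runs in
process context, takes the mutex and issues the call. Below is a minimal,
generic sketch of that pattern; dev_ctx, offload_work, offload_handler and
hw_irq_handler are hypothetical names used for illustration only, and the
actual driver code is in the diff that follows.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/mutex.h>

struct dev_ctx {			/* hypothetical device context */
	struct mutex lock;		/* stands in for the dm.dc_lock mutex */
};

struct offload_work {
	struct work_struct work;
	struct dev_ctx *ctx;
};

static void offload_handler(struct work_struct *w)
{
	struct offload_work *ow = container_of(w, struct offload_work, work);

	mutex_lock(&ow->ctx->lock);	/* process context: sleeping is allowed */
	/* ... issue the call that previously raced with other mutex holders ... */
	mutex_unlock(&ow->ctx->lock);
	kfree(ow);			/* work item frees itself when done */
}

static void hw_irq_handler(struct dev_ctx *ctx)
{
	/* atomic context: the allocation must not sleep */
	struct offload_work *ow = kzalloc(sizeof(*ow), GFP_ATOMIC);

	if (!ow)
		return;
	ow->ctx = ctx;
	INIT_WORK(&ow->work, offload_handler);
	queue_work(system_wq, &ow->work);	/* defer to the system workqueue */
}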

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -530,6 +530,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
 		      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
 }
 
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+	struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+	struct amdgpu_device *adev = work->adev;
+	struct dc_stream_state *stream = work->stream;
+	struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+	mutex_lock(&adev->dm.dc_lock);
+	dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+	mutex_unlock(&adev->dm.dc_lock);
+
+	dc_stream_release(stream);
+	kfree(work->adjust);
+	kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+				  struct dc_stream_state *stream,
+				  struct dc_crtc_timing_adjust *adjust)
+{
+	struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+	if (!offload_work) {
+		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+		return;
+	}
+
+	struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+	if (!adjust_copy) {
+		drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+		kfree(offload_work);
+		return;
+	}
+
+	dc_stream_retain(stream);
+	memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+	INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+	offload_work->adev = adev;
+	offload_work->stream = stream;
+	offload_work->adjust = adjust_copy;
+
+	queue_work(system_wq, &offload_work->work);
+}
+
 static void dm_vupdate_high_irq(void *interrupt_params)
 {
 	struct common_irq_params *irq_params = interrupt_params;
@@ -579,10 +623,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 					acrtc->dm_irq_params.stream,
 					&acrtc->dm_irq_params.vrr_params);
-			dc_stream_adjust_vmin_vmax(
-				adev->dm.dc,
-				acrtc->dm_irq_params.stream,
-				&acrtc->dm_irq_params.vrr_params.adjust);
+			schedule_dc_vmin_vmax(adev,
+				acrtc->dm_irq_params.stream,
+				&acrtc->dm_irq_params.vrr_params.adjust);
 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 		}
 	}
@@ -672,8 +715,8 @@ static void dm_crtc_high_irq(void *interrupt_params)
 					     acrtc->dm_irq_params.stream,
 					     &acrtc->dm_irq_params.vrr_params);
-		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
-					   &acrtc->dm_irq_params.vrr_params.adjust);
+		schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+				      &acrtc->dm_irq_params.vrr_params.adjust);
 	}
 
 	/*

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h

@@ -153,6 +153,20 @@ struct idle_workqueue {
 	bool running;
 };
 
+/**
+ * struct vupdate_offload_work - Work data for offloading a vmin/vmax update
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+	struct work_struct work;
+	struct amdgpu_device *adev;
+	struct dc_stream_state *stream;
+	struct dc_crtc_timing_adjust *adjust;
+};
+
 #define MAX_LUMINANCE_DATA_POINTS 99
 
 /**