mirror of https://github.com/torvalds/linux.git, synced 2025-11-04 10:40:15 +02:00

	drm/amdgpu: optimize queue reset and stop logic for sdma_v5_0
This patch refactors the SDMA v5.0 queue reset and stop logic to improve
code readability, maintainability, and performance. The key changes include:
1. **Generalized `sdma_v5_0_gfx_stop` Function**:
   - Added an `inst_mask` parameter to allow stopping specific SDMA instances
     instead of all instances. This is useful for resetting individual queues
     (see the mask-construction sketch below the sign-off block).
2. **Simplified `sdma_v5_0_reset_queue` Function**:
   - Removed redundant loops and checks by directly using the `ring->me` field
     to identify the SDMA instance.
   - Reused the `sdma_v5_0_gfx_stop` function to stop the queue, reducing code
     duplication.
v1: The general coding style is to declare variables like "i" or "r" last, i.e. longest lines first and short ones last. (Christian)
Signed-off-by: Jesse Zhang <Jesse.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
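
To make the two call paths concrete, here is a minimal user-space sketch of how the instance mask is built in each case: a GENMASK-style all-instances mask when the engines are disabled as a block, and a single-bit mask (1 << ring->me) when one queue is reset. The helper and variable names (GENMASK_U32, gfx_stop, num_instances, ring_me) are illustrative stand-ins for this sketch, not the amdgpu API.

#include <stdint.h>
#include <stdio.h>

/* GENMASK(h, l) equivalent for 32-bit masks, e.g. GENMASK_U32(1, 0) == 0x3. */
#define GENMASK_U32(h, l)  (((~0u) >> (31u - (h))) & ~((1u << (l)) - 1u))

/* Stand-in for the generalized stop routine: it now receives a mask
 * of instances instead of implicitly acting on every instance. */
static void gfx_stop(uint32_t inst_mask)
{
	printf("gfx_stop(inst_mask=0x%x)\n", inst_mask);
}

int main(void)
{
	uint32_t num_instances = 2;	/* e.g. NAVI10 exposes two SDMA engines */
	uint32_t ring_me = 1;		/* instance that owns the ring being reset */

	/* disable path: mask covering every instance */
	gfx_stop(GENMASK_U32(num_instances - 1, 0));

	/* per-queue reset path: only the ring's own instance */
	gfx_stop(1u << ring_me);
	return 0;
}

Passing a mask keeps a single stop routine serving both paths, which is the duplication the patch removes from sdma_v5_0_stop_queue().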
			
			
This commit is contained in:
parent 47454f2dc0
commit 574f4b5562

1 changed file with 9 additions and 22 deletions
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c

@@ -557,15 +557,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  * sdma_v5_0_gfx_stop - stop the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
  * Stop the gfx async dma ring buffers (NAVI10).
  */
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
 {
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
+	for_each_inst(i, inst_mask) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

@@ -657,9 +657,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl;
 	int i;
+	uint32_t inst_mask;
 
+	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
 	if (!enable) {
-		sdma_v5_0_gfx_stop(adev);
+		sdma_v5_0_gfx_stop(adev, 1 << inst_mask);
 		sdma_v5_0_rlc_stop(adev);
 	}
 

@@ -1546,33 +1548,18 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 
 static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
 {
-	u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, stat1_reg;
+	u32 f32_cntl, freeze, cntl, stat1_reg;
 	struct amdgpu_device *adev = ring->adev;
 	int i, j, r = 0;
 
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (ring == &adev->sdma.instance[i].ring)
-			break;
-	}
-
-	if (i == adev->sdma.num_instances) {
-		DRM_ERROR("sdma instance not found\n");
-		return -EINVAL;
-	}
-
+	i = ring->me;
 	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 
 	/* stop queue */
-	ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-	WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
-	rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-	WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+	sdma_v5_0_gfx_stop(adev, 1 << i);
 
 	/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
 	freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
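
For context on the new loop in sdma_v5_0_gfx_stop(): for_each_inst(i, inst_mask) visits only the SDMA instances whose bits are set in inst_mask. A rough stand-alone equivalent of that bit-walk, using a hypothetical helper rather than the kernel macro, is sketched here:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Walk the set bits of inst_mask, lowest instance first
 * (illustrative helper, not the amdgpu for_each_inst() macro). */
static void stop_each_masked_instance(uint32_t inst_mask)
{
	while (inst_mask) {
		unsigned int i = (unsigned int)ffs((int)inst_mask) - 1;

		printf("stopping SDMA instance %u\n", i);
		inst_mask &= inst_mask - 1;	/* clear the lowest set bit */
	}
}

int main(void)
{
	stop_each_masked_instance(0x1);	/* instance 0 only, as in 1 << ring->me */
	stop_each_masked_instance(0x3);	/* instances 0 and 1 */
	return 0;
}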