Merge tag 'amd-drm-next-6.8-2024-01-05' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.8-2024-01-05:

amdgpu:
- VRR fixes
- PSR-SU fixes
- SubVP fixes
- DCN 3.5 fixes
- Documentation updates
- DMCUB fixes
- DML2 fixes
- UMC 12.0 updates
- GPUVM fix
- Misc code cleanups and whitespace cleanups
- DP MST fix
- Let KFD sync with GPUVM fences
- GFX11 reset fix
- SMU 13.0.6 fixes
- VSC fix for DP/eDP
- Navi12 display fix
- RN/CZN system aperture fix
- DCN 2.1 bandwidth validation fix
- DCN INIT cleanup

amdkfd:
- SVM fixes
- Revert TBA/TMA location change

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240105220522.4976-1-alexander.deucher@amd.com
commit e54478fbda

198 changed files with 4092 additions and 2110 deletions
					@ -7,6 +7,7 @@ SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
 | 
				
			||||||
Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
 | 
					Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
 | 
				
			||||||
Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
 | 
					Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
 | 
				
			||||||
Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
 | 
					Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
 | 
				
			||||||
Ryzen 7x45 series (FL1), / Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
 | 
					Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
 | 
				
			||||||
Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
 | 
					Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
 | 
				
			||||||
Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
 | 
					Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
 | 
				
			||||||
 | 
					Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		
		
			
  | 
| 
						 | 
					@ -330,6 +330,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct list_head *reset_device_list = reset_context->reset_device_list;
 | 
						struct list_head *reset_device_list = reset_context->reset_device_list;
 | 
				
			||||||
	struct amdgpu_device *tmp_adev = NULL;
 | 
						struct amdgpu_device *tmp_adev = NULL;
 | 
				
			||||||
 | 
						struct amdgpu_ras *con;
 | 
				
			||||||
	int r;
 | 
						int r;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (reset_device_list == NULL)
 | 
						if (reset_device_list == NULL)
 | 
				
			||||||
| 
						 | 
					@ -355,7 +356,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 | 
				
			||||||
		 */
 | 
							 */
 | 
				
			||||||
		amdgpu_register_gpu_instance(tmp_adev);
 | 
							amdgpu_register_gpu_instance(tmp_adev);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/* Resume RAS */
 | 
							/* Resume RAS, ecc_irq */
 | 
				
			||||||
 | 
							con = amdgpu_ras_get_context(tmp_adev);
 | 
				
			||||||
 | 
							if (!amdgpu_sriov_vf(tmp_adev) && con) {
 | 
				
			||||||
 | 
								if (tmp_adev->sdma.ras &&
 | 
				
			||||||
 | 
									tmp_adev->sdma.ras->ras_block.ras_late_init) {
 | 
				
			||||||
 | 
									r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
 | 
				
			||||||
 | 
											&tmp_adev->sdma.ras->ras_block.ras_comm);
 | 
				
			||||||
 | 
									if (r) {
 | 
				
			||||||
 | 
										dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
 | 
				
			||||||
 | 
										goto end;
 | 
				
			||||||
 | 
									}
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
								if (tmp_adev->gfx.ras &&
 | 
				
			||||||
 | 
									tmp_adev->gfx.ras->ras_block.ras_late_init) {
 | 
				
			||||||
 | 
									r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
 | 
				
			||||||
 | 
											&tmp_adev->gfx.ras->ras_block.ras_comm);
 | 
				
			||||||
 | 
									if (r) {
 | 
				
			||||||
 | 
										dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
 | 
				
			||||||
 | 
										goto end;
 | 
				
			||||||
 | 
									}
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		amdgpu_ras_resume(tmp_adev);
 | 
							amdgpu_ras_resume(tmp_adev);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		/* Update PSP FW topology after reset */
 | 
							/* Update PSP FW topology after reset */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -254,6 +254,8 @@ extern int amdgpu_agp;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
extern int amdgpu_wbrf;
 | 
					extern int amdgpu_wbrf;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					extern int fw_bo_location;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define AMDGPU_VM_MAX_NUM_CTX			4096
 | 
					#define AMDGPU_VM_MAX_NUM_CTX			4096
 | 
				
			||||||
#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 | 
					#define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 | 
				
			||||||
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 | 
					#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
 | 
				
			||||||
		return NULL;
 | 
							return NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	fence = container_of(f, struct amdgpu_amdkfd_fence, base);
 | 
						fence = container_of(f, struct amdgpu_amdkfd_fence, base);
 | 
				
			||||||
	if (fence && f->ops == &amdkfd_fence_ops)
 | 
						if (f->ops == &amdkfd_fence_ops)
 | 
				
			||||||
		return fence;
 | 
							return fence;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return NULL;
 | 
						return NULL;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1103,7 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 | 
				
			||||||
			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
 | 
								 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
 | 
				
			||||||
			 * you don't really know what's connected to which port as both are digital.
 | 
								 * you don't really know what's connected to which port as both are digital.
 | 
				
			||||||
			 */
 | 
								 */
 | 
				
			||||||
			 amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
 | 
								amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 | 
				
			||||||
		struct amdgpu_bo *bo = e->bo;
 | 
							struct amdgpu_bo *bo = e->bo;
 | 
				
			||||||
		int i;
 | 
							int i;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 | 
							e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
 | 
				
			||||||
					sizeof(struct page *),
 | 
										 sizeof(struct page *),
 | 
				
			||||||
					GFP_KERNEL | __GFP_ZERO);
 | 
										 GFP_KERNEL);
 | 
				
			||||||
		if (!e->user_pages) {
 | 
							if (!e->user_pages) {
 | 
				
			||||||
			DRM_ERROR("kvmalloc_array failure\n");
 | 
								DRM_ERROR("kvmalloc_array failure\n");
 | 
				
			||||||
			r = -ENOMEM;
 | 
								r = -ENOMEM;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 | 
				
			||||||
	while (size) {
 | 
						while (size) {
 | 
				
			||||||
		uint32_t value;
 | 
							uint32_t value;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		value = RREG32_PCIE(*pos);
 | 
							if (upper_32_bits(*pos))
 | 
				
			||||||
 | 
								value = RREG32_PCIE_EXT(*pos);
 | 
				
			||||||
 | 
							else
 | 
				
			||||||
 | 
								value = RREG32_PCIE(*pos);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		r = put_user(value, (uint32_t *)buf);
 | 
							r = put_user(value, (uint32_t *)buf);
 | 
				
			||||||
		if (r)
 | 
							if (r)
 | 
				
			||||||
			goto out;
 | 
								goto out;
 | 
				
			||||||
| 
						 | 
					@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 | 
				
			||||||
		if (r)
 | 
							if (r)
 | 
				
			||||||
			goto out;
 | 
								goto out;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		WREG32_PCIE(*pos, value);
 | 
							if (upper_32_bits(*pos))
 | 
				
			||||||
 | 
								WREG32_PCIE_EXT(*pos, value);
 | 
				
			||||||
 | 
							else
 | 
				
			||||||
 | 
								WREG32_PCIE(*pos, value);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		result += 4;
 | 
							result += 4;
 | 
				
			||||||
		buf += 4;
 | 
							buf += 4;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2251,15 +2251,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	adev->firmware.gpu_info_fw = NULL;
 | 
						adev->firmware.gpu_info_fw = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (adev->mman.discovery_bin) {
 | 
						if (adev->mman.discovery_bin)
 | 
				
			||||||
		/*
 | 
							return 0;
 | 
				
			||||||
		 * FIXME: The bounding box is still needed by Navi12, so
 | 
					 | 
				
			||||||
		 * temporarily read it from gpu_info firmware. Should be dropped
 | 
					 | 
				
			||||||
		 * when DAL no longer needs it.
 | 
					 | 
				
			||||||
		 */
 | 
					 | 
				
			||||||
		if (adev->asic_type != CHIP_NAVI12)
 | 
					 | 
				
			||||||
			return 0;
 | 
					 | 
				
			||||||
	}
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch (adev->asic_type) {
 | 
						switch (adev->asic_type) {
 | 
				
			||||||
	default:
 | 
						default:
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -210,6 +210,7 @@ int amdgpu_seamless = -1; /* auto */
 | 
				
			||||||
uint amdgpu_debug_mask;
 | 
					uint amdgpu_debug_mask;
 | 
				
			||||||
int amdgpu_agp = -1; /* auto */
 | 
					int amdgpu_agp = -1; /* auto */
 | 
				
			||||||
int amdgpu_wbrf = -1;
 | 
					int amdgpu_wbrf = -1;
 | 
				
			||||||
 | 
					int fw_bo_location = -1;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 | 
					static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -989,6 +990,10 @@ MODULE_PARM_DESC(wbrf,
 | 
				
			||||||
	"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
 | 
						"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
 | 
				
			||||||
module_param_named(wbrf, amdgpu_wbrf, int, 0444);
 | 
					module_param_named(wbrf, amdgpu_wbrf, int, 0444);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					MODULE_PARM_DESC(fw_bo_location,
 | 
				
			||||||
 | 
						"location to put firmware bo for frontdoor loading (-1 = auto (default), 0 = on ram, 1 = on vram");
 | 
				
			||||||
 | 
					module_param(fw_bo_location, int, 0644);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
/* These devices are not supported by amdgpu.
 | 
					/* These devices are not supported by amdgpu.
 | 
				
			||||||
 * They are supported by the mach64, r128, radeon drivers
 | 
					 * They are supported by the mach64, r128, radeon drivers
 | 
				
			||||||
 */
 | 
					 */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st
 | 
				
			||||||
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
 | 
					int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct amdgpu_smuio_mcm_config_info mcm_info;
 | 
						struct amdgpu_smuio_mcm_config_info mcm_info;
 | 
				
			||||||
 | 
						struct ras_err_addr err_addr = {0};
 | 
				
			||||||
	struct mca_bank_set mca_set;
 | 
						struct mca_bank_set mca_set;
 | 
				
			||||||
	struct mca_bank_node *node;
 | 
						struct mca_bank_node *node;
 | 
				
			||||||
	struct mca_bank_entry *entry;
 | 
						struct mca_bank_entry *entry;
 | 
				
			||||||
| 
						 | 
					@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
 | 
				
			||||||
		mcm_info.socket_id = entry->info.socket_id;
 | 
							mcm_info.socket_id = entry->info.socket_id;
 | 
				
			||||||
		mcm_info.die_id = entry->info.aid;
 | 
							mcm_info.die_id = entry->info.aid;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							if (blk == AMDGPU_RAS_BLOCK__UMC) {
 | 
				
			||||||
 | 
								err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
 | 
				
			||||||
 | 
								err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
 | 
				
			||||||
 | 
								err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if (type == AMDGPU_MCA_ERROR_TYPE_UE)
 | 
							if (type == AMDGPU_MCA_ERROR_TYPE_UE)
 | 
				
			||||||
			amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
 | 
								amdgpu_ras_error_statistic_ue_count(err_data,
 | 
				
			||||||
 | 
									&mcm_info, &err_addr, (uint64_t)count);
 | 
				
			||||||
		else
 | 
							else
 | 
				
			||||||
			amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
 | 
								amdgpu_ras_error_statistic_ce_count(err_data,
 | 
				
			||||||
 | 
									&mcm_info, &err_addr, (uint64_t)count);
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
out_mca_release:
 | 
					out_mca_release:
 | 
				
			||||||
| 
						 | 
					@ -351,6 +360,9 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
 | 
				
			||||||
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
 | 
						const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
 | 
				
			||||||
	int count;
 | 
						int count;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
 | 
				
			||||||
 | 
							return -EOPNOTSUPP;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch (type) {
 | 
						switch (type) {
 | 
				
			||||||
	case AMDGPU_MCA_ERROR_TYPE_UE:
 | 
						case AMDGPU_MCA_ERROR_TYPE_UE:
 | 
				
			||||||
		count = mca_funcs->max_ue_count;
 | 
							count = mca_funcs->max_ue_count;
 | 
				
			||||||
| 
						 | 
					@ -365,10 +377,7 @@ int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_err
 | 
				
			||||||
	if (idx >= count)
 | 
						if (idx >= count)
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (mca_funcs && mca_funcs->mca_get_mca_entry)
 | 
						return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
 | 
				
			||||||
		return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
 | 
					 | 
				
			||||||
 | 
					 | 
				
			||||||
	return -EOPNOTSUPP;
 | 
					 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#if defined(CONFIG_DEBUG_FS)
 | 
					#if defined(CONFIG_DEBUG_FS)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -466,7 +466,7 @@ static int psp_sw_init(void *handle)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
 | 
						ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
 | 
				
			||||||
				      amdgpu_sriov_vf(adev) ?
 | 
									      (amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
 | 
				
			||||||
				      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 | 
									      AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 | 
				
			||||||
				      &psp->fw_pri_bo,
 | 
									      &psp->fw_pri_bo,
 | 
				
			||||||
				      &psp->fw_pri_mc_addr,
 | 
									      &psp->fw_pri_mc_addr,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1156,8 +1156,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
 | 
				
			||||||
		for_each_ras_error(err_node, err_data) {
 | 
							for_each_ras_error(err_node, err_data) {
 | 
				
			||||||
			err_info = &err_node->err_info;
 | 
								err_info = &err_node->err_info;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
 | 
								amdgpu_ras_error_statistic_ce_count(&obj->err_data,
 | 
				
			||||||
			amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
 | 
										&err_info->mcm_info, NULL, err_info->ce_count);
 | 
				
			||||||
 | 
								amdgpu_ras_error_statistic_ue_count(&obj->err_data,
 | 
				
			||||||
 | 
										&err_info->mcm_info, NULL, err_info->ue_count);
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	} else {
 | 
						} else {
 | 
				
			||||||
		/* for legacy asic path which doesn't has error source info */
 | 
							/* for legacy asic path which doesn't has error source info */
 | 
				
			||||||
| 
						 | 
					@ -1174,6 +1176,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
 | 
				
			||||||
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
 | 
						enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
 | 
				
			||||||
	struct amdgpu_ras_block_object *block_obj = NULL;
 | 
						struct amdgpu_ras_block_object *block_obj = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (blk == AMDGPU_RAS_BLOCK_COUNT)
 | 
				
			||||||
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
 | 
						if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2538,7 +2543,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	data = &con->eh_data;
 | 
						data = &con->eh_data;
 | 
				
			||||||
	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
 | 
						*data = kzalloc(sizeof(**data), GFP_KERNEL);
 | 
				
			||||||
	if (!*data) {
 | 
						if (!*data) {
 | 
				
			||||||
		ret = -ENOMEM;
 | 
							ret = -ENOMEM;
 | 
				
			||||||
		goto out;
 | 
							goto out;
 | 
				
			||||||
| 
						 | 
					@ -2825,10 +2830,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 | 
				
			||||||
	if (con)
 | 
						if (con)
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	con = kmalloc(sizeof(struct amdgpu_ras) +
 | 
						con = kzalloc(sizeof(*con) +
 | 
				
			||||||
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
 | 
								sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
 | 
				
			||||||
			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
 | 
								sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
 | 
				
			||||||
			GFP_KERNEL|__GFP_ZERO);
 | 
								GFP_KERNEL);
 | 
				
			||||||
	if (!con)
 | 
						if (!con)
 | 
				
			||||||
		return -ENOMEM;
 | 
							return -ENOMEM;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3133,8 +3138,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
 | 
				
			||||||
	if (amdgpu_sriov_vf(adev))
 | 
						if (amdgpu_sriov_vf(adev))
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* enable MCA debug on APU device */
 | 
						amdgpu_ras_set_mca_debug_mode(adev, false);
 | 
				
			||||||
	amdgpu_ras_set_mca_debug_mode(adev, !!(adev->flags & AMD_IS_APU));
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
 | 
						list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
 | 
				
			||||||
		if (!node->ras_obj) {
 | 
							if (!node->ras_obj) {
 | 
				
			||||||
| 
						 | 
					@ -3691,7 +3695,8 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
 | 
					static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
 | 
				
			||||||
						      struct amdgpu_smuio_mcm_config_info *mcm_info)
 | 
									struct amdgpu_smuio_mcm_config_info *mcm_info,
 | 
				
			||||||
 | 
									struct ras_err_addr *err_addr)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct ras_err_node *err_node;
 | 
						struct ras_err_node *err_node;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3705,6 +3710,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
 | 
						memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (err_addr)
 | 
				
			||||||
 | 
							memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err_data->err_list_count++;
 | 
						err_data->err_list_count++;
 | 
				
			||||||
	list_add_tail(&err_node->node, &err_data->err_node_list);
 | 
						list_add_tail(&err_node->node, &err_data->err_node_list);
 | 
				
			||||||
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
 | 
						list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
 | 
				
			||||||
| 
						 | 
					@ -3713,7 +3721,8 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
					int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
				
			||||||
					struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
 | 
							struct amdgpu_smuio_mcm_config_info *mcm_info,
 | 
				
			||||||
 | 
							struct ras_err_addr *err_addr, u64 count)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct ras_err_info *err_info;
 | 
						struct ras_err_info *err_info;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3723,7 +3732,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
				
			||||||
	if (!count)
 | 
						if (!count)
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
 | 
						err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
 | 
				
			||||||
	if (!err_info)
 | 
						if (!err_info)
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3734,7 +3743,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 | 
					int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 | 
				
			||||||
					struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count)
 | 
							struct amdgpu_smuio_mcm_config_info *mcm_info,
 | 
				
			||||||
 | 
							struct ras_err_addr *err_addr, u64 count)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	struct ras_err_info *err_info;
 | 
						struct ras_err_info *err_info;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3744,7 +3754,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 | 
				
			||||||
	if (!count)
 | 
						if (!count)
 | 
				
			||||||
		return 0;
 | 
							return 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
 | 
						err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
 | 
				
			||||||
	if (!err_info)
 | 
						if (!err_info)
 | 
				
			||||||
		return -EINVAL;
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -452,10 +452,17 @@ struct ras_fs_data {
 | 
				
			||||||
	char debugfs_name[32];
 | 
						char debugfs_name[32];
 | 
				
			||||||
};
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					struct ras_err_addr {
 | 
				
			||||||
 | 
						uint64_t err_status;
 | 
				
			||||||
 | 
						uint64_t err_ipid;
 | 
				
			||||||
 | 
						uint64_t err_addr;
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
struct ras_err_info {
 | 
					struct ras_err_info {
 | 
				
			||||||
	struct amdgpu_smuio_mcm_config_info mcm_info;
 | 
						struct amdgpu_smuio_mcm_config_info mcm_info;
 | 
				
			||||||
	u64 ce_count;
 | 
						u64 ce_count;
 | 
				
			||||||
	u64 ue_count;
 | 
						u64 ue_count;
 | 
				
			||||||
 | 
						struct ras_err_addr err_addr;
 | 
				
			||||||
};
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
struct ras_err_node {
 | 
					struct ras_err_node {
 | 
				
			||||||
| 
						 | 
					@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
 | 
					int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
 | 
				
			||||||
void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
 | 
					void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
 | 
				
			||||||
int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 | 
					int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 | 
				
			||||||
					struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
 | 
							struct amdgpu_smuio_mcm_config_info *mcm_info,
 | 
				
			||||||
 | 
							struct ras_err_addr *err_addr, u64 count);
 | 
				
			||||||
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
					int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
 | 
				
			||||||
					struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count);
 | 
							struct amdgpu_smuio_mcm_config_info *mcm_info,
 | 
				
			||||||
 | 
							struct ras_err_addr *err_addr, u64 count);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#endif
 | 
					#endif
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
 | 
				
			||||||
	if (version_major == 2 && version_minor == 1)
 | 
						if (version_major == 2 && version_minor == 1)
 | 
				
			||||||
		adev->gfx.rlc.is_rlc_v2_1 = true;
 | 
							adev->gfx.rlc.is_rlc_v2_1 = true;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (version_minor >= 0) {
 | 
						err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
 | 
				
			||||||
		err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
 | 
						if (err) {
 | 
				
			||||||
		if (err) {
 | 
							dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
 | 
				
			||||||
			dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
 | 
							return err;
 | 
				
			||||||
			return err;
 | 
					 | 
				
			||||||
		}
 | 
					 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (version_minor >= 1)
 | 
						if (version_minor >= 1)
 | 
				
			||||||
		amdgpu_gfx_rlc_init_microcode_v2_1(adev);
 | 
							amdgpu_gfx_rlc_init_microcode_v2_1(adev);
 | 
				
			||||||
	if (version_minor >= 2)
 | 
						if (version_minor >= 2)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Never sync to VM updates either. */
 | 
						/* Never sync to VM updates either. */
 | 
				
			||||||
	if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
 | 
						if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
 | 
				
			||||||
	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 | 
						    owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
 | 
				
			||||||
 | 
						    owner != AMDGPU_FENCE_OWNER_KFD)
 | 
				
			||||||
		return false;
 | 
							return false;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/* Ignore fences depending on the sync mode */
 | 
						/* Ignore fences depending on the sync mode */
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
 | 
						if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
 | 
				
			||||||
		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
 | 
							amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
 | 
				
			||||||
			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 | 
								(amdgpu_sriov_vf(adev) || fw_bo_location == 1) ?
 | 
				
			||||||
 | 
								AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 | 
				
			||||||
			&adev->firmware.fw_buf,
 | 
								&adev->firmware.fw_buf,
 | 
				
			||||||
			&adev->firmware.fw_buf_mc,
 | 
								&adev->firmware.fw_buf_mc,
 | 
				
			||||||
			&adev->firmware.fw_buf_ptr);
 | 
								&adev->firmware.fw_buf_ptr);
 | 
				
			||||||
| 
						 | 
					@ -1397,9 +1398,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (err)
 | 
						if (err)
 | 
				
			||||||
		return -ENODEV;
 | 
							return -ENODEV;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	err = amdgpu_ucode_validate(*fw);
 | 
						err = amdgpu_ucode_validate(*fw);
 | 
				
			||||||
	if (err)
 | 
						if (err) {
 | 
				
			||||||
		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
 | 
							dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
 | 
				
			||||||
 | 
							release_firmware(*fw);
 | 
				
			||||||
 | 
							*fw = NULL;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return err;
 | 
						return err;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
 | 
				
			||||||
	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
 | 
						list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
 | 
				
			||||||
		struct amdgpu_bo *bo = vm_bo->bo;
 | 
							struct amdgpu_bo *bo = vm_bo->bo;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							vm_bo->moved = true;
 | 
				
			||||||
		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
 | 
							if (!bo || bo->tbo.type != ttm_bo_type_kernel)
 | 
				
			||||||
			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 | 
								list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
 | 
				
			||||||
		else if (bo->parent)
 | 
							else if (bo->parent)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1313,10 +1313,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
 | 
						switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
 | 
				
			||||||
	case AMDGPU_MCA_ERROR_TYPE_UE:
 | 
						case AMDGPU_MCA_ERROR_TYPE_UE:
 | 
				
			||||||
		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
 | 
							amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
	case AMDGPU_MCA_ERROR_TYPE_CE:
 | 
						case AMDGPU_MCA_ERROR_TYPE_CE:
 | 
				
			||||||
		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
 | 
							amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
	default:
 | 
						default:
 | 
				
			||||||
		break;
 | 
							break;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
 | 
				
			||||||
			(*ptr)++;
 | 
								(*ptr)++;
 | 
				
			||||||
			return;
 | 
								return;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		return;
 | 
					 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -4474,11 +4474,43 @@ static int gfx_v11_0_wait_for_idle(void *handle)
 | 
				
			||||||
	return -ETIMEDOUT;
 | 
						return -ETIMEDOUT;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
 | 
				
			||||||
 | 
										     int req)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						u32 i, tmp, val;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						for (i = 0; i < adev->usec_timeout; i++) {
 | 
				
			||||||
 | 
							/* Request with MeId=2, PipeId=0 */
 | 
				
			||||||
 | 
							tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
 | 
				
			||||||
 | 
							tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
 | 
				
			||||||
 | 
							WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
 | 
				
			||||||
 | 
							if (req) {
 | 
				
			||||||
 | 
								if (val == tmp)
 | 
				
			||||||
 | 
									break;
 | 
				
			||||||
 | 
							} else {
 | 
				
			||||||
 | 
								tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
 | 
				
			||||||
 | 
										    REQUEST, 1);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
								/* unlocked or locked by firmware */
 | 
				
			||||||
 | 
								if (val != tmp)
 | 
				
			||||||
 | 
									break;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							udelay(1);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (i >= adev->usec_timeout)
 | 
				
			||||||
 | 
							return -EINVAL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						return 0;
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static int gfx_v11_0_soft_reset(void *handle)
 | 
					static int gfx_v11_0_soft_reset(void *handle)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
	u32 grbm_soft_reset = 0;
 | 
						u32 grbm_soft_reset = 0;
 | 
				
			||||||
	u32 tmp;
 | 
						u32 tmp;
 | 
				
			||||||
	int i, j, k;
 | 
						int r, i, j, k;
 | 
				
			||||||
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 | 
						struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
 | 
						tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
 | 
				
			||||||
| 
						 | 
					@ -4518,6 +4550,13 @@ static int gfx_v11_0_soft_reset(void *handle)
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* Try to acquire the gfx mutex before access to CP_VMID_RESET */
 | 
				
			||||||
 | 
						r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
 | 
				
			||||||
 | 
						if (r) {
 | 
				
			||||||
 | 
							DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n");
 | 
				
			||||||
 | 
							return r;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
 | 
						WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	// Read CP_VMID_RESET register three times.
 | 
						// Read CP_VMID_RESET register three times.
 | 
				
			||||||
| 
						 | 
					@ -4526,6 +4565,13 @@ static int gfx_v11_0_soft_reset(void *handle)
 | 
				
			||||||
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
 | 
						RREG32_SOC15(GC, 0, regCP_VMID_RESET);
 | 
				
			||||||
	RREG32_SOC15(GC, 0, regCP_VMID_RESET);
 | 
						RREG32_SOC15(GC, 0, regCP_VMID_RESET);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* release the gfx mutex */
 | 
				
			||||||
 | 
						r = gfx_v11_0_request_gfx_index_mutex(adev, 0);
 | 
				
			||||||
 | 
						if (r) {
 | 
				
			||||||
 | 
							DRM_ERROR("Failed to release the gfx mutex during soft reset\n");
 | 
				
			||||||
 | 
							return r;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	for (i = 0; i < adev->usec_timeout; i++) {
 | 
						for (i = 0; i < adev->usec_timeout; i++) {
 | 
				
			||||||
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
 | 
							if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
 | 
				
			||||||
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
 | 
							    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
 | 
				
			||||||
	/* the caller should make sure initialize value of
 | 
						/* the caller should make sure initialize value of
 | 
				
			||||||
	 * err_data->ue_count and err_data->ce_count
 | 
						 * err_data->ue_count and err_data->ce_count
 | 
				
			||||||
	 */
 | 
						 */
 | 
				
			||||||
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
 | 
						amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
 | 
				
			||||||
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
 | 
						amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
 | 
					static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -102,7 +102,9 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 | 
				
			||||||
		WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
							WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
				
			||||||
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
								min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 | 
							if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
 | 
				
			||||||
 | 
									       AMD_APU_IS_RENOIR |
 | 
				
			||||||
 | 
									       AMD_APU_IS_GREEN_SARDINE))
 | 
				
			||||||
		       /*
 | 
							       /*
 | 
				
			||||||
			* Raven2 has a HW issue that it is unable to use the
 | 
								* Raven2 has a HW issue that it is unable to use the
 | 
				
			||||||
			* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 | 
								* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -139,7 +139,9 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
 | 
				
			||||||
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
								WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
				
			||||||
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
									min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 | 
								if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
 | 
				
			||||||
 | 
										       AMD_APU_IS_RENOIR |
 | 
				
			||||||
 | 
										       AMD_APU_IS_GREEN_SARDINE))
 | 
				
			||||||
			       /*
 | 
								       /*
 | 
				
			||||||
				* Raven2 has a HW issue that it is unable to use the
 | 
									* Raven2 has a HW issue that it is unable to use the
 | 
				
			||||||
				* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 | 
									* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -1041,6 +1041,10 @@ static int gmc_v10_0_hw_fini(void *handle)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
						amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (adev->gmc.ecc_irq.funcs &&
 | 
				
			||||||
 | 
							amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
 | 
				
			||||||
 | 
							amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -941,6 +941,11 @@ static int gmc_v11_0_hw_fini(void *handle)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
						amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (adev->gmc.ecc_irq.funcs &&
 | 
				
			||||||
 | 
							amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
 | 
				
			||||||
 | 
							amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	gmc_v11_0_gart_disable(adev);
 | 
						gmc_v11_0_gart_disable(adev);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2380,6 +2380,10 @@ static int gmc_v9_0_hw_fini(void *handle)
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
						amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (adev->gmc.ecc_irq.funcs &&
 | 
				
			||||||
 | 
							amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
 | 
				
			||||||
 | 
							amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return 0;
 | 
						return 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -96,7 +96,9 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 | 
				
			||||||
	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
						WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 | 
				
			||||||
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
							     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 | 
						if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
 | 
				
			||||||
 | 
								       AMD_APU_IS_RENOIR |
 | 
				
			||||||
 | 
								       AMD_APU_IS_GREEN_SARDINE))
 | 
				
			||||||
		/*
 | 
							/*
 | 
				
			||||||
		 * Raven2 has a HW issue that it is unable to use the vram which
 | 
							 * Raven2 has a HW issue that it is unable to use the vram which
 | 
				
			||||||
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
 | 
							 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 | 
										AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 | 
				
			||||||
					&ue_count);
 | 
										&ue_count);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
 | 
						amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
 | 
				
			||||||
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
 | 
						amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
 | 
					static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -2156,7 +2156,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 | 
										AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 | 
				
			||||||
					&ue_count);
 | 
										&ue_count);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
 | 
						amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
 | 
					static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -26,6 +26,7 @@
 | 
				
			||||||
#include "amdgpu.h"
 | 
					#include "amdgpu.h"
 | 
				
			||||||
#include "umc/umc_12_0_0_offset.h"
 | 
					#include "umc/umc_12_0_0_offset.h"
 | 
				
			||||||
#include "umc/umc_12_0_0_sh_mask.h"
 | 
					#include "umc/umc_12_0_0_sh_mask.h"
 | 
				
			||||||
 | 
					#include "mp/mp_13_0_6_sh_mask.h"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
const uint32_t
 | 
					const uint32_t
 | 
				
			||||||
	umc_v12_0_channel_idx_tbl[]
 | 
						umc_v12_0_channel_idx_tbl[]
 | 
				
			||||||
| 
						 | 
					@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
 | 
				
			||||||
		umc_v12_0_reset_error_count_per_channel, NULL);
 | 
							umc_v12_0_reset_error_count_per_channel, NULL);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
 | 
					bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
						if (amdgpu_ras_is_poison_mode_supported(adev) &&
 | 
				
			||||||
 | 
						    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 | 
				
			||||||
 | 
						    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
 | 
				
			||||||
 | 
							return true;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 | 
						return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 | 
				
			||||||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
 | 
							(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
 | 
				
			||||||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
 | 
							REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
 | 
				
			||||||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
 | 
							REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
 | 
					bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 | 
				
			||||||
{
 | 
					{
 | 
				
			||||||
 | 
						if (amdgpu_ras_is_poison_mode_supported(adev) &&
 | 
				
			||||||
 | 
						    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
 | 
				
			||||||
 | 
						    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
 | 
				
			||||||
 | 
							return false;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 | 
						return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
 | 
				
			||||||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
 | 
							(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
 | 
				
			||||||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
 | 
							(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
 | 
				
			||||||
| 
						 | 
					@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
 | 
				
			||||||
		/* Identify data parity error in replay mode */
 | 
							/* Identify data parity error in replay mode */
 | 
				
			||||||
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
 | 
							((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
 | 
				
			||||||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
 | 
							REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
 | 
				
			||||||
		!(umc_v12_0_is_uncorrectable_error(mc_umc_status)))));
 | 
							!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
 | 
					static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
| 
						 | 
					@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
 | 
				
			||||||
	mc_umc_status =
 | 
						mc_umc_status =
 | 
				
			||||||
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 | 
							RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (umc_v12_0_is_correctable_error(mc_umc_status))
 | 
						if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
 | 
				
			||||||
		*error_count += 1;
 | 
							*error_count += 1;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
					@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
 | 
				
			||||||
	mc_umc_status =
 | 
						mc_umc_status =
 | 
				
			||||||
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 | 
							RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	if (umc_v12_0_is_uncorrectable_error(mc_umc_status))
 | 
						if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
 | 
				
			||||||
		*error_count += 1;
 | 
							*error_count += 1;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
| 
						 | 
@@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
 	umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
 	umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
 
-	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
-	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
+	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
+	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
 
 	return 0;
 }
@@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+					void *ras_error_status)
+{
+	amdgpu_mca_smu_log_ras_error(adev,
+		AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status);
+	amdgpu_mca_smu_log_ras_error(adev,
+		AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status);
+}
+
+static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+					void *ras_error_status)
+{
+	struct ras_err_node *err_node;
+	uint64_t mc_umc_status;
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+	for_each_ras_error(err_node, err_data) {
+		mc_umc_status = err_node->err_info.err_addr.err_status;
+		if (!mc_umc_status)
+			continue;
+
+		if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
+			uint64_t mca_addr, err_addr, mca_ipid;
+			uint32_t InstanceIdLo;
+			struct amdgpu_smuio_mcm_config_info *mcm_info;
+
+			mcm_info = &err_node->err_info.mcm_info;
+			mca_addr = err_node->err_info.err_addr.err_addr;
+			mca_ipid = err_node->err_info.err_addr.err_ipid;
+
+			err_addr =  REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+			InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+			dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+				mca_ipid,
+				mcm_info->die_id,
+				MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+				MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+				err_addr);
+
+			umc_v12_0_convert_error_address(adev,
+				err_data, err_addr,
+				MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+				MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+				mcm_info->die_id);
+
+			/* Clear umc error address content */
+			memset(&err_node->err_info.err_addr,
+				0, sizeof(err_node->err_info.err_addr));
+		}
+	}
+}
+
 static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
 {
 	amdgpu_umc_loop_channels(adev,
@@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
 	},
 	.err_cnt_init = umc_v12_0_err_cnt_init,
 	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
+	.ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
+	.ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
 };
@@ -117,8 +117,12 @@
 		(pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
 	} while (0)
 
-bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status);
-bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status);
+#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
+			(((_ipid_lo) >> 12) & 0xF))
+#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
+bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
 
 extern const uint32_t
 	umc_v12_0_channel_idx_tbl[]
@@ -330,12 +330,6 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
-	/* dGPUs: the reserved space for kernel
-	 * before SVM
-	 */
-	pdd->qpd.cwsr_base = SVM_CWSR_BASE;
-	pdd->qpd.ib_base = SVM_IB_BASE;
-
 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
 }
@@ -345,18 +339,18 @@ static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
 	pdd->lds_base = MAKE_LDS_APP_BASE_V9();
 	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
 
-	pdd->gpuvm_base = PAGE_SIZE;
+        /* Raven needs SVM to support graphic handle, etc. Leave the small
+         * reserved space before SVM on Raven as well, even though we don't
+         * have to.
+         * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they
+         * are used in Thunk to reserve SVM.
+         */
+        pdd->gpuvm_base = SVM_USER_BASE;
 	pdd->gpuvm_limit =
 		pdd->dev->kfd->shared_resources.gpuvm_size - 1;
 
 	pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
 	pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
-
-	/*
-	 * Place TBA/TMA on opposite side of VM hole to prevent
-	 * stray faults from triggering SVM on these pages.
-	 */
-	pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
 }
 
 int kfd_init_apertures(struct kfd_process *process)
@@ -413,6 +407,12 @@ int kfd_init_apertures(struct kfd_process *process)
 					return -EINVAL;
 				}
 			}
+
+                        /* dGPUs: the reserved space for kernel
+                         * before SVM
+                         */
+                        pdd->qpd.cwsr_base = SVM_CWSR_BASE;
+                        pdd->qpd.ib_base = SVM_IB_BASE;
 		}
 
 		dev_dbg(kfd_device, "node id %u\n", id);
@@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
 	put_page(page);
 }
 
-static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
-{
-	unsigned long cpages = 0;
-	unsigned long i;
-
-	for (i = 0; i < migrate->npages; i++) {
-		if (migrate->src[i] & MIGRATE_PFN_VALID &&
-		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
-			cpages++;
-	}
-	return cpages;
-}
-
 static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
 {
 	unsigned long upages = 0;
@@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate = { 0 };
 	unsigned long cpages = 0;
+	unsigned long mpages = 0;
 	dma_addr_t *scratch;
 	void *buf;
 	int r = -ENOMEM;
@@ -450,12 +438,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
 	migrate_vma_pages(&migrate);
 
-	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
-		svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
-
 	svm_migrate_copy_done(adev, mfence);
 	migrate_vma_finalize(&migrate);
 
+	mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
+	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+		 mpages, cpages, migrate.npages);
+
 	kfd_smi_event_migration_end(node, p->lead_thread->pid,
 			    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
 			    0, node->id, trigger);
@@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
 out_free:
 	kvfree(buf);
 out:
-	if (!r && cpages) {
+	if (!r && mpages) {
 		pdd = svm_range_get_pdd_by_node(prange, node);
 		if (pdd)
-			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
 
-		return cpages;
+		return mpages;
 	}
 	return r;
 }
@@ -498,7 +487,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	struct vm_area_struct *vma;
 	uint64_t ttm_res_offset;
 	struct kfd_node *node;
-	unsigned long cpages = 0;
+	unsigned long mpages = 0;
 	long r = 0;
 
 	if (start_mgr < prange->start || last_mgr > prange->last) {
@@ -540,15 +529,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 			pr_debug("failed %ld to migrate\n", r);
 			break;
 		} else {
-			cpages += r;
+			mpages += r;
 		}
 		ttm_res_offset += next - addr;
 		addr = next;
 	}
 
-	if (cpages) {
+	if (mpages) {
 		prange->actual_loc = best_loc;
-		prange->vram_pages = prange->vram_pages + cpages;
+		prange->vram_pages += mpages;
 	} else if (!prange->actual_loc) {
 		/* if no page migrated and all pages from prange are at
 		 * sys ram drop svm_bo got from svm_range_vram_node_new
@@ -970,7 +970,7 @@ struct kfd_process {
 	struct work_struct debug_event_workarea;
 
 	/* Tracks debug per-vmid request for debug flags */
-	bool dbg_flags;
+	u32 dbg_flags;
 
 	atomic_t poison;
 	/* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */
@@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		      unsigned long offset, unsigned long npages,
-		      unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
+		      unsigned long *hmm_pfns, uint32_t gpuidx)
 {
 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
 	dma_addr_t *addr = prange->dma_addr[gpuidx];
 	struct device *dev = adev->dev;
 	struct page *page;
-	uint64_t vram_pages_dev;
 	int i, r;
 
 	if (!addr) {
@@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		prange->dma_addr[gpuidx] = addr;
 	}
 
-	vram_pages_dev = 0;
 	addr += offset;
 	for (i = 0; i < npages; i++) {
 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
@@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		if (is_zone_device_page(page)) {
 			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
 
-			vram_pages_dev++;
 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
 				   bo_adev->vm_manager.vram_base_offset -
 				   bo_adev->kfd.pgmap.range.start;
@@ -201,14 +198,14 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
 			     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
 	}
-	*vram_pages = vram_pages_dev;
+
 	return 0;
 }
 
 static int
 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 		  unsigned long offset, unsigned long npages,
-		  unsigned long *hmm_pfns, uint64_t *vram_pages)
+		  unsigned long *hmm_pfns)
 {
 	struct kfd_process *p;
 	uint32_t gpuidx;
@@ -227,7 +224,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 		}
 
 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
-					  hmm_pfns, gpuidx, vram_pages);
+					  hmm_pfns, gpuidx);
 		if (r)
 			break;
 	}
@@ -885,14 +882,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
 
 static void *
 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
-		     uint64_t offset)
+		     uint64_t offset, uint64_t *vram_pages)
 {
+	unsigned char *src = (unsigned char *)psrc + offset;
 	unsigned char *dst;
+	uint64_t i;
 
 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
 	if (!dst)
 		return NULL;
-	memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+	if (!vram_pages) {
+		memcpy(dst, src, num_elements * size);
+		return (void *)dst;
+	}
+
+	*vram_pages = 0;
+	for (i = 0; i < num_elements; i++) {
+		dma_addr_t *temp;
+		temp = (dma_addr_t *)dst + i;
+		*temp = *((dma_addr_t *)src + i);
+		if (*temp&SVM_RANGE_VRAM_DOMAIN)
+			(*vram_pages)++;
+	}
 
 	return (void *)dst;
 }
@@ -906,7 +918,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
 		if (!src->dma_addr[i])
 			continue;
 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
-					sizeof(*src->dma_addr[i]), src->npages, 0);
+					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
 		if (!dst->dma_addr[i])
 			return -ENOMEM;
 	}
@@ -917,7 +929,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
 static int
 svm_range_split_array(void *ppnew, void *ppold, size_t size,
 		      uint64_t old_start, uint64_t old_n,
-		      uint64_t new_start, uint64_t new_n)
+		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
 {
 	unsigned char *new, *old, *pold;
 	uint64_t d;
@@ -929,11 +941,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
 		return 0;
 
 	d = (new_start - old_start) * size;
-	new = svm_range_copy_array(pold, size, new_n, d);
+	/* get dma addr array for new range and calculte its vram page number */
+	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
 	if (!new)
 		return -ENOMEM;
 	d = (new_start == old_start) ? new_n * size : 0;
-	old = svm_range_copy_array(pold, size, old_n, d);
+	old = svm_range_copy_array(pold, size, old_n, d, NULL);
 	if (!old) {
 		kvfree(new);
 		return -ENOMEM;
@@ -955,10 +968,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
 					  sizeof(*old->dma_addr[i]), old->start,
-					  npages, new->start, new->npages);
+					  npages, new->start, new->npages,
+					  old->actual_loc ? &new->vram_pages : NULL);
 		if (r)
 			return r;
 	}
+	if (old->actual_loc)
+		old->vram_pages -= new->vram_pages;
 
 	return 0;
 }
@@ -982,11 +998,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
 	new->ttm_res = old->ttm_res;
 
-	/* set new's vram_pages as old range's now, the acurate vram_pages
-	 * will be updated during mapping
-	 */
-	new->vram_pages = min(old->vram_pages, new->npages);
-
 	spin_lock(&new->svm_bo->list_lock);
 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
 	spin_unlock(&new->svm_bo->list_lock);
@@ -1109,7 +1120,7 @@ static int
 svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
 		     struct list_head *insert_list, struct list_head *remap_list)
 {
-	struct svm_range *tail;
+	struct svm_range *tail = NULL;
 	int r = svm_range_split(prange, prange->start, new_last, &tail);
 
 	if (!r) {
@@ -1124,7 +1135,7 @@ static int
 svm_range_split_head(struct svm_range *prange, uint64_t new_start,
 		     struct list_head *insert_list, struct list_head *remap_list)
 {
-	struct svm_range *head;
+	struct svm_range *head = NULL;
 	int r = svm_range_split(prange, new_start, prange->last, &head);
 
 	if (!r) {
@@ -1573,7 +1584,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 	struct svm_validate_context *ctx;
 	unsigned long start, end, addr;
 	struct kfd_process *p;
-	uint64_t vram_pages;
 	void *owner;
 	int32_t idx;
 	int r = 0;
@@ -1648,15 +1658,13 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		}
 	}
 
-	vram_pages = 0;
-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = map_start << PAGE_SHIFT;
+	end = (map_last + 1) << PAGE_SHIFT;
 	for (addr = start; !r && addr < end; ) {
 		struct hmm_range *hmm_range;
 		unsigned long map_start_vma;
 		unsigned long map_last_vma;
 		struct vm_area_struct *vma;
-		uint64_t vram_pages_vma;
 		unsigned long next = 0;
 		unsigned long offset;
 		unsigned long npages;
@@ -1683,13 +1691,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		}
 
 		if (!r) {
-			offset = (addr - start) >> PAGE_SHIFT;
+			offset = (addr >> PAGE_SHIFT) - prange->start;
 			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
-					      hmm_range->hmm_pfns, &vram_pages_vma);
+					      hmm_range->hmm_pfns);
 			if (r)
 				pr_debug("failed %d to dma map range\n", r);
-			else
-				vram_pages += vram_pages_vma;
 		}
 
 		svm_range_lock(prange);
@@ -1722,19 +1728,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		addr = next;
 	}
 
-	if (addr == end) {
-		prange->vram_pages = vram_pages;
-
-		/* if prange does not include any vram page and it
-		 * has not released svm_bo drop its svm_bo reference
-		 * and set its actaul_loc to sys ram
-		 */
-		if (!vram_pages && prange->ttm_res) {
-			prange->actual_loc = 0;
-			svm_range_vram_node_free(prange);
-		}
-	}
-
 	svm_range_unreserve_bos(ctx);
 	if (!r)
 		prange->validate_timestamp = ktime_get_boottime();
@@ -1342,10 +1342,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
 		num_cpu++;
 	}
 
+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	gpu_link = list_first_entry(&kdev->io_link_props,
-				struct kfd_iolink_properties, list);
-	if (!gpu_link)
-		return -ENOMEM;
+				    struct kfd_iolink_properties, list);
 
 	for (i = 0; i < num_cpu; i++) {
 		/* CPU <--> GPU */
@@ -1423,15 +1424,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
 				peer->gpu->adev))
 		return ret;
 
+	if (list_empty(&kdev->io_link_props))
+		return -ENODATA;
+
 	iolink1 = list_first_entry(&kdev->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink1)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);
+
+	if (list_empty(&peer->io_link_props))
+		return -ENODATA;
 
 	iolink2 = list_first_entry(&peer->io_link_props,
-					struct kfd_iolink_properties, list);
-	if (!iolink2)
-		return -ENOMEM;
+				   struct kfd_iolink_properties, list);
 
 	props = kfd_alloc_struct(props);
 	if (!props)
@@ -1449,17 +1452,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
 		/* CPU->CPU  link*/
 		cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
 		if (cpu_dev) {
-			list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
-				if (iolink3->node_to == iolink2->node_to)
-					break;
-
-			props->weight += iolink3->weight;
-			props->min_latency += iolink3->min_latency;
-			props->max_latency += iolink3->max_latency;
-			props->min_bandwidth = min(props->min_bandwidth,
-							iolink3->min_bandwidth);
-			props->max_bandwidth = min(props->max_bandwidth,
-							iolink3->max_bandwidth);
+			list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
+				if (iolink3->node_to != iolink2->node_to)
+					continue;
+
+				props->weight += iolink3->weight;
+				props->min_latency += iolink3->min_latency;
+				props->max_latency += iolink3->max_latency;
+				props->min_bandwidth = min(props->min_bandwidth,
+							   iolink3->min_bandwidth);
+				props->max_bandwidth = min(props->max_bandwidth,
+							   iolink3->max_bandwidth);
+				break;
+			}
 		} else {
 			WARN(1, "CPU node not found");
 		}
@@ -37,6 +37,7 @@
 #include "dc/dc_dmub_srv.h"
 #include "dc/dc_edid_parser.h"
 #include "dc/dc_stat.h"
+#include "dc/dc_state.h"
 #include "amdgpu_dm_trace.h"
 #include "dpcd_defs.h"
 #include "link/protocols/link_dpcd.h"
@@ -66,7 +67,6 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
 #include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -1294,7 +1294,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	/* AGP aperture is disabled */
 	if (agp_bot > agp_top) {
 		logical_addr_low = adev->gmc.fb_start >> 18;
-		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+				       AMD_APU_IS_RENOIR |
+				       AMD_APU_IS_GREEN_SARDINE))
 			/*
 			 * Raven2 has a HW issue that it is unable to use the vram which
 			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1306,7 +1308,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 			logical_addr_high = adev->gmc.fb_end >> 18;
 	} else {
 		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
-		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+				       AMD_APU_IS_RENOIR |
+				       AMD_APU_IS_GREEN_SARDINE))
 			/*
 			 * Raven2 has a HW issue that it is unable to use the vram which
 			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -1711,6 +1715,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
 	init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
+	init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
 	/* Enable DWB for tested platforms only */
 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
 		init_data.num_virtual_links = 1;
@@ -2607,12 +2613,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 
 	memset(del_streams, 0, sizeof(del_streams));
 
-	context = dc_create_state(dc);
+	context = dc_state_create_current_copy(dc);
 	if (context == NULL)
 		goto context_alloc_fail;
 
-	dc_resource_state_copy_construct_current(dc, context);
-
 	/* First remove from context all streams */
 	for (i = 0; i < context->stream_count; i++) {
 		struct dc_stream_state *stream = context->streams[i];
@@ -2622,12 +2626,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 
 	/* Remove all planes for removed streams and then remove the streams */
 	for (i = 0; i < del_streams_count; i++) {
-		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+		if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
 			res = DC_FAIL_DETACH_SURFACES;
 			goto fail;
 		}
 
-		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+		res = dc_state_remove_stream(dc, context, del_streams[i]);
 		if (res != DC_OK)
 			goto fail;
 	}
@@ -2635,7 +2639,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 	res = dc_commit_streams(dc, context->streams, context->stream_count);
 
 fail:
-	dc_release_state(context);
+	dc_state_release(context);
 
 context_alloc_fail:
 	return res;
@@ -2662,7 +2666,7 @@ static int dm_suspend(void *handle)
 
 		dc_allow_idle_optimizations(adev->dm.dc, false);
 
-		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+		dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
 
 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
 
@@ -2856,7 +2860,7 @@ static int dm_resume(void *handle)
 	bool need_hotplug = false;
 
 	if (dm->dc->caps.ips_support) {
-		dc_dmub_srv_exit_low_power_state(dm->dc);
+		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
 	}
 
 	if (amdgpu_in_reset(adev)) {
@@ -2909,7 +2913,7 @@ static int dm_resume(void *handle)
 
 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
 
-		dc_release_state(dm->cached_dc_state);
+		dc_state_release(dm->cached_dc_state);
 		dm->cached_dc_state = NULL;
 
 		amdgpu_dm_irq_resume_late(adev);
@@ -2919,10 +2923,9 @@ static int dm_resume(void *handle)
 		return 0;
 	}
 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
-	dc_release_state(dm_state->context);
-	dm_state->context = dc_create_state(dm->dc);
+	dc_state_release(dm_state->context);
+	dm_state->context = dc_state_create(dm->dc);
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
-	dc_resource_state_construct(dm->dc, dm_state->context);
 
 	/* Before powering on DC we need to re-initialize DMUB. */
 	dm_dmub_hw_resume(adev);
@@ -3998,7 +4001,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
 	old_state = to_dm_atomic_state(obj->state);
 
 	if (old_state && old_state->context)
-		new_state->context = dc_copy_state(old_state->context);
+		new_state->context = dc_state_create_copy(old_state->context);
 
 	if (!new_state->context) {
 		kfree(new_state);
@@ -4014,7 +4017,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj,
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 
 	if (dm_state && dm_state->context)
-		dc_release_state(dm_state->context);
+		dc_state_release(dm_state->context);
 
 	kfree(dm_state);
 }
@@ -4050,14 +4053,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 	if (!state)
 		return -ENOMEM;
 
-	state->context = dc_create_state(adev->dm.dc);
+	state->context = dc_state_create_current_copy(adev->dm.dc);
 	if (!state->context) {
 		kfree(state);
 		return -ENOMEM;
 	}
 
-	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
-
 	drm_atomic_private_obj_init(adev_to_drm(adev),
 				    &adev->dm.atomic_obj,
 				    &state->base,
@@ -4065,7 +4066,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r) {
-		dc_release_state(state->context);
+		dc_state_release(state->context);
 		kfree(state);
 		return r;
 	}
@@ -4077,7 +4078,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	r = amdgpu_dm_audio_init(adev);
 	if (r) {
-		dc_release_state(state->context);
+		dc_state_release(state->context);
 		kfree(state);
 		return r;
 	}
@@ -4391,7 +4392,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	const struct dc_plane_cap *plane;
 	bool psr_feature_enabled = false;
-	bool replay_feature_enabled = false;
 	int max_overlay = dm->dc->caps.max_slave_planes;
 
 	dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4503,20 +4503,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		}
 	}
 
-	if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
-		switch (adev->ip_versions[DCE_HWIP][0]) {
-		case IP_VERSION(3, 1, 4):
-		case IP_VERSION(3, 1, 5):
-		case IP_VERSION(3, 1, 6):
-		case IP_VERSION(3, 2, 0):
-		case IP_VERSION(3, 2, 1):
-			replay_feature_enabled = true;
-			break;
-		default:
-			replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
-			break;
-		}
-	}
 	/* loops over all connectors on the board */
 	for (i = 0; i < link_cnt; i++) {
 		struct dc_link *link = NULL;
@@ -4585,12 +4571,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 				amdgpu_dm_update_connector_after_detect(aconnector);
 				setup_backlight_device(dm, aconnector);
 
-				/*
-				 * Disable psr if replay can be enabled
-				 */
-				if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
-					psr_feature_enabled = false;
-
 				if (psr_feature_enabled)
 					amdgpu_dm_set_psr_caps(link);
 
@@ -6260,8 +6240,9 @@ create_stream_for_sink(struct drm_connector *connector,
 
 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
-	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+		 stream->signal == SIGNAL_TYPE_EDP) {
 		//
 		// should decide stream support vsc sdp colorimetry capability
 		// before building vsc info packet
@@ -6277,8 +6258,9 @@ create_stream_for_sink(struct drm_connector *connector,
 		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
 			tf = TRANSFER_FUNC_GAMMA_22;
 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
+		if (stream->link->psr_settings.psr_feature_enabled)
+			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 	}
 finish:
 	dc_sink_release(sink);
@@ -6658,7 +6640,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 	if (!dc_plane_state)
 		goto cleanup;
 
-	dc_state = dc_create_state(dc);
+	dc_state = dc_state_create(dc);
 	if (!dc_state)
 		goto cleanup;
 
@@ -6685,9 +6667,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 		dc_result = dc_validate_plane(dc, dc_plane_state);
 
 	if (dc_result == DC_OK)
-		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+		dc_result = dc_state_add_stream(dc, dc_state, stream);
 
-	if (dc_result == DC_OK && !dc_add_plane_to_context(
+	if (dc_result == DC_OK && !dc_state_add_plane(
 						dc,
 						stream,
 						dc_plane_state,
@@ -6699,7 +6681,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
 
 cleanup:
 	if (dc_state)
-		dc_release_state(dc_state);
+		dc_state_release(dc_state);
 
 	if (dc_plane_state)
 		dc_plane_state_release(dc_plane_state);
@@ -7007,8 +6989,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 	if (IS_ERR(mst_state))
 		return PTR_ERR(mst_state);
 
-	if (!mst_state->pbn_div.full)
-		mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+	mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
 
 	if (!state->duplicated) {
 		int max_bpc = conn_state->max_requested_bpc;
@@ -8858,7 +8839,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
 					dc_stream_get_status(dm_new_crtc_state->stream);
 
 			if (!status)
-				status = dc_stream_get_status_from_state(dc_state,
-								 dm_new_crtc_state->stream);
+				status = dc_state_get_stream_status(dc_state,
+									 dm_new_crtc_state->stream);
 			if (!status)
 				drm_err(dev,
@@ -9001,7 +8982,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			if (new_con_state->crtc &&
 				new_con_state->crtc->state->active &&
 				drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
-				dc_dmub_srv_exit_low_power_state(dm->dc);
+				dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
 				break;
 			}
 		}
@@ -9783,7 +9764,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 				crtc->base.id);
 
 		/* i.e. reset mode */
-		if (dc_remove_stream_from_ctx(
+		if (dc_state_remove_stream(
 				dm->dc,
 				dm_state->context,
 				dm_old_crtc_state->stream) != DC_OK) {
@@ -9826,7 +9807,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
 				 crtc->base.id);
 
-			if (dc_add_stream_to_ctx(
+			if (dc_state_add_stream(
 					dm->dc,
 					dm_state->context,
 					dm_new_crtc_state->stream) != DC_OK) {
@@ -10148,7 +10129,7 @@ static int dm_update_plane_state(struct dc *dc,
 		if (ret)
 			return ret;
 
-		if (!dc_remove_plane_from_context(
+		if (!dc_state_remove_plane(
 				dc,
 				dm_old_crtc_state->stream,
 				dm_old_plane_state->dc_state,
@@ -10226,7 +10207,7 @@ static int dm_update_plane_state(struct dc *dc,
 		 * state. It'll be released when the atomic state is
 		 * cleaned.
 		 */
-		if (!dc_add_plane_to_context(
+		if (!dc_state_add_plane(
 				dc,
 				dm_new_crtc_state->stream,
 				dc_new_plane_state,
| 
						 | 
					@ -10772,7 +10753,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 | 
				
			||||||
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
 | 
								DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
 | 
				
			||||||
			goto fail;
 | 
								goto fail;
 | 
				
			||||||
		}
 | 
							}
 | 
				
			||||||
		status = dc_validate_global_state(dc, dm_state->context, true);
 | 
							status = dc_validate_global_state(dc, dm_state->context, false);
 | 
				
			||||||
		if (status != DC_OK) {
 | 
							if (status != DC_OK) {
 | 
				
			||||||
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
 | 
								DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
 | 
				
			||||||
				       dc_status_to_str(status), status);
 | 
									       dc_status_to_str(status), status);
 | 
				
			||||||
| 
						 | 
					@ -10910,7 +10891,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
 | 
				
			||||||
	input->cea_total_length = total_length;
 | 
						input->cea_total_length = total_length;
 | 
				
			||||||
	memcpy(input->payload, data, length);
 | 
						memcpy(input->payload, data, length);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 | 
						res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 | 
				
			||||||
	if (!res) {
 | 
						if (!res) {
 | 
				
			||||||
		DRM_ERROR("EDID CEA parser failed\n");
 | 
							DRM_ERROR("EDID CEA parser failed\n");
 | 
				
			||||||
		return false;
 | 
							return false;
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -747,7 +747,7 @@ enum amdgpu_transfer_function {
 	AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF,
 	AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF,
 	AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF,
-        AMDGPU_TRANSFER_FUNCTION_COUNT
+	AMDGPU_TRANSFER_FUNCTION_COUNT
 };
 
 struct dm_plane_state {
@@ -844,7 +844,7 @@ struct dm_crtc_state {
 
 	int abm_level;
 
-        /**
+	/**
 	 * @regamma_tf:
 	 *
 	 * Pre-defined transfer function for converting internal FB -> wire
@@ -85,6 +85,18 @@ void amdgpu_dm_init_color_mod(void)
 	setup_x_points_distribution();
 }
 
+static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x)
+{
+	struct fixed31_32 val;
+
+	/* If negative, convert to 2's complement. */
+	if (x & (1ULL << 63))
+		x = -(x & ~(1ULL << 63));
+
+	val.value = x;
+	return val;
+}
+
 #ifdef AMD_PRIVATE_COLOR
 /* Pre-defined Transfer Functions (TF)
  *
@@ -430,7 +442,7 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
 		}
 
 		/* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
-		matrix[i] = dc_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
+		matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]);
 	}
 }
 
@@ -452,7 +464,7 @@ static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm,
 	 */
 	for (i = 0; i < 12; i++) {
 		/* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
-		matrix[i] = dc_fixpt_from_s3132(ctm->matrix[i]);
+		matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]);
 	}
 }
 
@@ -630,8 +642,7 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
 static enum dc_transfer_func_predefined
 amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
 {
-	switch (tf)
-	{
+	switch (tf) {
 	default:
 	case AMDGPU_TRANSFER_FUNCTION_DEFAULT:
 	case AMDGPU_TRANSFER_FUNCTION_IDENTITY:
@@ -1137,7 +1148,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
 	uint32_t shaper_size, lut3d_size, blend_size;
 	int ret;
 
-	dc_plane_state->hdr_mult = dc_fixpt_from_s3132(dm_plane_state->hdr_mult);
+	dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult);
 
 	shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size);
 	shaper_size = shaper_lut != NULL ? shaper_size : 0;
@@ -1225,7 +1236,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 	 * plane and CRTC degamma at the same time. Explicitly reject atomic
 	 * updates when userspace sets both plane and CRTC degamma properties.
 	 */
-	if (has_crtc_cm_degamma && ret != -EINVAL){
+	if (has_crtc_cm_degamma && ret != -EINVAL) {
 		drm_dbg_kms(crtc->base.crtc->dev,
 			    "doesn't support plane and CRTC degamma at the same time\n");
 			return -EINVAL;
@@ -29,7 +29,6 @@
 #include "dc.h"
 #include "amdgpu.h"
 #include "amdgpu_dm_psr.h"
-#include "amdgpu_dm_replay.h"
 #include "amdgpu_dm_crtc.h"
 #include "amdgpu_dm_plane.h"
 #include "amdgpu_dm_trace.h"
@@ -124,12 +123,7 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 	 * fill_dc_dirty_rects().
 	 */
 	if (vblank_work->stream && vblank_work->stream->link) {
-		/*
-		 * Prioritize replay, instead of psr
-		 */
-		if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
-			amdgpu_dm_replay_enable(vblank_work->stream, false);
-		else if (vblank_work->enable) {
+		if (vblank_work->enable) {
 			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
 			    vblank_work->stream->link->psr_settings.psr_allow_active)
 				amdgpu_dm_psr_disable(vblank_work->stream);
@@ -138,7 +132,6 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
 			   !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
 #endif
-			   vblank_work->stream->link->panel_config.psr.disallow_replay &&
 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
 			amdgpu_dm_psr_enable(vblank_work->stream);
 		}
@@ -312,7 +305,7 @@ dm_crtc_additional_color_mgmt(struct drm_crtc *crtc)
 {
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 
-	if(adev->dm.dc->caps.color.mpc.ogam_ram)
+	if (adev->dm.dc->caps.color.mpc.ogam_ram)
 		drm_object_attach_property(&crtc->base,
 					   adev->mode_info.regamma_tf_property,
 					   AMDGPU_TRANSFER_FUNCTION_DEFAULT);
@@ -2976,7 +2976,6 @@ static int dmub_trace_mask_set(void *data, u64 val)
 	struct amdgpu_device *adev = data;
 	struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
 	enum dmub_gpint_command cmd;
-	enum dmub_status status;
 	u64 mask = 0xffff;
 	u8 shift = 0;
 	u32 res;
@@ -3003,13 +3002,7 @@ static int dmub_trace_mask_set(void *data, u64 val)
 			break;
 		}
 
-		status = dmub_srv_send_gpint_command(srv, cmd, res, 30);
-
-		if (status == DMUB_STATUS_TIMEOUT)
-			return -ETIMEDOUT;
-		else if (status == DMUB_STATUS_INVALID)
-			return -EINVAL;
-		else if (status != DMUB_STATUS_OK)
+		if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT))
 			return -EIO;
 
 		usleep_range(100, 1000);
@@ -3026,7 +3019,6 @@ static int dmub_trace_mask_show(void *data, u64 *val)
 	enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0;
 	struct amdgpu_device *adev = data;
 	struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub;
-	enum dmub_status status;
 	u8 shift = 0;
 	u64 raw = 0;
 	u64 res = 0;
@@ -3036,23 +3028,12 @@ static int dmub_trace_mask_show(void *data, u64 *val)
 		return -EINVAL;
 
 	while (i < 4) {
-		status = dmub_srv_send_gpint_command(srv, cmd, 0, 30);
+		uint32_t response;
 
-		if (status == DMUB_STATUS_OK) {
-			status = dmub_srv_get_gpint_response(srv, (u32 *) &raw);
-
-			if (status == DMUB_STATUS_INVALID)
-				return -EINVAL;
-			else if (status != DMUB_STATUS_OK)
-				return -EIO;
-		} else if (status == DMUB_STATUS_TIMEOUT) {
-			return -ETIMEDOUT;
-		} else if (status == DMUB_STATUS_INVALID) {
-			return -EINVAL;
-		} else {
+		if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 			return -EIO;
-		}
 
+		raw = response;
 		usleep_range(100, 1000);
 
 		cmd++;
@@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link)
 	    !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
 		return false;
 
+	if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
+		return false;
+
 	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
 }
 
@@ -34,8 +34,6 @@ DC_LIBS += dcn21
 DC_LIBS += dcn201
 DC_LIBS += dcn30
 DC_LIBS += dcn301
-DC_LIBS += dcn302
-DC_LIBS += dcn303
 DC_LIBS += dcn31
 DC_LIBS += dcn314
 DC_LIBS += dcn32
@@ -62,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
 include $(AMD_DC)
 
 DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
 
 DISPLAY_CORE += dc_vm_helper.o
 
@@ -103,7 +103,8 @@ void convert_float_matrix(
 
 static uint32_t find_gcd(uint32_t a, uint32_t b)
 {
-	uint32_t remainder = 0;
+	uint32_t remainder;
+
 	while (b != 0) {
 		remainder = a % b;
 		a = b;
@@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
 		DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
 		break;
 	case AS_SIGNAL_TYPE_DISPLAY_PORT:
-		ss_info->spread_spectrum_percentage =
+		if (bp->base.integrated_info) {
+			DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
+			ss_info->spread_spectrum_percentage =
+					bp->base.integrated_info->gpuclk_ss_percentage;
+			ss_info->type.CENTER_MODE =
+					bp->base.integrated_info->gpuclk_ss_type;
+		} else {
+			ss_info->spread_spectrum_percentage =
 				disp_cntl_tbl->dp_ss_percentage;
-		ss_info->spread_spectrum_range =
+			ss_info->spread_spectrum_range =
 				disp_cntl_tbl->dp_ss_rate_10hz * 10;
-		if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
-			ss_info->type.CENTER_MODE = true;
+			if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+				ss_info->type.CENTER_MODE = true;
+		}
 		DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
 		break;
 	case AS_SIGNAL_TYPE_GPU_PLL:
@@ -2813,6 +2820,8 @@ static enum bp_result get_integrated_info_v2_2(
 	info->ma_channel_number = info_v2_2->umachannelnumber;
 	info->dp_ss_control =
 		le16_to_cpu(info_v2_2->reserved1);
+	info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
+	info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
 
 	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
 		info->ext_disp_conn_info.gu_id[i] =
@@ -123,7 +123,7 @@ static void encoder_control_dmcub(
 		sizeof(cmd.digx_encoder_control.header);
 	cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
 
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result encoder_control_digx_v1_5(
@@ -259,7 +259,7 @@ static void transmitter_control_dmcub(
 		sizeof(cmd.dig1_transmitter_control.header);
 	cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
 
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result transmitter_control_v1_6(
@@ -321,7 +321,7 @@ static void transmitter_control_dmcub_v1_7(
 		sizeof(cmd.dig1_transmitter_control.header);
 	cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
 
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result transmitter_control_v1_7(
@@ -429,7 +429,7 @@ static void set_pixel_clock_dmcub(
 		sizeof(cmd.set_pixel_clock.header);
 	cmd.set_pixel_clock.pixel_clock.clk = *clk;
 
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result set_pixel_clock_v7(
@@ -796,7 +796,7 @@ static void enable_disp_power_gating_dmcub(
 		sizeof(cmd.enable_disp_power_gating.header);
 	cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
 
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result enable_disp_power_gating_v2_1(
@@ -1006,7 +1006,7 @@ static void enable_lvtma_control_dmcub(
 			pwrseq_instance;
 	cmd.lvtma_control.data.bypass_panel_control_wait =
 			bypass_panel_control_wait;
-	dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static enum bp_result enable_lvtma_control(
@@ -29,6 +29,7 @@
 #include "dc_types.h"
 #include "dccg.h"
 #include "clk_mgr_internal.h"
+#include "dc_state_priv.h"
 #include "link.h"
 
 #include "dce100/dce_clk_mgr.h"
@@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt(
 		/* Don't count SubVP phantom pipes as part of active
 		 * display count
 		 */
-		if (stream->mall_stream_config.type == SUBVP_PHANTOM)
+		if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
 			continue;
 
 		/*
@@ -253,7 +253,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
 	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -284,7 +284,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
 	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
 	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -232,7 +232,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
 	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
 	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -239,7 +239,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
 	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
 	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -25,7 +25,6 @@
 
 #include "dccg.h"
 #include "clk_mgr_internal.h"
-
 #include "dcn32/dcn32_clk_mgr_smu_msg.h"
 #include "dcn20/dcn20_clk_mgr.h"
 #include "dce100/dce_clk_mgr.h"
@@ -34,7 +33,7 @@
 #include "core_types.h"
 #include "dm_helpers.h"
 #include "link.h"
+#include "dc_state_priv.h"
 #include "atomfirmware.h"
 #include "smu13_driver_if.h"
 
@@ -458,13 +457,43 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
 	return 0;
 }
 
-static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
+static bool dcn32_check_native_scaling(struct pipe_ctx *pipe)
+{
+	bool is_native_scaling = false;
+	int width = pipe->plane_state->src_rect.width;
+	int height = pipe->plane_state->src_rect.height;
+
+	if (pipe->stream->timing.h_addressable == width &&
+			pipe->stream->timing.v_addressable == height &&
+			pipe->plane_state->dst_rect.width == width &&
+			pipe->plane_state->dst_rect.height == height)
+		is_native_scaling = true;
+
+	return is_native_scaling;
+}
+
+static void dcn32_auto_dpm_test_log(
+		struct dc_clocks *new_clocks,
+		struct clk_mgr_internal *clk_mgr,
+		struct dc_state *context)
 {
 	unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
-				 fclk_khz_reg;
+				 fclk_khz_reg, mall_ss_size_bytes;
 	int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
 
-	msleep(5);
+	struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+	int active_pipe_count = 0;
+
+	for (int i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+			pipe_ctx_list[active_pipe_count] = pipe_ctx;
+			active_pipe_count++;
+		}
+	}
+
+	mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
 
     dispclk_khz_reg    = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
     dppclk_khz_reg     = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
@@ -494,16 +523,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
 	//
 	//				AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
 	////////////////////////////////////////////////////////////////////////////
-	if (new_clocks &&
+	if (new_clocks && active_pipe_count > 0 &&
 		new_clocks->dramclk_khz > 0 &&
 		new_clocks->fclk_khz > 0 &&
 		new_clocks->dcfclk_khz > 0 &&
 		new_clocks->dppclk_khz > 0) {
 
+		uint32_t pix_clk_list[MAX_PIPES] = {0};
+		int p_state_list[MAX_PIPES] = {0};
+		int disp_src_width_list[MAX_PIPES] = {0};
+		int disp_src_height_list[MAX_PIPES] = {0};
+		uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+		bool is_scaled_list[MAX_PIPES] = {0};
+
+		for (int i = 0; i < active_pipe_count; i++) {
+			struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+			uint64_t refresh_rate;
+
+			pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+			p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+			refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+				curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+			refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+			refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+			disp_src_refresh_list[i] = refresh_rate;
+
+			if (curr_pipe_ctx->plane_state) {
+				is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx));
+				disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+				disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+			}
+		}
+
 		DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
 			"dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
 			"dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
-			"dtbclk_hw:%d - fclk_hw:%d\n",
+			"dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+			"pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+			"p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+			"pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+			"pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+			"pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+			"pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
 			dramclk_khz_override,
 			fclk_khz_override,
 			new_clocks->dcfclk_khz,
@@ -513,7 +575,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
 			dprefclk_khz_reg,
 			dcfclk_khz_reg,
 			dtbclk_khz_reg,
-			fclk_khz_reg);
+			fclk_khz_reg,
+			pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2],
+			mall_ss_size_bytes,
+			p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+			disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+			disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+			disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+			disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
 	}
 }
 
@@ -686,6 +755,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 		/* DCCG requires KHz precision for DTBCLK */
 		clk_mgr_base->clks.ref_dtbclk_khz =
 				dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+
 		dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
 	}
 
@@ -713,8 +783,8 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
 		dmcu->funcs->set_psr_wait_loop(dmcu,
 				clk_mgr_base->clks.dispclk_khz / 1000 / 7);
 
-	if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) {
-	    dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+	if (dc->config.enable_auto_dpm_test_logs) {
+	    dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
 	}
 }
 
@@ -50,6 +50,7 @@
 #include "dc_dmub_srv.h"
 #include "link.h"
 #include "logger_types.h"
+
 #undef DC_LOGGER
 #define DC_LOGGER \
 	clk_mgr->base.base.ctx->logger
@@ -342,7 +343,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
 	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
 	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
@@ -417,9 +418,8 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
 }
 
 static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
-		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+		struct clk_mgr_dcn35 *clk_mgr)
 {
-
 }
 
 static struct clk_bw_params dcn35_bw_params = {
@@ -986,7 +986,6 @@ void dcn35_clk_mgr_construct(
 		struct dccg *dccg)
 {
 	struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
-	struct clk_log_info log_info = {0};
 	clk_mgr->base.base.ctx = ctx;
 	clk_mgr->base.base.funcs = &dcn35_funcs;
 
@@ -1039,7 +1038,7 @@ void dcn35_clk_mgr_construct(
 		dcn35_bw_params.wm_table = ddr5_wm_table;
 	}
 	/* Saved clocks configured at boot for debug purposes */
-	dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+	dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
 
 	clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
 	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
@@ -34,6 +34,8 @@
 #include "dce/dce_hwseq.h"
 
 #include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
 
 #include "gpio_service_interface.h"
 #include "clk_mgr.h"
@@ -409,9 +411,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 	 * avoid conflicting with firmware updates.
 	 */
 	if (dc->ctx->dce_version > DCE_VERSION_MAX)
-		if (dc->optimized_required || dc->wm_optimized_required)
+		if (dc->optimized_required)
 			return false;
 
+	if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
+		return true;
+
 	stream->adjust.v_total_max = adjust->v_total_max;
 	stream->adjust.v_total_mid = adjust->v_total_mid;
 	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -519,7 +524,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
 		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
 	}
 
-	dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 }
 
 static inline void
@@ -808,7 +813,7 @@ static void dc_destruct(struct dc *dc)
 		link_enc_cfg_init(dc, dc->current_state);
 
 	if (dc->current_state) {
-		dc_release_state(dc->current_state);
+		dc_state_release(dc->current_state);
 		dc->current_state = NULL;
 	}
 
@@ -1020,18 +1025,6 @@ static bool dc_construct(struct dc *dc,
 	}
 #endif
 
-	/* Creation of current_state must occur after dc->dml
-	 * is initialized in dc_create_resource_pool because
-	 * on creation it copies the contents of dc->dml
-	 */
-
-	dc->current_state = dc_create_state(dc);
-
-	if (!dc->current_state) {
-		dm_error("%s: failed to create validate ctx\n", __func__);
-		goto fail;
-	}
-
 	if (!create_links(dc, init_params->num_virtual_links))
 		goto fail;
 
@@ -1041,7 +1034,17 @@ static bool dc_construct(struct dc *dc,
 	if (!create_link_encoders(dc))
 		goto fail;
 
-	dc_resource_state_construct(dc, dc->current_state);
+	/* Creation of current_state must occur after dc->dml
+	 * is initialized in dc_create_resource_pool because
+	 * on creation it copies the contents of dc->dml
+	 */
+
+	dc->current_state = dc_state_create(dc);
+
+	if (!dc->current_state) {
+		dm_error("%s: failed to create validate ctx\n", __func__);
+		goto fail;
+	}
 
 	return true;
 
@@ -1085,7 +1088,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
 	}
 }
 
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
 {
 	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
 		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1108,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
 			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
 				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
-				get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
 			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
-				get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
 		}
 	}
 }
@@ -1115,7 +1118,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 {
 	int i, j;
-	struct dc_state *dangling_context = dc_create_state(dc);
+	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
 	struct dc_state *current_ctx;
 	struct pipe_ctx *pipe;
 	struct timing_generator *tg;
@@ -1123,8 +1126,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 	if (dangling_context == NULL)
 		return;
 
-	dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct dc_stream_state *old_stream =
 				dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1162,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 		}
 
 		if (should_disable && old_stream) {
+			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 			tg = pipe->stream_res.tg;
 			/* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1171,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 			 * state that can result in underflow or hang when enabling it
 			 * again for different use.
 			 */
-			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+			if (is_phantom) {
 				if (tg->funcs->enable_crtc) {
 					int main_pipe_width, main_pipe_height;
+					struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
 
-					main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
-					main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+					main_pipe_width = old_paired_stream->dst.width;
+					main_pipe_height = old_paired_stream->dst.height;
 					if (dc->hwss.blank_phantom)
 						dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
 					tg->funcs->enable_crtc(tg);
 				}
 			}
-			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+			if (is_phantom)
+				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+			else
+				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
 
-			if (pipe->stream && pipe->plane_state)
-				dc_update_viusal_confirm_color(dc, context, pipe);
+			if (pipe->stream && pipe->plane_state) {
+				set_p_state_switch_method(dc, context, pipe);
 | 
				
			||||||
 | 
									dc_update_visual_confirm_color(dc, context, pipe);
 | 
				
			||||||
 | 
								}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
			if (dc->hwss.apply_ctx_for_surface) {
 | 
								if (dc->hwss.apply_ctx_for_surface) {
 | 
				
			||||||
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
 | 
									apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
 | 
				
			||||||
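
The hunk above is the heart of the disable_dangling_plane() conversion: SubVP/phantom classification is no longer read from stream->mall_stream_config but queried from the dc_state through the new accessors. As an illustration only (the wrapper name below is hypothetical; dc_state_get_stream_subvp_type() and SUBVP_PHANTOM are taken from the hunk), the lookup that is now open-coded as is_phantom reads as:

/* Hypothetical wrapper, shown only to make the new lookup explicit. */
static bool stream_is_phantom(struct dc *dc, struct dc_stream_state *stream)
{
	/* SubVP/phantom classification now lives in the dc_state, not the stream. */
	return dc_state_get_stream_subvp_type(dc->current_state, stream) == SUBVP_PHANTOM;
}
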
@@ -1203,7 +1212,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
 			 * will still get it's double buffer update.
 			 */
-			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+			if (is_phantom) {
 				if (tg->funcs->disable_phantom_crtc)
 					tg->funcs->disable_phantom_crtc(tg);
 			}
@@ -1212,7 +1221,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)

 	current_ctx = dc->current_state;
 	dc->current_state = dangling_context;
-	dc_release_state(current_ctx);
+	dc_state_release(current_ctx);
 }
 
 static void disable_vbios_mode_if_required(
@@ -1284,7 +1293,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 		int count = 0;
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-		if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+		if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
 			continue;
 
 		/* Timeout 100 ms */
@@ -1510,7 +1519,7 @@ static void program_timing_sync(
 		}
 
 		for (k = 0; k < group_size; k++) {
-			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
 
 			status->timing_sync_info.group_id = num_group;
 			status->timing_sync_info.group_size = group_size;
@@ -1555,7 +1564,7 @@ static void program_timing_sync(
 		if (group_size > 1) {
 			if (sync_type == TIMING_SYNCHRONIZABLE) {
 				dc->hwss.enable_timing_synchronization(
-					dc, group_index, group_size, pipe_set);
+					dc, ctx, group_index, group_size, pipe_set);
 			} else
 				if (sync_type == VBLANK_SYNCHRONIZABLE) {
 				dc->hwss.enable_vblanks_synchronization(
@@ -1837,7 +1846,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
 		/* Check old context for SubVP */
-		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
 		if (subvp_prev_use)
 			break;
 	}
@@ -1995,9 +2004,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	old_state = dc->current_state;
 	dc->current_state = context;
 
-	dc_release_state(old_state);
+	dc_state_release(old_state);
 
-	dc_retain_state(dc->current_state);
+	dc_state_retain(dc->current_state);
 
 	return result;
 }
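
The swap at the end of dc_commit_state_no_check() above is the refcounted dc_state lifecycle in miniature: the new context becomes dc->current_state, the reference on the previous state is dropped, and a reference is taken on the now-current state. A minimal sketch of that flow, assuming only the dc_state_retain()/dc_state_release() calls visible in the hunk (the helper name is hypothetical):

/* Sketch of the publish step performed in dc_commit_state_no_check() above. */
static void publish_context(struct dc *dc, struct dc_state *context)
{
	struct dc_state *old_state = dc->current_state;

	dc->current_state = context;        /* the context now backs dc->current_state */
	dc_state_release(old_state);        /* drop the reference on the previous state */
	dc_state_retain(dc->current_state); /* take an additional reference on the new one */
}
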
@@ -2068,12 +2077,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
 	if (handle_exit_odm2to1)
 		res = commit_minimal_transition_state(dc, dc->current_state);
 
-	context = dc_create_state(dc);
+	context = dc_state_create_current_copy(dc);
 	if (!context)
 		goto context_alloc_fail;
 
-	dc_resource_state_copy_construct_current(dc, context);
-
 	res = dc_validate_with_context(dc, set, stream_count, context, false);
 	if (res != DC_OK) {
 		BREAK_TO_DEBUGGER();
@@ -2088,7 +2095,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
 				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
 
 			if (dc_is_embedded_signal(streams[i]->signal)) {
-				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+				struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
 
 				if (dc->hwss.is_abm_supported)
 					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2099,7 +2106,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
 	}
 
 fail:
-	dc_release_state(context);
+	dc_state_release(context);
 
 context_alloc_fail:
 
@@ -2153,7 +2160,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
 		pipe = &context->res_ctx.pipe_ctx[i];
 
 		// Don't check flip pending on phantom pipes
-		if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+		if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
 			continue;
 
 		/* Must set to false to start with, due to OR in update function */
@@ -2211,7 +2218,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 			if (context->res_ctx.pipe_ctx[i].stream == NULL ||
 					context->res_ctx.pipe_ctx[i].plane_state == NULL) {
 				context->res_ctx.pipe_ctx[i].pipe_idx = i;
-				dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+				dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
 			}
 
 		process_deferred_updates(dc);
@@ -2223,104 +2230,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 	}
 
 	dc->optimized_required = false;
-	dc->wm_optimized_required = false;
-}
-
-static void init_state(struct dc *dc, struct dc_state *context)
-{
-	/* Each context must have their own instance of VBA and in order to
-	 * initialize and obtain IP and SOC the base DML instance from DC is
-	 * initially copied into every context
-	 */
-	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
-	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
-					    GFP_KERNEL);
-
-	if (!context)
-		return NULL;
-
-	init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-	if (dc->debug.using_dml2) {
-		dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
-	}
-#endif
-	kref_init(&context->refcount);
-
-	return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
-	int i, j;
-	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-
-	if (!new_ctx)
-		return NULL;
-	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-	if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
-		dc_release_state(new_ctx);
-		return NULL;
- 	}
-#endif
-
-	for (i = 0; i < MAX_PIPES; i++) {
-			struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
-			if (cur_pipe->top_pipe)
-				cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
-			if (cur_pipe->bottom_pipe)
-				cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
-			if (cur_pipe->prev_odm_pipe)
-				cur_pipe->prev_odm_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
-			if (cur_pipe->next_odm_pipe)
-				cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
-	}
-
-	for (i = 0; i < new_ctx->stream_count; i++) {
-			dc_stream_retain(new_ctx->streams[i]);
-			for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
-				dc_plane_state_retain(
-					new_ctx->stream_status[i].plane_states[j]);
-	}
-
-	kref_init(&new_ctx->refcount);
-
-	return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
-	kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
-	struct dc_state *context = container_of(kref, struct dc_state, refcount);
-	dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-	dml2_destroy(context->bw_ctx.dml2);
-	context->bw_ctx.dml2 = 0;
-#endif
-
-	kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
-	kref_put(&context->refcount, dc_state_free);
 }
 
 bool dc_set_generic_gpio_for_stereo(bool enable,
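
The block removed above was the open-coded dc_state lifecycle: dc_create_state()/dc_copy_state() allocated or duplicated a context and kref_init()'d it, dc_retain_state()/dc_release_state() were thin kref_get()/kref_put() wrappers, and dc_state_free() destructed and kvfree()'d the context on the final put. The same pattern evidently survives behind the dc_state_* calls used throughout this diff. A usage sketch under that assumption, built only from calls that appear elsewhere in the patch (dc_state_create_copy(), validate_bandwidth(), dc_state_release()); the function itself is illustrative, not part of the commit:

/* Illustrative only: validate a change against a private copy of the
 * current state, then drop the copy (the final put frees it).
 */
static bool validate_on_private_copy(struct dc *dc)
{
	struct dc_state *ctx = dc_state_create_copy(dc->current_state);
	bool ok;

	if (!ctx)
		return false;

	ok = dc->res_pool->funcs->validate_bandwidth(dc, ctx, true);
	dc_state_release(ctx);
	return ok;
}

The DSC-validation hunk later in this file follows the same shape.
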
@@ -2743,8 +2652,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
 			dc->optimized_required = true;
 		}
-
-		dc->optimized_required |= dc->wm_optimized_required;
 	}
 
 	return type;
@@ -2952,9 +2859,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
 	if (update->vrr_active_fixed)
 		stream->vrr_active_fixed = *update->vrr_active_fixed;
 
-	if (update->crtc_timing_adjust)
-		stream->adjust = *update->crtc_timing_adjust;
-
 	if (update->dpms_off)
 		stream->dpms_off = *update->dpms_off;
 
@@ -2995,11 +2899,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
 				       update->dsc_config->num_slices_v != 0);
 
 		/* Use temporarry context for validating new DSC config */
-		struct dc_state *dsc_validate_context = dc_create_state(dc);
+		struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
 
 		if (dsc_validate_context) {
-			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
 			stream->timing.dsc_cfg = *update->dsc_config;
 			stream->timing.flags.DSC = enable_dsc;
 			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3008,7 +2910,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
 				update->dsc_config = NULL;
 			}
 
-			dc_release_state(dsc_validate_context);
+			dc_state_release(dsc_validate_context);
 		} else {
 			DC_ERROR("Failed to allocate new validate context for DSC change\n");
 			update->dsc_config = NULL;
@@ -3107,30 +3009,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
 			new_planes[i] = srf_updates[i].surface;
 
 		/* initialize scratch memory for building context */
-		context = dc_create_state(dc);
+		context = dc_state_create_copy(dc->current_state);
 		if (context == NULL) {
 			DC_ERROR("Failed to allocate new validate context!\n");
 			return false;
 		}
 
-		dc_resource_state_copy_construct(
-				dc->current_state, context);
-
 		/* For each full update, remove all existing phantom pipes first.
 		 * Ensures that we have enough pipes for newly added MPO planes
 		 */
-		if (dc->res_pool->funcs->remove_phantom_pipes)
-			dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+		dc_state_remove_phantom_streams_and_planes(dc, context);
+		dc_state_release_phantom_streams_and_planes(dc, context);
 
 		/*remove old surfaces from context */
-		if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+		if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
 
 			BREAK_TO_DEBUGGER();
 			goto fail;
 		}
 
 		/* add surface to context */
-		if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+		if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
 
 			BREAK_TO_DEBUGGER();
 			goto fail;
@@ -3155,19 +3054,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
 
 	if (update_type == UPDATE_TYPE_FULL) {
 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
-			/* For phantom pipes we remove and create a new set of phantom pipes
-			 * for each full update (because we don't know if we'll need phantom
-			 * pipes until after the first round of validation). However, if validation
-			 * fails we need to keep the existing phantom pipes (because we don't update
-			 * the dc->current_state).
-			 *
-			 * The phantom stream/plane refcount is decremented for validation because
-			 * we assume it'll be removed (the free comes when the dc_state is freed),
-			 * but if validation fails we have to increment back the refcount so it's
-			 * consistent.
-			 */
-			if (dc->res_pool->funcs->retain_phantom_pipes)
-				dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
 			BREAK_TO_DEBUGGER();
 			goto fail;
 		}
@@ -3188,7 +3074,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
 	return true;
 
 fail:
-	dc_release_state(context);
+	dc_state_release(context);
 
 	return false;
 
@@ -3384,7 +3270,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
 
 			update_dirty_rect->panel_inst = panel_inst;
 			update_dirty_rect->pipe_idx = j;
-			dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+			dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 		}
 	}
 }
@@ -3486,18 +3372,24 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 {
 	int i, j;
 	struct pipe_ctx *top_pipe_to_program = NULL;
+	struct dc_stream_status *stream_status = NULL;
 	dc_z10_restore(dc);
 
 	top_pipe_to_program = resource_get_otg_master_for_stream(
 			&context->res_ctx,
 			stream);
 
-	if (dc->debug.visual_confirm) {
-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+	if (!top_pipe_to_program)
+		return;
 
-			if (pipe->stream && pipe->plane_state)
-				dc_update_viusal_confirm_color(dc, context, pipe);
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream && pipe->plane_state) {
+			set_p_state_switch_method(dc, context, pipe);
+
+			if (dc->debug.visual_confirm)
+				dc_update_visual_confirm_color(dc, context, pipe);
 		}
 	}
 
@@ -3521,6 +3413,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 		}
 	}
 
+	stream_status = dc_state_get_stream_status(context, stream);
+
 	build_dmub_cmd_list(dc,
 			srf_updates,
 			surface_count,
@@ -3533,7 +3427,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
 			context->dmub_cmd_count,
 			context->block_sequence,
 			&(context->block_sequence_steps),
-			top_pipe_to_program);
+			top_pipe_to_program,
+			stream_status);
 	hwss_execute_sequence(dc,
 			context->block_sequence,
 			context->block_sequence_steps);
@@ -3629,7 +3524,7 @@ static void commit_planes_for_stream(struct dc *dc,
 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
 		// Check old context for SubVP
-		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
 		if (subvp_prev_use)
 			break;
 	}
@@ -3637,19 +3532,22 @@ static void commit_planes_for_stream(struct dc *dc,
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
 			subvp_curr_use = true;
 			break;
 		}
 	}
 
-	if (dc->debug.visual_confirm)
-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
-			if (pipe->stream && pipe->plane_state)
-				dc_update_viusal_confirm_color(dc, context, pipe);
+		if (pipe->stream && pipe->plane_state) {
+			set_p_state_switch_method(dc, context, pipe);
+
+			if (dc->debug.visual_confirm)
+				dc_update_visual_confirm_color(dc, context, pipe);
 		}
+	}
 
 	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
 		struct pipe_ctx *mpcc_pipe;
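
In the hunk above (and in commit_planes_for_stream_fast() earlier in this diff) the per-pipe loop now runs unconditionally, so that set_p_state_switch_method() is recorded for every pipe with a stream and plane, while the recolouring stays behind dc->debug.visual_confirm. Purely as an illustration of the repeated loop body (the helper below is hypothetical; the calls inside it are the ones from the hunk):

/* Hypothetical helper mirroring the loop body above; not part of the commit. */
static void update_pipe_p_state_and_visual_confirm(struct dc *dc,
		struct dc_state *context, struct pipe_ctx *pipe)
{
	if (!pipe->stream || !pipe->plane_state)
		return;

	set_p_state_switch_method(dc, context, pipe);

	if (dc->debug.visual_confirm)
		dc_update_visual_confirm_color(dc, context, pipe);
}
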
@@ -4022,7 +3920,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
-		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+		if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
 			subvp_active = true;
 			break;
 		}
@@ -4059,7 +3957,7 @@ struct pipe_split_policy_backup {
 static void release_minimal_transition_state(struct dc *dc,
 		struct dc_state *context, struct pipe_split_policy_backup *policy)
 {
-	dc_release_state(context);
+	dc_state_release(context);
 	/* restore previous pipe split and odm policy */
 	if (!dc->config.is_vmin_only_asic)
 		dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4070,7 +3968,7 @@ static void release_minimal_transition_state(struct dc *dc,
 static struct dc_state *create_minimal_transition_state(struct dc *dc,
 		struct dc_state *base_context, struct pipe_split_policy_backup *policy)
 {
-	struct dc_state *minimal_transition_context = dc_create_state(dc);
+	struct dc_state *minimal_transition_context = NULL;
 	unsigned int i, j;
 
 	if (!dc->config.is_vmin_only_asic) {
@@ -4082,7 +3980,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
 	policy->subvp_policy = dc->debug.force_disable_subvp;
 	dc->debug.force_disable_subvp = true;
 
-	dc_resource_state_copy_construct(base_context, minimal_transition_context);
+	minimal_transition_context = dc_state_create_copy(base_context);
+	if (!minimal_transition_context)
+		return NULL;
 
 	/* commit minimal state */
 	if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4114,7 +4014,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
 	bool success = false;
 	struct dc_state *minimal_transition_context;
 	struct pipe_split_policy_backup policy;
-	struct mall_temp_config mall_temp_config;
 
 	/* commit based on new context */
 	/* Since all phantom pipes are removed in full validation,
@@ -4123,8 +4022,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
 	 * pipe as subvp/phantom will be cleared (dc copy constructor
 	 * creates a shallow copy).
 	 */
-	if (dc->res_pool->funcs->save_mall_state)
-		dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
 	minimal_transition_context = create_minimal_transition_state(dc,
 			context, &policy);
 	if (minimal_transition_context) {
@@ -4137,16 +4034,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
 			success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
 		}
 		release_minimal_transition_state(dc, minimal_transition_context, &policy);
-		if (dc->res_pool->funcs->restore_mall_state)
-			dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-		/* If we do a minimal transition with plane removal and the context
-		 * has subvp we also have to retain back the phantom stream / planes
-		 * since the refcount is decremented as part of the min transition
-		 * (we commit a state with no subvp, so the phantom streams / planes
-		 * had to be removed).
-		 */
-		if (dc->res_pool->funcs->retain_phantom_pipes)
-			dc->res_pool->funcs->retain_phantom_pipes(dc, context);
 	}
 
 	if (!success) {
@@ -4214,7 +4101,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
-		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+		if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
 			subvp_in_use = true;
 			break;
 		}
@@ -4401,8 +4288,7 @@ static bool full_update_required(struct dc *dc,
 			stream_update->mst_bw_update ||
 			stream_update->func_shaper ||
 			stream_update->lut3d_func ||
-			stream_update->pending_test_pattern ||
-			stream_update->crtc_timing_adjust))
+			stream_update->pending_test_pattern))
 		return true;
 
 	if (stream) {
@@ -4480,7 +4366,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
 	struct dc_state *context;
 	enum surface_update_type update_type;
 	int i;
-	struct mall_temp_config mall_temp_config;
 	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 
 	/* In cases where MPO and split or ODM are used transitions can
@@ -4524,23 +4409,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
 		 * pipe as subvp/phantom will be cleared (dc copy constructor
 		 * creates a shallow copy).
 		 */
-		if (dc->res_pool->funcs->save_mall_state)
-			dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
 		if (!commit_minimal_transition_state(dc, context)) {
-			dc_release_state(context);
+			dc_state_release(context);
 			return false;
 		}
-		if (dc->res_pool->funcs->restore_mall_state)
-			dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
-		/* If we do a minimal transition with plane removal and the context
-		 * has subvp we also have to retain back the phantom stream / planes
-		 * since the refcount is decremented as part of the min transition
-		 * (we commit a state with no subvp, so the phantom streams / planes
-		 * had to be removed).
-		 */
-		if (dc->res_pool->funcs->retain_phantom_pipes)
-			dc->res_pool->funcs->retain_phantom_pipes(dc, context);
 		update_type = UPDATE_TYPE_FULL;
 	}
 
@@ -4597,7 +4469,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
 		struct dc_state *old = dc->current_state;
 
 		dc->current_state = context;
-		dc_release_state(old);
+		dc_state_release(old);
 
 		// clear any forced full updates
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4656,14 +4528,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	if (update_type >= UPDATE_TYPE_FULL) {
 
 		/* initialize scratch memory for building context */
-		context = dc_create_state(dc);
+		context = dc_state_create_copy(state);
 		if (context == NULL) {
 			DC_ERROR("Failed to allocate new validate context!\n");
 			return;
 		}
 
-		dc_resource_state_copy_construct(state, context);
-
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
 			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4702,7 +4572,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	if (update_type >= UPDATE_TYPE_FULL) {
 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
 			DC_ERROR("Mode validation failed for stream update!\n");
-			dc_release_state(context);
+			dc_state_release(context);
 			return;
 		}
 	}
@@ -4735,7 +4605,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 		struct dc_state *old = dc->current_state;
 
 		dc->current_state = context;
-		dc_release_state(old);
+		dc_state_release(old);
 
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4808,7 +4678,7 @@ void dc_set_power_state(
 
 	switch (power_state) {
 	case DC_ACPI_CM_POWER_STATE_D0:
-		dc_resource_state_construct(dc, dc->current_state);
+		dc_state_construct(dc, dc->current_state);
 
 		dc_z10_restore(dc);
 
@@ -4823,7 +4693,7 @@ void dc_set_power_state(
 	default:
 		ASSERT(dc->current_state->stream_count == 0);
 
-		dc_resource_state_destruct(dc->current_state);
+		dc_state_destruct(dc->current_state);
 
 		break;
 	}
@@ -4900,6 +4770,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
 	return true;
 }
 
+/* enable/disable eDP Replay without specify stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+	int i;
+	bool allow_active;
+
+	for (i = 0; i < dc->current_state->stream_count; i++) {
+		struct dc_link *link;
+		struct dc_stream_state *stream = dc->current_state->streams[i];
+
+		link = stream->link;
+		if (!link)
+			continue;
+
+		if (link->replay_settings.replay_feature_enabled) {
+			if (active && !link->replay_settings.replay_allow_active) {
+				allow_active = true;
+				if (!dc_link_set_replay_allow_active(link, &allow_active,
+					false, false, NULL))
+					return false;
+			} else if (!active && link->replay_settings.replay_allow_active) {
+				allow_active = false;
+				if (!dc_link_set_replay_allow_active(link, &allow_active,
+					true, false, NULL))
+					return false;
+			}
+		}
+	}
+
+	return true;
+}
+
 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
 {
 	if (dc->debug.disable_idle_power_optimizations)
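
dc_set_replay_allow_active(), added above, walks every stream in dc->current_state and toggles Panel Replay on each link that has the feature enabled, skipping links whose allow-active state already matches. A hypothetical call site (illustrative only; only the dc_set_replay_allow_active() signature comes from the hunk):

/* Illustrative caller: gate Replay on an idle/active transition. */
static void example_set_replay_for_idle(struct dc *dc, bool idle)
{
	if (!dc_set_replay_allow_active(dc, idle))
		DC_LOG_DEBUG("%s: Replay state change rejected\n", __func__);
}
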
@@ -5093,18 +4995,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
  */
 bool dc_is_dmub_outbox_supported(struct dc *dc)
 {
-	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
-	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
-	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
-	    !dc->debug.dpia_debug.bits.disable_dpia)
-		return true;
+	switch (dc->ctx->asic_id.chip_family) {

-	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
-	    !dc->debug.dpia_debug.bits.disable_dpia)
-		return true;
+	case FAMILY_YELLOW_CARP:
+		/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+		if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+		    !dc->debug.dpia_debug.bits.disable_dpia)
+			return true;
+	break;
+
+	case AMDGPU_FAMILY_GC_11_0_1:
+	case AMDGPU_FAMILY_GC_11_5_0:
+		if (!dc->debug.dpia_debug.bits.disable_dpia)
+			return true;
+	break;
+
+	default:
+		break;
+	}
+
 	/* dmub aux needs dmub notifications to be enabled */
 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
 
 }
 
 /**
@@ -5201,7 +5113,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
 			);
 	}
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -5255,7 +5167,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
 	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
 	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
 
-	if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
 		/* command is not processed by dmub */
 		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
 		return is_cmd_complete;
@@ -5298,7 +5210,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
 	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
 	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
 
-	if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+	if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 		/* command is not processed by dmub */
 		return DC_ERROR_UNEXPECTED;
 
@@ -5336,7 +5248,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
 	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
 	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
 }
| 
						 | 
					@ -5435,6 +5347,8 @@ bool dc_abm_save_restore(
 | 
				
			||||||
	struct dc_link *link = stream->sink->link;
 | 
						struct dc_link *link = stream->sink->link;
 | 
				
			||||||
	struct dc_link *edp_links[MAX_NUM_EDP];
 | 
						struct dc_link *edp_links[MAX_NUM_EDP];
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (link->replay_settings.replay_feature_enabled)
 | 
				
			||||||
 | 
							return false;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	/*find primary pipe associated with stream*/
 | 
						/*find primary pipe associated with stream*/
 | 
				
			||||||
	for (i = 0; i < MAX_PIPES; i++) {
 | 
						for (i = 0; i < MAX_PIPES; i++) {
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
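Each hunk above swaps dm_execute_dmub_cmd() for dc_wake_and_execute_dmub_cmd() at the point where a DMUB command is submitted, and dc_abm_save_restore() now bails out early when panel replay is enabled on the link. The new helper's name suggests that the DMUB firmware is explicitly kept awake around command submission instead of relying on the caller to manage its idle state. Below is a minimal standalone sketch of such a wrapper under that assumption; the structs and helpers are illustrative stand-ins, not the driver's dc_dmub_srv API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the firmware service and command types. */
    struct fake_dmub_srv {
        bool idle_allowed;      /* firmware may power-gate itself when true */
    };

    enum fake_wait_type { WAIT_TYPE_NO_WAIT, WAIT_TYPE_WAIT, WAIT_TYPE_WAIT_WITH_REPLY };

    struct fake_cmd { int id; };

    static bool fake_execute_cmd(struct fake_dmub_srv *srv, struct fake_cmd *cmd,
                                 enum fake_wait_type wait)
    {
        /* A real implementation would ring the firmware inbox and optionally
         * poll for completion; here we only report what would happen. */
        printf("execute cmd %d (wait=%d, idle_allowed=%d)\n",
               cmd->id, wait, srv->idle_allowed);
        return true;
    }

    /* "Wake and execute": disallow idle, submit, then re-allow idle. */
    static bool fake_wake_and_execute_cmd(struct fake_dmub_srv *srv,
                                          struct fake_cmd *cmd,
                                          enum fake_wait_type wait)
    {
        bool ret;

        srv->idle_allowed = false;  /* wake: firmware must stay up for the command */
        ret = fake_execute_cmd(srv, cmd, wait);
        srv->idle_allowed = true;   /* let it go back to its low-power state */
        return ret;
    }

    int main(void)
    {
        struct fake_dmub_srv srv = { .idle_allowed = true };
        struct fake_cmd cmd = { .id = 42 };

        return fake_wake_and_execute_cmd(&srv, &cmd, WAIT_TYPE_WAIT) ? 0 : 1;
    }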
@@ -31,6 +31,7 @@
 #include "basics/dc_common.h"
 #include "resource.h"
 #include "dc_dmub_srv.h"
+#include "dc_state_priv.h"

 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))

@@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color(
 }

 void get_subvp_visual_confirm_color(
-		struct dc *dc,
-		struct dc_state *context,
 		struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
 {
 	uint32_t color_value = MAX_TG_COLOR_VALUE;
-	bool enable_subvp = false;
-	int i;
-
-	if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
-		return;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
-		    pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-			/* SubVP enable - red */
-			color->color_g_y = 0;
-			color->color_b_cb = 0;
-			color->color_r_cr = color_value;
-			enable_subvp = true;
-
-			if (pipe_ctx->stream == pipe->stream)
-				return;
-			break;
-		}
-	}
-
-	if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
-		color->color_r_cr = 0;
-		if (pipe_ctx->stream->allow_freesync == 1) {
-			/* SubVP enable and DRR on - green */
-			color->color_b_cb = 0;
-			color->color_g_y = color_value;
-		} else {
-			/* SubVP enable and No DRR - blue */
-			color->color_g_y = 0;
-			color->color_b_cb = color_value;
-		}
-	}
-}
+
+	if (pipe_ctx) {
+		switch (pipe_ctx->p_state_type) {
+		case P_STATE_SUB_VP:
+			color->color_r_cr = color_value;
+			color->color_g_y  = 0;
+			color->color_b_cb = 0;
+			break;
+		case P_STATE_DRR_SUB_VP:
+			color->color_r_cr = 0;
+			color->color_g_y  = color_value;
+			color->color_b_cb = 0;
+			break;
+		case P_STATE_V_BLANK_SUB_VP:
+			color->color_r_cr = 0;
+			color->color_g_y  = 0;
+			color->color_b_cb = color_value;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void get_mclk_switch_visual_confirm_color(
+		struct pipe_ctx *pipe_ctx,
+		struct tg_color *color)
+{
+	uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+	if (pipe_ctx) {
+		switch (pipe_ctx->p_state_type) {
+		case P_STATE_V_BLANK:
+			color->color_r_cr = color_value;
+			color->color_g_y = color_value;
+			color->color_b_cb = 0;
+			break;
+		case P_STATE_FPO:
+			color->color_r_cr = 0;
+			color->color_g_y  = color_value;
+			color->color_b_cb = color_value;
+			break;
+		case P_STATE_V_ACTIVE:
+			color->color_r_cr = color_value;
+			color->color_g_y  = 0;
+			color->color_b_cb = color_value;
+			break;
+		case P_STATE_SUB_VP:
+			color->color_r_cr = color_value;
+			color->color_g_y  = 0;
+			color->color_b_cb = 0;
+			break;
+		case P_STATE_DRR_SUB_VP:
+			color->color_r_cr = 0;
+			color->color_g_y  = color_value;
+			color->color_b_cb = 0;
+			break;
+		case P_STATE_V_BLANK_SUB_VP:
+			color->color_r_cr = 0;
+			color->color_g_y  = 0;
+			color->color_b_cb = color_value;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void set_p_state_switch_method(
+		struct dc *dc,
+		struct dc_state *context,
+		struct pipe_ctx *pipe_ctx)
+{
+	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+	bool enable_subvp;
+
+	if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+		return;
+
+	if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+			dm_dram_clock_change_unsupported) {
+		/* MCLK switching is supported */
+		if (!pipe_ctx->has_vactive_margin) {
+			/* In Vblank - yellow */
+			pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+			if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+				/* FPO + Vblank - cyan */
+				pipe_ctx->p_state_type = P_STATE_FPO;
+			}
+		} else {
+			/* In Vactive - pink */
+			pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+		}
+
+		/* SubVP */
+		enable_subvp = false;
+
+		for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+				/* SubVP enable - red */
+				pipe_ctx->p_state_type = P_STATE_SUB_VP;
+				enable_subvp = true;
+
+				if (pipe_ctx->stream == pipe->stream)
+					return;
+				break;
+			}
+		}
+
+		if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+			if (pipe_ctx->stream->allow_freesync == 1) {
+				/* SubVP enable and DRR on - green */
+				pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+			} else {
+				/* SubVP enable and No DRR - blue */
+				pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+			}
+		}
+	}
+}
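With this change the visual-confirm helpers no longer walk the pipe list themselves: they only translate the pipe's precomputed p_state_type into a debug colour, while the new set_p_state_switch_method() decides that classification (its comments label SubVP red, SubVP with DRR green, SubVP with Vblank blue, plain Vblank yellow, FPO cyan, and Vactive pink). For reference, the same mapping written as a standalone lookup table; the enum, struct, and MAX_COLOR_VALUE below are simplified stand-ins for the driver's types, not its real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_COLOR_VALUE 0x3ff   /* stand-in for MAX_TG_COLOR_VALUE */

    /* Simplified stand-in for the driver's p-state classification. */
    enum p_state {
        PS_V_BLANK, PS_FPO, PS_V_ACTIVE,
        PS_SUB_VP, PS_DRR_SUB_VP, PS_V_BLANK_SUB_VP,
        PS_COUNT
    };

    struct color { uint32_t r_cr, g_y, b_cb; };

    /* One row per p-state; the same colours the switch statements assign. */
    static const struct color p_state_colors[PS_COUNT] = {
        [PS_V_BLANK]        = { MAX_COLOR_VALUE, MAX_COLOR_VALUE, 0 },               /* yellow */
        [PS_FPO]            = { 0,               MAX_COLOR_VALUE, MAX_COLOR_VALUE }, /* cyan   */
        [PS_V_ACTIVE]       = { MAX_COLOR_VALUE, 0,               MAX_COLOR_VALUE }, /* pink   */
        [PS_SUB_VP]         = { MAX_COLOR_VALUE, 0,               0 },               /* red    */
        [PS_DRR_SUB_VP]     = { 0,               MAX_COLOR_VALUE, 0 },               /* green  */
        [PS_V_BLANK_SUB_VP] = { 0,               0,               MAX_COLOR_VALUE }, /* blue   */
    };

    int main(void)
    {
        struct color c = p_state_colors[PS_DRR_SUB_VP];

        printf("DRR+SubVP confirm colour: r=%u g=%u b=%u\n", c.r_cr, c.g_y, c.b_cb);
        return 0;
    }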
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 		unsigned int dmub_cmd_count,
 		struct block_sequence block_sequence[],
 		int *num_steps,
-		struct pipe_ctx *pipe_ctx)
+		struct pipe_ctx *pipe_ctx,
+		struct dc_stream_status *stream_status)
 {
 	struct dc_plane_state *plane = pipe_ctx->plane_state;
 	struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 	if (dc->hwss.subvp_pipe_control_lock_fast) {
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
-		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+				plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
 		block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
 		(*num_steps)++;
 	}
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
 			}
 			if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
 				if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
-						current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+						stream_status->mall_stream_config.type == SUBVP_MAIN) {
 					block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
 					block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
 					block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 	if (dc->hwss.subvp_pipe_control_lock_fast) {
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
-		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+				plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
 		block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
 		(*num_steps)++;
 	}
@@ -724,7 +813,7 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
 	union dmub_rb_cmd *cmd = params->send_dmcub_cmd_params.cmd;
 	enum dm_dmub_wait_type wait_type = params->send_dmcub_cmd_params.wait_type;

-	dm_execute_dmub_cmd(ctx, cmd, wait_type);
+	dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
 }

 void hwss_program_manual_trigger(union block_sequence_params *params)
@@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
 	dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
 }

-void get_mclk_switch_visual_confirm_color(
-		struct dc *dc,
-		struct dc_state *context,
-		struct pipe_ctx *pipe_ctx,
-		struct tg_color *color)
-{
-	uint32_t color_value = MAX_TG_COLOR_VALUE;
-	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
-	if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
-		return;
-
-	if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
-			dm_dram_clock_change_unsupported) {
-		/* MCLK switching is supported */
-		if (!pipe_ctx->has_vactive_margin) {
-			/* In Vblank - yellow */
-			color->color_r_cr = color_value;
-			color->color_g_y = color_value;
-
-			if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
-				/* FPO + Vblank - cyan */
-				color->color_r_cr = 0;
-				color->color_g_y  = color_value;
-				color->color_b_cb = color_value;
-			}
-		} else {
-			/* In Vactive - pink */
-			color->color_r_cr = color_value;
-			color->color_b_cb = color_value;
-		}
-		/* SubVP */
-		get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
-	}
-}
-
 void get_surface_tile_visual_confirm_color(
 		struct pipe_ctx *pipe_ctx,
 		struct tg_color *color)
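hwss_build_fast_sequence() now receives the stream status directly and records a subvp_immediate_flip flag in the lock-fast step instead of a pipe pointer; the old stand-alone get_mclk_switch_visual_confirm_color() is dropped because the classification it performed now lives in set_p_state_switch_method(). The underlying structure is unchanged: each step is appended to block_sequence[] as a parameter union plus a function tag, and the filled array is walked and dispatched afterwards. A minimal standalone model of that build-then-execute pattern; all names below are illustrative, not the driver's.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative step identifiers and parameter union. */
    enum step_func { STEP_LOCK, STEP_FLIP, STEP_UNLOCK };

    union step_params {
        struct { bool lock; bool subvp_immediate_flip; } lock_params;
        struct { unsigned long addr; } flip_params;
    };

    struct step {
        union step_params params;
        enum step_func func;
    };

    #define MAX_STEPS 8

    /* Build phase: append fully described steps, touching no hardware yet. */
    static void build_sequence(struct step seq[], int *num_steps,
                               unsigned long addr, bool immediate_flip)
    {
        seq[*num_steps].params.lock_params.lock = true;
        seq[*num_steps].params.lock_params.subvp_immediate_flip = immediate_flip;
        seq[(*num_steps)++].func = STEP_LOCK;

        seq[*num_steps].params.flip_params.addr = addr;
        seq[(*num_steps)++].func = STEP_FLIP;

        seq[*num_steps].params.lock_params.lock = false;
        seq[*num_steps].params.lock_params.subvp_immediate_flip = immediate_flip;
        seq[(*num_steps)++].func = STEP_UNLOCK;
    }

    /* Execute phase: walk the array and dispatch on the function tag. */
    static void execute_sequence(const struct step seq[], int num_steps)
    {
        for (int i = 0; i < num_steps; i++) {
            switch (seq[i].func) {
            case STEP_LOCK:
            case STEP_UNLOCK:
                printf("lock=%d immediate_flip=%d\n",
                       seq[i].params.lock_params.lock,
                       seq[i].params.lock_params.subvp_immediate_flip);
                break;
            case STEP_FLIP:
                printf("flip to 0x%lx\n", seq[i].params.flip_params.addr);
                break;
            }
        }
    }

    int main(void)
    {
        struct step seq[MAX_STEPS];
        int num_steps = 0;

        build_sequence(seq, &num_steps, 0x1000, true);
        execute_sequence(seq, num_steps);
        return 0;
    }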
@@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link,
 	return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
 }

+bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+		bool wait, bool force_static, const unsigned int *power_opts)
+{
+	return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait,
+			force_static, power_opts);
+}
+
 bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
 {
 	return link->dc->link_srv->edp_get_replay_state(link, state);
@@ -497,7 +504,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 	link->dc->link_srv->enable_hpd_filter(link, enable);
 }

-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
 {
 	return dc->link_srv->validate_dpia_bandwidth(streams, count);
 }
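The new dc_link_set_replay_allow_active() export and the renamed dc_link_dp_dpia_validate() follow the shape used throughout this group of hunks: a thin public entry point that simply forwards into the link service's function table (link->dc->link_srv or dc->link_srv). A tiny standalone model of that forwarding pattern; the ops struct and names here are illustrative only.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative function table, standing in for the link service ops. */
    struct link_ops {
        bool (*set_replay_allow_active)(const bool *allow_active, bool wait);
    };

    struct link {
        const struct link_ops *ops;
    };

    /* Public wrapper: no logic of its own, it just forwards to the ops table. */
    static bool link_set_replay_allow_active(struct link *link,
                                             const bool *allow_active, bool wait)
    {
        return link->ops->set_replay_allow_active(allow_active, wait);
    }

    static bool impl_set_replay_allow_active(const bool *allow_active, bool wait)
    {
        printf("replay allow_active=%d wait=%d\n", *allow_active, wait);
        return true;
    }

    int main(void)
    {
        static const struct link_ops ops = {
            .set_replay_allow_active = impl_set_replay_allow_active,
        };
        struct link link = { .ops = &ops };
        bool active = true;

        return link_set_replay_allow_active(&link, &active, false) ? 0 : 1;
    }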
@@ -42,6 +42,7 @@
 #include "link_enc_cfg.h"
 #include "link.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 #include "virtual/virtual_link_hwss.h"
 #include "link/hwss/link_hwss_dio.h"
 #include "link/hwss/link_hwss_dpia.h"
@@ -2459,6 +2460,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
 	struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
 			&context->res_ctx, stream);

+	if (!otg_master)
+		return;
+
 	ASSERT(resource_get_odm_slice_count(otg_master) == 1);
 	ASSERT(otg_master->plane_state == NULL);
 	ASSERT(otg_master->stream_res.stream_enc);
@@ -2993,189 +2997,6 @@ bool resource_update_pipes_for_plane_with_slice_count(
 	return result;
 }

-bool dc_add_plane_to_context(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state *plane_state,
-		struct dc_state *context)
-{
-	struct resource_pool *pool = dc->res_pool;
-	struct pipe_ctx *otg_master_pipe;
-	struct dc_stream_status *stream_status = NULL;
-	bool added = false;
-
-	stream_status = dc_stream_get_status_from_state(context, stream);
-	if (stream_status == NULL) {
-		dm_error("Existing stream not found; failed to attach surface!\n");
-		goto out;
-	} else if (stream_status->plane_count == MAX_SURFACE_NUM) {
-		dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
-				plane_state, MAX_SURFACE_NUM);
-		goto out;
-	}
-
-	otg_master_pipe = resource_get_otg_master_for_stream(
-			&context->res_ctx, stream);
-	added = resource_append_dpp_pipes_for_plane_composition(context,
-			dc->current_state, pool, otg_master_pipe, plane_state);
-
-	if (added) {
-		stream_status->plane_states[stream_status->plane_count] =
-				plane_state;
-		stream_status->plane_count++;
-		dc_plane_state_retain(plane_state);
-	}
-
-out:
-	return added;
-}
-
-bool dc_remove_plane_from_context(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state *plane_state,
-		struct dc_state *context)
-{
-	int i;
-	struct dc_stream_status *stream_status = NULL;
-	struct resource_pool *pool = dc->res_pool;
-
-	if (!plane_state)
-		return true;
-
-	for (i = 0; i < context->stream_count; i++)
-		if (context->streams[i] == stream) {
-			stream_status = &context->stream_status[i];
-			break;
-		}
-
-	if (stream_status == NULL) {
-		dm_error("Existing stream not found; failed to remove plane.\n");
-		return false;
-	}
-
-	resource_remove_dpp_pipes_for_plane_composition(
-			context, pool, plane_state);
-
-	for (i = 0; i < stream_status->plane_count; i++) {
-		if (stream_status->plane_states[i] == plane_state) {
-			dc_plane_state_release(stream_status->plane_states[i]);
-			break;
-		}
-	}
-
-	if (i == stream_status->plane_count) {
-		dm_error("Existing plane_state not found; failed to detach it!\n");
-		return false;
-	}
-
-	stream_status->plane_count--;
-
-	/* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
-	for (; i < stream_status->plane_count; i++)
-		stream_status->plane_states[i] = stream_status->plane_states[i + 1];
-
-	stream_status->plane_states[stream_status->plane_count] = NULL;
-
-	if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
-		/* ODM combine could prevent us from supporting more planes
-		 * we will reset ODM slice count back to 1 when all planes have
-		 * been removed to maximize the amount of planes supported when
-		 * new planes are added.
-		 */
-		resource_update_pipes_for_stream_with_slice_count(
-				context, dc->current_state, dc->res_pool, stream, 1);
-
-	return true;
-}
-
-/**
- * dc_rem_all_planes_for_stream - Remove planes attached to the target stream.
- *
- * @dc: Current dc state.
- * @stream: Target stream, which we want to remove the attached plans.
- * @context: New context.
- *
- * Return:
- * Return true if DC was able to remove all planes from the target
- * stream, otherwise, return false.
- */
-bool dc_rem_all_planes_for_stream(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_state *context)
-{
-	int i, old_plane_count;
-	struct dc_stream_status *stream_status = NULL;
-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
-
-	for (i = 0; i < context->stream_count; i++)
-			if (context->streams[i] == stream) {
-				stream_status = &context->stream_status[i];
-				break;
-			}
-
-	if (stream_status == NULL) {
-		dm_error("Existing stream %p not found!\n", stream);
-		return false;
-	}
-
-	old_plane_count = stream_status->plane_count;
-
-	for (i = 0; i < old_plane_count; i++)
-		del_planes[i] = stream_status->plane_states[i];
-
-	for (i = 0; i < old_plane_count; i++)
-		if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
-			return false;
-
-	return true;
-}
-
-static bool add_all_planes_for_stream(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		const struct dc_validation_set set[],
-		int set_count,
-		struct dc_state *context)
-{
-	int i, j;
-
-	for (i = 0; i < set_count; i++)
-		if (set[i].stream == stream)
-			break;
-
-	if (i == set_count) {
-		dm_error("Stream %p not found in set!\n", stream);
-		return false;
-	}
-
-	for (j = 0; j < set[i].plane_count; j++)
-		if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
-			return false;
-
-	return true;
-}
-
-bool dc_add_all_planes_for_stream(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state * const *plane_states,
-		int plane_count,
-		struct dc_state *context)
-{
-	struct dc_validation_set set;
-	int i;
-
-	set.stream = stream;
-	set.plane_count = plane_count;
-
-	for (i = 0; i < plane_count; i++)
-		set.plane_states[i] = plane_states[i];
-
-	return add_all_planes_for_stream(dc, stream, &set, 1, context);
-}
-
 bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
 		       struct dc_stream_state *new_stream)
 {
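The removed dc_remove_plane_from_context() (and, later in this diff, dc_remove_stream_from_ctx() and the phantom untrack helpers in the new dc_state.c) all rely on the same "trim back the array" idiom called out in its comment: find the entry, release it, then shift the remaining entries one slot forward so the fixed-size array stays dense. A standalone sketch of that idiom, using plain ints instead of plane or stream pointers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Remove the first occurrence of 'value' from a dense array of 'count'
     * elements, shifting the tail forward so no holes are left.
     * Returns false if the value is not present. */
    static bool remove_and_trim(int arr[], int *count, int value)
    {
        int i;

        for (i = 0; i < *count; i++)
            if (arr[i] == value)
                break;

        if (i == *count)
            return false;           /* not found, nothing removed */

        (*count)--;
        for (; i < *count; i++)     /* shift the tail one slot forward */
            arr[i] = arr[i + 1];

        arr[*count] = 0;            /* clear the now-unused last slot */
        return true;
    }

    int main(void)
    {
        int planes[4] = { 11, 22, 33, 44 };
        int count = 4;

        remove_and_trim(planes, &count, 22);
        for (int i = 0; i < count; i++)
            printf("%d ", planes[i]);   /* prints: 11 33 44 */
        printf("\n");
        return 0;
    }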
@@ -3327,84 +3148,6 @@ static struct audio *find_first_free_audio(
 	return NULL;
 }

-/*
- * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
- */
-enum dc_status dc_add_stream_to_ctx(
-		struct dc *dc,
-		struct dc_state *new_ctx,
-		struct dc_stream_state *stream)
-{
-	enum dc_status res;
-	DC_LOGGER_INIT(dc->ctx->logger);
-
-	if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
-		DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
-		return DC_ERROR_UNEXPECTED;
-	}
-
-	new_ctx->streams[new_ctx->stream_count] = stream;
-	dc_stream_retain(stream);
-	new_ctx->stream_count++;
-
-	res = resource_add_otg_master_for_stream_output(
-			new_ctx, dc->res_pool, stream);
-	if (res != DC_OK)
-		DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
-
-	return res;
-}
-
-/*
- * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
- */
-enum dc_status dc_remove_stream_from_ctx(
-			struct dc *dc,
-			struct dc_state *new_ctx,
-			struct dc_stream_state *stream)
-{
-	int i;
-	struct dc_context *dc_ctx = dc->ctx;
-	struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
-			&new_ctx->res_ctx, stream);
-
-	if (!del_pipe) {
-		DC_ERROR("Pipe not found for stream %p !\n", stream);
-		return DC_ERROR_UNEXPECTED;
-	}
-
-	resource_update_pipes_for_stream_with_slice_count(new_ctx,
-			dc->current_state, dc->res_pool, stream, 1);
-	resource_remove_otg_master_for_stream_output(
-			new_ctx, dc->res_pool, stream);
-
-	for (i = 0; i < new_ctx->stream_count; i++)
-		if (new_ctx->streams[i] == stream)
-			break;
-
-	if (new_ctx->streams[i] != stream) {
-		DC_ERROR("Context doesn't have stream %p !\n", stream);
-		return DC_ERROR_UNEXPECTED;
-	}
-
-	dc_stream_release(new_ctx->streams[i]);
-	new_ctx->stream_count--;
-
-	/* Trim back arrays */
-	for (; i < new_ctx->stream_count; i++) {
-		new_ctx->streams[i] = new_ctx->streams[i + 1];
-		new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
-	}
-
-	new_ctx->streams[new_ctx->stream_count] = NULL;
-	memset(
-			&new_ctx->stream_status[new_ctx->stream_count],
-			0,
-			sizeof(new_ctx->stream_status[0]));
-
-	return DC_OK;
-}
-
 static struct dc_stream_state *find_pll_sharable_stream(
 		struct dc_stream_state *stream_needs_pll,
 		struct dc_state *context)
@@ -3784,34 +3527,6 @@ enum dc_status resource_map_pool_resources(
 	return DC_ERROR_UNEXPECTED;
 }

-/**
- * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
- *
- * @dc: copy out of dc->current_state
- * @dst_ctx: copy into this
- *
- * This function makes a shallow copy of the current DC state and increments
- * refcounts on existing streams and planes.
- */
-void dc_resource_state_copy_construct_current(
-		const struct dc *dc,
-		struct dc_state *dst_ctx)
-{
-	dc_resource_state_copy_construct(dc->current_state, dst_ctx);
-}
-
-
-void dc_resource_state_construct(
-		const struct dc *dc,
-		struct dc_state *dst_ctx)
-{
-	dst_ctx->clk_mgr = dc->clk_mgr;
-
-	/* Initialise DIG link encoder resource tracking variables. */
-	link_enc_cfg_init(dc, dst_ctx);
-}
-
-
 bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
 {
 	if (dc->res_pool == NULL)
@@ -3855,6 +3570,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
 	return false;
 }

+static bool add_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		const struct dc_validation_set set[],
+		int set_count,
+		struct dc_state *state)
+{
+	int i, j;
+
+	for (i = 0; i < set_count; i++)
+		if (set[i].stream == stream)
+			break;
+
+	if (i == set_count) {
+		dm_error("Stream %p not found in set!\n", stream);
+		return false;
+	}
+
+	for (j = 0; j < set[i].plane_count; j++)
+		if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
+			return false;
+
+	return true;
+}
+
 /**
  * dc_validate_with_context - Validate and update the potential new stream in the context object
  *
@@ -3960,7 +3700,8 @@ enum dc_status dc_validate_with_context(struct dc *dc,
 						       unchanged_streams[i],
 						       set,
 						       set_count)) {
-			if (!dc_rem_all_planes_for_stream(dc,
+
+			if (!dc_state_rem_all_planes_for_stream(dc,
 							  unchanged_streams[i],
 							  context)) {
 				res = DC_FAIL_DETACH_SURFACES;
@@ -3982,12 +3723,24 @@ enum dc_status dc_validate_with_context(struct dc *dc,
 			}
 		}

-		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
-			res = DC_FAIL_DETACH_SURFACES;
-			goto fail;
+		if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
+			/* remove phantoms specifically */
+			if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
+				res = DC_FAIL_DETACH_SURFACES;
+				goto fail;
+			}
+
+			res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
+			dc_state_release_phantom_stream(dc, context, del_streams[i]);
+		} else {
+			if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+				res = DC_FAIL_DETACH_SURFACES;
+				goto fail;
+			}
+
+			res = dc_state_remove_stream(dc, context, del_streams[i]);
 		}

-		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
 		if (res != DC_OK)
 			goto fail;
 	}
@@ -4010,7 +3763,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
 	/* Add new streams and then add all planes for the new stream */
 	for (i = 0; i < add_streams_count; i++) {
 		calculate_phy_pix_clks(add_streams[i]);
-		res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+		res = dc_state_add_stream(dc, context, add_streams[i]);
 		if (res != DC_OK)
 			goto fail;

@@ -4516,84 +4269,6 @@ static void set_vtem_info_packet(
 	*info_packet = stream->vtem_infopacket;
 }

-void dc_resource_state_destruct(struct dc_state *context)
-{
-	int i, j;
-
-	for (i = 0; i < context->stream_count; i++) {
-		for (j = 0; j < context->stream_status[i].plane_count; j++)
-			dc_plane_state_release(
-				context->stream_status[i].plane_states[j]);
-
-		context->stream_status[i].plane_count = 0;
-		dc_stream_release(context->streams[i]);
-		context->streams[i] = NULL;
-	}
-	context->stream_count = 0;
-	context->stream_mask = 0;
-	memset(&context->res_ctx, 0, sizeof(context->res_ctx));
-	memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg));
-	memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars));
-	context->clk_mgr = NULL;
-	memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw));
-	memset(context->block_sequence, 0, sizeof(context->block_sequence));
-	context->block_sequence_steps = 0;
-	memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd));
-	context->dmub_cmd_count = 0;
-	memset(&context->perf_params, 0, sizeof(context->perf_params));
-	memset(&context->scratch, 0, sizeof(context->scratch));
-}
-
-void dc_resource_state_copy_construct(
-		const struct dc_state *src_ctx,
-		struct dc_state *dst_ctx)
-{
-	int i, j;
-	struct kref refcount = dst_ctx->refcount;
-#ifdef CONFIG_DRM_AMD_DC_FP
-	struct dml2_context *dml2 = NULL;
-
-	// Need to preserve allocated dml2 context
-	if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
-		dml2 = dst_ctx->bw_ctx.dml2;
-#endif
-
-	*dst_ctx = *src_ctx;
-
-#ifdef CONFIG_DRM_AMD_DC_FP
-	// Preserve allocated dml2 context
-	if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
-		dst_ctx->bw_ctx.dml2 = dml2;
-#endif
-
-	for (i = 0; i < MAX_PIPES; i++) {
-		struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
-
-		if (cur_pipe->top_pipe)
-			cur_pipe->top_pipe =  &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
-		if (cur_pipe->bottom_pipe)
-			cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
-		if (cur_pipe->next_odm_pipe)
-			cur_pipe->next_odm_pipe =  &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
-		if (cur_pipe->prev_odm_pipe)
-			cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-	}
-
-	for (i = 0; i < dst_ctx->stream_count; i++) {
-		dc_stream_retain(dst_ctx->streams[i]);
-		for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
-			dc_plane_state_retain(
-				dst_ctx->stream_status[i].plane_states[j]);
-	}
-
-	/* context refcount should not be overridden */
-	dst_ctx->refcount = refcount;
-
-}
-
 struct clock_source *dc_resource_find_first_free_pll(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool)
@@ -5311,6 +4986,20 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
 	return DC_OK;
 }

+bool resource_subvp_in_use(struct dc *dc,
+		struct dc_state *context)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
+			return true;
+	}
+	return false;
+}
+
 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
 {
 	if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
 865  drivers/gpu/drm/amd/display/dc/core/dc_state.c  (new file)
@@ -0,0 +1,865 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "core_types.h"
+#include "core_status.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
+#include "dc_plane_priv.h"
+
+#include "dm_services.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+
+#include "dml2/dml2_wrapper.h"
+#include "dml2/dml2_internal_types.h"
+
+#define DC_LOGGER \
+	dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+/* Private dc_state helper functions */
+static bool dc_state_track_phantom_stream(struct dc_state *state,
+		struct dc_stream_state *phantom_stream)
+{
+	if (state->phantom_stream_count >= MAX_PHANTOM_PIPES)
+		return false;
+
+	state->phantom_streams[state->phantom_stream_count++] = phantom_stream;
+
+	return true;
+}
+
+static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+	bool res = false;
+	int i;
+
+	/* first find phantom stream in the dc_state */
+	for (i = 0; i < state->phantom_stream_count; i++) {
+		if (state->phantom_streams[i] == phantom_stream) {
+			state->phantom_streams[i] = NULL;
+			res = true;
+			break;
+		}
+	}
+
+	/* failed to find stream in state */
+	if (!res)
+		return res;
+
+	/* trim back phantom streams */
+	state->phantom_stream_count--;
+	for (; i < state->phantom_stream_count; i++)
+		state->phantom_streams[i] = state->phantom_streams[i + 1];
+
+	return res;
+}
+
+static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+	int i;
+
+	for (i = 0; i < state->phantom_stream_count; i++) {
+		if (state->phantom_streams[i] == phantom_stream)
+			return true;
+	}
+
+	return false;
+}
+
+static bool dc_state_track_phantom_plane(struct dc_state *state,
+		struct dc_plane_state *phantom_plane)
+{
+	if (state->phantom_plane_count >= MAX_PHANTOM_PIPES)
+		return false;
+
+	state->phantom_planes[state->phantom_plane_count++] = phantom_plane;
+
+	return true;
+}
+
+static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+	bool res = false;
+	int i;
+
+	/* first find phantom plane in the dc_state */
+	for (i = 0; i < state->phantom_plane_count; i++) {
+		if (state->phantom_planes[i] == phantom_plane) {
+			state->phantom_planes[i] = NULL;
+			res = true;
+			break;
+		}
+	}
+
+	/* failed to find plane in state */
+	if (!res)
+		return res;
+
+	/* trim back phantom planes */
+	state->phantom_plane_count--;
+	for (; i < state->phantom_plane_count; i++)
+		state->phantom_planes[i] = state->phantom_planes[i + 1];
+
+	return res;
+}
+
+static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+	int i;
+
+	for (i = 0; i < state->phantom_plane_count; i++) {
+		if (state->phantom_planes[i] == phantom_plane)
+			return true;
+	}
+
+	return false;
+}
+
+static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state)
+{
+	int i, j;
+
+	memcpy(dst_state, src_state, sizeof(struct dc_state));
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i];
+
+		if (cur_pipe->top_pipe)
+			cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+		if (cur_pipe->bottom_pipe)
+			cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+		if (cur_pipe->prev_odm_pipe)
+			cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+		if (cur_pipe->next_odm_pipe)
+			cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+	}
+
+	/* retain phantoms */
+	for (i = 0; i < dst_state->phantom_stream_count; i++)
+		dc_stream_retain(dst_state->phantom_streams[i]);
+
+	for (i = 0; i < dst_state->phantom_plane_count; i++)
+		dc_plane_state_retain(dst_state->phantom_planes[i]);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						/* retain streams and planes */
 | 
				
			||||||
 | 
						for (i = 0; i < dst_state->stream_count; i++) {
 | 
				
			||||||
 | 
							dc_stream_retain(dst_state->streams[i]);
 | 
				
			||||||
 | 
							for (j = 0; j < dst_state->stream_status[i].plane_count; j++)
 | 
				
			||||||
 | 
								dc_plane_state_retain(
 | 
				
			||||||
 | 
										dst_state->stream_status[i].plane_states[j]);
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
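The untrack helpers above all follow the same order-preserving compaction: find the entry, drop it, decrement the count, and shift the tail of the fixed-size array down by one slot. A minimal stand-alone sketch of that idiom, using a hypothetical tracker type rather than the DC structures:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical fixed-size tracker mirroring the phantom arrays above. */
#define MAX_TRACKED 6

struct tracker {
	void *items[MAX_TRACKED];
	int count;
};

/* Remove 'item' and keep the array densely packed, preserving order. */
static bool tracker_remove(struct tracker *t, void *item)
{
	int i;

	for (i = 0; i < t->count; i++)
		if (t->items[i] == item)
			break;

	if (i == t->count)
		return false;	/* not tracked */

	t->count--;
	for (; i < t->count; i++)
		t->items[i] = t->items[i + 1];

	t->items[t->count] = NULL;
	return true;
}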
static void init_state(struct dc *dc, struct dc_state *state)
{
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
	memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
}

/* Public dc_state functions */
struct dc_state *dc_state_create(struct dc *dc)
{
	struct dc_state *state = kvzalloc(sizeof(struct dc_state),
			GFP_KERNEL);

	if (!state)
		return NULL;

	init_state(dc, state);
	dc_state_construct(dc, state);

#ifdef CONFIG_DRM_AMD_DC_FP
	if (dc->debug.using_dml2)
		dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
#endif

	kref_init(&state->refcount);

	return state;
}

void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
{
	struct kref refcount = dst_state->refcount;
#ifdef CONFIG_DRM_AMD_DC_FP
	struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
#endif

	dc_state_copy_internal(dst_state, src_state);

#ifdef CONFIG_DRM_AMD_DC_FP
	dst_state->bw_ctx.dml2 = dst_dml2;
	if (src_state->bw_ctx.dml2)
		dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
#endif

	/* context refcount should not be overridden */
	dst_state->refcount = refcount;
}

struct dc_state *dc_state_create_copy(struct dc_state *src_state)
{
	struct dc_state *new_state;

	new_state = kvmalloc(sizeof(struct dc_state),
			GFP_KERNEL);
	if (!new_state)
		return NULL;

	dc_state_copy_internal(new_state, src_state);

#ifdef CONFIG_DRM_AMD_DC_FP
	if (src_state->bw_ctx.dml2 &&
			!dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
		dc_state_release(new_state);
		return NULL;
	}
#endif

	kref_init(&new_state->refcount);

	return new_state;
}

void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state)
{
	dc_state_copy(dst_state, dc->current_state);
}

struct dc_state *dc_state_create_current_copy(struct dc *dc)
{
	return dc_state_create_copy(dc->current_state);
}

void dc_state_construct(struct dc *dc, struct dc_state *state)
{
	state->clk_mgr = dc->clk_mgr;

	/* Initialise DIG link encoder resource tracking variables. */
	link_enc_cfg_init(dc, state);
}

void dc_state_destruct(struct dc_state *state)
{
	int i, j;

	for (i = 0; i < state->stream_count; i++) {
		for (j = 0; j < state->stream_status[i].plane_count; j++)
			dc_plane_state_release(
					state->stream_status[i].plane_states[j]);

		state->stream_status[i].plane_count = 0;
		dc_stream_release(state->streams[i]);
		state->streams[i] = NULL;
	}
	state->stream_count = 0;

	/* release tracked phantoms */
	for (i = 0; i < state->phantom_stream_count; i++) {
		dc_stream_release(state->phantom_streams[i]);
		state->phantom_streams[i] = NULL;
	}

	for (i = 0; i < state->phantom_plane_count; i++) {
		dc_plane_state_release(state->phantom_planes[i]);
		state->phantom_planes[i] = NULL;
	}
	state->stream_mask = 0;
	memset(&state->res_ctx, 0, sizeof(state->res_ctx));
	memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
	memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars));
	state->clk_mgr = NULL;
	memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw));
	memset(state->block_sequence, 0, sizeof(state->block_sequence));
	state->block_sequence_steps = 0;
	memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
	state->dmub_cmd_count = 0;
	memset(&state->perf_params, 0, sizeof(state->perf_params));
	memset(&state->scratch, 0, sizeof(state->scratch));
}

void dc_state_retain(struct dc_state *state)
{
	kref_get(&state->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *state = container_of(kref, struct dc_state, refcount);

	dc_state_destruct(state);

#ifdef CONFIG_DRM_AMD_DC_FP
	dml2_destroy(state->bw_ctx.dml2);
	state->bw_ctx.dml2 = 0;
#endif

	kvfree(state);
}

void dc_state_release(struct dc_state *state)
{
	kref_put(&state->refcount, dc_state_free);
}
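The functions above give dc_state a conventional kref lifecycle: dc_state_create() initialises the refcount, dc_state_retain()/dc_state_release() pair up, and the final kref_put() funnels into dc_state_free(). A minimal usage sketch, assuming a valid dc pointer (illustrative only, not a call site from this patch):

/* Illustrative only: error handling trimmed, 'dc' assumed valid. */
static void example_build_and_discard_state(struct dc *dc)
{
	struct dc_state *state = dc_state_create(dc);

	if (!state)
		return;

	/* Seed the new context from the currently committed state. */
	dc_state_copy_current(dc, state);

	/* ... stage stream/plane changes against 'state' here ... */

	/* Drop our reference; memory is freed once the last holder releases it. */
	dc_state_release(state);
}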
/*
 * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
 */
enum dc_status dc_state_add_stream(
		struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *stream)
{
	enum dc_status res;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (state->stream_count >= dc->res_pool->timing_generator_count) {
		DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
		return DC_ERROR_UNEXPECTED;
	}

	state->streams[state->stream_count] = stream;
	dc_stream_retain(stream);
	state->stream_count++;

	res = resource_add_otg_master_for_stream_output(
			state, dc->res_pool, stream);
	if (res != DC_OK)
		DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);

	return res;
}

/*
 * dc_state_remove_stream() - Remove a stream from a dc_state.
 */
enum dc_status dc_state_remove_stream(
		struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *stream)
{
	int i;
	struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
			&state->res_ctx, stream);

	if (!del_pipe) {
		dm_error("Pipe not found for stream %p !\n", stream);
		return DC_ERROR_UNEXPECTED;
	}

	resource_update_pipes_for_stream_with_slice_count(state,
			dc->current_state, dc->res_pool, stream, 1);
	resource_remove_otg_master_for_stream_output(
			state, dc->res_pool, stream);

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == stream)
			break;

	if (state->streams[i] != stream) {
		dm_error("Context doesn't have stream %p !\n", stream);
		return DC_ERROR_UNEXPECTED;
	}

	dc_stream_release(state->streams[i]);
	state->stream_count--;

	/* Trim back arrays */
	for (; i < state->stream_count; i++) {
		state->streams[i] = state->streams[i + 1];
		state->stream_status[i] = state->stream_status[i + 1];
	}

	state->streams[state->stream_count] = NULL;
	memset(
			&state->stream_status[state->stream_count],
			0,
			sizeof(state->stream_status[0]));

	return DC_OK;
}

bool dc_state_add_plane(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane_state,
		struct dc_state *state)
{
	struct resource_pool *pool = dc->res_pool;
	struct pipe_ctx *otg_master_pipe;
	struct dc_stream_status *stream_status = NULL;
	bool added = false;

	stream_status = dc_state_get_stream_status(state, stream);
	if (stream_status == NULL) {
		dm_error("Existing stream not found; failed to attach surface!\n");
		goto out;
	} else if (stream_status->plane_count == MAX_SURFACE_NUM) {
		dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
				plane_state, MAX_SURFACE_NUM);
		goto out;
	}

	otg_master_pipe = resource_get_otg_master_for_stream(
			&state->res_ctx, stream);
	added = resource_append_dpp_pipes_for_plane_composition(state,
			dc->current_state, pool, otg_master_pipe, plane_state);

	if (added) {
		stream_status->plane_states[stream_status->plane_count] =
				plane_state;
		stream_status->plane_count++;
		dc_plane_state_retain(plane_state);
	}

out:
	return added;
}

bool dc_state_remove_plane(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane_state,
		struct dc_state *state)
{
	int i;
	struct dc_stream_status *stream_status = NULL;
	struct resource_pool *pool = dc->res_pool;

	if (!plane_state)
		return true;

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == stream) {
			stream_status = &state->stream_status[i];
			break;
		}

	if (stream_status == NULL) {
		dm_error("Existing stream not found; failed to remove plane.\n");
		return false;
	}

	resource_remove_dpp_pipes_for_plane_composition(
			state, pool, plane_state);

	for (i = 0; i < stream_status->plane_count; i++) {
		if (stream_status->plane_states[i] == plane_state) {
			dc_plane_state_release(stream_status->plane_states[i]);
			break;
		}
	}

	if (i == stream_status->plane_count) {
		dm_error("Existing plane_state not found; failed to detach it!\n");
		return false;
	}

	stream_status->plane_count--;

	/* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
	for (; i < stream_status->plane_count; i++)
		stream_status->plane_states[i] = stream_status->plane_states[i + 1];

	stream_status->plane_states[stream_status->plane_count] = NULL;

	if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
		/* ODM combine could prevent us from supporting more planes
		 * we will reset ODM slice count back to 1 when all planes have
		 * been removed to maximize the amount of planes supported when
		 * new planes are added.
		 */
		resource_update_pipes_for_stream_with_slice_count(
				state, dc->current_state, dc->res_pool, stream, 1);

	return true;
}

/**
 * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream.
 *
 * @dc: Current dc state.
 * @stream: Target stream, which we want to remove the attached plans.
 * @state: context from which the planes are to be removed.
 *
 * Return:
 * Return true if DC was able to remove all planes from the target
 * stream, otherwise, return false.
 */
bool dc_state_rem_all_planes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *state)
{
	int i, old_plane_count;
	struct dc_stream_status *stream_status = NULL;
	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == stream) {
			stream_status = &state->stream_status[i];
			break;
		}

	if (stream_status == NULL) {
		dm_error("Existing stream %p not found!\n", stream);
		return false;
	}

	old_plane_count = stream_status->plane_count;

	for (i = 0; i < old_plane_count; i++)
		del_planes[i] = stream_status->plane_states[i];

	for (i = 0; i < old_plane_count; i++)
		if (!dc_state_remove_plane(dc, stream, del_planes[i], state))
			return false;

	return true;
}

bool dc_state_add_all_planes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_plane_state * const *plane_states,
		int plane_count,
		struct dc_state *state)
{
	int i;
	bool result = true;

	for (i = 0; i < plane_count; i++)
		if (!dc_state_add_plane(dc, stream, plane_states[i], state)) {
			result = false;
			break;
		}

	return result;
}
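Taken together, the add/remove helpers let a caller stage a stream and its planes against a context before committing it. A hedged sketch of that composition (the example_ function is hypothetical; error handling is trimmed):

/* Illustrative only: attach one stream and one plane to a context. */
static enum dc_status example_populate(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane)
{
	enum dc_status res = dc_state_add_stream(dc, state, stream);

	if (res != DC_OK)
		return res;

	/* dc_state_add_plane() retains the plane on success. */
	if (!dc_state_add_plane(dc, stream, plane, state))
		return DC_ERROR_UNEXPECTED;

	return DC_OK;
}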
/* Private dc_state functions */

/**
 * dc_state_get_stream_status - Get stream status from given dc state
 * @state: DC state to find the stream status in
 * @stream: The stream to get the stream status for
 *
 * The given stream is expected to exist in the given dc state. Otherwise, NULL
 * will be returned.
 */
struct dc_stream_status *dc_state_get_stream_status(
		struct dc_state *state,
		struct dc_stream_state *stream)
{
	uint8_t i;

	if (state == NULL)
		return NULL;

	for (i = 0; i < state->stream_count; i++) {
		if (stream == state->streams[i])
			return &state->stream_status[i];
	}

	return NULL;
}

enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
		const struct pipe_ctx *pipe_ctx)
{
	return dc_state_get_stream_subvp_type(state, pipe_ctx->stream);
}

enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
		const struct dc_stream_state *stream)
{
	int i;

	enum mall_stream_type type = SUBVP_NONE;

	for (i = 0; i < state->stream_count; i++) {
		if (state->streams[i] == stream) {
			type = state->stream_status[i].mall_stream_config.type;
			break;
		}
	}

	return type;
}

struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
		const struct dc_stream_state *stream)
{
	int i;

	struct dc_stream_state *paired_stream = NULL;

	for (i = 0; i < state->stream_count; i++) {
		if (state->streams[i] == stream) {
			paired_stream = state->stream_status[i].mall_stream_config.paired_stream;
			break;
		}
	}

	return paired_stream;
}

struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *main_stream)
{
	struct dc_stream_state *phantom_stream;

	DC_LOGGER_INIT(dc->ctx->logger);

	phantom_stream = dc_create_stream_for_sink(main_stream->sink);

	if (!phantom_stream) {
		DC_LOG_ERROR("Failed to allocate phantom stream.\n");
		return NULL;
	}

	/* track phantom stream in dc_state */
	dc_state_track_phantom_stream(state, phantom_stream);

	phantom_stream->is_phantom = true;
	phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
	phantom_stream->dpms_off = true;

	return phantom_stream;
}

void dc_state_release_phantom_stream(const struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *phantom_stream)
{
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!dc_state_untrack_phantom_stream(state, phantom_stream)) {
		DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state);
		return;
	}

	dc_stream_release(phantom_stream);
}

struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
		struct dc_state *state,
		struct dc_plane_state *main_plane)
{
	struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!phantom_plane) {
		DC_LOG_ERROR("Failed to allocate phantom plane.\n");
		return NULL;
	}

	/* track phantom inside dc_state */
	dc_state_track_phantom_plane(state, phantom_plane);

	phantom_plane->is_phantom = true;

	return phantom_plane;
}

void dc_state_release_phantom_plane(const struct dc *dc,
		struct dc_state *state,
		struct dc_plane_state *phantom_plane)
{
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!dc_state_untrack_phantom_plane(state, phantom_plane)) {
		DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state);
		return;
	}

	dc_plane_state_release(phantom_plane);
}

/* add phantom streams to context and generate correct meta inside dc_state */
enum dc_status dc_state_add_phantom_stream(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *phantom_stream,
		struct dc_stream_state *main_stream)
{
	struct dc_stream_status *main_stream_status;
	struct dc_stream_status *phantom_stream_status;
	enum dc_status res = dc_state_add_stream(dc, state, phantom_stream);

	/* check if stream is tracked */
	if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) {
		/* stream must be tracked if added to state */
		dc_state_track_phantom_stream(state, phantom_stream);
	}

	/* setup subvp meta */
	main_stream_status = dc_state_get_stream_status(state, main_stream);
	phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
	phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
	phantom_stream_status->mall_stream_config.paired_stream = main_stream;
	main_stream_status->mall_stream_config.type = SUBVP_MAIN;
	main_stream_status->mall_stream_config.paired_stream = phantom_stream;

	return res;
}

enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *phantom_stream)
{
	struct dc_stream_status *main_stream_status;
	struct dc_stream_status *phantom_stream_status;

	/* reset subvp meta */
	phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
	main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
	phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
	phantom_stream_status->mall_stream_config.paired_stream = NULL;
	if (main_stream_status) {
		main_stream_status->mall_stream_config.type = SUBVP_NONE;
		main_stream_status->mall_stream_config.paired_stream = NULL;
	}

	/* remove stream from state */
	return dc_state_remove_stream(dc, state, phantom_stream);
}

bool dc_state_add_phantom_plane(
		const struct dc *dc,
		struct dc_stream_state *phantom_stream,
		struct dc_plane_state *phantom_plane,
		struct dc_state *state)
{
	bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);

	/* check if stream is tracked */
	if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) {
		/* stream must be tracked if added to state */
		dc_state_track_phantom_plane(state, phantom_plane);
	}

	return res;
}

bool dc_state_remove_phantom_plane(
		const struct dc *dc,
		struct dc_stream_state *phantom_stream,
		struct dc_plane_state *phantom_plane,
		struct dc_state *state)
{
	return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state);
}

bool dc_state_rem_all_phantom_planes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *phantom_stream,
		struct dc_state *state,
		bool should_release_planes)
{
	int i, old_plane_count;
	struct dc_stream_status *stream_status = NULL;
	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };

	for (i = 0; i < state->stream_count; i++)
		if (state->streams[i] == phantom_stream) {
			stream_status = &state->stream_status[i];
			break;
		}

	if (stream_status == NULL) {
		dm_error("Existing stream %p not found!\n", phantom_stream);
		return false;
	}

	old_plane_count = stream_status->plane_count;

	for (i = 0; i < old_plane_count; i++)
		del_planes[i] = stream_status->plane_states[i];

	for (i = 0; i < old_plane_count; i++) {
		if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state))
			return false;
		if (should_release_planes)
			dc_state_release_phantom_plane(dc, state, del_planes[i]);
	}

	return true;
}

bool dc_state_add_all_phantom_planes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *phantom_stream,
		struct dc_plane_state * const *phantom_planes,
		int plane_count,
		struct dc_state *state)
{
	return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state);
}

bool dc_state_remove_phantom_streams_and_planes(
	struct dc *dc,
	struct dc_state *state)
{
	int i;
	bool removed_phantom = false;
	struct dc_stream_state *phantom_stream = NULL;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
			phantom_stream = pipe->stream;

			dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false);
			dc_state_remove_phantom_stream(dc, state, phantom_stream);
			removed_phantom = true;
		}
	}
	return removed_phantom;
}

void dc_state_release_phantom_streams_and_planes(
		struct dc *dc,
		struct dc_state *state)
{
	int i;

	for (i = 0; i < state->phantom_stream_count; i++)
		dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);

	for (i = 0; i < state->phantom_plane_count; i++)
		dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
}
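For the SubVP helpers above, the expected pairing is: create a phantom stream from the main stream's sink, create a phantom plane, then register both so that mall_stream_config links SUBVP_MAIN and SUBVP_PHANTOM to each other. A minimal, illustrative flow (not code from this patch; return values and error paths trimmed):

/* Illustrative only: create and register a phantom stream + plane for SubVP. */
static void example_setup_phantom(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *main_stream,
		struct dc_plane_state *main_plane)
{
	struct dc_stream_state *phantom_stream =
			dc_state_create_phantom_stream(dc, state, main_stream);
	struct dc_plane_state *phantom_plane;

	if (!phantom_stream)
		return;

	phantom_plane = dc_state_create_phantom_plane(dc, state, main_plane);
	if (!phantom_plane)
		return;

	/* Links the main and phantom stream_status as SUBVP_MAIN / SUBVP_PHANTOM. */
	dc_state_add_phantom_stream(dc, state, phantom_stream, main_stream);
	dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, state);
}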
@@ -31,6 +31,8 @@
 #include "ipp.h"
 #include "timing_generator.h"
 #include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
 
 #define DC_LOGGER dc->ctx->logger
 
@@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
 	}
 }
 
-static bool dc_stream_construct(struct dc_stream_state *stream,
+bool dc_stream_construct(struct dc_stream_state *stream,
 	struct dc_sink *dc_sink_data)
 {
 	uint32_t i = 0;
@@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
 	}
 	stream->out_transfer_func->type = TF_TYPE_BYPASS;
 
-	stream->stream_id = stream->ctx->dc_stream_id_count;
-	stream->ctx->dc_stream_id_count++;
+	dc_stream_assign_stream_id(stream);
 
 	return true;
 }
 
-static void dc_stream_destruct(struct dc_stream_state *stream)
+void dc_stream_destruct(struct dc_stream_state *stream)
 {
 	dc_sink_release(stream->sink);
 	if (stream->out_transfer_func != NULL) {
@@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream)
 	}
 }
 
+void dc_stream_assign_stream_id(struct dc_stream_state *stream)
+{
+	/* MSB is reserved to indicate phantoms */
+	stream->stream_id = stream->ctx->dc_stream_id_count;
+	stream->ctx->dc_stream_id_count++;
+}
+
 void dc_stream_retain(struct dc_stream_state *stream)
 {
 	kref_get(&stream->refcount);
@@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
 	if (new_stream->out_transfer_func)
 		dc_transfer_func_retain(new_stream->out_transfer_func);
 
-	new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
-	new_stream->ctx->dc_stream_id_count++;
+	dc_stream_assign_stream_id(new_stream);
 
 	/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
 	if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
@@ -208,31 +215,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
 	return new_stream;
 }
 
-/**
- * dc_stream_get_status_from_state - Get stream status from given dc state
- * @state: DC state to find the stream status in
- * @stream: The stream to get the stream status for
- *
- * The given stream is expected to exist in the given dc state. Otherwise, NULL
- * will be returned.
- */
-struct dc_stream_status *dc_stream_get_status_from_state(
-	struct dc_state *state,
-	struct dc_stream_state *stream)
-{
-	uint8_t i;
-
-	if (state == NULL)
-		return NULL;
-
-	for (i = 0; i < state->stream_count; i++) {
-		if (stream == state->streams[i])
-			return &state->stream_status[i];
-	}
-
-	return NULL;
-}
-
 /**
  * dc_stream_get_status() - Get current stream status of the given stream state
  * @stream: The stream to get the stream status for.
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status(
 	struct dc_stream_state *stream)
 {
 	struct dc *dc = stream->ctx->dc;
-	return dc_stream_get_status_from_state(dc->current_state, stream);
+	return dc_state_get_stream_status(dc->current_state, stream);
 }
 
 static void program_cursor_attributes(
@@ -465,7 +447,8 @@ bool dc_stream_add_writeback(struct dc *dc,
 	if (dc->hwss.enable_writeback) {
 		struct dc_stream_status *stream_status = dc_stream_get_status(stream);
 		struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
-		dwb->otg_inst = stream_status->primary_otg_inst;
+		if (stream_status)
+			dwb->otg_inst = stream_status->primary_otg_inst;
 	}
 
 	if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
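The hunks above remove dc_stream_get_status_from_state() and route dc_stream_get_status() through dc_state_get_stream_status(), which takes the same parameters and still returns NULL when the stream is not in the given state. A sketch of how an external caller would migrate (illustrative only, not part of the diff):

/* Illustrative migration: the removed dc_stream_get_status_from_state() and
 * the new dc_state_get_stream_status() are drop-in equivalents.
 */
static bool example_stream_is_in_state(struct dc_state *state,
		struct dc_stream_state *stream)
{
	/* Previously: dc_stream_get_status_from_state(state, stream) */
	return dc_state_get_stream_status(state, stream) != NULL;
}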
@@ -32,10 +32,12 @@
 #include "transform.h"
 #include "dpp.h"
 
+#include "dc_plane_priv.h"
+
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
 {
 	plane_state->ctx = ctx;
 
@@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
 
 }
 
-static void dc_plane_destruct(struct dc_plane_state *plane_state)
+void dc_plane_destruct(struct dc_plane_state *plane_state)
 {
 	if (plane_state->gamma_correction != NULL) {
 		dc_gamma_release(&plane_state->gamma_correction);
| 
						 | 
					@ -27,6 +27,8 @@
 | 
				
			||||||
#define DC_INTERFACE_H_
 | 
					#define DC_INTERFACE_H_
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#include "dc_types.h"
 | 
					#include "dc_types.h"
 | 
				
			||||||
 | 
					#include "dc_state.h"
 | 
				
			||||||
 | 
					#include "dc_plane.h"
 | 
				
			||||||
#include "grph_object_defs.h"
 | 
					#include "grph_object_defs.h"
 | 
				
			||||||
#include "logger_types.h"
 | 
					#include "logger_types.h"
 | 
				
			||||||
#include "hdcp_msg_types.h"
 | 
					#include "hdcp_msg_types.h"
 | 
				
			||||||
| 
						 | 
					@ -49,7 +51,7 @@ struct aux_payload;
 | 
				
			||||||
struct set_config_cmd_payload;
 | 
					struct set_config_cmd_payload;
 | 
				
			||||||
struct dmub_notification;
 | 
					struct dmub_notification;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define DC_VER "3.2.264"
 | 
					#define DC_VER "3.2.265"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
#define MAX_SURFACES 3
 | 
					#define MAX_SURFACES 3
 | 
				
			||||||
#define MAX_PLANES 6
 | 
					#define MAX_PLANES 6
 | 
				
			||||||
| 
						 | 
					@ -461,6 +463,12 @@ enum dml_hostvm_override_opts {
 | 
				
			||||||
	DML_HOSTVM_OVERRIDE_TRUE = 0x2,
 | 
						DML_HOSTVM_OVERRIDE_TRUE = 0x2,
 | 
				
			||||||
};
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					enum dc_replay_power_opts {
 | 
				
			||||||
 | 
						replay_power_opt_invalid		= 0x0,
 | 
				
			||||||
 | 
						replay_power_opt_smu_opt_static_screen	= 0x1,
 | 
				
			||||||
 | 
						replay_power_opt_z10_static_screen	= 0x10,
 | 
				
			||||||
 | 
					};
 | 
				
			||||||
 | 
					
 | 
				
			||||||
enum dcc_option {
 | 
					enum dcc_option {
 | 
				
			||||||
	DCC_ENABLE = 0,
 | 
						DCC_ENABLE = 0,
 | 
				
			||||||
	DCC_DISABLE = 1,
 | 
						DCC_DISABLE = 1,
 | 
				
			||||||
| 
						 | 
					@ -979,6 +987,8 @@ struct dc_debug_options {
 | 
				
			||||||
	unsigned int ips2_eval_delay_us;
 | 
						unsigned int ips2_eval_delay_us;
 | 
				
			||||||
	unsigned int ips2_entry_delay_us;
 | 
						unsigned int ips2_entry_delay_us;
 | 
				
			||||||
	bool disable_timeout;
 | 
						bool disable_timeout;
 | 
				
			||||||
 | 
						bool disable_extblankadj;
 | 
				
			||||||
 | 
						unsigned int static_screen_wait_frames;
 | 
				
			||||||
};
 | 
					};
 | 
				
			||||||
 | 
					
 struct gpu_info_soc_bounding_box_v1_0;
@@ -1026,7 +1036,6 @@ struct dc {
 
 	/* Require to optimize clocks and bandwidth for added/removed planes */
 	bool optimized_required;
-	bool wm_optimized_required;
 	bool idle_optimizations_allowed;
 	bool enable_c20_dtm_b0;
 
@@ -1389,13 +1398,6 @@ struct dc_surface_update {
 /*
  * Create a new surface with default parameters;
  */
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
-const struct dc_plane_status *dc_plane_get_status(
-		const struct dc_plane_state *plane_state);
-
-void dc_plane_state_retain(struct dc_plane_state *plane_state);
-void dc_plane_state_release(struct dc_plane_state *plane_state);
-
 void dc_gamma_retain(struct dc_gamma *dc_gamma);
 void dc_gamma_release(struct dc_gamma **dc_gamma);
 struct dc_gamma *dc_create_gamma(void);
@@ -1459,37 +1461,20 @@ enum dc_status dc_validate_global_state(
 		struct dc_state *new_ctx,
 		bool fast_validate);
 
-
-void dc_resource_state_construct(
-		const struct dc *dc,
-		struct dc_state *dst_ctx);
-
 bool dc_acquire_release_mpc_3dlut(
 		struct dc *dc, bool acquire,
 		struct dc_stream_state *stream,
 		struct dc_3dlut **lut,
 		struct dc_transfer_func **shaper);
 
-void dc_resource_state_copy_construct(
-		const struct dc_state *src_ctx,
-		struct dc_state *dst_ctx);
-
-void dc_resource_state_copy_construct_current(
-		const struct dc *dc,
-		struct dc_state *dst_ctx);
-
-void dc_resource_state_destruct(struct dc_state *context);
-
 bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+void get_audio_check(struct audio_info *aud_modes,
+	struct audio_check *aud_chk);
 
 enum dc_status dc_commit_streams(struct dc *dc,
 				 struct dc_stream_state *streams[],
 				 uint8_t stream_count);
 
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
 
 struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
 		struct dc_stream_state *stream,
@@ -2098,6 +2083,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
 		const struct dc_stream_state *stream, struct psr_config *psr_config,
 		struct psr_context *psr_context);
 
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable(active) or disable(inactive) replay
+ * @wait: state transition need to wait the active set completed.
+ * @force_static: force disable(inactive) the replay
+ * @power_opts: set power optimazation parameters to DMUB.
+ *
+ * return: allow Replay active will return true, else will return false.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+		bool wait, bool force_static, const unsigned int *power_opts);
+
 bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
 
 /* On eDP links this function call will stall until T12 has elapsed.
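For illustration only, and not part of this patch: the new Panel Replay hook takes the desired state by pointer together with optional power-optimization flags, mirroring the PSR interface declared above it. A hypothetical DM-side caller, with the link variable and the surrounding modeset plumbing assumed, might look like:

	bool replay_active = false;		/* assumed local, not from the patch */
	unsigned int power_opts = 0;		/* assumed: no extra power optimizations */

	/* Force Replay inactive, without waiting, while the link is reconfigured. */
	dc_link_set_replay_allow_active(link, &replay_active, false, true, &power_opts);

	/* ... reprogram the stream ... */

	/* Allow Replay again once the new timing is active. */
	replay_active = true;
	dc_link_set_replay_allow_active(link, &replay_active, false, false, &power_opts);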
@@ -2193,11 +2192,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
  *
  * @dc: pointer to dc struct
  * @stream: pointer to all possible streams
- * @num_streams: number of valid DPIA streams
+ * @count: number of valid DPIA streams
  *
  * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
  */
-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
+bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
 		const unsigned int count);
 
 /* Sink Interfaces - A sink corresponds to a display output device */
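For illustration only: after the rename, callers hand dc_link_dp_dpia_validate() the dc instance, an array of candidate DPIA stream states, and their count; a false return means the tunneled streams would exceed the allocated USB4 bandwidth. The streams array, count, and status handling below are assumed:

	/* streams and stream_count are assumed to come from the candidate dc_state. */
	if (!dc_link_dp_dpia_validate(dc, streams, stream_count))
		return DC_FAIL_BANDWIDTH_VALIDATE;	/* DPIA bandwidth over budget */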
@@ -2342,6 +2341,9 @@ void dc_hardware_release(struct dc *dc);
 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
 
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
 void dc_z10_restore(const struct dc *dc);
 void dc_z10_save_init(struct dc *dc);
 

@@ -33,6 +33,7 @@
 #include "cursor_reg_cache.h"
 #include "resource.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 
 #define CTX dc_dmub_srv->ctx
 #define DC_LOGGER CTX->logger
@@ -140,7 +141,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
 
 		if (status == DMUB_STATUS_QUEUE_FULL) {
 			/* Execute and wait for queue to become empty again. */
-			dmub_srv_cmd_execute(dmub);
+			status = dmub_srv_cmd_execute(dmub);
+			if (status == DMUB_STATUS_POWER_STATE_D3)
+				return false;
+
 			dmub_srv_wait_for_idle(dmub, 100000);
 
 			/* Requeue the command. */
@@ -148,16 +152,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
 		}
 
 		if (status != DMUB_STATUS_OK) {
-			DC_ERROR("Error queueing DMUB command: status=%d\n", status);
-			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			if (status != DMUB_STATUS_POWER_STATE_D3) {
+				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			}
 			return false;
 		}
 	}
 
 	status = dmub_srv_cmd_execute(dmub);
 	if (status != DMUB_STATUS_OK) {
-		DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+		if (status != DMUB_STATUS_POWER_STATE_D3) {
+			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+		}
 		return false;
 	}
 
@@ -218,7 +226,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
 
 		if (status == DMUB_STATUS_QUEUE_FULL) {
 			/* Execute and wait for queue to become empty again. */
-			dmub_srv_cmd_execute(dmub);
+			status = dmub_srv_cmd_execute(dmub);
+			if (status == DMUB_STATUS_POWER_STATE_D3)
+				return false;
+
 			dmub_srv_wait_for_idle(dmub, 100000);
 
 			/* Requeue the command. */
@@ -226,16 +237,20 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
 		}
 
 		if (status != DMUB_STATUS_OK) {
-			DC_ERROR("Error queueing DMUB command: status=%d\n", status);
-			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			if (status != DMUB_STATUS_POWER_STATE_D3) {
+				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+			}
 			return false;
 		}
 	}
 
 	status = dmub_srv_cmd_execute(dmub);
 	if (status != DMUB_STATUS_OK) {
-		DC_ERROR("Error starting DMUB execution: status=%d\n", status);
-		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+		if (status != DMUB_STATUS_POWER_STATE_D3) {
+			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+		}
 		return false;
 	}
 
@@ -287,17 +302,11 @@ bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
 				    unsigned int stream_mask)
 {
-	struct dmub_srv *dmub;
-	const uint32_t timeout = 30;
-
 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
 		return false;
 
-	dmub = dc_dmub_srv->dmub;
-
-	return dmub_srv_send_gpint_command(
-		       dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
-		       stream_mask, timeout) == DMUB_STATUS_OK;
+	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
+					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
@@ -346,7 +355,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
 	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
 	// Send the command to the DMCUB.
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -360,7 +369,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
 	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 
 	// Send the command to the DMCUB.
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -453,7 +462,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
 		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
 
 	// Send the command to the DMCUB.
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -474,7 +483,7 @@ void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
 	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
 
 	/* If command was processed, copy feature caps to dmub srv */
-	if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
 	    cmd.query_feature_caps.header.ret_status == 0) {
 		memcpy(&dc_dmub_srv->dmub->feature_caps,
 		       &cmd.query_feature_caps.query_feature_caps_data,
@@ -499,7 +508,7 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
 	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
 
 	// If command was processed, copy feature caps to dmub srv
-	if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
 		cmd.visual_confirm_color.header.ret_status == 0) {
 		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
 			&cmd.visual_confirm_color.visual_confirm_color_data,
@@ -510,10 +519,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
 /**
  * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
  *
- * @dc: [in] current dc state
+ * @dc: [in] pointer to dc object
  * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
  * @vblank_pipe: [in] pipe_ctx for the DRR pipe
  * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
+ * @context: [in] DC state for access to phantom stream
  *
  * Populate the DMCUB SubVP command with DRR pipe info. All the information
  * required for calculating the SubVP + DRR microschedule is populated here.
@@ -524,12 +534,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
  * 3. Populate the drr_info with the min and max supported vtotal values
  */
 static void populate_subvp_cmd_drr_info(struct dc *dc,
+		struct dc_state *context,
 		struct pipe_ctx *subvp_pipe,
 		struct pipe_ctx *vblank_pipe,
 		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
 {
+	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
 	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
-	struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
 	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
 	uint16_t drr_frame_us = 0;
 	uint16_t min_drr_supported_us = 0;
@@ -617,7 +629,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
 			continue;
 
 		// Find the SubVP pipe
-		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
 			break;
 	}
 
@@ -634,7 +646,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
 
 	if (vblank_pipe->stream->ignore_msa_timing_param &&
 		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
-		populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
+		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
 }
 
 /**
@@ -659,10 +671,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
 	uint32_t subvp0_prefetch_us = 0;
 	uint32_t subvp1_prefetch_us = 0;
 	uint32_t prefetch_delta_us = 0;
-	struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
-	struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
+	struct dc_stream_state *phantom_stream0 = NULL;
+	struct dc_stream_state *phantom_stream1 = NULL;
+	struct dc_crtc_timing *phantom_timing0 = NULL;
+	struct dc_crtc_timing *phantom_timing1 = NULL;
 	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
 
+	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+	phantom_timing0 = &phantom_stream0->timing;
+	phantom_timing1 = &phantom_stream1->timing;
+
 	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
 			(uint64_t)phantom_timing0->h_total * 1000000),
 			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
@@ -712,8 +731,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
 	uint32_t j;
 	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
 			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
+	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
 	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
-	struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
 	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
 
 	pipe_data->mode = SUBVP;
@@ -767,7 +787,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
 
-		if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
+		if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
 			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
 			if (phantom_pipe->bottom_pipe) {
 				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
@@ -801,6 +821,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 	union dmub_rb_cmd cmd;
 	struct pipe_ctx *subvp_pipes[2];
 	uint32_t wm_val_refclk = 0;
+	enum mall_stream_type pipe_mall_type;
 
 	memset(&cmd, 0, sizeof(cmd));
 	// FW command for SUBVP
@@ -816,7 +837,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 		 */
 		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 				resource_is_pipe_type(pipe, DPP_PIPE) &&
-				pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
 			subvp_pipes[subvp_count++] = pipe;
 	}
 
@@ -824,6 +845,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
 		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
 			if (!pipe->stream)
 				continue;
@@ -834,12 +856,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 			 */
 			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 					resource_is_pipe_type(pipe, DPP_PIPE) &&
-					pipe->stream->mall_stream_config.paired_stream &&
-					pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+					pipe_mall_type == SUBVP_MAIN) {
 				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
 			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
 					resource_is_pipe_type(pipe, DPP_PIPE) &&
-					pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+					pipe_mall_type == SUBVP_NONE) {
 				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
 				// we run through DML without calculating "natural" P-state support
 				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -861,7 +882,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
 		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
 	}
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -1098,7 +1119,7 @@ void dc_send_update_cursor_info_to_dmu(
 				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
 
 		/* Combine 2nd cmds update_curosr_info to DMU */
-		dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
 	}
 }
 
@@ -1112,25 +1133,20 @@ bool dc_dmub_check_min_version(struct dmub_srv *srv)
 void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
 {
 	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
-	struct dmub_srv *dmub;
-	enum dmub_status status;
-	static const uint32_t timeout_us = 30;
 
 	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
 		DC_LOG_ERROR("%s: invalid parameters.", __func__);
 		return;
 	}
 
-	dmub = dc_dmub_srv->dmub;
-
-	status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1, 0x0010, timeout_us);
-	if (status != DMUB_STATUS_OK) {
+	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
+				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
 		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
 		return;
 	}
 
-	status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK, 0x0000, timeout_us);
-	if (status != DMUB_STATUS_OK) {
+	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
+				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
 		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
 		return;
 	}
@@ -1148,6 +1164,9 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
 	enum dmub_status status;
 
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return true;
+
 	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
 		return true;
 
@@ -1169,7 +1188,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
 	return true;
 }
 
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
+static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
 {
 	union dmub_rb_cmd cmd = {0};
 
@@ -1190,20 +1209,20 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
 			dc->hwss.set_idle_state(dc, true);
 	}
 
-	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
+	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
+	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
 }
 
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
+static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 {
-	const uint32_t max_num_polls = 10000;
 	uint32_t allow_state = 0;
 	uint32_t commit_state = 0;
-	int i;
 
 	if (dc->debug.dmcub_emulation)
 		return;
 
-	if (!dc->idle_optimizations_allowed)
+	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
 		return;
 
 	if (dc->hwss.get_idle_state &&
@@ -1215,8 +1234,16 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 
 		if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
 			// Wait for evaluation time
-			udelay(dc->debug.ips2_eval_delay_us);
-			commit_state = dc->hwss.get_idle_state(dc);
+			for (;;) {
+				udelay(dc->debug.ips2_eval_delay_us);
+				commit_state = dc->hwss.get_idle_state(dc);
+				if (commit_state & DMUB_IPS2_ALLOW_MASK)
+					break;
+
+				/* allow was still set, retry eval delay */
+				dc->hwss.set_idle_state(dc, false);
+			}
+
 			if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
 				// Tell PMFW to exit low power state
 				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1225,17 +1252,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 				udelay(dc->debug.ips2_entry_delay_us);
 				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
 
-				for (i = 0; i < max_num_polls; ++i) {
+				for (;;) {
 					commit_state = dc->hwss.get_idle_state(dc);
 					if (commit_state & DMUB_IPS2_COMMIT_MASK)
 						break;
 
 					udelay(1);
-
-					if (dc->debug.disable_timeout)
-						i--;
 				}
-				ASSERT(i < max_num_polls);
 
 				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
 					ASSERT(0);
@@ -1250,17 +1273,13 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
 
 		dc_dmub_srv_notify_idle(dc, false);
 		if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
-			for (i = 0; i < max_num_polls; ++i) {
+			for (;;) {
 				commit_state = dc->hwss.get_idle_state(dc);
 				if (commit_state & DMUB_IPS1_COMMIT_MASK)
 					break;
 
 				udelay(1);
-
-				if (dc->debug.disable_timeout)
-					i--;
 			}
-			ASSERT(i < max_num_polls);
 		}
 	}
 
@@ -1282,3 +1301,117 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
 	else
 		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
 }
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
+{
+	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return;
+
+	if (dc_dmub_srv->idle_allowed == allow_idle)
+		return;
+
+	/*
+	 * Entering a low power state requires a driver notification.
+	 * Powering up the hardware requires notifying PMFW and DMCUB.
+	 * Clearing the driver idle allow requires a DMCUB command.
+	 * DMCUB commands requires the DMCUB to be powered up and restored.
+	 *
+	 * Exit out early to prevent an infinite loop of DMCUB commands
+	 * triggering exit low power - use software state to track this.
+	 */
+	dc_dmub_srv->idle_allowed = allow_idle;
+
+	if (!allow_idle)
+		dc_dmub_srv_exit_low_power_state(dc);
+	else
+		dc_dmub_srv_notify_idle(dc, allow_idle);
+}
+
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+				  enum dm_dmub_wait_type wait_type)
+{
+	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
+}
+
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+	bool result = false, reallow_idle = false;
+
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
+
+	if (count == 0)
+		return true;
+
+	if (dc_dmub_srv->idle_allowed) {
+		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+		reallow_idle = true;
+	}
+
+	/*
+	 * These may have different implementations in DM, so ensure
+	 * that we guide it to the expected helper.
+	 */
+	if (count > 1)
+		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
+	else
+		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
+
+	if (result && reallow_idle)
+		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+	return result;
+}
+
+static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
+	enum dmub_status status;
+
+	if (response)
+		*response = 0;
+
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
+
+	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
+	if (status != DMUB_STATUS_OK) {
+		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
+			return true;
+
+		return false;
+	}
+
+	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
+
+	return true;
+}
+
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
+{
+	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
+	bool result = false, reallow_idle = false;
+
+	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+		return false;
+
+	if (dc_dmub_srv->idle_allowed) {
+		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
+		reallow_idle = true;
+	}
+
+	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
+
+	if (result && reallow_idle)
+		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
+
+	return result;
+}

@@ -50,6 +50,8 @@ struct dc_dmub_srv {
 
 	struct dc_context *ctx;
 	void *dm;
+
+	bool idle_allowed;
 };
 
 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
@@ -100,8 +102,59 @@ void dc_dmub_srv_enable_dpia_trace(const struct dc *dc);
 void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index);
 
 bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
-void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_exit_low_power_state(const struct dc *dc);
+
+void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
 
 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
 
+/**
+ * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution.
+ *
+ * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations,
+ * This function is a convenience wrapper for a single command execution.
+ *
+ * @ctx: DC context
+ * @cmd: The command to send/receive
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
+				  enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution.
+ *
+ * If the DMCUB hardware was asleep then it wakes the DMUB before
+ * executing the command and attempts to re-enter if the command
+ * submission was successful.
+ *
+ * This should be the preferred command submission interface provided
+ * the DC lock is acquired.
+ *
+ * Entry/exit out of idle power optimizations would need to be
+ * manually performed otherwise through dc_allow_idle_optimizations().
+ *
+ * @ctx: DC context
+ * @count: Number of commands to send/receive
+ * @cmd: Array of commands to send
+ * @wait_type: The wait behavior for the execution
+ *
+ * Return: true on command submission success, false otherwise
+ */
+bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
+				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+/**
+ * dc_wake_and_execute_gpint()
+ *
+ * @ctx: DC context
+ * @command_code: The command ID to send to DMCUB
+ * @param: The parameter to message DMCUB
+ * @response: Optional response out value - may be NULL.
+ * @wait_type: The wait behavior for the execution
+ */
+bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
+			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type);
+
 #endif /* _DMUB_DC_SRV_H_ */
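Read together with the dc_dmub_srv.c hunks above, these wrappers are intended as the default submission path when the DC lock is held: they drop out of idle power optimizations when needed, forward to the existing dm_execute_dmub_cmd()/dm_execute_dmub_cmd_list() hooks, and then re-allow idle on success. A sketch of a caller, for illustration only (the payload mirrors the dc_dmub_srv_query_caps_cmd() context shown earlier; sub_type/ret_status setup is omitted):

	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* Wakes DMCUB if idle was allowed, submits the command, then re-enters idle. */
	if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		return false;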
@@ -1377,6 +1377,12 @@ struct dp_trace {
 #ifndef DP_TUNNELING_STATUS
 #define DP_TUNNELING_STATUS				0xE0025 /* 1.4a */
 #endif
+#ifndef DP_TUNNELING_MAX_LINK_RATE
+#define DP_TUNNELING_MAX_LINK_RATE			0xE0028 /* 1.4a */
+#endif
+#ifndef DP_TUNNELING_MAX_LANE_COUNT
+#define DP_TUNNELING_MAX_LANE_COUNT			0xE0029 /* 1.4a */
+#endif
 #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
 #define DPTX_BW_ALLOCATION_MODE_CONTROL			0xE0030 /* 1.4a */
 #endif

@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
 	cmd_buf->header.payload_bytes =
 			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
 
-	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
 	memset(cmd_buf, 0, sizeof(*cmd_buf));
 
@@ -67,7 +67,7 @@ static inline void submit_dmub_burst_write(
 	cmd_buf->header.payload_bytes =
 			sizeof(uint32_t) * offload->reg_seq_count;
 
-	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
 	memset(cmd_buf, 0, sizeof(*cmd_buf));
 
@@ -80,7 +80,7 @@ static inline void submit_dmub_reg_wait(
 {
 	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
 
-	dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
+	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
 
 	memset(cmd_buf, 0, sizeof(*cmd_buf));
 	offload->reg_seq_count = 0;
@@ -244,7 +244,7 @@ enum pixel_format {
 #define DC_MAX_DIRTY_RECTS 3
 struct dc_flip_addrs {
 	struct dc_plane_address address;
-	unsigned int flip_timestamp_in_us;
+	unsigned long long flip_timestamp_in_us;
 	bool flip_immediate;
 	/* TODO: add flip duration for FreeSync */
 	bool triplebuffer_flips;

drivers/gpu/drm/amd/display/dc/dc_plane.h (new file, 38 lines)
					@ -0,0 +1,38 @@
 | 
				
			||||||
 | 
					/*
 | 
				
			||||||
 | 
					 * Copyright 2023 Advanced Micro Devices, Inc.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * Permission is hereby granted, free of charge, to any person obtaining a
 | 
				
			||||||
 | 
					 * copy of this software and associated documentation files (the "Software"),
 | 
				
			||||||
 | 
					 * to deal in the Software without restriction, including without limitation
 | 
				
			||||||
 | 
					 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 | 
				
			||||||
 | 
					 * and/or sell copies of the Software, and to permit persons to whom the
 | 
				
			||||||
 | 
					 * Software is furnished to do so, subject to the following conditions:
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * The above copyright notice and this permission notice shall be included in
 | 
				
			||||||
 | 
					 * all copies or substantial portions of the Software.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 | 
				
			||||||
 | 
					 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 | 
				
			||||||
 | 
					 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 | 
				
			||||||
 | 
					 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 | 
				
			||||||
 | 
					 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 | 
				
			||||||
 | 
					 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 | 
				
			||||||
 | 
					 * OTHER DEALINGS IN THE SOFTWARE.
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 * Authors: AMD
 | 
				
			||||||
 | 
					 *
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#ifndef _DC_PLANE_H_
 | 
				
			||||||
 | 
					#define _DC_PLANE_H_
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#include "dc.h"
 | 
				
			||||||
 | 
					#include "dc_hw_types.h"
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					struct dc_plane_state *dc_create_plane_state(struct dc *dc);
 | 
				
			||||||
 | 
					const struct dc_plane_status *dc_plane_get_status(
 | 
				
			||||||
 | 
							const struct dc_plane_state *plane_state);
 | 
				
			||||||
 | 
					void dc_plane_state_retain(struct dc_plane_state *plane_state);
 | 
				
			||||||
 | 
					void dc_plane_state_release(struct dc_plane_state *plane_state);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					#endif /* _DC_PLANE_H_ */
 | 
				
			||||||
							
								
								
									
34	drivers/gpu/drm/amd/display/dc/dc_plane_priv.h	Normal file
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_PRIV_H_
+#define _DC_PLANE_PRIV_H_
+
+#include "dc_plane.h"
+
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
+void dc_plane_destruct(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_PRIV_H_ */
78	drivers/gpu/drm/amd/display/dc/dc_state.h	Normal file
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_H_
+#define _DC_STATE_H_
+
+#include "dc.h"
+#include "inc/core_status.h"
+
+struct dc_state *dc_state_create(struct dc *dc);
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
+struct dc_state *dc_state_create_copy(struct dc_state *src_state);
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
+struct dc_state *dc_state_create_current_copy(struct dc *dc);
+void dc_state_construct(struct dc *dc, struct dc_state *state);
+void dc_state_destruct(struct dc_state *state);
+void dc_state_retain(struct dc_state *state);
+void dc_state_release(struct dc_state *state);
+
+enum dc_status dc_state_add_stream(struct dc *dc,
+				    struct dc_state *state,
+				    struct dc_stream_state *stream);
+
+enum dc_status dc_state_remove_stream(
+		struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *stream);
+
+bool dc_state_add_plane(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *state);
+
+bool dc_state_remove_plane(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *state);
+
+bool dc_state_rem_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_state *state);
+
+bool dc_state_add_all_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state * const *plane_states,
+		int plane_count,
+		struct dc_state *state);
+
+struct dc_stream_status *dc_state_get_stream_status(
+	struct dc_state *state,
+	struct dc_stream_state *stream);
+#endif /* _DC_STATE_H_ */
102	drivers/gpu/drm/amd/display/dc/dc_state_priv.h	Normal file
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+		const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+		const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided.*/
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+		const struct dc_stream_state *stream);
+
+/* allocate's phantom stream or plane and returns pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+		struct dc_state *state,
+		struct dc_plane_state *main_plane);
+
+/* deallocate's phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_plane_state *phantom_plane);
+
+/* add/remove phantom stream to context and generate subvp meta data */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream,
+		struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state *phantom_plane,
+		struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state *phantom_plane,
+		struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_state *state,
+		bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state * const *phantom_planes,
+		int plane_count,
+		struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+		struct dc *dc,
+		struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+		struct dc *dc,
+		struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
@@ -38,6 +38,14 @@ struct timing_sync_info {
 	bool master;
 };
 
+struct mall_stream_config {
+	/* MALL stream config to indicate if the stream is phantom or not.
+	 * We will use a phantom stream to indicate that the pipe is phantom.
+	 */
+	enum mall_stream_type type;
+	struct dc_stream_state *paired_stream;	// master / slave stream
+};
+
 struct dc_stream_status {
 	int primary_otg_inst;
 	int stream_enc_inst;
@@ -50,6 +58,7 @@ struct dc_stream_status {
 	struct timing_sync_info timing_sync_info;
 	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
 	bool is_abm_supported;
+	struct mall_stream_config mall_stream_config;
 };
 
 enum hubp_dmdata_mode {
@@ -130,7 +139,6 @@ union stream_update_flags {
 		uint32_t wb_update:1;
 		uint32_t dsc_changed : 1;
 		uint32_t mst_bw : 1;
-		uint32_t crtc_timing_adjust : 1;
 		uint32_t fams_changed : 1;
 	} bits;
 
@@ -147,31 +155,6 @@ struct test_pattern {
 
 #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
 
-enum mall_stream_type {
-	SUBVP_NONE, // subvp not in use
-	SUBVP_MAIN, // subvp in use, this stream is main stream
-	SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
-};
-
-struct mall_stream_config {
-	/* MALL stream config to indicate if the stream is phantom or not.
-	 * We will use a phantom stream to indicate that the pipe is phantom.
-	 */
-	enum mall_stream_type type;
-	struct dc_stream_state *paired_stream;	// master / slave stream
-};
-
-/* Temp struct used to save and restore MALL config
- * during validation.
- *
- * TODO: Move MALL config into dc_state instead of stream struct
- * to avoid needing to save/restore.
- */
-struct mall_temp_config {
-	struct mall_stream_config mall_stream_config[MAX_PIPES];
-	bool is_phantom_plane[MAX_PIPES];
-};
-
 struct dc_stream_debug_options {
 	char force_odm_combine_segments;
 };
@@ -301,7 +284,7 @@ struct dc_stream_state {
 	bool has_non_synchronizable_pclk;
 	bool vblank_synchronized;
 	bool fpo_in_use;
-	struct mall_stream_config mall_stream_config;
+	bool is_phantom;
 };
 
 #define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -342,7 +325,6 @@ struct dc_stream_update {
 	struct dc_3dlut *lut3d_func;
 
 	struct test_pattern *pending_test_pattern;
-	struct dc_crtc_timing_adjust *crtc_timing_adjust;
 };
 
 bool dc_is_stream_unchanged(
@@ -415,41 +397,6 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
 				  uint32_t *h_position,
 				  uint32_t *v_position);
 
-enum dc_status dc_add_stream_to_ctx(
-			struct dc *dc,
-		struct dc_state *new_ctx,
-		struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
-		struct dc *dc,
-			struct dc_state *new_ctx,
-			struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state *plane_state,
-		struct dc_state *context);
-
-bool dc_remove_plane_from_context(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state *plane_state,
-		struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
-		const struct dc *dc,
-		struct dc_stream_state *stream,
-		struct dc_plane_state * const *plane_states,
-		int plane_count,
-		struct dc_state *context);
-
 bool dc_stream_add_writeback(struct dc *dc,
 		struct dc_stream_state *stream,
 		struct dc_writeback_info *wb_info);
@@ -518,9 +465,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
 void dc_stream_retain(struct dc_stream_state *dc_stream);
 void dc_stream_release(struct dc_stream_state *dc_stream);
 
-struct dc_stream_status *dc_stream_get_status_from_state(
-	struct dc_state *state,
-	struct dc_stream_state *stream);
 struct dc_stream_status *dc_stream_get_status(
 	struct dc_stream_state *dc_stream);
 
37	drivers/gpu/drm/amd/display/dc/dc_stream_priv.h	Normal file
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+	struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
@@ -1151,6 +1151,8 @@ struct dc_dpia_bw_alloc {
 	int bw_granularity;    // BW Granularity
 	bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM
 	bool response_ready;   // Response ready from the CM side
+	uint8_t nrd_max_lane_count; // Non-reduced max lane count
+	uint8_t nrd_max_link_rate; // Non-reduced max link rate
 };
 
 #define MAX_SINKS_PER_LINK 4
@@ -1161,4 +1163,9 @@ enum dc_hpd_enable_select {
 	HPD_EN_FOR_SECONDARY_EDP_ONLY,
 };
 
+enum mall_stream_type {
+	SUBVP_NONE, // subvp not in use
+	SUBVP_MAIN, // subvp in use, this stream is main stream
+	SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
 #endif /* DC_TYPES_H_ */
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
 			0, 1, 80000);
 }
 
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
 			BL1_PWM_TARGET_ABM_LEVEL, backlight);
 
 	REG_UPDATE(BL1_PWM_USER_LEVEL,
-			BL1_PWM_USER_LEVEL, backlight);
+			BL1_PWM_USER_LEVEL, user_level);
 
 	REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
 			ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
@@ -57,9 +57,9 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
 	return ret;
 }
 
-static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
-	dmub_abm_init(abm, backlight);
+	dmub_abm_init(abm, backlight, user_level);
 }
 
 static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
@@ -76,10 +76,10 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
 	cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
 	cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
-void dmub_abm_init(struct abm *abm, uint32_t backlight)
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
 {
 	struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
 
@@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight)
 			BL1_PWM_TARGET_ABM_LEVEL, backlight);
 
 	REG_UPDATE(BL1_PWM_USER_LEVEL,
-			BL1_PWM_USER_LEVEL, backlight);
+			BL1_PWM_USER_LEVEL, user_level);
 
 	REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
 			ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
@@ -155,7 +155,7 @@ bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
 	cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
 	cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -186,7 +186,7 @@ void dmub_abm_init_config(struct abm *abm,
 
 	cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 }
 
@@ -203,7 +203,7 @@ bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, un
 	cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
 	cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -246,7 +246,7 @@ bool dmub_abm_save_restore(
 
 	cmd.abm_save_restore.header.payload_bytes = sizeof(struct dmub_rb_cmd_abm_save_restore);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	// Copy iramtable data into local structure
 	memcpy((void *)pData, dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
@@ -274,7 +274,7 @@ bool dmub_abm_set_pipe(struct abm *abm,
 	cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
 	cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -296,7 +296,7 @@ bool dmub_abm_set_backlight_level(struct abm *abm,
 	cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
 	cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -30,7 +30,7 @@
 
 struct abm_save_restore;
 
-void dmub_abm_init(struct abm *abm, uint32_t backlight);
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level);
 bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
 unsigned int dmub_abm_get_current_backlight(struct abm *abm);
 unsigned int dmub_abm_get_target_backlight(struct abm *abm);
@@ -47,7 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
 	if (!lock)
 		cmd.lock_hw.lock_hw_data.should_release = 1;
 
-	dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
@@ -48,5 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
 		sizeof(cmd.outbox1_enable.header);
 	cmd.outbox1_enable.enable = true;
 
-	dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
@@ -105,23 +105,18 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
  */
 static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst)
 {
-	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
 	uint32_t raw_state = 0;
 	uint32_t retry_count = 0;
-	enum dmub_status status;
 
 	do {
 		// Send gpint command and wait for ack
-		status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30);
-
-		if (status == DMUB_STATUS_OK) {
-			// GPINT was executed, get response
-			dmub_srv_get_gpint_response(srv, &raw_state);
+		if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__GET_PSR_STATE, panel_inst, &raw_state,
+					      DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
 			*state = convert_psr_state(raw_state);
-		} else
+		} else {
 			// Return invalid state when GPINT times out
 			*state = PSR_STATE_INVALID;
-
+		}
 	} while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
 
 	// Assert if max retry hit
@@ -171,7 +166,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
 	cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
 	cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -199,7 +194,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
 
 	cmd.psr_enable.header.payload_bytes = 0; // Send header only
 
-	dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	/* Below loops 1000 x 500us = 500 ms.
 	 *  Exit PSR may need to wait 1-2 frames to power up. Timeout after at
@@ -248,7 +243,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
 	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
 	cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
 	cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -267,7 +262,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
 	cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
 	cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -286,7 +281,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
 	cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
 	cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -423,7 +418,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 		copy_settings_data->relock_delay_frame_cnt = 2;
 	copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
@@ -444,7 +439,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
 	cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
 	cmd.psr_enable.header.payload_bytes = 0;
 
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 /*
@@ -452,13 +447,11 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
  */
 static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
 {
-	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
 	uint16_t param = (uint16_t)(panel_inst << 8);
 
 	/* Send gpint command and wait for ack */
-	dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30);
-
-	dmub_srv_get_gpint_response(srv, residency);
+	dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__PSR_RESIDENCY, param, residency,
+				  DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 }
 
 static const struct dmub_psr_funcs psr_funcs = {
					@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
 | 
				
			||||||
		*residency = 0;
 | 
							*residency = 0;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/**
 | 
				
			||||||
 | 
					 * Set REPLAY power optimization flags and coasting vtotal.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
 | 
				
			||||||
 | 
							unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						union dmub_rb_cmd cmd;
 | 
				
			||||||
 | 
						struct dc_context *dc = dmub->ctx;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						memset(&cmd, 0, sizeof(cmd));
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
 | 
				
			||||||
 | 
							DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
 | 
				
			||||||
 | 
							sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
 | 
				
			||||||
 | 
						cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					/**
 | 
				
			||||||
 | 
					 * send Replay general cmd to DMUB.
 | 
				
			||||||
 | 
					 */
 | 
				
			||||||
 | 
					static void dmub_replay_send_cmd(struct dmub_replay *dmub,
 | 
				
			||||||
 | 
							enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element)
 | 
				
			||||||
 | 
					{
 | 
				
			||||||
 | 
						union dmub_rb_cmd cmd;
 | 
				
			||||||
 | 
						struct dc_context *ctx = NULL;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						if (dmub == NULL || cmd_element == NULL)
 | 
				
			||||||
 | 
							return;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						ctx = dmub->ctx;
 | 
				
			||||||
 | 
						if (ctx != NULL) {
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
							if (msg != Replay_Msg_Not_Support) {
 | 
				
			||||||
 | 
								memset(&cmd, 0, sizeof(cmd));
 | 
				
			||||||
 | 
								//Header
 | 
				
			||||||
 | 
								cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY;
 | 
				
			||||||
 | 
							} else
 | 
				
			||||||
 | 
								return;
 | 
				
			||||||
 | 
						} else
 | 
				
			||||||
 | 
							return;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						switch (msg) {
 | 
				
			||||||
 | 
						case Replay_Set_Timing_Sync_Supported:
 | 
				
			||||||
 | 
							//Header
 | 
				
			||||||
 | 
							cmd.replay_set_timing_sync.header.sub_type =
 | 
				
			||||||
 | 
								DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
 | 
				
			||||||
 | 
							cmd.replay_set_timing_sync.header.payload_bytes =
 | 
				
			||||||
 | 
								sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
 | 
				
			||||||
 | 
							//Cmd Body
 | 
				
			||||||
 | 
							cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
 | 
				
			||||||
 | 
											cmd_element->sync_data.panel_inst;
 | 
				
			||||||
 | 
							cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported =
 | 
				
			||||||
 | 
											cmd_element->sync_data.timing_sync_supported;
 | 
				
			||||||
 | 
							break;
 | 
				
			||||||
 | 
						case Replay_Set_Residency_Frameupdate_Timer:
 | 
				
			||||||
 | 
							//Header
 | 
				
			||||||
 | 
							cmd.replay_set_frameupdate_timer.header.sub_type =
 | 
				
			||||||
 | 
								DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
 | 
				
			||||||
 | 
							cmd.replay_set_frameupdate_timer.header.payload_bytes =
 | 
				
			||||||
 | 
								sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
 | 
				
			||||||
 | 
							//Cmd Body
 | 
				
			||||||
 | 
							cmd.replay_set_frameupdate_timer.data.panel_inst =
 | 
				
			||||||
 | 
											cmd_element->panel_inst;
 | 
				
			||||||
 | 
							cmd.replay_set_frameupdate_timer.data.enable =
 | 
				
			||||||
 | 
											cmd_element->timer_data.enable;
 | 
				
			||||||
 | 
							cmd.replay_set_frameupdate_timer.data.frameupdate_count =
 | 
				
			||||||
 | 
											cmd_element->timer_data.frameupdate_count;
 | 
				
			||||||
 | 
							break;
 | 
				
			||||||
 | 
						case Replay_Msg_Not_Support:
 | 
				
			||||||
 | 
						default:
 | 
				
			||||||
 | 
							return;
 | 
				
			||||||
 | 
							break;
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 static const struct dmub_replay_funcs replay_funcs = {
-	.replay_copy_settings		= dmub_replay_copy_settings,
-	.replay_enable			= dmub_replay_enable,
-	.replay_get_state		= dmub_replay_get_state,
-	.replay_set_power_opt		= dmub_replay_set_power_opt,
-	.replay_set_coasting_vtotal	= dmub_replay_set_coasting_vtotal,
-	.replay_residency		= dmub_replay_residency,
+	.replay_copy_settings				= dmub_replay_copy_settings,
+	.replay_enable					= dmub_replay_enable,
+	.replay_get_state				= dmub_replay_get_state,
+	.replay_set_power_opt				= dmub_replay_set_power_opt,
+	.replay_set_coasting_vtotal			= dmub_replay_set_coasting_vtotal,
+	.replay_residency				= dmub_replay_residency,
+	.replay_set_power_opt_and_coasting_vtotal	= dmub_replay_set_power_opt_and_coasting_vtotal,
+	.replay_send_cmd				= dmub_replay_send_cmd,
 };
 
 /*

@@ -51,6 +51,8 @@ struct dmub_replay_funcs {
 		uint8_t panel_inst);
 	void (*replay_residency)(struct dmub_replay *dmub,
 		uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+	void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
+		unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
 };
 
 struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
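The hunk above adds a combined power-opt/coasting-vtotal hook to struct dmub_replay_funcs. As an illustrative sketch (not part of this patch) of how a caller could drive the new hook through the ops table: only the function-pointer signature comes from the hunk; the wrapper name, the funcs member access, and the NULL checks are assumptions.

	/*
	 * Hedged caller-side sketch: forward a combined "power opt + coasting
	 * vtotal" request through the hook added above, skipping resources
	 * that do not implement it.
	 */
	static void sketch_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *replay,
			unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
	{
		if (replay == NULL || replay->funcs == NULL ||
		    replay->funcs->replay_set_power_opt_and_coasting_vtotal == NULL)
			return;

		/* One DMUB round trip instead of two separate calls. */
		replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
				power_opt, panel_inst, coasting_vtotal);
	}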
@@ -22,7 +22,7 @@
 #
 # Makefile for DCN.
 
-DCN10 = dcn10_init.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
 		dcn10_hw_sequencer_debug.o \
 		dcn10_dpp.o dcn10_opp.o \
 		dcn10_hubp.o dcn10_mpc.o \

@@ -2,7 +2,7 @@
 #
 # Makefile for DCN.
 
-DCN20 = dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
 		dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
 		dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
 		dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o

@@ -1,8 +1,7 @@
 # SPDX-License-Identifier: MIT
 #
 # Makefile for DCN.
-DCN201 = dcn201_init.o \
-	dcn201_hubbub.o\
+DCN201 = dcn201_hubbub.o\
 	dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
 	dcn201_dccg.o dcn201_link_encoder.o
 

@@ -2,7 +2,7 @@
 #
 # Makefile for DCN21.
 
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o \
+DCN21 = dcn21_hubp.o dcn21_hubbub.o \
 	 dcn21_link_encoder.o dcn21_dccg.o
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))

@@ -691,7 +691,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
 	cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
 
 	PERF_TRACE();  // TODO: remove after performance is stable.
-	dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 	PERF_TRACE();  // TODO: remove after performance is stable.
 }
 

@@ -23,9 +23,7 @@
 #
 #
 
-DCN30 := \
-	dcn30_init.o \
-	dcn30_hubbub.o \
+DCN30 := dcn30_hubbub.o \
 	dcn30_hubp.o \
 	dcn30_dpp.o \
 	dcn30_dccg.o \

@@ -10,7 +10,7 @@
 #
 # Makefile for dcn30.
 
-DCN301 = dcn301_init.o dcn301_dccg.o \
+DCN301 = dcn301_dccg.o \
 		dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))

@@ -1,12 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-#  Authors: AMD
-#
-# Makefile for dcn302.
-
-DCN3_02 = dcn302_init.o
-
-AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)

@@ -10,7 +10,7 @@
 #
 # Makefile for dcn31.
 
-DCN31 = dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
+DCN31 = dcn31_hubbub.o dcn31_hubp.o \
 	dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
 	dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
 	dcn31_afmt.o dcn31_vpg.o

@@ -125,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
 	cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
 	cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
 
-	if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+	if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 		return false;
 
 	return true;

@@ -436,7 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
 
 	cmd.dig1_dpia_control.dpia_control = *dpia_control;
 
-	dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }

@@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
 	cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
 	cmd->panel_cntl.data.pwrseq_inst = dcn31_panel_cntl->base.pwrseq_inst;
 
-	return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+	return dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
 }
 
 static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)

@@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
 		panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
 	cmd.panel_cntl.data.bl_pwm_ref_div2 =
 		panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
-	if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+	if (!dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 		return 0;
 
 	panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;

@@ -10,8 +10,7 @@
 #
 # Makefile for dcn314.
 
-DCN314 = dcn314_init.o \
-		dcn314_dio_stream_encoder.o dcn314_dccg.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
 
 AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
 

@@ -10,7 +10,7 @@
 #
 # Makefile for dcn32.
 
-DCN32 = dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
+DCN32 = dcn32_hubbub.o dcn32_dccg.o \
 		dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
 		dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
 		dcn32_hpo_dp_link_encoder.o
@@ -28,6 +28,7 @@
 #include "dcn20/dcn20_resource.h"
 #include "dml/dcn32/display_mode_vba_util_32.h"
 #include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
 
 static bool is_dual_plane(enum surface_pixel_format format)
 {

@@ -182,20 +183,6 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 	return true;
 }
 
-bool dcn32_subvp_in_use(struct dc *dc,
-		struct dc_state *context)
-{
-	uint32_t i;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
-			return true;
-	}
-	return false;
-}
-
 bool dcn32_mpo_in_use(struct dc_state *context)
 {
 	uint32_t i;

@@ -264,18 +251,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
 
 	// Do not override if a stream has multiple planes
 	for (i = 0; i < context->stream_count; i++) {
-		if (context->stream_status[i].plane_count > 1) {
+		if (context->stream_status[i].plane_count > 1)
 			return;
-		}
-		if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+
+		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
 			stream_count++;
-		}
 	}
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-		if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+		if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
 			if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
 
 				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {

@@ -290,7 +276,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
-			if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+			if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
 				if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
 					if (pipe_segments[i] > 4)
 						pipe_segments[i] = 4;

@@ -337,14 +323,14 @@ void dcn32_determine_det_override(struct dc *dc,
 
 	for (i = 0; i < context->stream_count; i++) {
 		/* Don't count SubVP streams for DET allocation */
-		if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
+		if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
 			stream_count++;
 	}
 
 	if (stream_count > 0) {
 		stream_segments = 18 / stream_count;
 		for (i = 0; i < context->stream_count; i++) {
-			if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+			if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
 				continue;
 
 			if (context->stream_status[i].plane_count > 0)
@@ -430,71 +416,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
 		dcn32_determine_det_override(dc, context, pipes);
 }
 
-/**
- * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
- *
- * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
- * there are situations where a shallow copy of the dc->current_state is created for the
- * validation. In this case we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
- * fast validation). If we don't restore the subvp config in cases of fast validation +
- * shallow copy of the dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- *
- * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
- *       validation. We don't expect this to happen in fast_validation=1 cases.
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @temp_config: struct used to cache the existing MALL state
- *
- * Return: void
- */
-void dcn32_save_mall_state(struct dc *dc,
-		struct dc_state *context,
-		struct mall_temp_config *temp_config)
-{
-	uint32_t i;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe->stream)
-			temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
-
-		if (pipe->plane_state)
-			temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
-	}
-}
-
-/**
- * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
- *
- * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed, restore MALL state into here
- * @temp_config: struct that has the cached MALL state
- *
- * Return: void
- */
-void dcn32_restore_mall_state(struct dc *dc,
-		struct dc_state *context,
-		struct mall_temp_config *temp_config)
-{
-	uint32_t i;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-		if (pipe->stream)
-			pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
-
-		if (pipe->plane_state)
-			pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
-	}
-}
-
 #define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW)
 /*
  * Scaling factor for v_blank stretch calculations considering timing in
@@ -589,13 +510,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
  *
  * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
  */
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
 {
 	int refresh_rate = 0;
 	const int minimum_refreshrate_supported = 120;
 	struct dc_stream_state *fpo_candidate_stream = NULL;
 	bool is_fpo_vactive = false;
 	uint32_t fpo_vactive_margin_us = 0;
+	struct dc_stream_status *fpo_stream_status = NULL;
 
 	if (context == NULL)
 		return NULL;

@@ -618,16 +540,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
 		DC_FP_START();
 		dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
 		DC_FP_END();
+		if (fpo_candidate_stream)
+			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
 		DC_FP_START();
 		is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
 		DC_FP_END();
 		if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
 			return NULL;
-	} else
+	} else {
 		fpo_candidate_stream = context->streams[0];
+		if (fpo_candidate_stream)
+			fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+	}
 
-	if (!fpo_candidate_stream)
+	/* In DCN32/321, FPO uses per-pipe P-State force.
+	 * If there's no planes, HUBP is power gated and
+	 * therefore programming UCLK_PSTATE_FORCE does
+	 * nothing (P-State will always be asserted naturally
+	 * on a pipe that has HUBP power gated. Therefore we
+	 * only want to enable FPO if the FPO pipe has both
+	 * a stream and a plane.
+	 */
+	if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
 		return NULL;
 
 	if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)

@@ -716,10 +650,11 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
 		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
 				resource_is_pipe_type(pipe, DPP_PIPE)) {
-			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+			if (pipe_mall_type == SUBVP_MAIN) {
 				subvp_count++;
 
 				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);

@@ -728,7 +663,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
 				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
 				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
 			}
-			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+			if (pipe_mall_type == SUBVP_NONE) {
 				non_subvp_pipes++;
 				drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
 				if (pipe->stream->ignore_msa_timing_param &&

@@ -776,10 +711,11 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
 
 		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
 				resource_is_pipe_type(pipe, DPP_PIPE)) {
-			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+			if (pipe_mall_type == SUBVP_MAIN) {
 				subvp_count++;
 
 				subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);

@@ -788,7 +724,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
 				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
 				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
 			}
-			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+			if (pipe_mall_type == SUBVP_NONE) {
 				non_subvp_pipes++;
 				vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
 				if (pipe->stream->ignore_msa_timing_param &&
			||||||
#
 | 
					#
 | 
				
			||||||
# Makefile for DCN35.
 | 
					# Makefile for DCN35.
 | 
				
			||||||
 | 
					
 | 
				
			||||||
DCN35 = dcn35_init.o dcn35_dio_stream_encoder.o \
 | 
					DCN35 = dcn35_dio_stream_encoder.o \
 | 
				
			||||||
	dcn35_dio_link_encoder.o dcn35_dccg.o \
 | 
						dcn35_dio_link_encoder.o dcn35_dccg.o \
 | 
				
			||||||
	dcn35_hubp.o dcn35_hubbub.o \
 | 
						dcn35_hubp.o dcn35_hubbub.o \
 | 
				
			||||||
	dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
 | 
						dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -256,6 +256,10 @@ void dcn35_link_encoder_construct(
 | 
				
			||||||
		enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
 | 
							enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
 | 
				
			||||||
		enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
 | 
							enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
 | 
				
			||||||
		enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
 | 
							enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
 | 
				
			||||||
 | 
							if (bp_cap_info.DP_IS_USB_C) {
 | 
				
			||||||
 | 
								/*BIOS not switch to use CONNECTOR_ID_USBC = 24 yet*/
 | 
				
			||||||
 | 
								enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
	} else {
 | 
						} else {
 | 
				
			||||||
		DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
 | 
							DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
 | 
				
			||||||
| 
						 | 
					@ -264,4 +268,5 @@ void dcn35_link_encoder_construct(
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
	if (enc10->base.ctx->dc->debug.hdmi20_disable)
 | 
						if (enc10->base.ctx->dc->debug.hdmi20_disable)
 | 
				
			||||||
		enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
 | 
							enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -33,6 +33,7 @@
 
 #include "link.h"
 #include "dcn20_fpu.h"
+#include "dc_state_priv.h"
 
 #define DC_LOGGER \
 	dc->ctx->logger

@@ -440,7 +441,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
 	.use_urgent_burst_bw = 0
 };
 
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
+	.clock_limits = {
+		{
+			.state = 0,
+			.dcfclk_mhz = 560.0,
+			.fabricclk_mhz = 560.0,
+			.dispclk_mhz = 513.0,
+			.dppclk_mhz = 513.0,
+			.phyclk_mhz = 540.0,
+			.socclk_mhz = 560.0,
+			.dscclk_mhz = 171.0,
+			.dram_speed_mts = 1069.0,
+		},
+		{
+			.state = 1,
+			.dcfclk_mhz = 694.0,
+			.fabricclk_mhz = 694.0,
+			.dispclk_mhz = 642.0,
+			.dppclk_mhz = 642.0,
+			.phyclk_mhz = 600.0,
+			.socclk_mhz = 694.0,
+			.dscclk_mhz = 214.0,
+			.dram_speed_mts = 1324.0,
+		},
+		{
+			.state = 2,
+			.dcfclk_mhz = 875.0,
+			.fabricclk_mhz = 875.0,
+			.dispclk_mhz = 734.0,
+			.dppclk_mhz = 734.0,
+			.phyclk_mhz = 810.0,
+			.socclk_mhz = 875.0,
+			.dscclk_mhz = 245.0,
+			.dram_speed_mts = 1670.0,
+		},
+		{
+			.state = 3,
+			.dcfclk_mhz = 1000.0,
+			.fabricclk_mhz = 1000.0,
+			.dispclk_mhz = 1100.0,
+			.dppclk_mhz = 1100.0,
+			.phyclk_mhz = 810.0,
+			.socclk_mhz = 1000.0,
+			.dscclk_mhz = 367.0,
+			.dram_speed_mts = 2000.0,
+		},
+		{
+			.state = 4,
+			.dcfclk_mhz = 1200.0,
+			.fabricclk_mhz = 1200.0,
+			.dispclk_mhz = 1284.0,
+			.dppclk_mhz = 1284.0,
+			.phyclk_mhz = 810.0,
+			.socclk_mhz = 1200.0,
+			.dscclk_mhz = 428.0,
+			.dram_speed_mts = 2000.0,
+		},
+		{
+			.state = 5,
+			.dcfclk_mhz = 1200.0,
+			.fabricclk_mhz = 1200.0,
+			.dispclk_mhz = 1284.0,
+			.dppclk_mhz = 1284.0,
+			.phyclk_mhz = 810.0,
+			.socclk_mhz = 1200.0,
+			.dscclk_mhz = 428.0,
+			.dram_speed_mts = 2000.0,
+		},
+	},
+
+	.num_states = 5,
+	.sr_exit_time_us = 1.9,
+	.sr_enter_plus_exit_time_us = 4.4,
+	.urgent_latency_us = 3.0,
+	.urgent_latency_pixel_data_only_us = 4.0,
+	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+	.urgent_latency_vm_data_only_us = 4.0,
+	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+	.max_avg_sdp_bw_use_normal_percent = 40.0,
+	.max_avg_dram_bw_use_normal_percent = 40.0,
+	.writeback_latency_us = 12.0,
+	.ideal_dram_bw_after_urgent_percent = 40.0,
+	.max_request_size_bytes = 256,
+	.dram_channel_width_bytes = 16,
+	.fabric_datapath_to_dcn_data_return_bytes = 64,
+	.dcn_downspread_percent = 0.5,
+	.downspread_percent = 0.5,
+	.dram_page_open_time_ns = 50.0,
+	.dram_rw_turnaround_time_ns = 17.5,
+	.dram_return_buffer_per_channel_bytes = 8192,
+	.round_trip_ping_latency_dcfclk_cycles = 131,
+	.urgent_out_of_order_return_per_channel_bytes = 4096,
+	.channel_interleave_bytes = 256,
+	.num_banks = 8,
+	.num_chans = 16,
+	.vmm_page_size_bytes = 4096,
+	.dram_clock_change_latency_us = 45.0,
+	.writeback_dram_clock_change_latency_us = 23.0,
+	.return_bus_width_bytes = 64,
+	.dispclk_dppclk_vco_speed_mhz = 3850,
+	.xfc_bus_transport_time_us = 20,
+	.xfc_xbuf_latency_tolerance_us = 50,
+	.use_urgent_burst_bw = 0,
+};
 
 struct _vcs_dpi_ip_params_st dcn2_1_ip = {
 	.odm_capable = 1,

@@ -1074,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
 		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 
-		if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+		if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
 			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
 			context->res_ctx.pipe_ctx[i].unbounded_req = false;

@@ -1424,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
 		 */
 		if (res_ctx->pipe_ctx[i].plane_state &&
 				(res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
-				 res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
+				 dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM))
 			pipes[pipe_cnt].pipe.src.num_cursors = 0;
 		else
 			pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
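The hunk above fills in the previously zeroed dcn2_0_nv12_soc bounding box with six DPM states. As a hedged illustration of how such a clock_limits table is typically consumed (this is not the DML state-selection code), a picker could walk the states and return the first one whose display clock covers the requirement; the field names .state, .dispclk_mhz, .num_states and .clock_limits come from the hunk, everything else is an assumption.

	/*
	 * Illustrative sketch only: pick the lowest state in a
	 * _vcs_dpi_soc_bounding_box_st whose dispclk covers the requested
	 * rate, falling back to the highest state if nothing is fast enough.
	 */
	static int sketch_pick_dispclk_state(const struct _vcs_dpi_soc_bounding_box_st *soc,
			double required_dispclk_mhz)
	{
		int i;

		for (i = 0; i < soc->num_states; i++) {
			if (soc->clock_limits[i].dispclk_mhz >= required_dispclk_mhz)
				return soc->clock_limits[i].state;
		}

		/* Nothing satisfies the request; report the top state. */
		return soc->clock_limits[soc->num_states - 1].state;
	}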
@@ -32,6 +32,8 @@
 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
 #include "dcn30/dcn30_resource.h"
 #include "link.h"
+#include "dc_state_priv.h"
+#include "resource.h"
 
 #define DC_LOGGER_INIT(logger)
 

@@ -290,7 +292,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
 
 		/* for subvp + DRR case, if subvp pipes are still present we support pstate */
 		if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
-				dcn32_subvp_in_use(dc, context))
+				resource_subvp_in_use(dc, context))
 			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
 
 		if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&

@@ -341,7 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
 		if (!pipe->stream)
 			continue;
 
-		if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+		if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
 			pipes[pipe_idx].pipe.dest.vstartup_start =
 				get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 			pipes[pipe_idx].pipe.dest.vupdate_offset =

@@ -624,7 +626,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
 		if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
 				!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
 				(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
-				pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
 				(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
 				!pipe->plane_state->address.tmz_surface &&
 				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||

@@ -682,7 +684,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
 
 		// Find the minimum pipe split count for non SubVP pipes
 		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
-		    pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
 			split_cnt = 0;
 			while (pipe) {
 				split_cnt++;

@@ -735,8 +737,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
 		 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
 		 */
 		if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
-		    pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-			phantom = pipe->stream->mall_stream_config.paired_stream;
+		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+			phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
 			microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
 					phantom->timing.v_addressable;
 

@@ -804,6 +806,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
 	int16_t stretched_drr_us = 0;
 	int16_t drr_stretched_vblank_us = 0;
 	int16_t max_vblank_mallregion = 0;
+	struct dc_stream_state *phantom_stream;
+	bool subvp_found = false;
+	bool drr_found = false;
 
 	// Find SubVP pipe
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {

@@ -816,8 +821,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
 			continue;
 
 		// Find the SubVP pipe
-		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+			subvp_found = true;
 			break;
+		}
 	}
 
 	// Find the DRR pipe

@@ -825,32 +832,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
 		drr_pipe = &context->res_ctx.pipe_ctx[i];
 
 		// We check for master pipe only
-		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
-				!resource_is_pipe_type(pipe, DPP_PIPE))
+		if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+				!resource_is_pipe_type(drr_pipe, DPP_PIPE))
 			continue;
 
-		if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
-				(drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed))
+		if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+				(drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
+			drr_found = true;
 			break;
+		}
 	}
 
-	main_timing = &pipe->stream->timing;
-	phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
-	drr_timing = &drr_pipe->stream->timing;
-	prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
-			(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
-			dc->caps.subvp_prefetch_end_to_mall_start_us;
-	subvp_active_us = main_timing->v_addressable * main_timing->h_total /
-			(double)(main_timing->pix_clk_100hz * 100) * 1000000;
-	drr_frame_us = drr_timing->v_total * drr_timing->h_total /
-			(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
-	// P-State allow width and FW delays already included phantom_timing->v_addressable
-	mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
-			(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
-	stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
-	drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
-			(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
-	max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+	if (subvp_found && drr_found) {
+		phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+		main_timing = &pipe->stream->timing;
+		phantom_timing = &phantom_stream->timing;
+		drr_timing = &drr_pipe->stream->timing;
+		prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
+				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
+				dc->caps.subvp_prefetch_end_to_mall_start_us;
+		subvp_active_us = main_timing->v_addressable * main_timing->h_total /
+				(double)(main_timing->pix_clk_100hz * 100) * 1000000;
+		drr_frame_us = drr_timing->v_total * drr_timing->h_total /
+				(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
+		// P-State allow width and FW delays already included phantom_timing->v_addressable
+		mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
+				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
+		stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
+		drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
+				(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
+		max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+	}
 
 	/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
 	 * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
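The hunk above only wraps the SubVP + DRR timing math in a subvp_found && drr_found guard; the arithmetic itself is unchanged. A self-contained, hedged restatement of that math in plain microseconds follows to make the formulas easier to read: the prefetch and MALL-region windows come from the phantom timing, and the DRR frame is stretched by the MALL region plus a fixed margin. The struct and helper names are hypothetical; only the formulas mirror the diff.

	/* Stand-alone restatement of the budget math (illustration only). */
	struct sketch_timing {
		double v_total, v_addressable, v_front_porch, h_total, pix_clk_khz;
	};

	struct sketch_subvp_drr_budget {
		double prefetch_us, subvp_active_us, drr_frame_us, mall_region_us;
		double stretched_drr_us, drr_stretched_vblank_us, max_vblank_mallregion;
	};

	static double sketch_line_time_us(const struct sketch_timing *t)
	{
		/* h_total pixels at pix_clk_khz kHz, expressed in microseconds. */
		return t->h_total / t->pix_clk_khz * 1000.0;
	}

	static struct sketch_subvp_drr_budget
	sketch_compute_budget(const struct sketch_timing *main_t,
			const struct sketch_timing *phantom_t,
			const struct sketch_timing *drr_t,
			double prefetch_to_mall_us, double margin_us)
	{
		struct sketch_subvp_drr_budget b;

		/* Phantom frame minus its front porch, plus the FW's
		 * prefetch-end-to-MALL-start delay. */
		b.prefetch_us = (phantom_t->v_total - phantom_t->v_front_porch) *
				sketch_line_time_us(phantom_t) + prefetch_to_mall_us;
		b.subvp_active_us = main_t->v_addressable * sketch_line_time_us(main_t);
		b.drr_frame_us = drr_t->v_total * sketch_line_time_us(drr_t);
		b.mall_region_us = phantom_t->v_addressable * sketch_line_time_us(phantom_t);
		/* DRR frame stretched by the MALL region plus a fixed margin. */
		b.stretched_drr_us = b.drr_frame_us + b.mall_region_us + margin_us;
		b.drr_stretched_vblank_us = (drr_t->v_total - drr_t->v_addressable) *
				sketch_line_time_us(drr_t) + (b.stretched_drr_us - b.drr_frame_us);
		b.max_vblank_mallregion = b.drr_stretched_vblank_us > b.mall_region_us ?
				b.drr_stretched_vblank_us : b.mall_region_us;
		return b;
	}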
@@ -895,6 +907,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
 	struct dc_crtc_timing *main_timing = NULL;
 	struct dc_crtc_timing *phantom_timing = NULL;
 	struct dc_crtc_timing *vblank_timing = NULL;
+	struct dc_stream_state *phantom_stream;
+	enum mall_stream_type pipe_mall_type;

 	/* For SubVP + VBLANK/DRR cases, we assume there can only be
 	 * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -904,6 +918,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
 	 */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		pipe = &context->res_ctx.pipe_ctx[i];
+		pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

 		// We check for master pipe, but it shouldn't matter since we only need
 		// the pipe for timing info (stream should be same for any pipe splits)
@@ -911,18 +926,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
 				!resource_is_pipe_type(pipe, DPP_PIPE))
 			continue;

-		if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+		if (!found && pipe_mall_type == SUBVP_NONE) {
 			// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
 			vblank_index = i;
 			found = true;
 		}

-		if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+		if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
 			subvp_pipe = pipe;
 	}
 	if (found) {
+		phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
 		main_timing = &subvp_pipe->stream->timing;
-		phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+		phantom_timing = &phantom_stream->timing;
 		vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
 		// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
 		// Also include the prefetch end to mallstart delay time
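This hunk and most of the ones that follow make the same substitution: call sites stop reading pipe->stream->mall_stream_config and instead ask the dc_state for the pipe's SubVP role via dc_state_get_pipe_subvp_type() and dc_state_get_paired_subvp_stream(). As a rough mental model only (the real implementations are not part of this diff), the toy below keeps the per-stream SubVP classification in the state object and resolves it through an accessor; everything prefixed with toy_ is invented for illustration, while the SUBVP_* values come from the hunks themselves.

#include <stdio.h>

enum mall_stream_type { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct toy_stream { int id; };
struct toy_pipe_ctx { struct toy_stream *stream; };

/* In this model the SubVP role lives with the validation state, not with the stream. */
struct toy_state {
	struct toy_stream *streams[8];
	enum mall_stream_type subvp_type[8];
	int stream_count;
};

static enum mall_stream_type toy_get_pipe_subvp_type(const struct toy_state *state,
						     const struct toy_pipe_ctx *pipe)
{
	int i;

	if (!pipe || !pipe->stream)
		return SUBVP_NONE;

	for (i = 0; i < state->stream_count; i++) {
		if (state->streams[i] == pipe->stream)
			return state->subvp_type[i];
	}
	return SUBVP_NONE;
}

int main(void)
{
	struct toy_stream s = { .id = 1 };
	struct toy_pipe_ctx pipe = { .stream = &s };
	struct toy_state state = { .streams = { &s }, .subvp_type = { SUBVP_MAIN }, .stream_count = 1 };

	printf("%d\n", toy_get_pipe_subvp_type(&state, &pipe));	/* prints 1 (SUBVP_MAIN) */
	return 0;
}

One plausible motivation for such a split is that a stream can then appear in different states with different SubVP roles; the diff itself only shows the call-site change.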
@@ -977,7 +993,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
 			continue;

 		if (pipe->plane_state && !pipe->top_pipe &&
-				pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
 			refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
 				pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
 			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
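The refresh-rate computation above is an integer ceiling division: biasing the numerator by (v_total * h_total - 1) before dividing rounds the rate up instead of down. The userspace sketch below reproduces it with made-up timings and a trivial stand-in for the kernel's div_u64(); the final division by h_total is assumed from the shape of the expression and is not visible in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in so the sketch builds outside the kernel. */
static uint64_t div_u64(uint64_t n, uint32_t d)
{
	return n / d;
}

int main(void)
{
	/* Made-up 4K@60 timing: 594 MHz pixel clock, 4400 x 2250 total. */
	uint64_t pix_clk_100hz = 5940000;
	uint64_t h_total = 4400, v_total = 2250;

	/* Ceiling division: bias by (divisor - 1) before dividing, as in the hunk. */
	uint64_t refresh_rate = pix_clk_100hz * (uint64_t)100 +
			v_total * h_total - (uint64_t)1;

	refresh_rate = div_u64(refresh_rate, v_total);
	refresh_rate = div_u64(refresh_rate, h_total);	/* assumed follow-up step */

	printf("refresh rate: %llu Hz\n", (unsigned long long)refresh_rate);	/* 60 */
	return 0;
}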
@@ -1026,23 +1042,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,

 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

 		if (!pipe->stream)
 			continue;

 		if (pipe->plane_state && !pipe->top_pipe) {
-			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+			if (pipe_mall_type == SUBVP_MAIN)
 				subvp_count++;
-			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+			if (pipe_mall_type == SUBVP_NONE)
 				non_subvp_pipes++;
-			}
 		}

 		// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
 		// switching (SubVP + VACTIVE unsupported). In situations where we force
 		// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
-		    pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+		    pipe_mall_type == SUBVP_NONE) {
 			vactive_count++;
 		}
 		pipe_idx++;
@@ -1078,7 +1094,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context)
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

 		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
-				pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+				dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
 			pipe_ctx->subvp_index = index++;
 		} else {
 			pipe_ctx->subvp_index = 0;
@@ -1532,7 +1548,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
 		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
 		// remove phantom pipes and repopulate dml pipes
 		if (!found_supported_config) {
-			dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+			dc_state_remove_phantom_streams_and_planes(dc, context);
+			dc_state_release_phantom_streams_and_planes(dc, context);
 			vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
 			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);

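Here (and again in dcn32_internal_validate_bw below) the single remove_phantom_pipes() resource-pool hook becomes a pair of dc_state_* calls. A plausible reading, since the implementations are not part of this diff, is that detaching phantom streams/planes from the state and dropping the state's references to them are now separate steps. The toy below only illustrates that remove-then-release shape with invented toy_* names and a simple refcount; it is not the actual DC code.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a phantom stream or plane. */
struct toy_obj {
	int refcount;
	const char *name;
};

struct toy_state {
	struct toy_obj *phantom;	/* NULL when no phantom resource is attached */
};

static void toy_release(struct toy_obj *obj)
{
	if (obj && --obj->refcount == 0) {
		printf("freeing %s\n", obj->name);
		free(obj);
	}
}

/* Step 1: detach the phantom object from the state; the caller still holds a reference. */
static struct toy_obj *toy_state_remove_phantom(struct toy_state *state)
{
	struct toy_obj *obj = state->phantom;

	state->phantom = NULL;
	return obj;
}

int main(void)
{
	struct toy_state state = { 0 };
	struct toy_obj *phantom = calloc(1, sizeof(*phantom));

	if (!phantom)
		return 1;
	phantom->refcount = 1;
	phantom->name = "phantom stream";
	state.phantom = phantom;

	/* Step 2: drop the reference, mirroring the remove-then-release call pair above. */
	toy_release(toy_state_remove_phantom(&state));
	return 0;
}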
@@ -1684,7 +1701,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
 		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
 				pipe_idx);

-		if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+		if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
 			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
 			context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1716,7 +1733,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
 				context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
 				context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
 			/* SS: all active surfaces stored in MALL */
-			if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+			if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
 				context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;

 				if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
@@ -1930,7 +1947,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
 		return false;

 	// For each full update, remove all existing phantom pipes first
-	dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
+	dc_state_remove_phantom_streams_and_planes(dc, context);
+	dc_state_release_phantom_streams_and_planes(dc, context);

 	dc->res_pool->funcs->update_soc_for_wm_a(dc, context);

@@ -2255,7 +2273,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	unsigned int dummy_latency_index = 0;
 	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
 	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-	bool subvp_in_use = dcn32_subvp_in_use(dc, context);
+	bool subvp_active = resource_subvp_in_use(dc, context);
 	unsigned int min_dram_speed_mts_margin;
 	bool need_fclk_lat_as_dummy = false;
 	bool is_subvp_p_drr = false;
@@ -2264,7 +2282,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	dc_assert_fp_enabled();

 	/* need to find dummy latency index for subvp */
-	if (subvp_in_use) {
+	if (subvp_active) {
 		/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
 		if (!pstate_en) {
 			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2450,7 +2468,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 				dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
 		}

-		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
+		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_active) {
 			/* find largest table entry that is lower than dram speed,
 			 * but lower than DPM0 still uses DPM0
 			 */
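The comment in this hunk describes a clamped search: pick the largest memclk table entry that is still below the reported DRAM speed, but never go below DPM0. A minimal sketch of that selection rule, with a made-up table (the real values come from dc->clk_mgr->bw_params), behaves as the inline comments show.

#include <stdio.h>

/* Made-up memclk table (MT/s equivalents), purely for illustration. */
static const unsigned int memclk_table_mts[] = { 1600, 2400, 3200, 4200 };
#define NUM_MEMCLK_LEVELS (sizeof(memclk_table_mts) / sizeof(memclk_table_mts[0]))

/* Largest entry below dram_speed_mts; anything below DPM0 clamps to DPM0. */
static unsigned int pick_min_dram_speed(unsigned int dram_speed_mts)
{
	unsigned int best = memclk_table_mts[0];	/* DPM0 is the floor */
	unsigned int i;

	for (i = 0; i < NUM_MEMCLK_LEVELS; i++) {
		if (memclk_table_mts[i] < dram_speed_mts)
			best = memclk_table_mts[i];
	}
	return best;
}

int main(void)
{
	printf("%u\n", pick_min_dram_speed(3000));	/* -> 2400 */
	printf("%u\n", pick_min_dram_speed(1000));	/* -> 1600 (clamped to DPM0) */
	return 0;
}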
@@ -3448,7 +3466,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

-		if (!pipe->stream)
+		/* In DCN32/321, FPO uses per-pipe P-State force.
+		 * If there's no planes, HUBP is power gated and
+		 * therefore programming UCLK_PSTATE_FORCE does
+		 * nothing (P-State will always be asserted naturally
+		 * on a pipe that has HUBP power gated. Therefore we
+		 * only want to enable FPO if the FPO pipe has both
+		 * a stream and a plane.
+		 */
+		if (!pipe->stream || !pipe->plane_state)
 			continue;

 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
@@ -3502,7 +3528,7 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
 void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
 {
 	// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
-	if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
+	if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || resource_subvp_in_use(dc, context)) &&
 			dc->dml.soc.num_chans <= 8) {
 		int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;

Some files were not shown because too many files have changed in this diff.