Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'amd-drm-next-6.20-2026-02-13' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.20-2026-02-13:

amdgpu:
- SMU 13.x fixes
- DC resume lag fix
- MPO fixes
- DCN 3.6 fix
- VSDB fixes
- HWSS clean up
- Replay fixes
- DCE cursor fixes
- DCN 3.5 SR DDR5 latency fixes
- HPD fixes
- Error path unwind fixes
- SMU13/14 mode1 reset fixes
- PSP 15 updates
- SMU 15 updates
- RAS fixes
- Sync fix in amdgpu_dma_buf_move_notify()
- HAINAN fix
- PSP 13.x fix
- GPUVM locking fix

amdkfd:
- APU GTT as VRAM fix

radeon:
- HAINAN fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260213220825.1454189-1-alexander.deucher@amd.com

+606 -384
+13 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
···
     struct amdgpu_bo *bo[2] = {NULL, NULL};
     struct amdgpu_bo_va *bo_va;
     bool same_hive = false;
+    struct drm_exec exec;
     int i, ret;

     if (!va) {
···
            goto unwind;
        }

-       /* Add BO to VM internal data structures */
-       ret = amdgpu_bo_reserve(bo[i], false);
-       if (ret) {
-           pr_debug("Unable to reserve BO during memory attach");
-           goto unwind;
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+       drm_exec_until_all_locked(&exec) {
+           ret = amdgpu_vm_lock_pd(vm, &exec, 0);
+           drm_exec_retry_on_contention(&exec);
+           if (unlikely(ret))
+               goto unwind;
+           ret = drm_exec_lock_obj(&exec, &bo[i]->tbo.base);
+           drm_exec_retry_on_contention(&exec);
+           if (unlikely(ret))
+               goto unwind;
        }
+
        bo_va = amdgpu_vm_bo_find(vm, bo[i]);
        if (!bo_va)
            bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
        else
            ++bo_va->ref_count;
        attachment[i]->bo_va = bo_va;
-       amdgpu_bo_unreserve(bo[i]);
+       drm_exec_fini(&exec);
        if (unlikely(!attachment[i]->bo_va)) {
            ret = -ENOMEM;
            pr_err("Failed to add BO object to VM. ret == %d\n",
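
Note: the hunk above converts a single amdgpu_bo_reserve()/amdgpu_bo_unreserve() pair into the drm_exec helper so the VM page directory and the BO are locked together. A minimal sketch of the general drm_exec pattern, assuming two placeholder GEM objects obj_a and obj_b (the drm_exec_* calls are the real API; everything else is illustrative):

    struct drm_exec exec;
    int ret = 0;

    /* DRM_EXEC_INTERRUPTIBLE_WAIT: contended waits can be interrupted */
    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
    drm_exec_until_all_locked(&exec) {
        ret = drm_exec_lock_obj(&exec, obj_a);
        /* on contention: drop all held locks, restart the loop body */
        drm_exec_retry_on_contention(&exec);
        if (unlikely(ret))
            goto out;

        ret = drm_exec_lock_obj(&exec, obj_b);
        drm_exec_retry_on_contention(&exec);
        if (unlikely(ret))
            goto out;
    }
    /* all objects are locked here; do the work */
out:
    drm_exec_fini(&exec); /* drops every lock still held */

The retry-on-contention loop is what makes multi-object locking deadlock-free: instead of imposing a global lock order, it backs off and restarts whenever an object is already held elsewhere.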
-3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
        dev_info(adev->dev, "Pending hive reset.\n");
        amdgpu_set_init_level(adev,
                      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
-   } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
-          !amdgpu_device_has_display_hardware(adev)) {
-       r = psp_gpu_reset(adev);
    } else {
        tmp = amdgpu_reset_method;
        /* It should do a default reset when loading or reloading the driver,
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
···
    case IP_VERSION(13, 0, 11):
    case IP_VERSION(13, 0, 12):
    case IP_VERSION(13, 0, 14):
+   case IP_VERSION(13, 0, 15):
    case IP_VERSION(14, 0, 0):
    case IP_VERSION(14, 0, 1):
    case IP_VERSION(14, 0, 4):
···
    case IP_VERSION(11, 5, 1):
    case IP_VERSION(11, 5, 2):
    case IP_VERSION(11, 5, 3):
-   case IP_VERSION(11, 5, 4):
        adev->family = AMDGPU_FAMILY_GC_11_5_0;
+       break;
+   case IP_VERSION(11, 5, 4):
+       adev->family = AMDGPU_FAMILY_GC_11_5_4;
        break;
    case IP_VERSION(12, 0, 0):
    case IP_VERSION(12, 0, 1):
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
···
    r = dma_resv_reserve_fences(resv, 2);
    if (!r)
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
+
+   /* Don't pass 'ticket' to amdgpu_vm_handle_moved: we want the clear=true
+    * path to be used otherwise we might update the PT of another process
+    * while it's using the BO.
+    * With clear=true, amdgpu_vm_bo_update will sync to command submission
+    * from the same VM.
+    */
    if (!r)
-       r = amdgpu_vm_handle_moved(adev, vm, ticket);
+       r = amdgpu_vm_handle_moved(adev, vm, NULL);

    if (r && r != -EBUSY)
        DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+17 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
···
    struct amdgpu_vm *vm = &fpriv->vm;
    struct amdgpu_bo_va *bo_va;
    struct mm_struct *mm;
+   struct drm_exec exec;
    int r;

    mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
···
        !amdgpu_vm_is_bo_always_valid(vm, abo))
        return -EPERM;

-   r = amdgpu_bo_reserve(abo, false);
-   if (r)
-       return r;
+   drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+   drm_exec_until_all_locked(&exec) {
+       r = drm_exec_prepare_obj(&exec, &abo->tbo.base, 1);
+       drm_exec_retry_on_contention(&exec);
+       if (unlikely(r))
+           goto out_unlock;
+
+       r = amdgpu_vm_lock_pd(vm, &exec, 0);
+       drm_exec_retry_on_contention(&exec);
+       if (unlikely(r))
+           goto out_unlock;
+   }

    amdgpu_vm_bo_update_shared(abo);
    bo_va = amdgpu_vm_bo_find(vm, abo);
···
        amdgpu_bo_unreserve(abo);
        return r;
    }
-
-   amdgpu_bo_unreserve(abo);
+   drm_exec_fini(&exec);

    /* Validate and add eviction fence to DMABuf imports with dynamic
     * attachment in compute VMs. Re-validation will be done by
···
        }
    }
    mutex_unlock(&vm->process_info->lock);
+   return r;

+out_unlock:
+   drm_exec_fini(&exec);
    return r;
}
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
···
        r = amdgpu_vm_flush(ring, job, need_pipe_sync);
        if (r) {
            amdgpu_ring_undo(ring);
-           return r;
+           goto free_fence;
        }
    }
+10
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
···
{
    struct amdgpu_device *adev = drm_to_adev(dev);
    struct amdgpu_fpriv *fpriv;
+   struct drm_exec exec;
    int r, pasid;

    /* Ensure IB tests are run on ring */
···
    if (r)
        goto error_pasid;

+   drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+   drm_exec_until_all_locked(&exec) {
+       r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+       drm_exec_retry_on_contention(&exec);
+       if (unlikely(r))
+           goto error_vm;
+   }
+
    fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
+   drm_exec_fini(&exec);
    if (!fpriv->prt_va) {
        r = -ENOMEM;
        goto error_vm;
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
···
        break;
    case IP_VERSION(13, 0, 6):
    case IP_VERSION(13, 0, 14):
+   case IP_VERSION(13, 0, 15):
        ret = psp_init_cap_microcode(psp, ucode_prefix);
        ret &= psp_init_ta_microcode(psp, ucode_prefix);
        break;
···
        psp->autoload_supported = false;
        break;
    case IP_VERSION(13, 0, 12):
+   case IP_VERSION(13, 0, 15):
        psp_v13_0_set_psp_funcs(psp);
        psp->autoload_supported = false;
        adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
···

    if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
-       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) ||
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15))
        return false;

    db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
···
        ras_intr = amdgpu_ras_intr_triggered();
        if (ras_intr)
            break;
-       usleep_range(10, 100);
+       usleep_range(60, 100);
        amdgpu_device_invalidate_hdp(psp->adev, NULL);
    }
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
···
    }

    res = __verify_ras_table_checksum(control);
-   if (res)
+   if (res) {
        dev_err(adev->dev,
            "RAS table incorrect checksum or error:%d\n",
            res);
+       return -EINVAL;
+   }

    /* Warn if we are at 90% of the threshold or above
     */
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
{
    struct amdgpu_bo_va *bo_va;

+   amdgpu_vm_assert_locked(vm);
+
    bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
    if (bo_va == NULL) {
        return NULL;
+2 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
···

#define XGMI_STATE_DISABLE 0xD1
#define XGMI_STATE_LS0 0x81
-#define XGMI_LINK_ACTIVE 1
-#define XGMI_LINK_INACTIVE 0

static DEFINE_MUTEX(xgmi_mutex);
···
        return -ENOLINK;

    if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0)
-       return XGMI_LINK_ACTIVE;
+       return AMDGPU_XGMI_LINK_ACTIVE;

-   return XGMI_LINK_INACTIVE;
+   return AMDGPU_XGMI_LINK_INACTIVE;
}

/**
+11 -4
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
···
MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_15_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_15_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin");
···
    case IP_VERSION(13, 0, 10):
    case IP_VERSION(13, 0, 12):
    case IP_VERSION(13, 0, 14):
+   case IP_VERSION(13, 0, 15):
        err = psp_init_sos_microcode(psp, ucode_prefix);
        if (err)
            return err;
···

    if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
-       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) ||
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) {
        at = 0;
        for_each_inst(i, adev->aid_mask) {
            bl_status_reg =
···
    retry_cnt =
        ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
          amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
-         amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ?
+         amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) ||
+         amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15))) ?
        PSP_VMBX_POLLING_LIMIT :
        10;
    /* Wait for bootloader to signify that it is ready having bit 31 of
···

    if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
-       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) ||
+       amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) {
        ret = psp_v13_0_wait_for_vmbx_ready(psp);
        if (ret)
            amdgpu_ras_query_boot_status(adev, 4);
···

    if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
         amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
-        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) &&
+        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) ||
+        amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) &&
        (!(adev->flags & AMD_IS_APU))) {
        reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
        adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0));
+4
drivers/gpu/drm/amd/amdgpu/psp_v15_0.c
···
    if (err)
        return err;

+   err = psp_init_ta_microcode(psp, ucode_prefix);
+   if (err)
+       return err;
+
    return 0;
}
+2 -1
drivers/gpu/drm/amd/amdgpu/soc15.c
···
    if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
        (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
        (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) &&
-       (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
+       (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14)) &&
+       (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 15))) {
        /* AMD_CG_SUPPORT_DRM_MGCG */
        data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
        if (!(data & 0x01000000))
+22 -2
drivers/gpu/drm/amd/amdgpu/soc21.c
···
    case IP_VERSION(14, 0, 1):
    case IP_VERSION(14, 0, 4):
    case IP_VERSION(14, 0, 5):
+   case IP_VERSION(15, 0, 0):
        return AMD_RESET_METHOD_MODE2;
    default:
        if (amdgpu_dpm_is_baco_supported(adev))
···
        break;
    case IP_VERSION(11, 5, 4):
        adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
-           AMD_CG_SUPPORT_JPEG_MGCG;
+           AMD_CG_SUPPORT_JPEG_MGCG |
+           AMD_CG_SUPPORT_GFX_CGCG |
+           AMD_CG_SUPPORT_GFX_CGLS |
+           AMD_CG_SUPPORT_GFX_MGCG |
+           AMD_CG_SUPPORT_GFX_FGCG |
+           AMD_CG_SUPPORT_REPEATER_FGCG |
+           AMD_CG_SUPPORT_GFX_PERF_CLK |
+           AMD_CG_SUPPORT_GFX_3D_CGCG |
+           AMD_CG_SUPPORT_GFX_3D_CGLS |
+           AMD_CG_SUPPORT_MC_MGCG |
+           AMD_CG_SUPPORT_MC_LS |
+           AMD_CG_SUPPORT_HDP_LS |
+           AMD_CG_SUPPORT_HDP_DS |
+           AMD_CG_SUPPORT_HDP_SD |
+           AMD_CG_SUPPORT_ATHUB_MGCG |
+           AMD_CG_SUPPORT_ATHUB_LS |
+           AMD_CG_SUPPORT_IH_CG |
+           AMD_CG_SUPPORT_BIF_MGCG |
+           AMD_CG_SUPPORT_BIF_LS;
        adev->pg_flags = AMD_PG_SUPPORT_VCN |
-           AMD_PG_SUPPORT_JPEG;
+           AMD_PG_SUPPORT_JPEG |
+           AMD_PG_SUPPORT_GFX_PG;
        adev->external_rev_id = adev->rev_id + 0x1;
        break;
    default:
+12 -8
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
···
    return -ENOMEM;
}

-static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id)
{
    spin_lock(&pdd->dev->watch_points_lock);

    /* process owns device watch point so safe to clear */
-   if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
-       pdd->alloc_watch_ids &= ~(0x1 << watch_id);
-       pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id);
+   if (pdd->alloc_watch_ids & BIT(watch_id)) {
+       pdd->alloc_watch_ids &= ~BIT(watch_id);
+       pdd->dev->alloc_watch_ids &= ~BIT(watch_id);
    }

    spin_unlock(&pdd->dev->watch_points_lock);
}

-static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
+static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id)
{
    bool owns_watch_id = false;

    spin_lock(&pdd->dev->watch_points_lock);
-   owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
-           ((pdd->alloc_watch_ids >> watch_id) & 0x1);
-
+   owns_watch_id = pdd->alloc_watch_ids & BIT(watch_id);
    spin_unlock(&pdd->dev->watch_points_lock);

    return owns_watch_id;
···
        uint32_t watch_id)
{
    int r;
+
+   if (watch_id >= MAX_WATCH_ADDRESSES)
+       return -EINVAL;

    if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
        return -EINVAL;
···

    if (r)
        return r;
+
+   if (*watch_id >= MAX_WATCH_ADDRESSES)
+       return -EINVAL;

    if (!pdd->dev->kfd->shared_resources.enable_mes) {
        r = debug_lock_and_unmap(pdd->dev->dqm);
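
Note: the watch-id bookkeeping above is a plain bitmask; the rewrite swaps open-coded shifts for the kernel's BIT() macro and validates the index before it is ever used as a shift count (shifting a 32-bit value by 32 or more is undefined behaviour in C). A standalone sketch of the same bookkeeping, assuming an illustrative MAX_WATCH_ADDRESSES of 4:

#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define MAX_WATCH_ADDRESSES 4   /* illustrative value */

static uint32_t alloc_watch_ids;    /* one bit per allocated watch point */

static bool owns_watch_id(uint32_t watch_id)
{
    /* range-check first: BIT(watch_id) with watch_id >= 32 is UB */
    if (watch_id >= MAX_WATCH_ADDRESSES)
        return false;
    return alloc_watch_ids & BIT(watch_id);
}

static void clear_watch_id(uint32_t watch_id)
{
    if (watch_id < MAX_WATCH_ADDRESSES)
        alloc_watch_ids &= ~BIT(watch_id);
}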
+3
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
···

static bool mqd_on_vram(struct amdgpu_device *adev)
{
+   if (adev->apu_prefer_gtt)
+       return false;
+
    switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
    case IP_VERSION(9, 4, 3):
    case IP_VERSION(9, 5, 0):
+22 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
    struct dc_commit_streams_params commit_params = {};

    if (dm->dc->caps.ips_support) {
+       if (!amdgpu_in_reset(adev))
+           mutex_lock(&dm->dc_lock);
+
+       /* Need to set POWER_STATE_D0 first or it will not execute
+        * idle_power_optimizations command to DMUB.
+        */
+       dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
        dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+
+       if (!amdgpu_in_reset(adev))
+           mutex_unlock(&dm->dc_lock);
    }

    if (amdgpu_in_reset(adev)) {
···
            continue;
        }
        for (j = 0; j < status->plane_count; j++)
-           dummy_updates[j].surface = status->plane_states[0];
+           dummy_updates[j].surface = status->plane_states[j];

        sort(dummy_updates, status->plane_count,
             sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL);
···

    /* Overlay cursor not supported on HW before DCN
     * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions
-    * as previous DCN generations, so enable native mode on DCN401 in addition to DCE
+    * as previous DCN generations, so enable native mode on DCN401
     */
-   if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 ||
-       amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
+   if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) {
        *cursor_mode = DM_CURSOR_NATIVE_MODE;
        return 0;
    }
···
         * need to be added for DC to not disable a plane by mistake
         */
        if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) {
+           if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) {
+               drm_dbg(dev, "Overlay cursor not supported on DCE\n");
+               ret = -EINVAL;
+               goto fail;
+           }
+
            ret = drm_atomic_add_affected_planes(state, crtc);
            if (ret)
                goto fail;
···
    u8 *edid_ext = NULL;
    int i;
    int j = 0;
+   int total_ext_block_len;

    if (edid == NULL || edid->extensions == 0)
        return -ENODEV;
···
            break;
    }

-   while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
+   total_ext_block_len = EDID_LENGTH * edid->extensions;
+   while (j < total_ext_block_len - sizeof(struct amd_vsdb_block)) {
        struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
        unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
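
Note on the last hunk: an EDID is a 128-byte base block plus edid->extensions further 128-byte blocks, so bounding the VSDB scan by EDID_LENGTH alone only ever covered the first extension block. A small sketch of the corrected bound (EDID_LENGTH matches the kernel's definition; the struct layout here is illustrative):

#include <stdint.h>

#define EDID_LENGTH 128 /* every EDID block, base or extension, is 128 bytes */

struct amd_vsdb_block {
    uint8_t ieee_id[3]; /* 3-byte IEEE OUI, as scanned in the diff */
    uint8_t payload[5]; /* illustrative payload size */
};

/* num_extensions is edid->extensions from the base block: the number of
 * 128-byte extension blocks that follow the base block. */
static int vsdb_scan_limit(int num_extensions)
{
    int total_ext_block_len = EDID_LENGTH * num_extensions;

    /* the old limit, EDID_LENGTH - sizeof(struct amd_vsdb_block),
     * stopped inside the first extension block */
    return total_ext_block_len - (int)sizeof(struct amd_vsdb_block);
}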
+4 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
···
            continue;

        amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+       dc_link = amdgpu_dm_connector->dc_link;
+       if (!dc_link)
+           continue;

        /*
         * Analog connectors may be hot-plugged unlike other connector
         * types that don't support HPD. Only poll analog connectors.
         */
-       use_polling |=
-           amdgpu_dm_connector->dc_link &&
-           dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id);
-
-       dc_link = amdgpu_dm_connector->dc_link;
+       use_polling |= dc_connector_supports_analog(dc_link->link_id.id);

        /*
         * Get a base driver irq reference for hpd ints for the lifetime
+1 -1
drivers/gpu/drm/amd/display/dc/Makefile
···
#
# Makefile for Display Core (dc) component.

-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg

ifdef CONFIG_DRM_AMD_DC_FP
+8 -8
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
···
        .wm_inst = WM_A,
        .wm_type = WM_TYPE_PSTATE_CHG,
        .pstate_latency_us = 11.72,
-       .sr_exit_time_us = 28.0,
-       .sr_enter_plus_exit_time_us = 30.0,
+       .sr_exit_time_us = 31.0,
+       .sr_enter_plus_exit_time_us = 33.0,
        .valid = true,
    },
    {
        .wm_inst = WM_B,
        .wm_type = WM_TYPE_PSTATE_CHG,
        .pstate_latency_us = 11.72,
-       .sr_exit_time_us = 28.0,
-       .sr_enter_plus_exit_time_us = 30.0,
+       .sr_exit_time_us = 31.0,
+       .sr_enter_plus_exit_time_us = 33.0,
        .valid = true,
    },
    {
        .wm_inst = WM_C,
        .wm_type = WM_TYPE_PSTATE_CHG,
        .pstate_latency_us = 11.72,
-       .sr_exit_time_us = 28.0,
-       .sr_enter_plus_exit_time_us = 30.0,
+       .sr_exit_time_us = 31.0,
+       .sr_enter_plus_exit_time_us = 33.0,
        .valid = true,
    },
    {
        .wm_inst = WM_D,
        .wm_type = WM_TYPE_PSTATE_CHG,
        .pstate_latency_us = 11.72,
-       .sr_exit_time_us = 28.0,
-       .sr_enter_plus_exit_time_us = 30.0,
+       .sr_exit_time_us = 31.0,
+       .sr_enter_plus_exit_time_us = 33.0,
        .valid = true,
    },
}
+1 -1
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 53 53 #include "dpp.h" 54 54 #include "timing_generator.h" 55 55 #include "abm.h" 56 - #include "virtual/virtual_link_encoder.h" 56 + #include "dio/virtual/virtual_link_encoder.h" 57 57 #include "hubp.h" 58 58 59 59 #include "link_hwss.h"
+2 -2
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 37 37 #include "dpp.h" 38 38 #include "core_types.h" 39 39 #include "set_mode_types.h" 40 - #include "virtual/virtual_stream_encoder.h" 40 + #include "dio/virtual/virtual_stream_encoder.h" 41 41 #include "dpcd_defs.h" 42 42 #include "link_enc_cfg.h" 43 43 #include "link_service.h" ··· 45 45 #include "dc_state_priv.h" 46 46 #include "dc_stream_priv.h" 47 47 48 - #include "virtual/virtual_link_hwss.h" 48 + #include "link/hwss/link_hwss_virtual.h" 49 49 #include "link/hwss/link_hwss_dio.h" 50 50 #include "link/hwss/link_hwss_dpia.h" 51 51 #include "link/hwss/link_hwss_hpo_dp.h"
-13
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
···
        }
    }

-   /* apply manual trigger */
-   int i;
-
-   for (i = 0; i < dc->res_pool->pipe_count; i++) {
-       struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-       /* trigger event on first pipe with current stream */
-       if (stream == pipe_ctx->stream) {
-           pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
-           break;
-       }
-   }
-
    return true;
}
+1 -1
drivers/gpu/drm/amd/display/dc/dc.h
···
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;

-#define DC_VER "3.2.368"
+#define DC_VER "3.2.369"

/**
 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
+1 -53
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
···

void dccg2_init(struct dccg *dccg)
{
-   struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-   /* Hardcoded register values for DCN20
-    * These are specific to 100Mhz refclk
-    * Different ASICs with different refclk may override this in their own init
-    */
-   REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x00120264);
-   REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x001186a0);
-   REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x0e01003c);
-
-   if (REG(REFCLK_CNTL))
-       REG_WRITE(REFCLK_CNTL, 0);
-}
-
-void dccg2_refclk_setup(struct dccg *dccg)
-{
-   struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-   /* REFCLK programming that must occur after hubbub initialization */
-   if (REG(REFCLK_CNTL))
-       REG_WRITE(REFCLK_CNTL, 0);
-}
-
-bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg)
-{
-   struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-   return REG_READ(MICROSECOND_TIME_BASE_DIV) == 0x00120464;
-}
-
-void dccg2_allow_clock_gating(struct dccg *dccg, bool allow)
-{
-   struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-   if (allow) {
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-   } else {
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0xFFFFFFFF);
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0xFFFFFFFF);
-   }
-}
-
-void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable)
-{
-   struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
-   REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, enable ? 0 : 1);
}

static const struct dccg_funcs dccg2_funcs = {
···
    .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
    .otg_add_pixel = dccg2_otg_add_pixel,
    .otg_drop_pixel = dccg2_otg_drop_pixel,
-   .dccg_init = dccg2_init,
-   .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */
-   .allow_clock_gating = dccg2_allow_clock_gating,
-   .enable_memory_low_power = dccg2_enable_memory_low_power,
-   .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done
+   .dccg_init = dccg2_init
};

struct dccg *dccg2_create(
+3 -15
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
···
    DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\
    DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3),\
    DCCG_SRII(PIXEL_RATE_CNTL, OTG, 4),\
-   DCCG_SRII(PIXEL_RATE_CNTL, OTG, 5),\
-   SR(DCCG_GATE_DISABLE_CNTL),\
-   SR(DCCG_GATE_DISABLE_CNTL2)
+   DCCG_SRII(PIXEL_RATE_CNTL, OTG, 5)

#define DCCG_SF(reg_name, field_name, post_fix)\
    .field_name = reg_name ## __ ## field_name ## post_fix
···
    DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\
    DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\
    DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\
-   DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh),\
-   DCCG_SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+   DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)


···
    type DISPCLK_CHG_FWD_CORR_DISABLE;\
    type DISPCLK_FREQ_CHANGE_CNTL;\
    type OTG_ADD_PIXEL[MAX_PIPES];\
-   type OTG_DROP_PIXEL[MAX_PIPES];\
-   type DC_MEM_GLOBAL_PWR_REQ_DIS;
+   type OTG_DROP_PIXEL[MAX_PIPES];

#define DCCG3_REG_FIELD_LIST(type) \
    type HDMICHARCLK0_EN;\
···


void dccg2_init(struct dccg *dccg);
-
-void dccg2_refclk_setup(struct dccg *dccg);
-
-bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg);
-
-void dccg2_allow_clock_gating(struct dccg *dccg, bool allow);
-
-void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable);

struct dccg *dccg2_create(
    struct dc_context *ctx,
+9
drivers/gpu/drm/amd/display/dc/dio/Makefile
···
#
#

+###############################################################################
+# VIRTUAL
+###############################################################################
+DIO_VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
+
+AMD_DAL_DIO_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/dio/virtual/,$(DIO_VIRTUAL))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DIO_VIRTUAL)
+
ifdef CONFIG_DRM_AMD_DC_FP
###############################################################################
# DCN10
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
···
        },
    },
    .num_states = 5,
-   .sr_exit_time_us = 28.0,
-   .sr_enter_plus_exit_time_us = 30.0,
+   .sr_exit_time_us = 31.0,
+   .sr_enter_plus_exit_time_us = 33.0,
    .sr_exit_z8_time_us = 250.0,
    .sr_enter_plus_exit_z8_time_us = 350.0,
    .fclk_change_latency_us = 24.0,
+1 -1
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
···
    dml_print("DML: soc_bbox: refclk_mhz = %f\n", soc->refclk_mhz);
    dml_print("DML: soc_bbox: amclk_mhz = %f\n", soc->amclk_mhz);

-   dml_print("DML: soc_bbox: max_outstanding_reqs = %f\n", soc->max_outstanding_reqs);
+   dml_print("DML: soc_bbox: max_outstanding_reqs = %d\n", soc->max_outstanding_reqs);
    dml_print("DML: soc_bbox: pct_ideal_sdp_bw_after_urgent = %f\n", soc->pct_ideal_sdp_bw_after_urgent);
    dml_print("DML: soc_bbox: pct_ideal_fabric_bw_after_urgent = %f\n", soc->pct_ideal_fabric_bw_after_urgent);
    dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_pixel_only = %f\n", soc->pct_ideal_dram_bw_after_urgent_pixel_only);
+18 -3
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
···

    tbl_entry.color_space = input_color_space;

-   if (color_space >= COLOR_SPACE_YCBCR601)
-       select = INPUT_CSC_SELECT_ICSC;
-   else
+   if (dpp3_should_bypass_post_csc_for_colorspace(color_space))
        select = INPUT_CSC_SELECT_BYPASS;
+   else
+       select = INPUT_CSC_SELECT_ICSC;

    dpp3_program_post_csc(dpp_base, color_space, select,
                  &tbl_entry);
···
    return true;
}

+bool dpp3_should_bypass_post_csc_for_colorspace(enum dc_color_space dc_color_space)
+{
+   switch (dc_color_space) {
+   case COLOR_SPACE_UNKNOWN:
+   case COLOR_SPACE_SRGB:
+   case COLOR_SPACE_XR_RGB:
+   case COLOR_SPACE_SRGB_LIMITED:
+   case COLOR_SPACE_MSREF_SCRGB:
+   case COLOR_SPACE_2020_RGB_FULLRANGE:
+   case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+       return true;
+   default:
+       return false;
+   }
+}
+4
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
···
void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
        struct dpp_grph_csc_adjustment *adjust);
+
+bool dpp3_should_bypass_post_csc_for_colorspace(
+   enum dc_color_space dc_color_space);
+
#endif /* __DC_HWSS_DCN30_H__ */
+3 -3
drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
···

    tbl_entry.color_space = input_color_space;

-   if (color_space >= COLOR_SPACE_YCBCR601)
-       select = INPUT_CSC_SELECT_ICSC;
-   else
+   if (dpp3_should_bypass_post_csc_for_colorspace(color_space))
        select = INPUT_CSC_SELECT_BYPASS;
+   else
+       select = INPUT_CSC_SELECT_ICSC;

    dpp3_program_post_csc(dpp_base, color_space, select,
                  &tbl_entry);
+18 -13
drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
···
{
    struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);

-   ASSERT(info->gfxversion == DcGfxVersion9);
+   ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase);

-   REG_UPDATE_6(DCSURF_ADDR_CONFIG,
-       NUM_PIPES, log_2(info->gfx9.num_pipes),
-       NUM_BANKS, log_2(info->gfx9.num_banks),
-       PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
-       NUM_SE, log_2(info->gfx9.num_shader_engines),
-       NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
-       MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+   if (info->gfxversion == DcGfxVersion9) {
+       REG_UPDATE_6(DCSURF_ADDR_CONFIG,
+           NUM_PIPES, log_2(info->gfx9.num_pipes),
+           NUM_BANKS, log_2(info->gfx9.num_banks),
+           PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+           NUM_SE, log_2(info->gfx9.num_shader_engines),
+           NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
+           MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));

-   REG_UPDATE_4(DCSURF_TILING_CONFIG,
-       SW_MODE, info->gfx9.swizzle,
-       META_LINEAR, info->gfx9.meta_linear,
-       RB_ALIGNED, info->gfx9.rb_aligned,
-       PIPE_ALIGNED, info->gfx9.pipe_aligned);
+       REG_UPDATE_4(DCSURF_TILING_CONFIG,
+           SW_MODE, info->gfx9.swizzle,
+           META_LINEAR, info->gfx9.meta_linear,
+           RB_ALIGNED, info->gfx9.rb_aligned,
+           PIPE_ALIGNED, info->gfx9.pipe_aligned);
+   } else {
+       hubp1_clear_tiling(&hubp1->base);
+   }
+
}

void hubp1_program_size(
+14 -10
drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
···
    const struct dc_tiling_info *info,
    const enum surface_pixel_format pixel_format)
{
-   ASSERT(info->gfxversion == DcGfxVersion9);
+   ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase);

-   REG_UPDATE_3(DCSURF_ADDR_CONFIG,
-       NUM_PIPES, log_2(info->gfx9.num_pipes),
-       PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
-       MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+   if (info->gfxversion == DcGfxVersion9) {
+       REG_UPDATE_3(DCSURF_ADDR_CONFIG,
+           NUM_PIPES, log_2(info->gfx9.num_pipes),
+           PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+           MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));

-   REG_UPDATE_4(DCSURF_TILING_CONFIG,
-       SW_MODE, info->gfx9.swizzle,
-       META_LINEAR, 0,
-       RB_ALIGNED, 0,
-       PIPE_ALIGNED, 0);
+       REG_UPDATE_4(DCSURF_TILING_CONFIG,
+           SW_MODE, info->gfx9.swizzle,
+           META_LINEAR, 0,
+           RB_ALIGNED, 0,
+           PIPE_ALIGNED, 0);
+   } else {
+       hubp2_clear_tiling(&hubp2->base);
+   }
}

void hubp2_program_size(
+14 -10
drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
···
    const struct dc_tiling_info *info,
    const enum surface_pixel_format pixel_format)
{
-   ASSERT(info->gfxversion == DcGfxVersion9);
+   ASSERT(info->gfxversion == DcGfxVersion9 || info->gfxversion == DcGfxBase);

-   REG_UPDATE_4(DCSURF_ADDR_CONFIG,
-       NUM_PIPES, log_2(info->gfx9.num_pipes),
-       PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
-       MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags),
-       NUM_PKRS, log_2(info->gfx9.num_pkrs));
+   if (info->gfxversion == DcGfxVersion9) {
+       REG_UPDATE_4(DCSURF_ADDR_CONFIG,
+           NUM_PIPES, log_2(info->gfx9.num_pipes),
+           PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+           MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags),
+           NUM_PKRS, log_2(info->gfx9.num_pkrs));

-   REG_UPDATE_3(DCSURF_TILING_CONFIG,
-       SW_MODE, info->gfx9.swizzle,
-       META_LINEAR, info->gfx9.meta_linear,
-       PIPE_ALIGNED, info->gfx9.pipe_aligned);
+       REG_UPDATE_3(DCSURF_TILING_CONFIG,
+           SW_MODE, info->gfx9.swizzle,
+           META_LINEAR, info->gfx9.meta_linear,
+           PIPE_ALIGNED, info->gfx9.pipe_aligned);
+   } else {
+       hubp3_clear_tiling(&hubp2->base);
+   }

}
+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
+21 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
···

void dcn20_dccg_init(struct dce_hwseq *hws)
{
-   struct dc *dc = hws->ctx->dc;
+   /*
+    * set MICROSECOND_TIME_BASE_DIV
+    * 100Mhz refclk -> 0x120264
+    * 27Mhz refclk -> 0x12021b
+    * 48Mhz refclk -> 0x120230
+    *
+    */
+   REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);

-   if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->dccg_init)
-       dc->res_pool->dccg->funcs->dccg_init(dc->res_pool->dccg);
+   /*
+    * set MILLISECOND_TIME_BASE_DIV
+    * 100Mhz refclk -> 0x1186a0
+    * 27Mhz refclk -> 0x106978
+    * 48Mhz refclk -> 0x10bb80
+    *
+    */
+   REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
+
+   /* This value is dependent on the hardware pipeline delay so set once per SOC */
+   REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}

void dcn20_disable_vga(
···

    dcn10_hubbub_global_timer_enable(dc->res_pool->hubbub, true, 2);

-   if (hws->funcs.dccg_init)
-       hws->funcs.dccg_init(hws);
-
-   if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->refclk_setup)
-       dc->res_pool->dccg->funcs->refclk_setup(dc->res_pool->dccg);
+   if (REG(REFCLK_CNTL))
+       REG_WRITE(REFCLK_CNTL, 0);
//

+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 33 33 #include "vmid.h" 34 34 #include "reg_helper.h" 35 35 #include "hw/clk_mgr.h" 36 - #include "hw/dccg.h" 37 36 #include "dc_dmub_srv.h" 38 37 #include "abm.h" 39 38 #include "link_service.h" ··· 87 88 88 89 bool dcn21_s0i3_golden_init_wa(struct dc *dc) 89 90 { 90 - if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done) 91 - return !dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done(dc->res_pool->dccg); 91 + struct dce_hwseq *hws = dc->hwseq; 92 + uint32_t value = 0; 92 93 93 - return false; 94 + value = REG_READ(MICROSECOND_TIME_BASE_DIV); 95 + 96 + return value != 0x00120464; 94 97 } 95 98 96 99 void dcn21_exit_optimized_pwr_state(
+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
+1 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
···
    }

    if (dc->debug.disable_mem_low_power) {
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->enable_memory_low_power)
-           dc->res_pool->dccg->funcs->enable_memory_low_power(dc->res_pool->dccg, false);
+       REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
    }
    if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
        dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+3 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
···

    if (!dc->debug.disable_clock_gate) {
        /* enable all DCN clock gating */
-       if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
-           dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

        REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
    }
-4
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
···
    void (*otg_drop_pixel)(struct dccg *dccg,
            uint32_t otg_inst);
    void (*dccg_init)(struct dccg *dccg);
-   void (*refclk_setup)(struct dccg *dccg); /* Deprecated - for backward compatibility only */
-   void (*allow_clock_gating)(struct dccg *dccg, bool allow);
-   void (*enable_memory_low_power)(struct dccg *dccg, bool enable);
    void (*set_dpstreamclk_root_clock_gating)(
        struct dccg *dccg,
        int dp_hpo_inst,
···
    void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
    void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
    void (*dccg_enable_global_fgcg)(struct dccg *dccg, bool enable);
-   bool (*is_s0i3_golden_init_wa_done)(struct dccg *dccg);
};

#endif //__DAL_DCCG_H__
+2 -1
drivers/gpu/drm/amd/display/dc/link/Makefile
···
# hwss
###############################################################################
LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \
-   link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o
+   link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o \
+   link_hwss_virtual.o

AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
    $(LINK_HWSS))
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 31 31 #include "resource.h" 32 32 #include "clk_mgr.h" 33 33 #include "include/irq_service_interface.h" 34 - #include "virtual/virtual_stream_encoder.h" 34 + #include "dio/virtual/virtual_stream_encoder.h" 35 35 #include "dce110/dce110_resource.h" 36 36 #include "dce110/dce110_timing_generator.h" 37 37 #include "irq/dce110/irq_service_dce110.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
··· 35 35 #include "dce112/dce112_resource.h" 36 36 37 37 #include "dce110/dce110_resource.h" 38 - #include "virtual/virtual_stream_encoder.h" 38 + #include "dio/virtual/virtual_stream_encoder.h" 39 39 #include "dce120/dce120_timing_generator.h" 40 40 #include "irq/dce120/irq_service_dce120.h" 41 41 #include "dce/dce_opp.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 48 48 #include "dce/dce_clock_source.h" 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 - #include "virtual/virtual_stream_encoder.h" 51 + #include "dio/virtual/virtual_stream_encoder.h" 52 52 #include "dce110/dce110_resource.h" 53 53 #include "dce112/dce112_resource.h" 54 54 #include "dcn10/dcn10_hubp.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 55 55 #include "dce/dce_clock_source.h" 56 56 #include "dce/dce_audio.h" 57 57 #include "dce/dce_hwseq.h" 58 - #include "virtual/virtual_stream_encoder.h" 58 + #include "dio/virtual/virtual_stream_encoder.h" 59 59 #include "dce110/dce110_resource.h" 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn20/dcn20_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
··· 51 51 #include "dce/dce_clock_source.h" 52 52 #include "dce/dce_audio.h" 53 53 #include "dce/dce_hwseq.h" 54 - #include "virtual/virtual_stream_encoder.h" 54 + #include "dio/virtual/virtual_stream_encoder.h" 55 55 #include "dce110/dce110_resource.h" 56 56 #include "dce/dce_aux.h" 57 57 #include "dce/dce_i2c.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 57 57 #include "dce/dce_clock_source.h" 58 58 #include "dce/dce_audio.h" 59 59 #include "dce/dce_hwseq.h" 60 - #include "virtual/virtual_stream_encoder.h" 60 + #include "dio/virtual/virtual_stream_encoder.h" 61 61 #include "dml/display_mode_vba.h" 62 62 #include "dcn20/dcn20_dccg.h" 63 63 #include "dcn21/dcn21_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 55 55 #include "dce/dce_audio.h" 56 56 #include "dce/dce_hwseq.h" 57 57 #include "clk_mgr.h" 58 - #include "virtual/virtual_stream_encoder.h" 58 + #include "dio/virtual/virtual_stream_encoder.h" 59 59 #include "dce110/dce110_resource.h" 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn30/dcn30_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 54 54 #include "dce/dce_audio.h" 55 55 #include "dce/dce_hwseq.h" 56 56 #include "clk_mgr.h" 57 - #include "virtual/virtual_stream_encoder.h" 57 + #include "dio/virtual/virtual_stream_encoder.h" 58 58 #include "dce110/dce110_resource.h" 59 59 #include "dml/display_mode_vba.h" 60 60 #include "dcn301/dcn301_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 64 64 #include "dce/dce_audio.h" 65 65 #include "dce/dce_hwseq.h" 66 66 #include "clk_mgr.h" 67 - #include "virtual/virtual_stream_encoder.h" 67 + #include "dio/virtual/virtual_stream_encoder.h" 68 68 #include "dce110/dce110_resource.h" 69 69 #include "dml/display_mode_vba.h" 70 70 #include "dml/dcn31/dcn31_fpu.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 66 66 #include "dce/dce_audio.h" 67 67 #include "dce/dce_hwseq.h" 68 68 #include "clk_mgr.h" 69 - #include "virtual/virtual_stream_encoder.h" 69 + #include "dio/virtual/virtual_stream_encoder.h" 70 70 #include "dce110/dce110_resource.h" 71 71 #include "dml/display_mode_vba.h" 72 72 #include "dml/dcn31/dcn31_fpu.h"
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 - #include "virtual/virtual_stream_encoder.h" 66 + #include "dio/virtual/virtual_stream_encoder.h" 67 67 #include "dce110/dce110_resource.h" 68 68 #include "dml/display_mode_vba.h" 69 69 #include "dml/dcn31/dcn31_fpu.h" ··· 1230 1230 /*PHYB is wired off in HW, allow front end to remapping, otherwise needs more changes*/ 1231 1231 1232 1232 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1233 - if (eng_id <= ENGINE_ID_DIGF) { 1234 - vpg_inst = eng_id; 1235 - afmt_inst = eng_id; 1236 - } else 1233 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1237 1234 return NULL; 1235 + 1236 + vpg_inst = eng_id; 1237 + afmt_inst = eng_id; 1238 1238 1239 1239 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1240 1240 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 - #include "virtual/virtual_stream_encoder.h" 66 + #include "dio/virtual/virtual_stream_encoder.h" 67 67 #include "dce110/dce110_resource.h" 68 68 #include "dml/display_mode_vba.h" 69 69 #include "dml/dcn31/dcn31_fpu.h" ··· 1223 1223 int afmt_inst; 1224 1224 1225 1225 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1226 - if (eng_id <= ENGINE_ID_DIGF) { 1227 - vpg_inst = eng_id; 1228 - afmt_inst = eng_id; 1229 - } else 1226 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1230 1227 return NULL; 1228 + 1229 + vpg_inst = eng_id; 1230 + afmt_inst = eng_id; 1231 1231 1232 1232 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1233 1233 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 65 65 #include "dce/dce_audio.h" 66 66 #include "dce/dce_hwseq.h" 67 67 #include "clk_mgr.h" 68 - #include "virtual/virtual_stream_encoder.h" 68 + #include "dio/virtual/virtual_stream_encoder.h" 69 69 #include "dml/display_mode_vba.h" 70 70 #include "dcn32/dcn32_dccg.h" 71 71 #include "dcn10/dcn10_resource.h" ··· 1211 1211 int afmt_inst; 1212 1212 1213 1213 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1214 - if (eng_id <= ENGINE_ID_DIGF) { 1215 - vpg_inst = eng_id; 1216 - afmt_inst = eng_id; 1217 - } else 1214 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1218 1215 return NULL; 1216 + 1217 + vpg_inst = eng_id; 1218 + afmt_inst = eng_id; 1219 1219 1220 1220 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1221 1221 vpg = dcn32_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 68 68 #include "dce/dce_audio.h" 69 69 #include "dce/dce_hwseq.h" 70 70 #include "clk_mgr.h" 71 - #include "virtual/virtual_stream_encoder.h" 71 + #include "dio/virtual/virtual_stream_encoder.h" 72 72 #include "dml/display_mode_vba.h" 73 73 #include "dcn32/dcn32_dccg.h" 74 74 #include "dcn10/dcn10_resource.h" ··· 1192 1192 int afmt_inst; 1193 1193 1194 1194 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1195 - if (eng_id <= ENGINE_ID_DIGF) { 1196 - vpg_inst = eng_id; 1197 - afmt_inst = eng_id; 1198 - } else 1195 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1199 1196 return NULL; 1197 + 1198 + vpg_inst = eng_id; 1199 + afmt_inst = eng_id; 1200 1200 1201 1201 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1202 1202 vpg = dcn321_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 70 70 #include "dce/dce_audio.h" 71 71 #include "dce/dce_hwseq.h" 72 72 #include "clk_mgr.h" 73 - #include "virtual/virtual_stream_encoder.h" 73 + #include "dio/virtual/virtual_stream_encoder.h" 74 74 #include "dce110/dce110_resource.h" 75 75 #include "dml/display_mode_vba.h" 76 76 #include "dcn35/dcn35_dccg.h" ··· 1274 1274 int afmt_inst; 1275 1275 1276 1276 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1277 - if (eng_id <= ENGINE_ID_DIGF) { 1278 - vpg_inst = eng_id; 1279 - afmt_inst = eng_id; 1280 - } else 1277 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1281 1278 return NULL; 1279 + 1280 + vpg_inst = eng_id; 1281 + afmt_inst = eng_id; 1282 1282 1283 1283 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1284 1284 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 - #include "virtual/virtual_stream_encoder.h" 52 + #include "dio/virtual/virtual_stream_encoder.h" 53 53 #include "dce110/dce110_resource.h" 54 54 #include "dml/display_mode_vba.h" 55 55 #include "dcn35/dcn35_dccg.h" ··· 1254 1254 int afmt_inst; 1255 1255 1256 1256 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1257 - if (eng_id <= ENGINE_ID_DIGF) { 1258 - vpg_inst = eng_id; 1259 - afmt_inst = eng_id; 1260 - } else 1257 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1261 1258 return NULL; 1259 + 1260 + vpg_inst = eng_id; 1261 + afmt_inst = eng_id; 1262 1262 1263 1263 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1264 1264 vpg = dcn31_vpg_create(ctx, vpg_inst);
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 - #include "virtual/virtual_stream_encoder.h" 52 + #include "dio/virtual/virtual_stream_encoder.h" 53 53 #include "dce110/dce110_resource.h" 54 54 #include "dml/display_mode_vba.h" 55 55 #include "dcn35/dcn35_dccg.h" ··· 775 775 }; 776 776 777 777 static const struct dc_check_config config_defaults = { 778 - .enable_legacy_fast_update = true, 778 + .enable_legacy_fast_update = false, 779 779 }; 780 780 781 781 static const struct dc_panel_config panel_config_defaults = {
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 47 47 #include "dce/dce_audio.h" 48 48 #include "dce/dce_hwseq.h" 49 49 #include "clk_mgr.h" 50 - #include "virtual/virtual_stream_encoder.h" 50 + #include "dio/virtual/virtual_stream_encoder.h" 51 51 #include "dml/display_mode_vba.h" 52 52 #include "dcn401/dcn401_dccg.h" 53 53 #include "dcn10/dcn10_resource.h"
-30
drivers/gpu/drm/amd/display/dc/virtual/Makefile
···
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for the virtual sub-component of DAL.
-# It provides the control and status of HW CRTC block.
-
-VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o virtual_link_hwss.o
-
-AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
-1
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c → drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
···
    kfree(enc);
    return NULL;
}
-
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h → drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.h
+15
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
···
     * DESC: Initiates IPS wake sequence.
     */
    DMUB_GPINT__IPS_DEBUG_WAKE = 137,
+   /**
+    * DESC: Do panel power off sequence
+    * ARGS: 1 - Power off
+    */
+   DMUB_GPINT__PANEL_POWER_OFF_SEQ = 138,
};

/**
···
enum dmub_cmd_panel_replay_state_update_subtype {
    PR_STATE_UPDATE_COASTING_VTOTAL = 0x1,
    PR_STATE_UPDATE_SYNC_MODE = 0x2,
+   PR_STATE_UPDATE_RUNTIME_FLAGS = 0x3,
};

enum dmub_cmd_panel_replay_general_subtype {
···
    struct dmub_cmd_pr_copy_settings_data data;
};

+union dmub_pr_runtime_flags {
+   struct {
+       uint32_t disable_abm_optimization : 1; // Disable ABM optimization for PR
+   } bitfields;
+   uint32_t u32All;
+};
+
struct dmub_cmd_pr_update_state_data {
    /**
     * Panel Instance.
···
     */
    uint32_t coasting_vtotal;
    uint32_t sync_mode;
+
+   union dmub_pr_runtime_flags pr_runtime_flags;
};

struct dmub_cmd_pr_general_cmd_data {
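
Note: dmub_pr_runtime_flags follows the usual DMUB convention of pairing named bitfields with a raw u32All view, so the driver can set individual flags while the whole word is copied into the firmware command buffer. A minimal sketch of the idiom (the reserved field and the demo are illustrative; bitfield ordering is implementation-defined, which is acceptable here because driver and firmware are built for the same ABI):

#include <stdint.h>
#include <stdio.h>

union pr_runtime_flags {
    struct {
        uint32_t disable_abm_optimization : 1;  /* field from the diff */
        uint32_t reserved : 31;                 /* pad to 32 bits */
    } bitfields;
    uint32_t u32All;    /* raw view for copying into a command */
};

int main(void)
{
    union pr_runtime_flags flags = { .u32All = 0 };

    flags.bitfields.disable_abm_optimization = 1;
    printf("raw word: 0x%08x\n", flags.u32All);
    return 0;
}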
+5
drivers/gpu/drm/amd/include/kgd_pp_interface.h
···
    struct gpu_metrics_attr metrics_attrs[];
};

+enum amdgpu_xgmi_link_status {
+   AMDGPU_XGMI_LINK_INACTIVE = 0,
+   AMDGPU_XGMI_LINK_ACTIVE = 1,
+};
+
#endif
+14 -15
drivers/gpu/drm/amd/pm/amdgpu_pm.c
···
    enum amd_pm_state_type state;
    int ret;

-   if (strncmp("battery", buf, strlen("battery")) == 0)
+   if (sysfs_streq(buf, "battery"))
        state = POWER_STATE_TYPE_BATTERY;
-   else if (strncmp("balanced", buf, strlen("balanced")) == 0)
+   else if (sysfs_streq(buf, "balanced"))
        state = POWER_STATE_TYPE_BALANCED;
-   else if (strncmp("performance", buf, strlen("performance")) == 0)
+   else if (sysfs_streq(buf, "performance"))
        state = POWER_STATE_TYPE_PERFORMANCE;
    else
        return -EINVAL;
···
    enum amd_dpm_forced_level level;
    int ret = 0;

-   if (strncmp("low", buf, strlen("low")) == 0) {
+   if (sysfs_streq(buf, "low"))
        level = AMD_DPM_FORCED_LEVEL_LOW;
-   } else if (strncmp("high", buf, strlen("high")) == 0) {
+   else if (sysfs_streq(buf, "high"))
        level = AMD_DPM_FORCED_LEVEL_HIGH;
-   } else if (strncmp("auto", buf, strlen("auto")) == 0) {
+   else if (sysfs_streq(buf, "auto"))
        level = AMD_DPM_FORCED_LEVEL_AUTO;
-   } else if (strncmp("manual", buf, strlen("manual")) == 0) {
+   else if (sysfs_streq(buf, "manual"))
        level = AMD_DPM_FORCED_LEVEL_MANUAL;
-   } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
+   else if (sysfs_streq(buf, "profile_exit"))
        level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
-   } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
+   else if (sysfs_streq(buf, "profile_standard"))
        level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
-   } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
+   else if (sysfs_streq(buf, "profile_min_sclk"))
        level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
-   } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
+   else if (sysfs_streq(buf, "profile_min_mclk"))
        level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
-   } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
+   else if (sysfs_streq(buf, "profile_peak"))
        level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-   } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+   else if (sysfs_streq(buf, "perf_determinism"))
        level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
-   } else {
+   else
        return -EINVAL;
-   }

    ret = amdgpu_pm_get_access(adev);
    if (ret < 0)
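
Note: strncmp("battery", buf, strlen("battery")) matches any input that merely starts with "battery", and sysfs writes normally arrive with a trailing newline, which the old code only tolerated as a side effect of the length-limited compare. sysfs_streq() handles both cases exactly. A userspace re-creation of its semantics for illustration (the real helper lives in the kernel's lib/string.c):

#include <stdbool.h>
#include <stdio.h>

/* equal if the strings match exactly, or match up to a single
 * trailing '\n' on either side (mirrors kernel sysfs_streq()) */
static bool sysfs_streq_demo(const char *s1, const char *s2)
{
    while (*s1 && *s1 == *s2) {
        s1++;
        s2++;
    }
    if (*s1 == *s2)
        return true;
    if (!*s1 && *s2 == '\n' && !s2[1])
        return true;
    if (*s1 == '\n' && !s1[1] && !*s2)
        return true;
    return false;
}

int main(void)
{
    printf("%d\n", sysfs_streq_demo("battery\n", "battery"));   /* 1 */
    printf("%d\n", sysfs_streq_demo("batteryfoo", "battery"));  /* 0 */
    return 0;
}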
+5
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 3464 3464 max_sclk = 60000; 3465 3465 max_mclk = 80000; 3466 3466 } 3467 + if ((adev->pdev->device == 0x666f) && 3468 + (adev->pdev->revision == 0x00)) { 3469 + max_sclk = 80000; 3470 + max_mclk = 95000; 3471 + } 3467 3472 } else if (adev->asic_type == CHIP_OLAND) { 3468 3473 if ((adev->pdev->revision == 0xC7) || 3469 3474 (adev->pdev->revision == 0x80) ||
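For context: these dpm tables store clocks in 10 kHz units, so the new cap for device 0x666f revision 0x00 works out to an 800 MHz engine clock (80000) and a 950 MHz memory clock (95000). The radeon copy of the same quirk appears further down.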
+3 -4
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 810 810 smu->adev = adev; 811 811 smu->pm_enabled = !!amdgpu_dpm; 812 812 smu->is_apu = false; 813 - smu->smu_baco.state = SMU_BACO_STATE_NONE; 813 + smu->smu_baco.state = SMU_BACO_STATE_EXIT; 814 814 smu->smu_baco.platform_support = false; 815 815 smu->smu_baco.maco_support = false; 816 816 smu->user_dpm_profile.fan_mode = -1; ··· 2120 2120 int ret = 0; 2121 2121 2122 2122 if ((!adev->in_runpm) && (!adev->in_suspend) && 2123 - (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == 2124 - IP_VERSION(13, 0, 10) && 2125 - !amdgpu_device_has_display_hardware(adev)) 2123 + (!amdgpu_in_reset(adev)) && !smu->is_apu && 2124 + amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(13, 0, 0)) 2126 2125 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); 2127 2126 2128 2127 return ret;
-1
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 536 536 enum smu_baco_state { 537 537 SMU_BACO_STATE_ENTER = 0, 538 538 SMU_BACO_STATE_EXIT, 539 - SMU_BACO_STATE_NONE, 540 539 }; 541 540 542 541 struct smu_baco_context {
-2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
··· 226 226 227 227 int smu_v15_0_set_gfx_power_up_by_imu(struct smu_context *smu); 228 228 229 - int smu_v15_0_set_default_dpm_tables(struct smu_context *smu); 230 - 231 229 int smu_v15_0_get_pptable_from_firmware(struct smu_context *smu, 232 230 void **table, 233 231 uint32_t *size,
+4 -19
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2615 2615 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) 2616 2616 { 2617 2617 struct amdgpu_device *adev = smu->adev; 2618 - u32 smu_version; 2619 - int ret; 2620 2618 2621 2619 /* SRIOV does not support SMU mode1 reset */ 2622 2620 if (amdgpu_sriov_vf(adev)) 2623 - return false; 2624 - 2625 - /* PMFW support is available since 78.41 */ 2626 - ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); 2627 - if (ret) 2628 - return false; 2629 - 2630 - if (smu_version < 0x004e2900) 2631 2621 return false; 2632 2622 2633 2623 return true; ··· 2768 2778 2769 2779 switch (mp1_state) { 2770 2780 case PP_MP1_STATE_UNLOAD: 2771 - ret = smu_cmn_send_smc_msg_with_param(smu, 2772 - SMU_MSG_PrepareMp1ForUnload, 2773 - 0x55, NULL); 2774 - 2775 - if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) 2776 - ret = smu_v13_0_disable_pmfw_state(smu); 2777 - 2781 + ret = smu_cmn_set_mp1_state(smu, mp1_state); 2778 2782 break; 2779 2783 default: 2780 2784 /* Ignore others */ ··· 2812 2828 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ 2813 2829 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param); 2814 2830 2815 - ret = smu_cmn_send_smc_msg_with_param(smu, 2816 - SMU_MSG_Mode1Reset, param, NULL); 2831 + ret = smu_cmn_send_debug_smc_msg_with_param(smu, 2832 + DEBUGSMC_MSG_Mode1Reset, param); 2833 + 2817 2834 break; 2818 2835 2819 2836 case IP_VERSION(13, 0, 10):
+3
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
··· 823 823 idx++; 824 824 } 825 825 826 + xcp_metrics->accumulation_counter = metrics->AccumulationCounter; 827 + xcp_metrics->firmware_timestamp = metrics->Timestamp; 828 + 826 829 return sizeof(*xcp_metrics); 827 830 } 828 831
+2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 2668 2668 idx++; 2669 2669 } 2670 2670 } 2671 + xcp_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version); 2672 + xcp_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); 2671 2673 2672 2674 return sizeof(*xcp_metrics); 2673 2675 }
+6 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
··· 140 140 SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ 141 141 SMU_MTYPE(U64), system_clock_counter); \ 142 142 SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ 143 - SMU_MTYPE(U32), accumulation_counter); \ 143 + SMU_MTYPE(U64), accumulation_counter); \ 144 144 SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ 145 145 SMU_MTYPE(U32), prochot_residency_acc); \ 146 146 SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ ··· 259 259 SMU_13_0_6_MAX_XCC); \ 260 260 SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ 261 261 SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ 262 - SMU_13_0_6_MAX_XCC); 262 + SMU_13_0_6_MAX_XCC); \ 263 + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ 264 + SMU_MTYPE(U64), accumulation_counter); \ 265 + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ 266 + SMU_MTYPE(U64), firmware_timestamp); 263 267 264 268 DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, 265 269 SMU_13_0_6_PARTITION_METRICS_FIELDS);
+43 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 74 74 75 75 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 76 76 77 + #define mmMP1_SMN_C2PMSG_75 0x028b 78 + #define mmMP1_SMN_C2PMSG_75_BASE_IDX 0 79 + 80 + #define mmMP1_SMN_C2PMSG_53 0x0275 81 + #define mmMP1_SMN_C2PMSG_53_BASE_IDX 0 82 + 83 + #define mmMP1_SMN_C2PMSG_54 0x0276 84 + #define mmMP1_SMN_C2PMSG_54_BASE_IDX 0 85 + 86 + #define DEBUGSMC_MSG_Mode1Reset 2 87 + 77 88 #define PP_OD_FEATURE_GFXCLK_FMIN 0 78 89 #define PP_OD_FEATURE_GFXCLK_FMAX 1 79 90 #define PP_OD_FEATURE_UCLK_FMIN 2 ··· 2745 2734 return ret; 2746 2735 } 2747 2736 2737 + static int smu_v13_0_7_mode1_reset(struct smu_context *smu) 2738 + { 2739 + int ret; 2740 + 2741 + ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); 2742 + if (!ret) { 2743 + /* disable mmio access while doing mode 1 reset*/ 2744 + smu->adev->no_hw_access = true; 2745 + /* ensure no_hw_access is globally visible before any MMIO */ 2746 + smp_mb(); 2747 + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 2748 + } 2749 + 2750 + return ret; 2751 + } 2752 + 2753 + static void smu_v13_0_7_init_msg_ctl(struct smu_context *smu) 2754 + { 2755 + struct amdgpu_device *adev = smu->adev; 2756 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 2757 + 2758 + smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); 2759 + 2760 + /* Set up debug mailbox registers */ 2761 + ctl->config.debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53); 2762 + ctl->config.debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75); 2763 + ctl->config.debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54); 2764 + ctl->flags |= SMU_MSG_CTL_DEBUG_MAILBOX; 2765 + } 2766 + 2748 2767 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { 2749 2768 .init_allowed_features = smu_v13_0_7_init_allowed_features, 2750 2769 .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, ··· 2836 2795 .baco_enter = smu_v13_0_baco_enter, 2837 2796 .baco_exit = smu_v13_0_baco_exit, 2838 2797 .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, 2839 - .mode1_reset = smu_v13_0_mode1_reset, 2798 + .mode1_reset = smu_v13_0_7_mode1_reset, 2840 2799 .set_mp1_state = smu_v13_0_7_set_mp1_state, 2841 2800 .set_df_cstate = smu_v13_0_7_set_df_cstate, 2842 2801 .gpo_control = smu_v13_0_gpo_control, ··· 2855 2814 smu->pwr_src_map = smu_v13_0_7_pwr_src_map; 2856 2815 smu->workload_map = smu_v13_0_7_workload_map; 2857 2816 smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; 2858 - smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); 2817 + smu_v13_0_7_init_msg_ctl(smu); 2859 2818 }
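The debug mailbox configured above is a second C2PMSG register triple: the argument goes in C2PMSG_53, the message index in C2PMSG_75, and the PMFW acknowledges through C2PMSG_54. A hedged sketch of that handshake using the soc15 MMIO helpers; the driver actually goes through ctl->ops->send_msg(), and the poll loop here is purely illustrative:

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_54, 0);       /* clear response */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_53, param);   /* message argument */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_75, DEBUGSMC_MSG_Mode1Reset);
	while (!RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_54))  /* wait for PMFW ack */
		udelay(10);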
-8
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
··· 1726 1726 return ret; 1727 1727 } 1728 1728 1729 - int smu_v15_0_set_default_dpm_tables(struct smu_context *smu) 1730 - { 1731 - struct smu_table_context *smu_table = &smu->smu_table; 1732 - 1733 - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 1734 - smu_table->clocks_table, false); 1735 - } 1736 - 1737 1729 int smu_v15_0_od_edit_dpm_table(struct smu_context *smu, 1738 1730 enum PP_OD_DPM_TABLE_COMMAND type, 1739 1731 long input[], uint32_t size)
+137 -28
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
··· 52 52 #define mmMP1_SMN_C2PMSG_32 0x0060 53 53 #define mmMP1_SMN_C2PMSG_32_BASE_IDX 1 54 54 55 - /* MALLPowerController message arguments (Defines for the Cache mode control) */ 56 - #define SMU_MALL_PMFW_CONTROL 0 57 - #define SMU_MALL_DRIVER_CONTROL 1 55 + #define mmMP1_SMN_C2PMSG_33 0x0061 56 + #define mmMP1_SMN_C2PMSG_33_BASE_IDX 1 58 57 59 - /* 60 - * MALLPowerState message arguments 61 - * (Defines for the Allocate/Release Cache mode if in driver mode) 62 - */ 63 - #define SMU_MALL_EXIT_PG 0 64 - #define SMU_MALL_ENTER_PG 1 65 - 66 - #define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON 58 + #define mmMP1_SMN_C2PMSG_34 0x0062 59 + #define mmMP1_SMN_C2PMSG_34_BASE_IDX 1 67 60 68 61 #define SMU_15_0_UMD_PSTATE_GFXCLK 700 69 62 #define SMU_15_0_UMD_PSTATE_SOCCLK 678
··· 233 240 return ret; 234 241 } 235 242 243 + static int smu_v15_0_0_update_table(struct smu_context *smu, 244 + enum smu_table_id table_index, 245 + int argument, 246 + void *table_data, 247 + bool drv2smu) 248 + { 249 + struct smu_table_context *smu_table = &smu->smu_table; 250 + struct amdgpu_device *adev = smu->adev; 251 + struct smu_table *table = &smu_table->driver_table; 252 + int table_id = smu_cmn_to_asic_specific_index(smu, 253 + CMN2ASIC_MAPPING_TABLE, 254 + table_index); 255 + uint64_t address; 256 + uint32_t table_size; 257 + int ret; 258 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 259 + 260 + if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) 261 + return -EINVAL; 262 + 263 + table_size = smu_table->tables[table_index].size; 264 + 265 + if (drv2smu) { 266 + memcpy(table->cpu_addr, table_data, table_size); 267 + /* 268 + * Flush the HDP cache to ensure the content seen by 269 + * the GPU is consistent with the CPU. 270 + */ 271 + amdgpu_hdp_flush(adev, NULL); 272 + } 273 + 274 + address = table->mc_address; 275 + 276 + struct smu_msg_args args = { 277 + .msg = drv2smu ? 278 + SMU_MSG_TransferTableDram2Smu : 279 + SMU_MSG_TransferTableSmu2Dram, 280 + .num_args = 3, 281 + .num_out_args = 0, 282 + }; 283 + 284 + args.args[0] = table_id; 285 + args.args[1] = (uint32_t)lower_32_bits(address); 286 + args.args[2] = (uint32_t)upper_32_bits(address); 287 + 288 + ret = ctl->ops->send_msg(ctl, &args); 289 + 290 + if (ret) 291 + return ret; 292 + 293 + if (!drv2smu) { 294 + amdgpu_hdp_invalidate(adev, NULL); 295 + memcpy(table_data, table->cpu_addr, table_size); 296 + } 297 + 298 + return 0; 299 + } 300 + 301 + static int smu_v15_0_0_set_default_dpm_tables(struct smu_context *smu) 302 + { 303 + struct smu_table_context *smu_table = &smu->smu_table; 304 + 305 + return smu_v15_0_0_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 306 + smu_table->clocks_table, false); 307 + } 308 + 309 + static int smu_v15_0_0_get_metrics_table(struct smu_context *smu, 310 + void *metrics_table, 311 + bool bypass_cache) 312 + { 313 + struct smu_table_context *smu_table = &smu->smu_table; 314 + uint32_t table_size = 315 + smu_table->tables[SMU_TABLE_SMU_METRICS].size; 316 + int ret; 317 + 318 + if (bypass_cache || 319 + !smu_table->metrics_time || 320 + time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { 321 + ret = smu_v15_0_0_update_table(smu, 322 + SMU_TABLE_SMU_METRICS, 323 + 0, 324 + smu_table->metrics_table, 325 + false); 326 + if (ret) { 327 + dev_info(smu->adev->dev, "Failed to export SMU15_0_0 metrics table!\n"); 328 + return ret; 329 + } 330 + smu_table->metrics_time = jiffies; 331 + } 332 + 333 + if (metrics_table) 334 + memcpy(metrics_table, smu_table->metrics_table, table_size); 335 + 336 + return 0; 337 + } 338 + 236 339 static int smu_v15_0_0_get_smu_metrics_data(struct smu_context *smu, 237 340 MetricsMember_t member, 238 341 uint32_t *value)
··· 338 249 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 339 250 int ret = 0; 340 251 341 - ret = smu_cmn_get_metrics_table(smu, NULL, false); 252 + ret = smu_v15_0_0_get_metrics_table(smu, NULL, false); 342 253 if (ret) 343 254 return ret; 344 255
··· 532 443 return ret; 533 444 } 534 445 446 + static int smu_v15_0_0_get_enabled_mask(struct smu_context *smu, 447 + struct smu_feature_bits *feature_mask) 448 + { 449 + int ret; 450 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 451 + 452 + if (!feature_mask) 453 + return -EINVAL; 454 + 455 + struct smu_msg_args args = { 456 + .msg = SMU_MSG_GetEnabledSmuFeatures, 457 + .num_args = 0, 458 + .num_out_args = 2, 459 + }; 460 + 461 + ret = ctl->ops->send_msg(ctl, &args); 462 + 463 + if (!ret) 464 + smu_feature_bits_from_arr32(feature_mask, args.out_args, 465 + SMU_FEATURE_NUM_DEFAULT); 466 + 467 + return ret; 468 + } 469 + 535 470 static bool smu_v15_0_0_is_dpm_running(struct smu_context *smu) 536 471 { 537 472 int ret = 0; 538 473 struct smu_feature_bits feature_enabled; 539 474 540 - ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 475 + ret = smu_v15_0_0_get_enabled_mask(smu, &feature_enabled); 541 476 542 477 if (ret) 543 478 return false;
··· 637 524 SmuMetrics_t metrics; 638 525 int ret = 0; 639 526 640 - ret = smu_cmn_get_metrics_table(smu, &metrics, true); 527 + ret = smu_v15_0_0_get_metrics_table(smu, &metrics, false); 641 528 if (ret) 642 529 return ret; 643 530
··· 1092 979 switch (clk_type) { 1093 980 case SMU_GFXCLK: 1094 981 case SMU_SCLK: 1095 - msg_set_min = SMU_MSG_SetHardMinGfxClk; 982 + msg_set_min = SMU_MSG_SetSoftMinGfxclk; 1096 983 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 1097 984 break; 1098 985 case SMU_FCLK: 1099 - msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 986 + msg_set_min = SMU_MSG_SetSoftMinFclk; 1100 987 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 1101 988 break; 1102 989 case SMU_SOCCLK: 1103 - msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 990 + msg_set_min = SMU_MSG_SetSoftMinSocclkByFreq; 1104 991 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 1105 992 break; 1106 993 case SMU_VCLK: 1107 994 case SMU_DCLK: 1108 - msg_set_min = SMU_MSG_SetHardMinVcn0; 1109 - msg_set_max = SMU_MSG_SetSoftMaxVcn0; 1110 - break; 1111 - case SMU_VCLK1: 1112 - case SMU_DCLK1: 1113 - msg_set_min = SMU_MSG_SetHardMinVcn1; 1114 - msg_set_max = SMU_MSG_SetSoftMaxVcn1; 995 + msg_set_min = SMU_MSG_SetSoftMinVcn; 996 + msg_set_max = SMU_MSG_SetSoftMaxVcn; 1115 997 break; 1116 998 default: 1117 999 return -EINVAL;
··· 1423 1315 .system_features_control = smu_v15_0_0_system_features_control, 1424 1316 .dpm_set_vcn_enable = smu_v15_0_set_vcn_enable, 1425 1317 .dpm_set_jpeg_enable = smu_v15_0_set_jpeg_enable, 1426 - .set_default_dpm_table = smu_v15_0_set_default_dpm_tables, 1318 + .set_default_dpm_table = smu_v15_0_0_set_default_dpm_tables, 1427 1319 .read_sensor = smu_v15_0_0_read_sensor, 1428 1320 .is_dpm_running = smu_v15_0_0_is_dpm_running, 1429 1321 .set_watermarks_table = smu_v15_0_0_set_watermarks_table, 1430 1322 .get_gpu_metrics = smu_v15_0_0_get_gpu_metrics, 1431 - .get_enabled_mask = smu_cmn_get_enabled_mask, 1323 + .get_enabled_mask = smu_v15_0_0_get_enabled_mask, 1432 1324 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1433 - .set_driver_table_location = smu_v15_0_set_driver_table_location, 1434 1325 .gfx_off_control = smu_v15_0_gfx_off_control, 1435 1326 .mode2_reset = smu_v15_0_0_mode2_reset, 1436 1327 .get_dpm_ultimate_freq = smu_v15_0_common_get_dpm_ultimate_freq,
··· 1454 1347 ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_30); 1455 1348 ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_31); 1456 1349 ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_32); 1457 - ctl->config.num_arg_regs = 1; 1350 + ctl->config.arg_regs[1] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_33); 1351 + ctl->config.arg_regs[2] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_34); 1352 + ctl->config.num_arg_regs = 3; 1458 1353 ctl->ops = &smu_msg_v1_ops; 1459 1354 ctl->default_timeout = adev->usec_timeout * 20; 1460 1355 ctl->message_map = smu_v15_0_0_message_map;
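Usage note on the new cached metrics reader: bypass_cache=false tolerates a snapshot up to roughly 1 ms old, while true forces a fresh transfer from the PMFW. A short sketch using only the functions from this diff:

	SmuMetrics_t metrics;
	int ret;

	/* sensor paths can live with the ~1 ms cache */
	ret = smu_v15_0_0_get_metrics_table(smu, &metrics, false);

	/* force a fresh DRAM transfer when staleness matters */
	if (!ret)
		ret = smu_v15_0_0_get_metrics_table(smu, &metrics, true);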
+5
drivers/gpu/drm/radeon/si_dpm.c
··· 2925 2925 max_sclk = 60000; 2926 2926 max_mclk = 80000; 2927 2927 } 2928 + if ((rdev->pdev->device == 0x666f) && 2929 + (rdev->pdev->revision == 0x00)) { 2930 + max_sclk = 80000; 2931 + max_mclk = 95000; 2932 + } 2928 2933 } else if (rdev->family == CHIP_OLAND) { 2929 2934 if ((rdev->pdev->revision == 0xC7) || 2930 2935 (rdev->pdev->revision == 0x80) ||
+1
include/uapi/drm/amdgpu_drm.h
··· 1667 1667 #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ 1668 1668 #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ 1669 1669 #define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ 1670 + #define AMDGPU_FAMILY_GC_11_5_4 154 /* GC 11.5.4 */ 1670 1671 #define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */ 1671 1672 1672 1673 #if defined(__cplusplus)