Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-next-2026-02-21' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"This is the fixes and cleanups for the end of the merge window, it's
nearly all amdgpu, with some amdkfd, then a pagemap core fix, i915/xe
display fixes, and some xe driver fixes.

Nothing seems out of the ordinary, except amdgpu has a little higher
volume than usual.

pagemap:
- drm/pagemap: pass pagemap_addr by reference

amdgpu:
- DML 2.1 fixes
- Panel replay fixes
- Display writeback fixes
- MES 11 old firmware compat fix
- DC CRC improvements
- DPIA fixes
- XGMI fixes
- ASPM fix
- SMU feature bit handling fixes
- DC LUT fixes
- RAS fixes
- Misc memory leak in error path fixes
- SDMA queue reset fixes
- PG handling fixes
- 5 level GPUVM page table fix
- SR-IOV fix
- Queue reset fix
- SMU 13.x fixes
- DC resume lag fix
- MPO fixes
- DCN 3.6 fix
- VSDB fixes
- HWSS clean up
- Replay fixes
- DCE cursor fixes
- DCN 3.5 SR DDR5 latency fixes
- HPD fixes
- Error path unwind fixes
- SMU13/14 mode1 reset fixes
- PSP 15 updates
- SMU 15 updates
- Sync fix in amdgpu_dma_buf_move_notify()
- HAINAN fix
- PSP 13.x fix
- GPUVM locking fix
- Fixes for DC analog support
- DC FAMS fixes
- DML 2.1 fixes
- eDP fixes
- Misc DC fixes
- Fastboot fix
- 3DLUT fixes
- GPUVM fixes
- 64bpp format fix
- Fix for MacBooks with switchable gfx

amdkfd:
- Fix possible double deletion of validate list
- Event setup fix
- Device disconnect regression fix
- APU GTT as VRAM fix
- Fix priority inversion with MQDs
- NULL check fix

radeon:
- HAINAN fix

i915/xe display:
- Regression fix for HDR 4k displays (#15503)
- Fixup for Dell XPS 13 7390 eDP rate limit
- Memory leak fix on ACPI _DSM handling
- Add missing slice count check during DP mode validation

xe:
- drm/xe: Prevent VFs from exposing the CCS mode sysfs file
- SRIOV related fixes
- PAT cache fix
- MMIO read fix
- W/a fixes
- Adjust type of xe_modparam.force_vram_bar_size
- Wedge mode fix
- HWMon fix

* tag 'drm-next-2026-02-21' of https://gitlab.freedesktop.org/drm/kernel: (143 commits)
drm/amd/display: Remove unneeded DAC link encoder register
drm/amd/display: Enable DAC in DCE link encoder
drm/amd/display: Set CRTC source for DAC using registers
drm/amd/display: Initialize DAC in DCE link encoder using VBIOS
drm/amd/display: Turn off DAC in DCE link encoder using VBIOS
drm/amd/display: Don't call find_analog_engine() twice
drm/amdgpu: fix 4-level paging if GMC supports 57-bit VA v2
drm/amdgpu: keep vga memory on MacBooks with switchable graphics
drm/amdgpu: Set atomics to true for xgmi
drm/amdkfd: Check for NULL return values
drm/amd/display: Use same max plane scaling limits for all 64 bpp formats
drm/amdgpu: Set vmid0 PAGE_TABLE_DEPTH for GFX12.1
drm/amdkfd: Disable MQD queue priority
drm/amd/display: Remove conditional for shaper 3DLUT power-on
drm/amd/display: Check return of shaper curve to HW format
drm/amd/display: Correct logic check error for fastboot
drm/amd/display: Skip eDP detection when no sink
Revert "drm/amd/display: Add Gfx Base Case For Linear Tiling Handling"
Revert "drm/amd/display: Correct hubp GfxVersion verification"
Revert "drm/amd/display: Add Handling for gfxversion DcGfxBase"
...

+2120 -950
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
··· 1186 1186 if (!dev_info) 1187 1187 ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf); 1188 1188 1189 - if (ret == -ENOMEM) 1189 + if (ret == -ENOMEM) { 1190 + kfree(xcc_info); 1190 1191 return ret; 1192 + } 1191 1193 1192 1194 if (!dev_info) { 1193 1195 kfree(xcc_info);
+3 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 317 317 void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev) 318 318 { 319 319 if (amdgpu_device_should_recover_gpu(adev)) 320 - amdgpu_reset_domain_schedule(adev->reset_domain, 321 - &adev->kfd.reset_work); 320 + (void)amdgpu_reset_domain_schedule(adev->reset_domain, &adev->kfd.reset_work); 322 321 } 323 322 324 323 int amdgpu_amdkfd_alloc_kernel_mem(struct amdgpu_device *adev, size_t size, ··· 719 720 if (gfx_block != NULL) 720 721 gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state); 721 722 } 722 - amdgpu_dpm_switch_power_profile(adev, 723 - PP_SMC_POWER_PROFILE_COMPUTE, 724 - !idle); 723 + (void)amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); 724 + 725 725 } 726 726 727 727 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
··· 107 107 { 108 108 struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f); 109 109 110 - return fence->timeline_name; 110 + return fence ? fence->timeline_name : NULL; 111 111 } 112 112 113 113 /**
+13 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 878 878 struct amdgpu_bo *bo[2] = {NULL, NULL}; 879 879 struct amdgpu_bo_va *bo_va; 880 880 bool same_hive = false; 881 + struct drm_exec exec; 881 882 int i, ret; 882 883 883 884 if (!va) { ··· 959 958 goto unwind; 960 959 } 961 960 962 - /* Add BO to VM internal data structures */ 963 - ret = amdgpu_bo_reserve(bo[i], false); 964 - if (ret) { 965 - pr_debug("Unable to reserve BO during memory attach"); 966 - goto unwind; 961 + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 962 + drm_exec_until_all_locked(&exec) { 963 + ret = amdgpu_vm_lock_pd(vm, &exec, 0); 964 + drm_exec_retry_on_contention(&exec); 965 + if (unlikely(ret)) 966 + goto unwind; 967 + ret = drm_exec_lock_obj(&exec, &bo[i]->tbo.base); 968 + drm_exec_retry_on_contention(&exec); 969 + if (unlikely(ret)) 970 + goto unwind; 967 971 } 972 + 968 973 bo_va = amdgpu_vm_bo_find(vm, bo[i]); 969 974 if (!bo_va) 970 975 bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); 971 976 else 972 977 ++bo_va->ref_count; 973 978 attachment[i]->bo_va = bo_va; 974 - amdgpu_bo_unreserve(bo[i]); 979 + drm_exec_fini(&exec); 975 980 if (unlikely(!attachment[i]->bo_va)) { 976 981 ret = -ENOMEM; 977 982 pr_err("Failed to add BO object to VM. ret == %d\n",
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 892 892 struct amdgpu_bo *bo = e->bo; 893 893 894 894 e->range = amdgpu_hmm_range_alloc(NULL); 895 - if (unlikely(!e->range)) 896 - return -ENOMEM; 895 + if (unlikely(!e->range)) { 896 + r = -ENOMEM; 897 + goto out_free_user_pages; 898 + } 897 899 898 900 r = amdgpu_ttm_tt_get_user_pages(bo, e->range); 899 901 if (r)
+15 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 3504 3504 } 3505 3505 } 3506 3506 3507 - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 3508 - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 3509 - 3510 3507 amdgpu_amdkfd_suspend(adev, true); 3511 3508 amdgpu_amdkfd_teardown_processes(adev); 3512 3509 amdgpu_userq_suspend(adev); ··· 4615 4618 /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a 4616 4619 * internal path natively support atomics, set have_atomics_support to true. 4617 4620 */ 4618 - } else if ((adev->flags & AMD_IS_APU) && 4619 - (amdgpu_ip_version(adev, GC_HWIP, 0) > 4620 - IP_VERSION(9, 0, 0))) { 4621 + } else if ((adev->flags & AMD_IS_APU && 4622 + amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) || 4623 + (adev->gmc.xgmi.connected_to_cpu && 4624 + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) { 4621 4625 adev->have_atomics_support = true; 4622 4626 } else { 4623 4627 adev->have_atomics_support = ··· 4653 4655 dev_info(adev->dev, "Pending hive reset.\n"); 4654 4656 amdgpu_set_init_level(adev, 4655 4657 AMDGPU_INIT_LEVEL_MINIMAL_XGMI); 4656 - } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && 4657 - !amdgpu_device_has_display_hardware(adev)) { 4658 - r = psp_gpu_reset(adev); 4659 4658 } else { 4660 4659 tmp = amdgpu_reset_method; 4661 4660 /* It should do a default reset when loading or reloading the driver, ··· 4897 4902 amdgpu_virt_fini_data_exchange(adev); 4898 4903 } 4899 4904 4905 + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 4906 + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 4907 + 4900 4908 /* disable all interrupts */ 4901 4909 amdgpu_irq_disable_all(adev); 4902 4910 if (adev->mode_info.mode_config_initialized) { ··· 4922 4924 * before ip_fini_early to prevent kfd locking refcount issues by calling 4923 4925 * amdgpu_amdkfd_suspend() 4924 4926 */ 4925 - if (drm_dev_is_unplugged(adev_to_drm(adev))) 4927 + if (pci_dev_is_disconnected(adev->pdev)) 4926 4928 
amdgpu_amdkfd_device_fini_sw(adev); 4927 4929 4928 4930 amdgpu_device_ip_fini_early(adev); ··· 4934 4936 4935 4937 amdgpu_gart_dummy_page_fini(adev); 4936 4938 4937 - if (drm_dev_is_unplugged(adev_to_drm(adev))) 4939 + if (pci_dev_is_disconnected(adev->pdev)) 4938 4940 amdgpu_device_unmap_mmio(adev); 4939 4941 4940 4942 } ··· 5730 5732 5731 5733 /* enable mmio access after mode 1 reset completed */ 5732 5734 adev->no_hw_access = false; 5735 + 5736 + /* ensure no_hw_access is updated before we access hw */ 5737 + smp_mb(); 5733 5738 5734 5739 amdgpu_device_load_pci_state(adev->pdev); 5735 5740 ret = amdgpu_psp_wait_for_bootloader(adev); ··· 7357 7356 7358 7357 amdgpu_xcp_dev_unplug(adev); 7359 7358 drm_dev_unplug(ddev); 7359 + 7360 + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 7361 + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 7360 7362 7361 7363 amdgpu_irq_disable_all(adev); 7362 7364
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 2164 2164 case IP_VERSION(13, 0, 11): 2165 2165 case IP_VERSION(13, 0, 12): 2166 2166 case IP_VERSION(13, 0, 14): 2167 + case IP_VERSION(13, 0, 15): 2167 2168 case IP_VERSION(14, 0, 0): 2168 2169 case IP_VERSION(14, 0, 1): 2169 2170 case IP_VERSION(14, 0, 4): ··· 2989 2988 case IP_VERSION(11, 5, 1): 2990 2989 case IP_VERSION(11, 5, 2): 2991 2990 case IP_VERSION(11, 5, 3): 2992 - case IP_VERSION(11, 5, 4): 2993 2991 adev->family = AMDGPU_FAMILY_GC_11_5_0; 2992 + break; 2993 + case IP_VERSION(11, 5, 4): 2994 + adev->family = AMDGPU_FAMILY_GC_11_5_4; 2994 2995 break; 2995 2996 case IP_VERSION(12, 0, 0): 2996 2997 case IP_VERSION(12, 0, 1):
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 514 514 r = dma_resv_reserve_fences(resv, 2); 515 515 if (!r) 516 516 r = amdgpu_vm_clear_freed(adev, vm, NULL); 517 + 518 + /* Don't pass 'ticket' to amdgpu_vm_handle_moved: we want the clear=true 519 + * path to be used otherwise we might update the PT of another process 520 + * while it's using the BO. 521 + * With clear=true, amdgpu_vm_bo_update will sync to command submission 522 + * from the same VM. 523 + */ 517 524 if (!r) 518 - r = amdgpu_vm_handle_moved(adev, vm, ticket); 525 + r = amdgpu_vm_handle_moved(adev, vm, NULL); 519 526 520 527 if (r && r != -EBUSY) 521 528 DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+17 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 232 232 struct amdgpu_vm *vm = &fpriv->vm; 233 233 struct amdgpu_bo_va *bo_va; 234 234 struct mm_struct *mm; 235 + struct drm_exec exec; 235 236 int r; 236 237 237 238 mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); ··· 243 242 !amdgpu_vm_is_bo_always_valid(vm, abo)) 244 243 return -EPERM; 245 244 246 - r = amdgpu_bo_reserve(abo, false); 247 - if (r) 248 - return r; 245 + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 246 + drm_exec_until_all_locked(&exec) { 247 + r = drm_exec_prepare_obj(&exec, &abo->tbo.base, 1); 248 + drm_exec_retry_on_contention(&exec); 249 + if (unlikely(r)) 250 + goto out_unlock; 251 + 252 + r = amdgpu_vm_lock_pd(vm, &exec, 0); 253 + drm_exec_retry_on_contention(&exec); 254 + if (unlikely(r)) 255 + goto out_unlock; 256 + } 249 257 250 258 amdgpu_vm_bo_update_shared(abo); 251 259 bo_va = amdgpu_vm_bo_find(vm, abo); ··· 270 260 amdgpu_bo_unreserve(abo); 271 261 return r; 272 262 } 273 - 274 - amdgpu_bo_unreserve(abo); 263 + drm_exec_fini(&exec); 275 264 276 265 /* Validate and add eviction fence to DMABuf imports with dynamic 277 266 * attachment in compute VMs. Re-validation will be done by ··· 303 294 } 304 295 } 305 296 mutex_unlock(&vm->process_info->lock); 297 + return r; 306 298 299 + out_unlock: 300 + drm_exec_fini(&exec); 307 301 return r; 308 302 } 309 303
+11 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 1068 1068 case CHIP_RENOIR: 1069 1069 adev->mman.keep_stolen_vga_memory = true; 1070 1070 break; 1071 + case CHIP_POLARIS10: 1072 + case CHIP_POLARIS11: 1073 + case CHIP_POLARIS12: 1074 + /* MacBookPros with switchable graphics put VRAM at 0 when 1075 + * the iGPU is enabled which results in cursor issues if 1076 + * the cursor ends up at 0. Reserve vram at 0 in that case. 1077 + */ 1078 + if (adev->gmc.vram_start == 0) 1079 + adev->mman.keep_stolen_vga_memory = true; 1080 + break; 1071 1081 default: 1072 1082 adev->mman.keep_stolen_vga_memory = false; 1073 1083 break; ··· 1446 1436 if (!*exp_ranges) 1447 1437 *exp_ranges = range_cnt; 1448 1438 err: 1449 - kfree(ranges); 1439 + kvfree(ranges); 1450 1440 1451 1441 return ret; 1452 1442 }
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 33 33 #include "amdgpu_ras.h" 34 34 35 35 /* VA hole for 48bit and 57bit addresses */ 36 - #define AMDGPU_GMC_HOLE_START (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 36 + #define AMDGPU_GMC_HOLE_START (adev->vm_manager.max_level == 4 ?\ 37 37 0x0100000000000000ULL : 0x0000800000000000ULL) 38 - #define AMDGPU_GMC_HOLE_END (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 38 + #define AMDGPU_GMC_HOLE_END (adev->vm_manager.max_level == 4 ?\ 39 39 0xff00000000000000ULL : 0xffff800000000000ULL) 40 40 41 41 /* ··· 45 45 * This mask is used to remove the upper 16bits of the VA and so come up with 46 46 * the linear addr value. 47 47 */ 48 - #define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.root_level == AMDGPU_VM_PDB3 ?\ 49 - 0x00ffffffffffffffULL : 0x0000ffffffffffffULL) 48 + #define AMDGPU_GMC_HOLE_MASK (adev->vm_manager.max_level == 4 ?\ 49 + 0x01ffffffffffffffULL : 0x0000ffffffffffffULL) 50 50 51 51 /* 52 52 * Ring size as power of two for the log of recent faults.
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 229 229 r = amdgpu_vm_flush(ring, job, need_pipe_sync); 230 230 if (r) { 231 231 amdgpu_ring_undo(ring); 232 - return r; 232 + goto free_fence; 233 233 } 234 234 } 235 235
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 92 92 struct drm_wedge_task_info *info = NULL; 93 93 struct amdgpu_task_info *ti = NULL; 94 94 struct amdgpu_device *adev = ring->adev; 95 + enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; 95 96 int idx, r; 96 97 97 98 if (!drm_dev_enter(adev_to_drm(adev), &idx)) { ··· 136 135 ring->funcs->reset) { 137 136 dev_err(adev->dev, "Starting %s ring reset\n", 138 137 s_job->sched->name); 138 + /* Stop the scheduler to prevent anybody else from touching the ring buffer. */ 139 + drm_sched_wqueue_stop(&ring->sched); 139 140 r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); 140 141 if (!r) { 142 + /* Start the scheduler again */ 143 + drm_sched_wqueue_start(&ring->sched); 141 144 atomic_inc(&ring->adev->gpu_reset_counter); 142 145 dev_err(adev->dev, "Ring %s reset succeeded\n", 143 146 ring->sched.name); 144 147 drm_dev_wedged_event(adev_to_drm(adev), 145 148 DRM_WEDGE_RECOVERY_NONE, info); 149 + /* This is needed to add the job back to the pending list */ 150 + status = DRM_GPU_SCHED_STAT_NO_HANG; 146 151 goto exit; 147 152 } 148 153 dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name); ··· 184 177 exit: 185 178 amdgpu_vm_put_task_info(ti); 186 179 drm_dev_exit(idx); 187 - return DRM_GPU_SCHED_STAT_RESET; 180 + return status; 188 181 } 189 182 190 183 int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+10
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 1445 1445 { 1446 1446 struct amdgpu_device *adev = drm_to_adev(dev); 1447 1447 struct amdgpu_fpriv *fpriv; 1448 + struct drm_exec exec; 1448 1449 int r, pasid; 1449 1450 1450 1451 /* Ensure IB tests are run on ring */ ··· 1485 1484 if (r) 1486 1485 goto error_pasid; 1487 1486 1487 + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); 1488 + drm_exec_until_all_locked(&exec) { 1489 + r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0); 1490 + drm_exec_retry_on_contention(&exec); 1491 + if (unlikely(r)) 1492 + goto error_vm; 1493 + } 1494 + 1488 1495 fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); 1496 + drm_exec_fini(&exec); 1489 1497 if (!fpriv->prt_va) { 1490 1498 r = -ENOMEM; 1491 1499 goto error_vm;
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 148 148 break; 149 149 case IP_VERSION(13, 0, 6): 150 150 case IP_VERSION(13, 0, 14): 151 + case IP_VERSION(13, 0, 15): 151 152 ret = psp_init_cap_microcode(psp, ucode_prefix); 152 153 ret &= psp_init_ta_microcode(psp, ucode_prefix); 153 154 break; ··· 220 219 psp->autoload_supported = false; 221 220 break; 222 221 case IP_VERSION(13, 0, 12): 222 + case IP_VERSION(13, 0, 15): 223 223 psp_v13_0_set_psp_funcs(psp); 224 224 psp->autoload_supported = false; 225 225 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); ··· 385 383 386 384 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 387 385 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 388 - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) 386 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || 387 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) 389 388 return false; 390 389 391 390 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; ··· 738 735 ras_intr = amdgpu_ras_intr_triggered(); 739 736 if (ras_intr) 740 737 break; 741 - usleep_range(10, 100); 738 + usleep_range(60, 100); 742 739 amdgpu_device_invalidate_hdp(psp->adev, NULL); 743 740 } 744 741
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 4352 4352 * to handle fatal error */ 4353 4353 r = amdgpu_nbio_ras_sw_init(adev); 4354 4354 if (r) 4355 - return r; 4355 + goto release_con; 4356 4356 4357 4357 if (adev->nbio.ras && 4358 4358 adev->nbio.ras->init_ras_controller_interrupt) { ··· 4649 4649 } else 4650 4650 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 4651 4651 } 4652 + 4653 + amdgpu_ras_check_bad_page_status(adev); 4652 4654 4653 4655 return 0; 4654 4656 }
+26 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 1701 1701 } 1702 1702 1703 1703 res = __verify_ras_table_checksum(control); 1704 - if (res) 1704 + if (res) { 1705 1705 dev_err(adev->dev, 1706 1706 "RAS table incorrect checksum or error:%d\n", 1707 1707 res); 1708 + return -EINVAL; 1709 + } 1708 1710 1709 1711 /* Warn if we are at 90% of the threshold or above 1710 1712 */ ··· 1714 1712 dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", 1715 1713 control->ras_num_bad_pages, 1716 1714 ras->bad_page_cnt_threshold); 1717 - if (amdgpu_bad_page_threshold != 0 && 1718 - control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) 1719 - amdgpu_dpm_send_rma_reason(adev); 1720 - 1721 1715 } else if (hdr->header == RAS_TABLE_HDR_BAD && 1722 1716 amdgpu_bad_page_threshold != 0) { 1723 1717 if (hdr->version >= RAS_TABLE_VER_V2_1) { ··· 1929 1931 return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev, 1930 1932 result); 1931 1933 return -EOPNOTSUPP; 1934 + } 1935 + 1936 + void amdgpu_ras_check_bad_page_status(struct amdgpu_device *adev) 1937 + { 1938 + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1939 + struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL; 1940 + 1941 + if (!control || amdgpu_bad_page_threshold == 0) 1942 + return; 1943 + 1944 + if (control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) { 1945 + if (amdgpu_dpm_send_rma_reason(adev)) 1946 + dev_warn(adev->dev, "Unable to send out-of-band RMA CPER"); 1947 + else 1948 + dev_dbg(adev->dev, "Sent out-of-band RMA CPER"); 1949 + 1950 + if (adev->cper.enabled && !amdgpu_uniras_enabled(adev)) { 1951 + if (amdgpu_cper_generate_bp_threshold_record(adev)) 1952 + dev_warn(adev->dev, "Unable to send in-band RMA CPER"); 1953 + else 1954 + dev_dbg(adev->dev, "Sent in-band RMA CPER"); 1955 + } 1956 + } 1932 1957 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
··· 193 193 194 194 int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control); 195 195 196 + void amdgpu_ras_check_bad_page_status(struct amdgpu_device *adev); 197 + 196 198 extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; 197 199 extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; 198 200
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 868 868 void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, 869 869 struct amdgpu_fence *guilty_fence) 870 870 { 871 - /* Stop the scheduler to prevent anybody else from touching the ring buffer. */ 872 - drm_sched_wqueue_stop(&ring->sched); 873 871 /* back up the non-guilty commands */ 874 872 amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); 875 873 } ··· 893 895 amdgpu_ring_write(ring, ring->ring_backup[i]); 894 896 amdgpu_ring_commit(ring); 895 897 } 896 - /* Start the scheduler again */ 897 - drm_sched_wqueue_start(&ring->sched); 898 898 return 0; 899 899 } 900 900
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
··· 558 558 struct amdgpu_ring *gfx_ring = &sdma_instance->ring; 559 559 struct amdgpu_ring *page_ring = &sdma_instance->page; 560 560 561 + if (amdgpu_sriov_vf(adev)) 562 + return -EOPNOTSUPP; 563 + 561 564 mutex_lock(&sdma_instance->engine_reset_mutex); 562 565 563 566 if (!caller_handles_kernel_queues) {
+3 -17
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1735 1735 { 1736 1736 struct amdgpu_bo_va *bo_va; 1737 1737 1738 + amdgpu_vm_assert_locked(vm); 1739 + 1738 1740 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); 1739 1741 if (bo_va == NULL) { 1740 1742 return NULL; ··· 2362 2360 unsigned max_bits) 2363 2361 { 2364 2362 unsigned int max_size = 1 << (max_bits - 30); 2365 - bool sys_5level_pgtable = false; 2366 2363 unsigned int vm_size; 2367 2364 uint64_t tmp; 2368 - 2369 - #ifdef CONFIG_X86_64 2370 - /* 2371 - * Refer to function configure_5level_paging() for details. 2372 - */ 2373 - sys_5level_pgtable = (native_read_cr4() & X86_CR4_LA57); 2374 - #endif 2375 - 2376 - /* 2377 - * If GPU supports 5-level page table, but system uses 4-level page table, 2378 - * then use 4-level page table on GPU 2379 - */ 2380 - if (max_level == 4 && !sys_5level_pgtable) { 2381 - min_vm_size = 256 * 1024; 2382 - max_level = 3; 2383 - } 2384 2365 2385 2366 /* adjust vm size first */ 2386 2367 if (amdgpu_vm_size != -1) { ··· 2400 2415 } 2401 2416 2402 2417 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2418 + adev->vm_manager.max_level = max_level; 2403 2419 2404 2420 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2405 2421 if (amdgpu_vm_block_size != -1)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 456 456 bool concurrent_flush; 457 457 458 458 uint64_t max_pfn; 459 + uint32_t max_level; 459 460 uint32_t num_level; 460 461 uint32_t block_size; 461 462 uint32_t fragment_size;
+3 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 42 42 43 43 #define XGMI_STATE_DISABLE 0xD1 44 44 #define XGMI_STATE_LS0 0x81 45 - #define XGMI_LINK_ACTIVE 1 46 - #define XGMI_LINK_INACTIVE 0 47 45 48 46 static DEFINE_MUTEX(xgmi_mutex); 49 47 ··· 363 365 return -ENOLINK; 364 366 365 367 if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0) 366 - return XGMI_LINK_ACTIVE; 368 + return AMDGPU_XGMI_LINK_ACTIVE; 367 369 368 - return XGMI_LINK_INACTIVE; 370 + return AMDGPU_XGMI_LINK_INACTIVE; 369 371 } 370 372 371 373 /** ··· 1174 1176 1175 1177 switch (type) { 1176 1178 case ACA_SMU_TYPE_UE: 1177 - if (ext_error_code != 0 && ext_error_code != 9) 1179 + if (ext_error_code != 0 && ext_error_code != 1 && ext_error_code != 9) 1178 1180 count = 0ULL; 1179 1181 1180 1182 bank->aca_err_type = ACA_ERROR_TYPE_UE;
+4 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c
··· 395 395 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 396 396 ENABLE_CONTEXT, 1); 397 397 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 398 - PAGE_TABLE_DEPTH, 0); 398 + PAGE_TABLE_DEPTH, adev->gmc.vmid0_page_table_depth); 399 + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 400 + PAGE_TABLE_BLOCK_SIZE, 401 + adev->gmc.vmid0_page_table_block_size); 399 402 tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, 400 403 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); 401 404 WREG32_SOC15(MMHUB, GET_INST(MMHUB, i),
+11 -4
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
··· 57 57 MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin"); 58 58 MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin"); 59 59 MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin"); 60 + MODULE_FIRMWARE("amdgpu/psp_13_0_15_sos.bin"); 61 + MODULE_FIRMWARE("amdgpu/psp_13_0_15_ta.bin"); 60 62 MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin"); 61 63 MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin"); 62 64 MODULE_FIRMWARE("amdgpu/psp_14_0_1_toc.bin"); ··· 123 121 case IP_VERSION(13, 0, 10): 124 122 case IP_VERSION(13, 0, 12): 125 123 case IP_VERSION(13, 0, 14): 124 + case IP_VERSION(13, 0, 15): 126 125 err = psp_init_sos_microcode(psp, ucode_prefix); 127 126 if (err) 128 127 return err; ··· 159 156 160 157 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 161 158 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 162 - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 159 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || 160 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) { 163 161 at = 0; 164 162 for_each_inst(i, adev->aid_mask) { 165 163 bl_status_reg = ··· 206 202 retry_cnt = 207 203 ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 208 204 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 209 - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ? 205 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || 206 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15))) ? 
210 207 PSP_VMBX_POLLING_LIMIT : 211 208 10; 212 209 /* Wait for bootloader to signify that it is ready having bit 31 of ··· 237 232 238 233 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 239 234 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 240 - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) { 235 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || 236 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) { 241 237 ret = psp_v13_0_wait_for_vmbx_ready(psp); 242 238 if (ret) 243 239 amdgpu_ras_query_boot_status(adev, 4); ··· 878 872 879 873 if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) || 880 874 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) || 881 - amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) && 875 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14) || 876 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 15)) && 882 877 (!(adev->flags & AMD_IS_APU))) { 883 878 reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127); 884 879 adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0));
+4
drivers/gpu/drm/amd/amdgpu/psp_v15_0.c
··· 45 45 if (err) 46 46 return err; 47 47 48 + err = psp_init_ta_microcode(psp, ucode_prefix); 49 + if (err) 50 + return err; 51 + 48 52 return 0; 49 53 } 50 54
+21
drivers/gpu/drm/amd/amdgpu/psp_v15_0_8.c
··· 187 187 WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_67, value); 188 188 } 189 189 190 + static bool psp_v15_0_8_get_ras_capability(struct psp_context *psp) 191 + { 192 + struct amdgpu_device *adev = psp->adev; 193 + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 194 + u32 reg_data; 195 + 196 + /* query ras cap should be done from host side */ 197 + if (amdgpu_sriov_vf(adev)) 198 + return false; 199 + 200 + if (!con) 201 + return false; 202 + 203 + reg_data = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_127); 204 + adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0)); 205 + con->poison_supported = ((reg_data & GENMASK_ULL(24, 24)) >> 24) ? true : false; 206 + 207 + return true; 208 + } 209 + 190 210 static int psp_v15_0_8_get_fw_type(struct amdgpu_firmware_info *ucode, 191 211 enum psp_gfx_fw_type *type) 192 212 { ··· 354 334 .ring_get_wptr = psp_v15_0_8_ring_get_wptr, 355 335 .ring_set_wptr = psp_v15_0_8_ring_set_wptr, 356 336 .get_fw_type = psp_v15_0_8_get_fw_type, 337 + .get_ras_capability = psp_v15_0_8_get_ras_capability, 357 338 }; 358 339 359 340 void psp_v15_0_8_set_psp_funcs(struct psp_context *psp)
+3 -12
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 1424 1424 1425 1425 adev->sdma.supported_reset = 1426 1426 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); 1427 - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1428 - case IP_VERSION(5, 0, 0): 1429 - case IP_VERSION(5, 0, 2): 1430 - case IP_VERSION(5, 0, 5): 1431 - if ((adev->sdma.instance[0].fw_version >= 35) && 1432 - !amdgpu_sriov_vf(adev) && 1433 - !adev->debug_disable_gpu_ring_reset) 1434 - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1435 - break; 1436 - default: 1437 - break; 1438 - } 1427 + if (!amdgpu_sriov_vf(adev) && 1428 + !adev->debug_disable_gpu_ring_reset) 1429 + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1439 1430 1440 1431 /* Allocate memory for SDMA IP Dump buffer */ 1441 1432 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+3 -19
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 1342 1342 1343 1343 adev->sdma.supported_reset = 1344 1344 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); 1345 - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1346 - case IP_VERSION(5, 2, 0): 1347 - case IP_VERSION(5, 2, 2): 1348 - case IP_VERSION(5, 2, 3): 1349 - case IP_VERSION(5, 2, 4): 1350 - if ((adev->sdma.instance[0].fw_version >= 76) && 1351 - !amdgpu_sriov_vf(adev) && 1352 - !adev->debug_disable_gpu_ring_reset) 1353 - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1354 - break; 1355 - case IP_VERSION(5, 2, 5): 1356 - if ((adev->sdma.instance[0].fw_version >= 34) && 1357 - !amdgpu_sriov_vf(adev) && 1358 - !adev->debug_disable_gpu_ring_reset) 1359 - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1360 - break; 1361 - default: 1362 - break; 1363 - } 1345 + if (!amdgpu_sriov_vf(adev) && 1346 + !adev->debug_disable_gpu_ring_reset) 1347 + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1364 1348 1365 1349 /* Allocate memory for SDMA IP Dump buffer */ 1366 1350 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
+3 -12
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 1364 1364 1365 1365 adev->sdma.supported_reset = 1366 1366 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); 1367 - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1368 - case IP_VERSION(6, 0, 0): 1369 - case IP_VERSION(6, 0, 2): 1370 - case IP_VERSION(6, 0, 3): 1371 - if ((adev->sdma.instance[0].fw_version >= 21) && 1372 - !amdgpu_sriov_vf(adev) && 1373 - !adev->debug_disable_gpu_ring_reset) 1374 - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1375 - break; 1376 - default: 1377 - break; 1378 - } 1367 + if (!amdgpu_sriov_vf(adev) && 1368 + !adev->debug_disable_gpu_ring_reset) 1369 + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; 1379 1370 1380 1371 if (amdgpu_sdma_ras_sw_init(adev)) { 1381 1372 dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
+2 -1
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 1478 1478 if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) && 1479 1479 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) && 1480 1480 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) && 1481 - (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) { 1481 + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14)) && 1482 + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 15))) { 1482 1483 /* AMD_CG_SUPPORT_DRM_MGCG */ 1483 1484 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); 1484 1485 if (!(data & 0x01000000))
+22 -2
drivers/gpu/drm/amd/amdgpu/soc21.c
··· 422 422 case IP_VERSION(14, 0, 1): 423 423 case IP_VERSION(14, 0, 4): 424 424 case IP_VERSION(14, 0, 5): 425 + case IP_VERSION(15, 0, 0): 425 426 return AMD_RESET_METHOD_MODE2; 426 427 default: 427 428 if (amdgpu_dpm_is_baco_supported(adev)) ··· 839 838 break; 840 839 case IP_VERSION(11, 5, 4): 841 840 adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | 842 - AMD_CG_SUPPORT_JPEG_MGCG; 841 + AMD_CG_SUPPORT_JPEG_MGCG | 842 + AMD_CG_SUPPORT_GFX_CGCG | 843 + AMD_CG_SUPPORT_GFX_CGLS | 844 + AMD_CG_SUPPORT_GFX_MGCG | 845 + AMD_CG_SUPPORT_GFX_FGCG | 846 + AMD_CG_SUPPORT_REPEATER_FGCG | 847 + AMD_CG_SUPPORT_GFX_PERF_CLK | 848 + AMD_CG_SUPPORT_GFX_3D_CGCG | 849 + AMD_CG_SUPPORT_GFX_3D_CGLS | 850 + AMD_CG_SUPPORT_MC_MGCG | 851 + AMD_CG_SUPPORT_MC_LS | 852 + AMD_CG_SUPPORT_HDP_LS | 853 + AMD_CG_SUPPORT_HDP_DS | 854 + AMD_CG_SUPPORT_HDP_SD | 855 + AMD_CG_SUPPORT_ATHUB_MGCG | 856 + AMD_CG_SUPPORT_ATHUB_LS | 857 + AMD_CG_SUPPORT_IH_CG | 858 + AMD_CG_SUPPORT_BIF_MGCG | 859 + AMD_CG_SUPPORT_BIF_LS; 843 860 adev->pg_flags = AMD_PG_SUPPORT_VCN | 844 - AMD_PG_SUPPORT_JPEG; 861 + AMD_PG_SUPPORT_JPEG | 862 + AMD_PG_SUPPORT_GFX_PG; 845 863 adev->external_rev_id = adev->rev_id + 0x1; 846 864 break; 847 865 default:
+3 -1
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 521 521 RREG32_SOC15(VCN, i, mmUVD_STATUS))) 522 522 vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); 523 523 524 - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 524 + /* VF doesn't enable interrupt operations for RAS */ 525 + if (!amdgpu_sriov_vf(adev) && 526 + amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 525 527 amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0); 526 528 } 527 529
+6 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 2804 2804 * SET_SHADER_DEBUGGER clears any stale process context data 2805 2805 * saved in MES. 2806 2806 */ 2807 - if (pdd->dev->kfd->shared_resources.enable_mes) 2808 - kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2807 + if (pdd->dev->kfd->shared_resources.enable_mes) { 2808 + ret = kfd_dbg_set_mes_debug_mode( 2809 + pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2810 + if (ret) 2811 + return ret; 2812 + } 2809 2813 } 2810 2814 2811 2815 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 2359 2359 if (kdev->kfd->hive_id) { 2360 2360 for (nid = 0; nid < proximity_domain; ++nid) { 2361 2361 peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid); 2362 - if (!peer_dev->gpu) 2362 + if (!peer_dev || !peer_dev->gpu) 2363 2363 continue; 2364 2364 if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id) 2365 2365 continue;
+29 -15
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
··· 404 404 return -ENOMEM; 405 405 } 406 406 407 - static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id) 407 + static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id) 408 408 { 409 409 spin_lock(&pdd->dev->watch_points_lock); 410 410 411 411 /* process owns device watch point so safe to clear */ 412 - if ((pdd->alloc_watch_ids >> watch_id) & 0x1) { 413 - pdd->alloc_watch_ids &= ~(0x1 << watch_id); 414 - pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id); 412 + if (pdd->alloc_watch_ids & BIT(watch_id)) { 413 + pdd->alloc_watch_ids &= ~BIT(watch_id); 414 + pdd->dev->alloc_watch_ids &= ~BIT(watch_id); 415 415 } 416 416 417 417 spin_unlock(&pdd->dev->watch_points_lock); 418 418 } 419 419 420 - static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id) 420 + static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, u32 watch_id) 421 421 { 422 422 bool owns_watch_id = false; 423 423 424 424 spin_lock(&pdd->dev->watch_points_lock); 425 - owns_watch_id = watch_id < MAX_WATCH_ADDRESSES && 426 - ((pdd->alloc_watch_ids >> watch_id) & 0x1); 427 - 425 + owns_watch_id = pdd->alloc_watch_ids & BIT(watch_id); 428 426 spin_unlock(&pdd->dev->watch_points_lock); 429 427 430 428 return owns_watch_id; ··· 432 434 uint32_t watch_id) 433 435 { 434 436 int r; 437 + 438 + if (watch_id >= MAX_WATCH_ADDRESSES) 439 + return -EINVAL; 435 440 436 441 if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id)) 437 442 return -EINVAL; ··· 472 471 473 472 if (r) 474 473 return r; 474 + 475 + if (*watch_id >= MAX_WATCH_ADDRESSES) 476 + return -EINVAL; 475 477 476 478 if (!pdd->dev->kfd->shared_resources.enable_mes) { 477 479 r = debug_lock_and_unmap(pdd->dev->dqm); ··· 523 519 int i, r = 0, rewind_count = 0; 524 520 525 521 for (i = 0; i < target->n_pdds; i++) { 522 + uint32_t caps; 523 + uint32_t caps2; 526 524 struct kfd_topology_device *topo_dev = 527 - kfd_topology_device_by_id(target->pdds[i]->dev->id); 528 - 
uint32_t caps = topo_dev->node_props.capability; 529 - uint32_t caps2 = topo_dev->node_props.capability2; 525 + kfd_topology_device_by_id(target->pdds[i]->dev->id); 526 + if (!topo_dev) 527 + return -EINVAL; 528 + 529 + caps = topo_dev->node_props.capability; 530 + caps2 = topo_dev->node_props.capability2; 530 531 531 532 if (!(caps & HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED) && 532 533 (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) { ··· 584 575 continue; 585 576 586 577 if (!pdd->dev->kfd->shared_resources.enable_mes) 587 - debug_refresh_runlist(pdd->dev->dqm); 578 + (void)debug_refresh_runlist(pdd->dev->dqm); 588 579 else 589 - kfd_dbg_set_mes_debug_mode(pdd, true); 580 + (void)kfd_dbg_set_mes_debug_mode(pdd, true); 590 581 } 591 582 } 592 583 ··· 646 637 pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id); 647 638 648 639 if (!pdd->dev->kfd->shared_resources.enable_mes) 649 - debug_refresh_runlist(pdd->dev->dqm); 640 + (void)debug_refresh_runlist(pdd->dev->dqm); 650 641 else 651 - kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); 642 + (void)kfd_dbg_set_mes_debug_mode(pdd, 643 + !kfd_dbg_has_cwsr_workaround(pdd->dev)); 652 644 } 653 645 654 646 kfd_dbg_set_workaround(target, false); ··· 1091 1081 for (i = 0; i < tmp_num_devices; i++) { 1092 1082 struct kfd_process_device *pdd = target->pdds[i]; 1093 1083 struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id); 1084 + if (!topo_dev) { 1085 + r = -EINVAL; 1086 + break; 1087 + } 1094 1088 1095 1089 device_info.gpu_id = pdd->dev->id; 1096 1090 device_info.exception_status = pdd->exception_status;
+6
drivers/gpu/drm/amd/amdkfd/kfd_events.c
··· 331 331 if (p->signal_page) 332 332 return -EBUSY; 333 333 334 + if (size < KFD_SIGNAL_EVENT_LIMIT * 8) { 335 + pr_err("Event page size %llu is too small, need at least %lu bytes\n", 336 + size, (unsigned long)(KFD_SIGNAL_EVENT_LIMIT * 8)); 337 + return -EINVAL; 338 + } 339 + 334 340 page = kzalloc(sizeof(*page), GFP_KERNEL); 335 341 if (!page) 336 342 return -ENOMEM;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 70 70 static void set_priority(struct cik_mqd *m, struct queue_properties *q) 71 71 { 72 72 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 73 - m->cp_hqd_queue_priority = q->priority; 73 + /* m->cp_hqd_queue_priority = q->priority; */ 74 74 } 75 75 76 76 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
··· 70 70 static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q) 71 71 { 72 72 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 73 - m->cp_hqd_queue_priority = q->priority; 73 + /* m->cp_hqd_queue_priority = q->priority; */ 74 74 } 75 75 76 76 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
··· 96 96 static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q) 97 97 { 98 98 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 99 - m->cp_hqd_queue_priority = q->priority; 99 + /* m->cp_hqd_queue_priority = q->priority; */ 100 100 } 101 101 102 102 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
··· 77 77 static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q) 78 78 { 79 79 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 80 - m->cp_hqd_queue_priority = q->priority; 80 + /* m->cp_hqd_queue_priority = q->priority; */ 81 81 } 82 82 83 83 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
··· 131 131 static void set_priority(struct v12_1_compute_mqd *m, struct queue_properties *q) 132 132 { 133 133 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 134 - m->cp_hqd_queue_priority = q->priority; 134 + /* m->cp_hqd_queue_priority = q->priority; */ 135 135 } 136 136 137 137 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
+4 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
··· 106 106 static void set_priority(struct v9_mqd *m, struct queue_properties *q) 107 107 { 108 108 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 109 - m->cp_hqd_queue_priority = q->priority; 109 + /* m->cp_hqd_queue_priority = q->priority; */ 110 110 } 111 111 112 112 static bool mqd_on_vram(struct amdgpu_device *adev) 113 113 { 114 + if (adev->apu_prefer_gtt) 115 + return false; 116 + 114 117 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 115 118 case IP_VERSION(9, 4, 3): 116 119 case IP_VERSION(9, 5, 0):
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
··· 73 73 static void set_priority(struct vi_mqd *m, struct queue_properties *q) 74 74 { 75 75 m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; 76 - m->cp_hqd_queue_priority = q->priority; 76 + /* m->cp_hqd_queue_priority = q->priority; */ 77 77 } 78 78 79 79 static struct kfd_mem_obj *allocate_mqd(struct mqd_manager *mm,
-3
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 1767 1767 struct kfd_node *dev; 1768 1768 int ret; 1769 1769 1770 - if (!drm_file) 1771 - return -EINVAL; 1772 - 1773 1770 if (pdd->drm_priv) 1774 1771 return -EBUSY; 1775 1772
+51 -20
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1244 1244 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1245 1245 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 1246 1246 const struct firmware *dmub_fw = adev->dm.dmub_fw; 1247 + struct dc *dc = adev->dm.dc; 1247 1248 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 1248 1249 struct abm *abm = adev->dm.dc->res_pool->abm; 1249 1250 struct dc_context *ctx = adev->dm.dc->ctx; ··· 1350 1349 for (i = 0; i < fb_info->num_fb; ++i) 1351 1350 hw_params.fb[i] = &fb_info->fb[i]; 1352 1351 1353 - switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1354 - case IP_VERSION(3, 1, 3): 1355 - case IP_VERSION(3, 1, 4): 1356 - case IP_VERSION(3, 5, 0): 1357 - case IP_VERSION(3, 5, 1): 1358 - case IP_VERSION(3, 6, 0): 1359 - case IP_VERSION(4, 0, 1): 1352 + /* Enable usb4 dpia in the FW APU */ 1353 + if (dc->caps.is_apu && 1354 + dc->res_pool->usb4_dpia_count != 0 && 1355 + !dc->debug.dpia_debug.bits.disable_dpia) { 1360 1356 hw_params.dpia_supported = true; 1361 - hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; 1362 - break; 1363 - default: 1364 - break; 1357 + hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; 1358 + hw_params.dpia_hpd_int_enable_supported = false; 1359 + hw_params.enable_non_transparent_setconfig = dc->config.consolidated_dpia_dp_lt; 1360 + hw_params.disable_dpia_bw_allocation = !dc->config.usb4_bw_alloc_support; 1365 1361 } 1366 1362 1367 1363 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { ··· 3479 3481 struct dc_commit_streams_params commit_params = {}; 3480 3482 3481 3483 if (dm->dc->caps.ips_support) { 3484 + if (!amdgpu_in_reset(adev)) 3485 + mutex_lock(&dm->dc_lock); 3486 + 3487 + /* Need to set POWER_STATE_D0 first or it will not execute 3488 + * idle_power_optimizations command to DMUB. 
3489 + */ 3490 + dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); 3482 3491 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); 3492 + 3493 + if (!amdgpu_in_reset(adev)) 3494 + mutex_unlock(&dm->dc_lock); 3483 3495 } 3484 3496 3485 3497 if (amdgpu_in_reset(adev)) { ··· 3612 3604 * MST connectors, should be skipped 3613 3605 */ 3614 3606 if (aconnector->mst_root) 3607 + continue; 3608 + 3609 + /* Skip eDP detection, when there is no sink present */ 3610 + if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_EDP && 3611 + !aconnector->dc_link->edp_sink_present) 3615 3612 continue; 3616 3613 3617 3614 guard(mutex)(&aconnector->hpd_lock); ··· 10675 10662 10676 10663 wb_info->dwb_params.capture_rate = dwb_capture_rate_0; 10677 10664 10678 - wb_info->dwb_params.scaler_taps.h_taps = 4; 10679 - wb_info->dwb_params.scaler_taps.v_taps = 4; 10680 - wb_info->dwb_params.scaler_taps.h_taps_c = 2; 10681 - wb_info->dwb_params.scaler_taps.v_taps_c = 2; 10665 + wb_info->dwb_params.scaler_taps.h_taps = 1; 10666 + wb_info->dwb_params.scaler_taps.v_taps = 1; 10667 + wb_info->dwb_params.scaler_taps.h_taps_c = 1; 10668 + wb_info->dwb_params.scaler_taps.v_taps_c = 1; 10682 10669 wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; 10683 10670 10684 10671 wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; ··· 10978 10965 continue; 10979 10966 } 10980 10967 for (j = 0; j < status->plane_count; j++) 10981 - dummy_updates[j].surface = status->plane_states[0]; 10968 + dummy_updates[j].surface = status->plane_states[j]; 10982 10969 10983 10970 sort(dummy_updates, status->plane_count, 10984 10971 sizeof(*dummy_updates), dm_plane_layer_index_cmp, NULL); ··· 11694 11681 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11695 11682 struct dm_crtc_state *old_dm_crtc_state, *new_dm_crtc_state; 11696 11683 struct amdgpu_device *adev = drm_to_adev(plane->dev); 11684 + struct drm_connector_state *new_con_state; 11685 + struct 
drm_connector *connector; 11697 11686 int i; 11698 11687 11699 11688 /* ··· 11705 11690 if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && 11706 11691 state->allow_modeset) 11707 11692 return true; 11693 + 11694 + /* Check for writeback commit */ 11695 + for_each_new_connector_in_state(state, connector, new_con_state, i) { 11696 + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) 11697 + continue; 11698 + 11699 + if (new_con_state->writeback_job) 11700 + return true; 11701 + } 11708 11702 11709 11703 if (amdgpu_in_reset(adev) && state->allow_modeset) 11710 11704 return true; ··· 12313 12289 12314 12290 /* Overlay cursor not supported on HW before DCN 12315 12291 * DCN401 does not have the cursor-on-scaled-plane or cursor-on-yuv-plane restrictions 12316 - * as previous DCN generations, so enable native mode on DCN401 in addition to DCE 12292 + * as previous DCN generations, so enable native mode on DCN401 12317 12293 */ 12318 - if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0 || 12319 - amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 12294 + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(4, 0, 1)) { 12320 12295 *cursor_mode = DM_CURSOR_NATIVE_MODE; 12321 12296 return 0; 12322 12297 } ··· 12635 12612 * need to be added for DC to not disable a plane by mistake 12636 12613 */ 12637 12614 if (dm_new_crtc_state->cursor_mode == DM_CURSOR_OVERLAY_MODE) { 12615 + if (amdgpu_ip_version(adev, DCE_HWIP, 0) == 0) { 12616 + drm_dbg(dev, "Overlay cursor not supported on DCE\n"); 12617 + ret = -EINVAL; 12618 + goto fail; 12619 + } 12620 + 12638 12621 ret = drm_atomic_add_affected_planes(state, crtc); 12639 12622 if (ret) 12640 12623 goto fail; ··· 13158 13129 u8 *edid_ext = NULL; 13159 13130 int i; 13160 13131 int j = 0; 13132 + int total_ext_block_len; 13161 13133 13162 13134 if (edid == NULL || edid->extensions == 0) 13163 13135 return -ENODEV; ··· 13170 13140 break; 13171 13141 } 13172 13142 13173 - while (j < EDID_LENGTH - 
sizeof(struct amd_vsdb_block)) { 13143 + total_ext_block_len = EDID_LENGTH * edid->extensions; 13144 + while (j < total_ext_block_len - sizeof(struct amd_vsdb_block)) { 13174 13145 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; 13175 13146 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); 13176 13147
+12 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 506 506 struct amdgpu_dm_connector *aconnector = NULL; 507 507 bool enable = amdgpu_dm_is_valid_crc_source(source); 508 508 int ret = 0; 509 + enum crc_poly_mode crc_poly_mode = CRC_POLY_MODE_16; 509 510 510 511 /* Configuration will be deferred to stream enable. */ 511 512 if (!stream_state) ··· 529 528 amdgpu_dm_replay_disable(stream_state); 530 529 } 531 530 531 + /* CRC polynomial selection only support for DCN3.6+ except DCN4.0.1 */ 532 + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 6, 0)) && 533 + (amdgpu_ip_version(adev, DCE_HWIP, 0) != IP_VERSION(4, 0, 1))) { 534 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 535 + 536 + crc_poly_mode = acrtc->dm_irq_params.crc_poly_mode; 537 + } 538 + 532 539 /* Enable or disable CRTC CRC generation */ 533 540 if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { 534 541 if (!dc_stream_configure_crc(stream_state->ctx->dc, 535 - stream_state, NULL, enable, enable, 0, true)) { 542 + stream_state, NULL, enable, enable, 0, true, crc_poly_mode)) { 536 543 ret = -EINVAL; 537 544 goto unlock; 538 545 } ··· 886 877 else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) 887 878 /* update ROI via dm*/ 888 879 dc_stream_configure_crc(stream_state->ctx->dc, stream_state, 889 - &crc_window, true, true, i, false); 880 + &crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode); 890 881 891 882 reset_crc_frame_count[i] = true; 892 883 ··· 910 901 else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE) 911 902 /* Avoid ROI window get changed, keep overwriting. */ 912 903 dc_stream_configure_crc(stream_state->ctx->dc, stream_state, 913 - &crc_window, true, true, i, false); 904 + &crc_window, true, true, i, false, (enum crc_poly_mode)acrtc->dm_irq_params.crc_poly_mode); 914 905 915 906 /* crc ready for psp to read out */ 916 907 crtc_ctx->crc_info.crc[i].crc_ready = true;
+69 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 46 46 #include "amdgpu_dm_psr.h" 47 47 #endif 48 48 49 + #define MULTIPLIER_TO_LR 270000 49 50 struct dmub_debugfs_trace_header { 50 51 uint32_t entry_count; 51 52 uint32_t reserved[3]; ··· 303 302 304 303 switch (param[1]) { 305 304 case LINK_RATE_LOW: 305 + case LINK_RATE_RATE_2: 306 + case LINK_RATE_RATE_3: 306 307 case LINK_RATE_HIGH: 307 308 case LINK_RATE_RBR2: 309 + case LINK_RATE_RATE_6: 308 310 case LINK_RATE_HIGH2: 309 311 case LINK_RATE_HIGH3: 310 312 case LINK_RATE_UHBR10: ··· 3508 3504 uint8_t param_nums = 0; 3509 3505 long param[2]; 3510 3506 bool valid_input = true; 3507 + uint8_t supported_link_rates[16] = {0}; 3508 + uint32_t entry = 0; 3509 + uint32_t link_rate_in_khz = 0; 3510 + uint8_t dpcd_rev = 0; 3511 3511 3512 3512 if (size == 0) 3513 3513 return -EINVAL; ··· 3556 3548 return size; 3557 3549 } 3558 3550 3551 + if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, 3552 + supported_link_rates, sizeof(supported_link_rates))) 3553 + return -EINVAL; 3554 + 3555 + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; 3556 + if (dpcd_rev < DP_DPCD_REV_13 || 3557 + (supported_link_rates[entry + 1] == 0 && supported_link_rates[entry] == 0)) { 3558 + return size; 3559 + } 3560 + 3561 + entry = param[1] * 2; 3562 + link_rate_in_khz = (supported_link_rates[entry + 1] * 0x100 + 3563 + supported_link_rates[entry]) * 200; 3564 + 3559 3565 /* save user force lane_count, link_rate to preferred settings 3560 3566 * spread spectrum will not be changed 3561 3567 */ ··· 3577 3555 prefer_link_settings.lane_count = param[0]; 3578 3556 prefer_link_settings.use_link_rate_set = true; 3579 3557 prefer_link_settings.link_rate_set = param[1]; 3580 - prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]]; 3558 + prefer_link_settings.link_rate = link_rate_in_khz / MULTIPLIER_TO_LR; 3581 3559 3582 3560 mutex_lock(&adev->dm.dc_lock); 3583 3561 dc_link_set_preferred_training_settings(dc, &prefer_link_settings, ··· 3839 3817 3840 3818 
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, 3841 3819 crc_win_update_set, "%llu\n"); 3820 + 3821 + /* 3822 + * Trigger to set crc polynomial mode 3823 + * 0: 16-bit CRC, 1: 32-bit CRC 3824 + * only accepts 0 or 1 for supported hwip versions 3825 + */ 3826 + static int crc_poly_mode_set(void *data, u64 val) 3827 + { 3828 + struct drm_crtc *crtc = data; 3829 + struct amdgpu_crtc *acrtc; 3830 + struct amdgpu_device *adev = drm_to_adev(crtc->dev); 3831 + 3832 + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 6, 0)) && 3833 + (amdgpu_ip_version(adev, DCE_HWIP, 0) != IP_VERSION(4, 0, 1)) && 3834 + (val < 2)) { 3835 + acrtc = to_amdgpu_crtc(crtc); 3836 + mutex_lock(&adev->dm.dc_lock); 3837 + spin_lock_irq(&adev_to_drm(adev)->event_lock); 3838 + acrtc->dm_irq_params.crc_poly_mode = val; 3839 + spin_unlock_irq(&adev_to_drm(adev)->event_lock); 3840 + mutex_unlock(&adev->dm.dc_lock); 3841 + } 3842 + 3843 + return 0; 3844 + } 3845 + 3846 + /* 3847 + * Get crc polynomial mode (0: 16-bit CRC, 1: 32-bit CRC) 3848 + */ 3849 + static int crc_poly_mode_get(void *data, u64 *val) 3850 + { 3851 + struct drm_crtc *crtc = data; 3852 + struct drm_device *drm_dev = crtc->dev; 3853 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3854 + 3855 + spin_lock_irq(&drm_dev->event_lock); 3856 + *val = acrtc->dm_irq_params.crc_poly_mode; 3857 + spin_unlock_irq(&drm_dev->event_lock); 3858 + 3859 + return 0; 3860 + } 3861 + 3862 + DEFINE_DEBUGFS_ATTRIBUTE(crc_poly_mode_fops, crc_poly_mode_get, 3863 + crc_poly_mode_set, "%llu\n"); 3842 3864 #endif 3843 3865 void crtc_debugfs_init(struct drm_crtc *crtc) 3844 3866 { ··· 3902 3836 &crc_win_y_end_fops); 3903 3837 debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, 3904 3838 &crc_win_update_fops); 3839 + debugfs_create_file_unsafe("crc_poly_mode", 0644, dir, crtc, 3840 + &crc_poly_mode_fops); 3905 3841 dput(dir); 3906 3842 #endif 3907 3843 debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
+10 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 1153 1153 1154 1154 void dm_helpers_override_panel_settings( 1155 1155 struct dc_context *ctx, 1156 - struct dc_panel_config *panel_config) 1156 + struct dc_link *link) 1157 1157 { 1158 + unsigned int panel_inst = 0; 1159 + 1158 1160 // Feature DSC 1159 1161 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 1160 - panel_config->dsc.disable_dsc_edp = true; 1162 + link->panel_config.dsc.disable_dsc_edp = true; 1163 + 1164 + if (dc_get_edp_link_panel_inst(ctx->dc, link, &panel_inst) && panel_inst == 1) { 1165 + link->panel_config.psr.disable_psr = true; 1166 + link->panel_config.psr.disallow_psrsu = true;; 1167 + link->panel_config.psr.disallow_replay = true; 1168 + } 1161 1169 } 1162 1170 1163 1171 void *dm_helpers_allocate_gpu_mem(
+4 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 919 919 continue; 920 920 921 921 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 922 + dc_link = amdgpu_dm_connector->dc_link; 923 + if (!dc_link) 924 + continue; 922 925 923 926 /* 924 927 * Analog connectors may be hot-plugged unlike other connector 925 928 * types that don't support HPD. Only poll analog connectors. 926 929 */ 927 - use_polling |= 928 - amdgpu_dm_connector->dc_link && 929 - dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id); 930 - 931 - dc_link = amdgpu_dm_connector->dc_link; 930 + use_polling |= dc_connector_supports_analog(dc_link->link_id.id); 932 931 933 932 /* 934 933 * Get a base driver irq reference for hpd ints for the lifetime
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
··· 39 39 40 40 #ifdef CONFIG_DEBUG_FS 41 41 enum amdgpu_dm_pipe_crc_source crc_src; 42 + int crc_poly_mode; /* enum crc_poly_mode from timing_generator.h */ 42 43 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 43 44 struct crc_window_param window_param[MAX_CRC_WINDOW_NUM]; 44 45 /* At least one CRC window is activated or not*/
+6 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 1060 1060 *min_downscale = plane_cap->max_downscale_factor.nv12; 1061 1061 break; 1062 1062 1063 + /* All 64 bpp formats have the same fp16 scaling limits */ 1063 1064 case DRM_FORMAT_XRGB16161616F: 1064 1065 case DRM_FORMAT_ARGB16161616F: 1065 1066 case DRM_FORMAT_XBGR16161616F: 1066 1067 case DRM_FORMAT_ABGR16161616F: 1068 + case DRM_FORMAT_XRGB16161616: 1069 + case DRM_FORMAT_ARGB16161616: 1070 + case DRM_FORMAT_XBGR16161616: 1071 + case DRM_FORMAT_ABGR16161616: 1067 1072 *max_upscale = plane_cap->max_upscale_factor.fp16; 1068 1073 *min_downscale = plane_cap->max_downscale_factor.fp16; 1069 1074 break; ··· 1655 1650 MAX_COLOR_3DLUT_SIZE); 1656 1651 } 1657 1652 1658 - if (dpp_color_caps.ogam_ram) { 1653 + if (dpp_color_caps.ogam_ram || dm->dc->caps.color.mpc.preblend) { 1659 1654 drm_object_attach_property(&plane->base, 1660 1655 mode_info.plane_blend_lut_property, 0); 1661 1656 drm_object_attach_property(&plane->base,
+8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
··· 80 80 link->psr_settings.psr_feature_enabled = false; 81 81 82 82 } else { 83 + unsigned int panel_inst = 0; 84 + 83 85 if (link_supports_psrsu(link)) 84 86 link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; 85 87 else 86 88 link->psr_settings.psr_version = DC_PSR_VERSION_1; 87 89 88 90 link->psr_settings.psr_feature_enabled = true; 91 + 92 + /*disable allow psr/psrsu/replay on eDP1*/ 93 + if (dc_get_edp_link_panel_inst(link->ctx->dc, link, &panel_inst) && panel_inst == 1) { 94 + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 95 + link->psr_settings.psr_feature_enabled = false; 96 + } 89 97 } 90 98 } 91 99
+1 -1
drivers/gpu/drm/amd/display/dc/Makefile
··· 22 22 # 23 23 # Makefile for Display Core (dc) component. 24 24 25 - DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg 25 + DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link dsc resource optc dpp hubbub dccg hubp dio dwb hpo mmhubbub mpc opp pg 26 26 27 27 ifdef CONFIG_DRM_AMD_DC_FP 28 28
+1 -2
drivers/gpu/drm/amd/display/dc/bios/command_table.c
··· 1874 1874 uint8_t dac_standard) 1875 1875 { 1876 1876 params->ucDacStandard = dac_standard; 1877 - if (action == ENCODER_CONTROL_SETUP || 1878 - action == ENCODER_CONTROL_INIT) 1877 + if (action == ENCODER_CONTROL_INIT) 1879 1878 params->ucAction = ATOM_ENCODER_INIT; 1880 1879 else if (action == ENCODER_CONTROL_ENABLE) 1881 1880 params->ucAction = ATOM_ENABLE;
+9 -9
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
··· 186 186 187 187 return display_count; 188 188 } 189 - static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, 189 + void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, 190 190 bool safe_to_lower, bool disable) 191 191 { 192 192 struct dc *dc = clk_mgr_base->ctx->dc; ··· 766 766 .wm_inst = WM_A, 767 767 .wm_type = WM_TYPE_PSTATE_CHG, 768 768 .pstate_latency_us = 11.72, 769 - .sr_exit_time_us = 28.0, 770 - .sr_enter_plus_exit_time_us = 30.0, 769 + .sr_exit_time_us = 31.0, 770 + .sr_enter_plus_exit_time_us = 33.0, 771 771 .valid = true, 772 772 }, 773 773 { 774 774 .wm_inst = WM_B, 775 775 .wm_type = WM_TYPE_PSTATE_CHG, 776 776 .pstate_latency_us = 11.72, 777 - .sr_exit_time_us = 28.0, 778 - .sr_enter_plus_exit_time_us = 30.0, 777 + .sr_exit_time_us = 31.0, 778 + .sr_enter_plus_exit_time_us = 33.0, 779 779 .valid = true, 780 780 }, 781 781 { 782 782 .wm_inst = WM_C, 783 783 .wm_type = WM_TYPE_PSTATE_CHG, 784 784 .pstate_latency_us = 11.72, 785 - .sr_exit_time_us = 28.0, 786 - .sr_enter_plus_exit_time_us = 30.0, 785 + .sr_exit_time_us = 31.0, 786 + .sr_enter_plus_exit_time_us = 33.0, 787 787 .valid = true, 788 788 }, 789 789 { 790 790 .wm_inst = WM_D, 791 791 .wm_type = WM_TYPE_PSTATE_CHG, 792 792 .pstate_latency_us = 11.72, 793 - .sr_exit_time_us = 28.0, 794 - .sr_enter_plus_exit_time_us = 30.0, 793 + .sr_exit_time_us = 31.0, 794 + .sr_enter_plus_exit_time_us = 33.0, 795 795 .valid = true, 796 796 }, 797 797 }
+6
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
··· 64 64 struct clk_mgr_dcn35 *clk_mgr, 65 65 struct pp_smu_funcs *pp_smu, 66 66 struct dccg *dccg); 67 + 68 + void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, 69 + struct dc_state *context, 70 + bool safe_to_lower, 71 + bool disable); 72 + 67 73 #endif //__DCN35_CLK_MGR_H__
+23 -6
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 53 53 #include "dpp.h" 54 54 #include "timing_generator.h" 55 55 #include "abm.h" 56 - #include "virtual/virtual_link_encoder.h" 56 + #include "dio/virtual/virtual_link_encoder.h" 57 57 #include "hubp.h" 58 58 59 59 #include "link_hwss.h" ··· 701 701 * once. 702 702 * @idx: Capture CRC on which CRC engine instance 703 703 * @reset: Reset CRC engine before the configuration 704 + * @crc_poly_mode: CRC polynomial mode 704 705 * 705 706 * By default, the entire frame is used to calculate the CRC. 706 707 * ··· 710 709 */ 711 710 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 712 711 struct crc_params *crc_window, bool enable, bool continuous, 713 - uint8_t idx, bool reset) 712 + uint8_t idx, bool reset, enum crc_poly_mode crc_poly_mode) 714 713 { 715 714 struct pipe_ctx *pipe; 716 715 struct crc_params param; ··· 734 733 param.windowb_y_start = 0; 735 734 param.windowb_x_end = pipe->stream->timing.h_addressable; 736 735 param.windowb_y_end = pipe->stream->timing.v_addressable; 736 + param.crc_poly_mode = crc_poly_mode; 737 737 738 738 if (crc_window) { 739 739 param.windowa_x_start = crc_window->windowa_x_start; ··· 3368 3366 stream->scaler_sharpener_update = *update->scaler_sharpener_update; 3369 3367 if (update->sharpening_required) 3370 3368 stream->sharpening_required = *update->sharpening_required; 3369 + 3370 + if (update->drr_trigger_mode) { 3371 + stream->drr_trigger_mode = *update->drr_trigger_mode; 3372 + } 3371 3373 } 3372 3374 3373 3375 static void backup_planes_and_stream_state( ··· 3866 3860 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3867 3861 return; 3868 3862 3869 - if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3863 + if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3870 3864 return; 3871 3865 3872 3866 memset(&cmd, 0x0, sizeof(cmd)); ··· 3886 3880 if (srf_updates[i].surface->flip_immediate) 3887 3881 continue; 3888 3882 3889 - 
update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3883 + if (dc->config.frame_update_cmd_version2) 3884 + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; 3885 + else 3886 + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; 3887 + 3890 3888 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3891 3889 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3892 3890 sizeof(flip_addr->dirty_rects)); ··· 3904 3894 3905 3895 update_dirty_rect->panel_inst = panel_inst; 3906 3896 update_dirty_rect->pipe_idx = j; 3897 + update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst; 3907 3898 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3908 3899 } 3909 3900 } ··· 3927 3916 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3928 3917 return; 3929 3918 3930 - if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3919 + if (!dc->config.frame_update_cmd_version2 && !dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3931 3920 return; 3932 3921 3933 3922 memset(&cmd, 0x0, sizeof(cmd)); ··· 3946 3935 /* Do not send in immediate flip mode */ 3947 3936 if (srf_updates[i].surface->flip_immediate) 3948 3937 continue; 3949 - update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3938 + 3939 + if (dc->config.frame_update_cmd_version2) 3940 + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; 3941 + else 3942 + update_dirty_rect->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; 3943 + 3950 3944 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3951 3945 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3952 3946 sizeof(flip_addr->dirty_rects)); ··· 3964 3948 continue; 3965 3949 update_dirty_rect->panel_inst = panel_inst; 3966 3950 update_dirty_rect->pipe_idx = j; 3951 + update_dirty_rect->otg_inst = pipe_ctx->stream_res.tg->inst; 3967 3952 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3968 3953 
dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3969 3954 (*dmub_cmd_count)++;
+2 -2
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 37 37 #include "dpp.h" 38 38 #include "core_types.h" 39 39 #include "set_mode_types.h" 40 - #include "virtual/virtual_stream_encoder.h" 40 + #include "dio/virtual/virtual_stream_encoder.h" 41 41 #include "dpcd_defs.h" 42 42 #include "link_enc_cfg.h" 43 43 #include "link_service.h" ··· 45 45 #include "dc_state_priv.h" 46 46 #include "dc_stream_priv.h" 47 47 48 - #include "virtual/virtual_link_hwss.h" 48 + #include "link/hwss/link_hwss_virtual.h" 49 49 #include "link/hwss/link_hwss_dio.h" 50 50 #include "link/hwss/link_hwss_dpia.h" 51 51 #include "link/hwss/link_hwss_hpo_dp.h"
+10 -8
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 515 515 } 516 516 } 517 517 518 - /* apply manual trigger */ 519 - int i; 518 + if (stream->drr_trigger_mode == DRR_TRIGGER_ON_FLIP_AND_CURSOR) { 519 + /* apply manual trigger */ 520 + int i; 520 521 521 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 522 - struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 522 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 523 + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 523 524 524 - /* trigger event on first pipe with current stream */ 525 - if (stream == pipe_ctx->stream) { 526 - pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 527 - break; 525 + /* trigger event on first pipe with current stream */ 526 + if (stream == pipe_ctx->stream) { 527 + pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 528 + break; 529 + } 528 530 } 529 531 } 530 532
+2 -2
drivers/gpu/drm/amd/display/dc/dc.h
··· 63 63 struct dcn_optc_reg_state; 64 64 struct dcn_dccg_reg_state; 65 65 66 - #define DC_VER "3.2.367" 66 + #define DC_VER "3.2.369" 67 67 68 68 /** 69 69 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC ··· 505 505 } bits; 506 506 unsigned char raw; 507 507 }; 508 - 509 508 /* Structure to hold configuration flags set by dm at dc creation. */ 510 509 struct dc_config { 511 510 bool gpu_vm_support; ··· 559 560 bool enable_dpia_pre_training; 560 561 bool unify_link_enc_assignment; 561 562 bool enable_cursor_offload; 563 + bool frame_update_cmd_version2; 562 564 struct spl_sharpness_range dcn_sharpness_range; 563 565 struct spl_sharpness_range dcn_override_sharpness_range; 564 566 };
+11 -4
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 1034 1034 struct pipe_ctx *pipe_ctx, uint8_t p_idx, 1035 1035 struct dmub_cmd_update_cursor_payload0 *payload) 1036 1036 { 1037 + struct dc *dc = pipe_ctx->stream->ctx->dc; 1037 1038 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1038 1039 unsigned int panel_inst = 0; 1039 1040 1040 - if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, 1041 - pipe_ctx->stream->link, &panel_inst)) 1042 - return; 1041 + if (dc->config.frame_update_cmd_version2 == true) { 1042 + /* Don't need panel_inst for command version2 */ 1043 + payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_2; 1044 + } else { 1045 + if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, 1046 + pipe_ctx->stream->link, &panel_inst)) 1047 + return; 1048 + payload->cmd_version = DMUB_CMD_CURSOR_UPDATE_VERSION_1; 1049 + } 1043 1050 1044 1051 /* Payload: Cursor Rect is built from position & attribute 1045 1052 * x & y are obtained from postion ··· 1059 1052 1060 1053 payload->enable = hubp->pos.cur_ctl.bits.cur_enable; 1061 1054 payload->pipe_idx = p_idx; 1062 - payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 1063 1055 payload->panel_inst = panel_inst; 1056 + payload->otg_inst = pipe_ctx->stream_res.tg->inst; 1064 1057 } 1065 1058 1066 1059 static void dc_build_cursor_position_update_payload0(
+11 -1
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 183 183 int dm_max_decrease_from_nominal; 184 184 }; 185 185 186 + enum dc_drr_trigger_mode { 187 + DRR_TRIGGER_ON_FLIP = 0, 188 + DRR_TRIGGER_ON_FLIP_AND_CURSOR, 189 + }; 190 + 186 191 struct dc_stream_state { 187 192 // sink is deprecated, new code should not reference 188 193 // this pointer ··· 321 316 bool scaler_sharpener_update; 322 317 bool sharpening_required; 323 318 319 + enum dc_drr_trigger_mode drr_trigger_mode; 320 + 324 321 struct dc_update_scratch_space *update_scratch; 325 322 }; 326 323 ··· 373 366 bool *hw_cursor_req; 374 367 bool *scaler_sharpener_update; 375 368 bool *sharpening_required; 369 + 370 + enum dc_drr_trigger_mode *drr_trigger_mode; 376 371 }; 377 372 378 373 bool dc_is_stream_unchanged( ··· 593 584 bool enable, 594 585 bool continuous, 595 586 uint8_t idx, 596 - bool reset); 587 + bool reset, 588 + enum crc_poly_mode crc_poly_mode); 597 589 598 590 bool dc_stream_get_crc(struct dc *dc, 599 591 struct dc_stream_state *stream,
+25 -2
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 1230 1230 uint32_t replay_desync_error_fail_count; 1231 1231 /* The frame skip number dal send to DMUB */ 1232 1232 uint16_t frame_skip_number; 1233 - /* Current Panel Replay event */ 1233 + /* Current Panel Replay events */ 1234 1234 uint32_t replay_events; 1235 1235 }; 1236 1236 ··· 1256 1256 unsigned int max_nonboost_brightness_millinits; 1257 1257 unsigned int min_brightness_millinits; 1258 1258 } nits_brightness; 1259 - /* PSR */ 1259 + /* PSR/Replay */ 1260 1260 struct psr { 1261 1261 bool disable_psr; 1262 1262 bool disallow_psrsu; ··· 1266 1266 bool rc_allow_fullscreen_VPB; 1267 1267 bool read_psrcap_again; 1268 1268 unsigned int replay_enable_option; 1269 + bool enable_frame_skipping; 1270 + bool enable_teams_optimization; 1269 1271 } psr; 1270 1272 /* ABM */ 1271 1273 struct varib { ··· 1284 1282 struct ilr { 1285 1283 bool optimize_edp_link_rate; /* eDP ILR */ 1286 1284 } ilr; 1285 + /* Adaptive VariBright*/ 1286 + struct adaptive_vb { 1287 + bool disable_adaptive_vb; 1288 + unsigned int default_abm_vb_levels; // default value = 0xDCAA6414 1289 + unsigned int default_cacp_vb_levels; 1290 + unsigned int default_abm_vb_hdr_levels; // default value = 0xB4805A40 1291 + unsigned int default_cacp_vb_hdr_levels; 1292 + unsigned int abm_scaling_factors; // default value = 0x23210012 1293 + unsigned int cacp_scaling_factors; 1294 + unsigned int battery_life_configures; // default value = 0x0A141E 1295 + unsigned int abm_backlight_adaptive_pwl_1; // default value = 0x6A4F7244 1296 + unsigned int abm_backlight_adaptive_pwl_2; // default value = 0x4C615659 1297 + unsigned int abm_backlight_adaptive_pwl_3; // default value = 0x0064 1298 + unsigned int cacp_backlight_adaptive_pwl_1; 1299 + unsigned int cacp_backlight_adaptive_pwl_2; 1300 + unsigned int cacp_backlight_adaptive_pwl_3; 1301 + } adaptive_vb; 1302 + /* Ramless Idle Opt*/ 1303 + struct rio { 1304 + bool disable_rio; 1305 + } rio; 1287 1306 }; 1288 1307 1289 1308 #define MAX_SINKS_PER_LINK 4
+53 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
··· 131 131 132 132 void dccg2_init(struct dccg *dccg) 133 133 { 134 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 135 + 136 + /* Hardcoded register values for DCN20 137 + * These are specific to 100Mhz refclk 138 + * Different ASICs with different refclk may override this in their own init 139 + */ 140 + REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x00120264); 141 + REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x001186a0); 142 + REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x0e01003c); 143 + 144 + if (REG(REFCLK_CNTL)) 145 + REG_WRITE(REFCLK_CNTL, 0); 146 + } 147 + 148 + void dccg2_refclk_setup(struct dccg *dccg) 149 + { 150 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 151 + 152 + /* REFCLK programming that must occur after hubbub initialization */ 153 + if (REG(REFCLK_CNTL)) 154 + REG_WRITE(REFCLK_CNTL, 0); 155 + } 156 + 157 + bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg) 158 + { 159 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 160 + 161 + return REG_READ(MICROSECOND_TIME_BASE_DIV) == 0x00120464; 162 + } 163 + 164 + void dccg2_allow_clock_gating(struct dccg *dccg, bool allow) 165 + { 166 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 167 + 168 + if (allow) { 169 + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 170 + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 171 + } else { 172 + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0xFFFFFFFF); 173 + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0xFFFFFFFF); 174 + } 175 + } 176 + 177 + void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable) 178 + { 179 + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 180 + 181 + REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, enable ? 
0 : 1); 134 182 } 135 183 136 184 static const struct dccg_funcs dccg2_funcs = { ··· 187 139 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 188 140 .otg_add_pixel = dccg2_otg_add_pixel, 189 141 .otg_drop_pixel = dccg2_otg_drop_pixel, 190 - .dccg_init = dccg2_init 142 + .dccg_init = dccg2_init, 143 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 144 + .allow_clock_gating = dccg2_allow_clock_gating, 145 + .enable_memory_low_power = dccg2_enable_memory_low_power, 146 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 191 147 }; 192 148 193 149 struct dccg *dccg2_create(
+11 -3
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
··· 37 37 SR(REFCLK_CNTL),\ 38 38 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ 39 39 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\ 40 - SR(DISPCLK_FREQ_CHANGE_CNTL) 40 + SR(DISPCLK_FREQ_CHANGE_CNTL),\ 41 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL) 41 42 42 43 #define DCCG_REG_LIST_DCN2() \ 43 44 DCCG_COMMON_REG_LIST_DCN_BASE(),\ ··· 82 81 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\ 83 82 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\ 84 83 DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\ 85 - DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh) 84 + DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh),\ 85 + DCCG_SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh) 86 86 87 87 88 88 ··· 132 130 type DISPCLK_CHG_FWD_CORR_DISABLE;\ 133 131 type DISPCLK_FREQ_CHANGE_CNTL;\ 134 132 type OTG_ADD_PIXEL[MAX_PIPES];\ 135 - type OTG_DROP_PIXEL[MAX_PIPES]; 133 + type OTG_DROP_PIXEL[MAX_PIPES];\ 134 + type DC_MEM_GLOBAL_PWR_REQ_DIS; 136 135 137 136 #define DCCG3_REG_FIELD_LIST(type) \ 138 137 type HDMICHARCLK0_EN;\ ··· 517 514 518 515 519 516 void dccg2_init(struct dccg *dccg); 517 + 518 + void dccg2_refclk_setup(struct dccg *dccg); 519 + void dccg2_allow_clock_gating(struct dccg *dccg, bool allow); 520 + void dccg2_enable_memory_low_power(struct dccg *dccg, bool enable); 521 + bool dccg2_is_s0i3_golden_init_wa_done(struct dccg *dccg); 520 522 521 523 struct dccg *dccg2_create( 522 524 struct dc_context *ctx,
+6 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c
··· 24 24 */ 25 25 26 26 #include "dcn201_dccg.h" 27 + #include "dcn20/dcn20_dccg.h" 27 28 28 29 #include "reg_helper.h" 29 30 #include "core_types.h" ··· 57 56 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 58 57 .otg_add_pixel = dccg2_otg_add_pixel, 59 58 .otg_drop_pixel = dccg2_otg_drop_pixel, 60 - .dccg_init = dccg2_init 59 + .dccg_init = dccg2_init, 60 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 61 + .allow_clock_gating = dccg2_allow_clock_gating, 62 + .enable_memory_low_power = dccg2_enable_memory_low_power, 63 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 61 64 }; 62 65 63 66 struct dccg *dccg201_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
··· 103 103 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 104 104 .otg_add_pixel = dccg2_otg_add_pixel, 105 105 .otg_drop_pixel = dccg2_otg_drop_pixel, 106 - .dccg_init = dccg2_init 106 + .dccg_init = dccg2_init, 107 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 108 + .allow_clock_gating = dccg2_allow_clock_gating, 109 + .enable_memory_low_power = dccg2_enable_memory_low_power, 110 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 107 111 }; 108 112 109 113 struct dccg *dccg21_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c
··· 49 49 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 50 50 .otg_add_pixel = dccg2_otg_add_pixel, 51 51 .otg_drop_pixel = dccg2_otg_drop_pixel, 52 - .dccg_init = dccg2_init 52 + .dccg_init = dccg2_init, 53 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 54 + .allow_clock_gating = dccg2_allow_clock_gating, 55 + .enable_memory_low_power = dccg2_enable_memory_low_power, 56 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 53 57 }; 54 58 55 59 struct dccg *dccg3_create(
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c
··· 48 48 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 49 49 .otg_add_pixel = dccg2_otg_add_pixel, 50 50 .otg_drop_pixel = dccg2_otg_drop_pixel, 51 - .dccg_init = dccg2_init 51 + .dccg_init = dccg2_init, 52 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 53 + .allow_clock_gating = dccg2_allow_clock_gating, 54 + .enable_memory_low_power = dccg2_enable_memory_low_power, 55 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 52 56 }; 53 57 54 58 struct dccg *dccg301_create(
+5
drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
··· 26 26 #include "reg_helper.h" 27 27 #include "core_types.h" 28 28 #include "dcn31_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 #include "dal_asic_id.h" 30 31 31 32 #define TO_DCN_DCCG(dccg)\ ··· 851 850 .disable_dsc = dccg31_disable_dscclk, 852 851 .enable_dsc = dccg31_enable_dscclk, 853 852 .dccg_read_reg_state = dccg31_read_reg_state, 853 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 854 + .allow_clock_gating = dccg2_allow_clock_gating, 855 + .enable_memory_low_power = dccg2_enable_memory_low_power, 856 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 854 857 }; 855 858 856 859 struct dccg *dccg31_create(
+6 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
··· 29 29 30 30 #include "dcn31/dcn31_dccg.h" 31 31 #include "dcn314_dccg.h" 32 + #include "dcn20/dcn20_dccg.h" 32 33 33 34 #define TO_DCN_DCCG(dccg)\ 34 35 container_of(dccg, struct dcn_dccg, base) ··· 379 378 .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, 380 379 .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, 381 380 .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, 382 - .dccg_read_reg_state = dccg31_read_reg_state 381 + .dccg_read_reg_state = dccg31_read_reg_state, 382 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 383 + .allow_clock_gating = dccg2_allow_clock_gating, 384 + .enable_memory_low_power = dccg2_enable_memory_low_power, 385 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 383 386 }; 384 387 385 388 struct dccg *dccg314_create(
+5
drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
··· 26 26 #include "reg_helper.h" 27 27 #include "core_types.h" 28 28 #include "dcn32_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 30 31 #define TO_DCN_DCCG(dccg)\ 31 32 container_of(dccg, struct dcn_dccg, base) ··· 348 347 .get_pixel_rate_div = dccg32_get_pixel_rate_div, 349 348 .trigger_dio_fifo_resync = dccg32_trigger_dio_fifo_resync, 350 349 .set_dtbclk_p_src = dccg32_set_dtbclk_p_src, 350 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 351 + .allow_clock_gating = dccg2_allow_clock_gating, 352 + .enable_memory_low_power = dccg2_enable_memory_low_power, 353 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 351 354 }; 352 355 353 356 struct dccg *dccg32_create(
+20 -21
drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
··· 26 26 #include "core_types.h" 27 27 #include "resource.h" 28 28 #include "dcn35_dccg.h" 29 + #include "dcn20/dcn20_dccg.h" 29 30 30 31 #define TO_DCN_DCCG(dccg)\ 31 32 container_of(dccg, struct dcn_dccg, base) ··· 1106 1105 dccg35_set_dpstreamclk_src_new(dccg, src, inst); 1107 1106 } 1108 1107 1109 - static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) 1108 + void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) 1110 1109 { 1111 1110 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1112 1111 uint32_t dispclk_rdivider_value = 0; ··· 1115 1114 if (dispclk_rdivider_value != 0) 1116 1115 REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); 1117 1116 } 1117 + 1118 1118 static void dccg35_wait_for_dentist_change_done( 1119 1119 struct dccg *dccg) 1120 1120 { ··· 1153 1151 1154 1152 } 1155 1153 1156 - static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, 1157 - int req_dppclk) 1154 + void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) 1158 1155 { 1159 1156 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1160 1157 ··· 1499 1498 __func__, dp_hpo_inst, (src == REFCLK) ? 
0 : 1, otg_inst); 1500 1499 } 1501 1500 1502 - 1503 - static void dccg35_set_dpstreamclk_root_clock_gating( 1504 - struct dccg *dccg, 1505 - int dp_hpo_inst, 1506 - bool enable) 1501 + void dccg35_set_dpstreamclk_root_clock_gating(struct dccg *dccg, int dp_hpo_inst, bool enable) 1507 1502 { 1508 1503 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1509 1504 ··· 1666 1669 dccg35_set_dtbclk_dto(dccg, &dto_params); 1667 1670 } 1668 1671 1669 - static void dccg35_dpp_root_clock_control( 1670 - struct dccg *dccg, 1671 - unsigned int dpp_inst, 1672 - bool clock_on) 1672 + void dccg35_dpp_root_clock_control(struct dccg *dccg, unsigned int dpp_inst, bool clock_on) 1673 1673 { 1674 1674 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1675 1675 ··· 1698 1704 DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); 1699 1705 } 1700 1706 1701 - static void dccg35_disable_symclk32_se( 1702 - struct dccg *dccg, 1703 - int hpo_se_inst) 1707 + void dccg35_disable_symclk32_se(struct dccg *dccg, int hpo_se_inst) 1704 1708 { 1705 1709 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1706 1710 ··· 1805 1813 REG_UPDATE(DCCG_GLOBAL_FGCG_REP_CNTL, DCCG_GLOBAL_FGCG_REP_DIS, !value); 1806 1814 } 1807 1815 1808 - static void dccg35_enable_dscclk(struct dccg *dccg, int inst) 1816 + void dccg35_enable_dscclk(struct dccg *dccg, int inst) 1809 1817 { 1810 1818 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1811 1819 ··· 1852 1860 udelay(10); 1853 1861 } 1854 1862 1855 - static void dccg35_disable_dscclk(struct dccg *dccg, 1856 - int inst) 1863 + void dccg35_disable_dscclk(struct dccg *dccg, int inst) 1857 1864 { 1858 1865 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 1859 1866 ··· 1897 1906 udelay(10); 1898 1907 } 1899 1908 1900 - static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) 1909 + void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) 1901 1910 { 1902 1911 struct dcn_dccg 
*dccg_dcn = TO_DCN_DCCG(dccg); 1903 1912 ··· 2004 2013 return num_enabled_symclk_fe; 2005 2014 } 2006 2015 2007 - static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) 2016 + void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) 2008 2017 { 2009 2018 uint8_t num_enabled_symclk_fe = 0; 2010 2019 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); ··· 2412 2421 .enable_symclk_se = dccg35_enable_symclk_se_cb, 2413 2422 .disable_symclk_se = dccg35_disable_symclk_se_cb, 2414 2423 .set_dtbclk_p_src = dccg35_set_dtbclk_p_src_cb, 2424 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 2425 + .allow_clock_gating = dccg2_allow_clock_gating, 2426 + .enable_memory_low_power = dccg2_enable_memory_low_power, 2427 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done /* Deprecated - for backward compatibility only */ 2415 2428 }; 2416 2429 2417 2430 static const struct dccg_funcs dccg35_funcs = { ··· 2447 2452 .enable_symclk_se = dccg35_enable_symclk_se, 2448 2453 .disable_symclk_se = dccg35_disable_symclk_se, 2449 2454 .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, 2455 + .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 2456 + .allow_clock_gating = dccg2_allow_clock_gating, 2457 + .enable_memory_low_power = dccg2_enable_memory_low_power, 2458 + .is_s0i3_golden_init_wa_done = dccg2_is_s0i3_golden_init_wa_done, /* Deprecated - for backward compatibility only */ 2450 2459 .dccg_root_gate_disable_control = dccg35_root_gate_disable_control, 2451 - .dccg_read_reg_state = dccg31_read_reg_state, 2460 + .dccg_read_reg_state = dccg31_read_reg_state 2452 2461 }; 2453 2462 2454 2463 struct dccg *dccg35_create(
+17
drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
··· 249 249 250 250 void dccg35_init(struct dccg *dccg); 251 251 252 + void dccg35_trigger_dio_fifo_resync(struct dccg *dccg); 253 + 254 + void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); 255 + 252 256 void dccg35_enable_global_fgcg_rep(struct dccg *dccg, bool value); 253 257 void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); 254 258 259 + void dccg35_set_dpstreamclk_root_clock_gating(struct dccg *dccg, int dp_hpo_inst, bool enable); 260 + 261 + void dccg35_set_hdmistreamclk_root_clock_gating(struct dccg *dccg, bool enable); 262 + 263 + void dccg35_dpp_root_clock_control(struct dccg *dccg, unsigned int dpp_inst, bool clock_on); 264 + 265 + void dccg35_disable_symclk32_se(struct dccg *dccg, int hpo_se_inst); 266 + 267 + void dccg35_enable_dscclk(struct dccg *dccg, int inst); 268 + void dccg35_disable_dscclk(struct dccg *dccg, int inst); 269 + 270 + void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); 271 + void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); 255 272 256 273 #endif //__DCN35_DCCG_H__
+7 -10
drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
··· 27 27 #include "core_types.h" 28 28 #include "dcn401_dccg.h" 29 29 #include "dcn31/dcn31_dccg.h" 30 + #include "dcn20/dcn20_dccg.h" 30 31 31 32 /* 32 33 #include "dmub_common.h" ··· 596 595 597 596 bool enable = false; 598 597 599 - if (params->otg_inst > 3) { 600 - /* dcn401 only has 4 instances */ 601 - BREAK_TO_DEBUGGER(); 602 - return; 603 - } 604 - if (!params->refclk_hz) { 605 - BREAK_TO_DEBUGGER(); 606 - return; 607 - } 608 - 609 598 if (!dc_is_tmds_signal(params->signal)) { 610 599 uint64_t dto_integer; 611 600 uint64_t dto_phase_hz; 612 601 uint64_t dto_modulo_hz = params->refclk_hz; 613 602 614 603 enable = true; 604 + 605 + if (!params->refclk_hz) { 606 + BREAK_TO_DEBUGGER(); 607 + return; 608 + } 615 609 616 610 /* Set DTO values: 617 611 * int = target_pix_rate / reference_clock ··· 862 866 .update_dpp_dto = dccg401_update_dpp_dto, 863 867 .get_dccg_ref_freq = dccg401_get_dccg_ref_freq, 864 868 .dccg_init = dccg401_init, 869 + .allow_clock_gating = dccg2_allow_clock_gating, 865 870 .set_dpstreamclk = dccg401_set_dpstreamclk, 866 871 .enable_symclk32_se = dccg31_enable_symclk32_se, 867 872 .disable_symclk32_se = dccg31_disable_symclk32_se,
+19 -4
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
··· 1498 1498 { 1499 1499 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 1500 1500 1501 - REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); 1501 + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) 1502 + REG_UPDATE(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, tg_inst); 1503 + else 1504 + REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); 1502 1505 } 1503 1506 1504 1507 static unsigned int dig_source_otg( ··· 1510 1507 uint32_t tg_inst = 0; 1511 1508 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 1512 1509 1513 - REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); 1510 + if (enc->id == ENGINE_ID_DACA || enc->id == ENGINE_ID_DACB) 1511 + REG_GET(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, &tg_inst); 1512 + else 1513 + REG_GET(DIG_FE_CNTL, DIG_SOURCE_SELECT, &tg_inst); 1514 1514 1515 1515 return tg_inst; 1516 1516 } ··· 1574 1568 enc110->se_mask = se_mask; 1575 1569 } 1576 1570 1577 - static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {}; 1571 + static const struct stream_encoder_funcs dce110_an_str_enc_funcs = { 1572 + .dig_connect_to_otg = dig_connect_to_otg, 1573 + .dig_source_otg = dig_source_otg, 1574 + }; 1578 1575 1579 1576 void dce110_analog_stream_encoder_construct( 1580 1577 struct dce110_stream_encoder *enc110, 1581 1578 struct dc_context *ctx, 1582 1579 struct dc_bios *bp, 1583 - enum engine_id eng_id) 1580 + enum engine_id eng_id, 1581 + const struct dce110_stream_enc_registers *regs, 1582 + const struct dce_stream_encoder_shift *se_shift, 1583 + const struct dce_stream_encoder_mask *se_mask) 1584 1584 { 1585 1585 enc110->base.funcs = &dce110_an_str_enc_funcs; 1586 1586 enc110->base.ctx = ctx; 1587 1587 enc110->base.id = eng_id; 1588 1588 enc110->base.bp = bp; 1589 + enc110->regs = regs; 1590 + enc110->se_shift = se_shift; 1591 + enc110->se_mask = se_mask; 1589 1592 }
+10 -2
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
··· 65 65 SRI(AFMT_60958_1, DIG, id), \ 66 66 SRI(AFMT_60958_2, DIG, id), \ 67 67 SRI(DIG_FE_CNTL, DIG, id), \ 68 + SR(DAC_SOURCE_SELECT), \ 68 69 SRI(HDMI_CONTROL, DIG, id), \ 69 70 SRI(HDMI_GC, DIG, id), \ 70 71 SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ ··· 291 290 #define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\ 292 291 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ 293 292 SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ 294 - SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh) 293 + SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ 294 + SE_SF(DAC_SOURCE_SELECT, DAC_SOURCE_SELECT, mask_sh) 295 295 296 296 #define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\ 297 297 SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\ ··· 496 494 uint8_t DP_VID_N_MUL; 497 495 uint8_t DP_VID_M_DOUBLE_VALUE_EN; 498 496 uint8_t DIG_SOURCE_SELECT; 497 + uint8_t DAC_SOURCE_SELECT; 499 498 }; 500 499 501 500 struct dce_stream_encoder_mask { ··· 629 626 uint32_t DP_VID_N_MUL; 630 627 uint32_t DP_VID_M_DOUBLE_VALUE_EN; 631 628 uint32_t DIG_SOURCE_SELECT; 629 + uint32_t DAC_SOURCE_SELECT; 632 630 }; 633 631 634 632 struct dce110_stream_enc_registers { ··· 657 653 uint32_t AFMT_60958_1; 658 654 uint32_t AFMT_60958_2; 659 655 uint32_t DIG_FE_CNTL; 656 + uint32_t DAC_SOURCE_SELECT; 660 657 uint32_t DP_MSE_RATE_CNTL; 661 658 uint32_t DP_MSE_RATE_UPDATE; 662 659 uint32_t DP_PIXEL_FORMAT; ··· 717 712 struct dce110_stream_encoder *enc110, 718 713 struct dc_context *ctx, 719 714 struct dc_bios *bp, 720 - enum engine_id eng_id); 715 + enum engine_id eng_id, 716 + const struct dce110_stream_enc_registers *regs, 717 + const struct dce_stream_encoder_shift *se_shift, 718 + const struct dce_stream_encoder_mask *se_mask); 721 719 722 720 void dce110_se_audio_mute_control( 723 721 struct stream_encoder *enc, bool mute);
+10 -1
drivers/gpu/drm/amd/display/dc/dio/Makefile
··· 23 23 # 24 24 # 25 25 26 + ############################################################################### 27 + # VIRTUAL 28 + ############################################################################### 29 + DIO_VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o 30 + 31 + AMD_DAL_DIO_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/dio/virtual/,$(DIO_VIRTUAL)) 32 + 33 + AMD_DISPLAY_FILES += $(AMD_DAL_DIO_VIRTUAL) 34 + 26 35 ifdef CONFIG_DRM_AMD_DC_FP 27 36 ############################################################################### 28 37 # DCN10 29 38 ############################################################################### 30 - DIO_DCN10 = dcn10_link_encoder.o dcn10_stream_encoder.o 39 + DIO_DCN10 = dcn10_link_encoder.o dcn10_stream_encoder.o dcn10_dio.o 31 40 32 41 AMD_DAL_DIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/dio/dcn10/,$(DIO_DCN10)) 33 42
+47
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.c
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2025 Advanced Micro Devices, Inc. 4 + 5 + #include "dc_hw_types.h" 6 + #include "dm_services.h" 7 + #include "reg_helper.h" 8 + #include "dcn10_dio.h" 9 + 10 + #define CTX \ 11 + dio10->base.ctx 12 + #define REG(reg)\ 13 + dio10->regs->reg 14 + 15 + #undef FN 16 + #define FN(reg_name, field_name) \ 17 + dio10->shifts->field_name, dio10->masks->field_name 18 + 19 + static void dcn10_dio_mem_pwr_ctrl(struct dio *dio, bool enable_i2c_light_sleep) 20 + { 21 + struct dcn10_dio *dio10 = TO_DCN10_DIO(dio); 22 + 23 + /* power AFMT HDMI memory */ 24 + REG_WRITE(DIO_MEM_PWR_CTRL, 0); 25 + 26 + if (enable_i2c_light_sleep) 27 + REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); 28 + } 29 + 30 + static const struct dio_funcs dcn10_dio_funcs = { 31 + .mem_pwr_ctrl = dcn10_dio_mem_pwr_ctrl, 32 + }; 33 + 34 + void dcn10_dio_construct( 35 + struct dcn10_dio *dio10, 36 + struct dc_context *ctx, 37 + const struct dcn_dio_registers *regs, 38 + const struct dcn_dio_shift *shifts, 39 + const struct dcn_dio_mask *masks) 40 + { 41 + dio10->base.ctx = ctx; 42 + dio10->base.funcs = &dcn10_dio_funcs; 43 + 44 + dio10->regs = regs; 45 + dio10->shifts = shifts; 46 + dio10->masks = masks; 47 + }
+42
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_dio.h
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2025 Advanced Micro Devices, Inc. 4 + 5 + #ifndef __DCN10_DIO_H__ 6 + #define __DCN10_DIO_H__ 7 + 8 + #include "dio.h" 9 + 10 + #define TO_DCN10_DIO(dio_base) \ 11 + container_of(dio_base, struct dcn10_dio, base) 12 + 13 + #define DIO_REG_LIST_DCN10()\ 14 + SR(DIO_MEM_PWR_CTRL) 15 + 16 + struct dcn_dio_registers { 17 + uint32_t DIO_MEM_PWR_CTRL; 18 + }; 19 + 20 + struct dcn_dio_shift { 21 + uint8_t I2C_LIGHT_SLEEP_FORCE; 22 + }; 23 + 24 + struct dcn_dio_mask { 25 + uint32_t I2C_LIGHT_SLEEP_FORCE; 26 + }; 27 + 28 + struct dcn10_dio { 29 + struct dio base; 30 + const struct dcn_dio_registers *regs; 31 + const struct dcn_dio_shift *shifts; 32 + const struct dcn_dio_mask *masks; 33 + }; 34 + 35 + void dcn10_dio_construct( 36 + struct dcn10_dio *dio10, 37 + struct dc_context *ctx, 38 + const struct dcn_dio_registers *regs, 39 + const struct dcn_dio_shift *shifts, 40 + const struct dcn_dio_mask *masks); 41 + 42 + #endif /* __DCN10_DIO_H__ */
+1 -1
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 209 209 struct dc_sink *sink); 210 210 void dm_helpers_override_panel_settings( 211 211 struct dc_context *ctx, 212 - struct dc_panel_config *config); 212 + struct dc_link *link); 213 213 int dm_helper_dmub_aux_transfer_sync( 214 214 struct dc_context *ctx, 215 215 const struct dc_link *link,
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 164 164 }, 165 165 }, 166 166 .num_states = 5, 167 - .sr_exit_time_us = 28.0, 168 - .sr_enter_plus_exit_time_us = 30.0, 167 + .sr_exit_time_us = 31.0, 168 + .sr_enter_plus_exit_time_us = 33.0, 169 169 .sr_exit_z8_time_us = 250.0, 170 170 .sr_enter_plus_exit_z8_time_us = 350.0, 171 171 .fclk_change_latency_us = 24.0,
+1 -1
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
··· 655 655 dml_print("DML: soc_bbox: refclk_mhz = %f\n", soc->refclk_mhz); 656 656 dml_print("DML: soc_bbox: amclk_mhz = %f\n", soc->amclk_mhz); 657 657 658 - dml_print("DML: soc_bbox: max_outstanding_reqs = %f\n", soc->max_outstanding_reqs); 658 + dml_print("DML: soc_bbox: max_outstanding_reqs = %d\n", soc->max_outstanding_reqs); 659 659 dml_print("DML: soc_bbox: pct_ideal_sdp_bw_after_urgent = %f\n", soc->pct_ideal_sdp_bw_after_urgent); 660 660 dml_print("DML: soc_bbox: pct_ideal_fabric_bw_after_urgent = %f\n", soc->pct_ideal_fabric_bw_after_urgent); 661 661 dml_print("DML: soc_bbox: pct_ideal_dram_bw_after_urgent_pixel_only = %f\n", soc->pct_ideal_dram_bw_after_urgent_pixel_only);
+3
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
··· 115 115 unsigned int channel_width_bytes; 116 116 unsigned int channel_count; 117 117 unsigned int transactions_per_clock; 118 + bool alt_clock_bw_conversion; 118 119 }; 119 120 121 + #define ENABLE_WCK 120 122 struct dml2_soc_state_table { 123 + struct dml2_clk_table wck_ratio; 121 124 struct dml2_clk_table uclk; 122 125 struct dml2_clk_table fclk; 123 126 struct dml2_clk_table dcfclk;
+21 -5
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
··· 7077 7077 } 7078 7078 } 7079 7079 7080 - static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config) 7080 + static double uclk_khz_to_dram_bw_mbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config, const struct dml2_mcg_dram_bw_to_min_clk_table *dram_bw_table) 7081 7081 { 7082 7082 double bw_mbps = 0; 7083 - bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0; 7083 + unsigned int i; 7084 + 7085 + if (!dram_config->alt_clock_bw_conversion) 7086 + bw_mbps = ((double)uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock) / 1000.0; 7087 + else 7088 + for (i = 0; i < dram_bw_table->num_entries; i++) 7089 + if (dram_bw_table->entries[i].min_uclk_khz >= uclk_khz) { 7090 + bw_mbps = (double)dram_bw_table->entries[i].pre_derate_dram_bw_kbps / 1000.0; 7091 + break; 7092 + } 7093 + 7094 + DML_ASSERT(bw_mbps > 0); 7084 7095 7085 7096 return bw_mbps; 7086 7097 } ··· 7975 7964 mode_lib->ms.max_dispclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dispclk / 1000; 7976 7965 mode_lib->ms.max_dscclk_freq_mhz = (double)min_clk_table->max_clocks_khz.dscclk / 1000; 7977 7966 mode_lib->ms.max_dppclk_freq_mhz = (double)min_clk_table->max_ss_clocks_khz.dppclk / 1000; 7978 - mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); 7967 + mode_lib->ms.uclk_freq_mhz = (double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz / 1000.0; 7968 + if (!mode_lib->ms.uclk_freq_mhz) 7969 + mode_lib->ms.uclk_freq_mhz = dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config); 7979 7970 mode_lib->ms.dram_bw_mbps = 
((double)min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps / 1000); 7980 7971 mode_lib->ms.max_dram_bw_mbps = ((double)min_clk_table->dram_bw_table.entries[min_clk_table->dram_bw_table.num_entries - 1].pre_derate_dram_bw_kbps / 1000); 7981 7972 mode_lib->ms.qos_param_index = get_qos_param_index((unsigned int) (mode_lib->ms.uclk_freq_mhz * 1000.0), mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params); ··· 10420 10407 10421 10408 mode_lib->mp.Dcfclk = programming->min_clocks.dcn4x.active.dcfclk_khz / 1000.0; 10422 10409 mode_lib->mp.FabricClock = programming->min_clocks.dcn4x.active.fclk_khz / 1000.0; 10423 - mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config); 10410 + mode_lib->mp.dram_bw_mbps = uclk_khz_to_dram_bw_mbps(programming->min_clocks.dcn4x.active.uclk_khz, &mode_lib->soc.clk_table.dram_config, &min_clk_table->dram_bw_table); 10424 10411 mode_lib->mp.uclk_freq_mhz = programming->min_clocks.dcn4x.active.uclk_khz / 1000.0; 10425 10412 mode_lib->mp.GlobalDPPCLK = programming->min_clocks.dcn4x.dpprefclk_khz / 1000.0; 10426 10413 s->SOCCLK = (double)programming->min_clocks.dcn4x.socclk_khz / 1000; ··· 10498 10485 DML_LOG_VERBOSE("DML::%s: SOCCLK = %f\n", __func__, s->SOCCLK); 10499 10486 DML_LOG_VERBOSE("DML::%s: min_clk_index = %0d\n", __func__, in_out_params->min_clk_index); 10500 10487 DML_LOG_VERBOSE("DML::%s: min_clk_table min_fclk_khz = %ld\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_fclk_khz); 10501 - DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); 10488 + if (min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz) 10489 + DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = 
%f\n", __func__, min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].min_uclk_khz / 1000.0); 10490 + else 10491 + DML_LOG_VERBOSE("DML::%s: min_clk_table uclk_mhz = %f\n", __func__, dram_bw_kbps_to_uclk_mhz(min_clk_table->dram_bw_table.entries[in_out_params->min_clk_index].pre_derate_dram_bw_kbps, &mode_lib->soc.clk_table.dram_config)); 10502 10492 for (k = 0; k < mode_lib->mp.num_active_pipes; ++k) { 10503 10493 DML_LOG_VERBOSE("DML::%s: pipe=%d is in plane=%d\n", __func__, k, mode_lib->mp.pipe_plane[k]); 10504 10494 DML_LOG_VERBOSE("DML::%s: Per-plane DPPPerSurface[%0d] = %d\n", __func__, k, mode_lib->mp.NoOfDPP[k]);
+39 -20
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
··· 7 7 #include "dml_top_types.h" 8 8 #include "lib_float_math.h" 9 9 10 - static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const struct dml2_dram_params *dram_config) 10 + static double dram_bw_kbps_to_uclk_khz(unsigned long long bandwidth_kbps, const struct dml2_dram_params *dram_config, struct dml2_mcg_dram_bw_to_min_clk_table *dram_bw_table) 11 11 { 12 12 double uclk_khz = 0; 13 - unsigned long uclk_mbytes_per_tick = 0; 14 13 15 - uclk_mbytes_per_tick = dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock; 14 + if (!dram_config->alt_clock_bw_conversion) { 15 + unsigned long uclk_bytes_per_tick = 0; 16 16 17 - uclk_khz = (double)bandwidth_kbps / uclk_mbytes_per_tick; 17 + uclk_bytes_per_tick = dram_config->channel_count * dram_config->channel_width_bytes * dram_config->transactions_per_clock; 18 + uclk_khz = (double)bandwidth_kbps / uclk_bytes_per_tick; 19 + } else { 20 + unsigned int i; 21 + /* For lpddr5 bytes per tick changes with mpstate, use table to find uclk*/ 22 + for (i = 0; i < dram_bw_table->num_entries; i++) 23 + if (dram_bw_table->entries[i].pre_derate_dram_bw_kbps >= bandwidth_kbps) { 24 + uclk_khz = dram_bw_table->entries[i].min_uclk_khz; 25 + break; 26 + } 27 + } 18 28 19 29 return uclk_khz; 20 30 } ··· 44 34 *dcfclk = in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].min_dcfclk_khz; 45 35 *fclk = in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].min_fclk_khz; 46 36 *uclk = dram_bw_kbps_to_uclk_khz(in_out->min_clk_table->dram_bw_table.entries[min_clock_index_for_latency].pre_derate_dram_bw_kbps, 47 - &in_out->soc_bb->clk_table.dram_config); 37 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 48 38 } 49 39 50 40 static unsigned long dml_round_up(double a) ··· 63 53 double min_uclk_latency, min_fclk_latency, min_dcfclk_latency; 64 54 const struct dml2_core_mode_support_result *mode_support_result 
= &in_out->display_cfg->mode_support_result; 65 55 66 - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 67 - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100); 56 + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps 57 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)), 58 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 68 59 69 - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 70 60 if (in_out->display_cfg->display_config.hostvm_enable) 71 - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100); 61 + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps 62 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel_and_vm / 100)), 63 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 72 64 else 73 - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100); 65 + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.urgent_bw_dram_kbps 66 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)), 67 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 74 68 75 69 min_uclk_bw = min_uclk_urgent > min_uclk_avg ? 
min_uclk_urgent : min_uclk_avg; 76 70 ··· 111 97 const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result; 112 98 113 99 /* assumes DF throttling is enabled */ 114 - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 115 - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100); 100 + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps 101 + / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100)), 102 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 116 103 117 - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 118 - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100); 104 + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps 105 + / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_urgent.dram_derate_percent_pixel / 100)), 106 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 119 107 120 108 min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg; 121 109 ··· 144 128 in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? 
min_dcfclk_bw : min_dcfclk_latency); 145 129 146 130 /* assumes DF throttling is disabled */ 147 - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 148 - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100); 131 + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.average_bw_dram_kbps 132 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100)), 133 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 149 134 150 - min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 151 - min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100); 135 + min_uclk_urgent = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps 136 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100)), 137 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 152 138 153 139 min_uclk_bw = min_uclk_urgent > min_uclk_avg ? 
min_uclk_urgent : min_uclk_avg; 154 140 ··· 185 167 double min_uclk_latency, min_fclk_latency, min_dcfclk_latency; 186 168 const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result; 187 169 188 - min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.active.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config); 189 - min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100); 170 + min_uclk_avg = dram_bw_kbps_to_uclk_khz((unsigned long long)(mode_support_result->global.active.average_bw_dram_kbps 171 + / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.dram_derate_percent_pixel / 100)), 172 + &in_out->soc_bb->clk_table.dram_config, &in_out->min_clk_table->dram_bw_table); 190 173 191 174 min_fclk_avg = (double)mode_support_result->global.active.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes; 192 175 min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_idle_average.fclk_derate_percent / 100);
+1
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
··· 16 16 17 17 struct dram_bw_to_min_clk_table_entry { 18 18 unsigned long long pre_derate_dram_bw_kbps; 19 + unsigned long min_uclk_khz; 19 20 unsigned long min_fclk_khz; 20 21 unsigned long min_dcfclk_khz; 21 22 };
+18 -3
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
··· 376 376 377 377 tbl_entry.color_space = input_color_space; 378 378 379 - if (color_space >= COLOR_SPACE_YCBCR601) 380 - select = INPUT_CSC_SELECT_ICSC; 381 - else 379 + if (dpp3_should_bypass_post_csc_for_colorspace(color_space)) 382 380 select = INPUT_CSC_SELECT_BYPASS; 381 + else 382 + select = INPUT_CSC_SELECT_ICSC; 383 383 384 384 dpp3_program_post_csc(dpp_base, color_space, select, 385 385 &tbl_entry); ··· 1541 1541 return true; 1542 1542 } 1543 1543 1544 + bool dpp3_should_bypass_post_csc_for_colorspace(enum dc_color_space dc_color_space) 1545 + { 1546 + switch (dc_color_space) { 1547 + case COLOR_SPACE_UNKNOWN: 1548 + case COLOR_SPACE_SRGB: 1549 + case COLOR_SPACE_XR_RGB: 1550 + case COLOR_SPACE_SRGB_LIMITED: 1551 + case COLOR_SPACE_MSREF_SCRGB: 1552 + case COLOR_SPACE_2020_RGB_FULLRANGE: 1553 + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: 1554 + return true; 1555 + default: 1556 + return false; 1557 + } 1558 + }
+4
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
··· 644 644 645 645 void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, 646 646 struct dpp_grph_csc_adjustment *adjust); 647 + 648 + bool dpp3_should_bypass_post_csc_for_colorspace( 649 + enum dc_color_space dc_color_space); 650 + 647 651 #endif /* __DC_HWSS_DCN30_H__ */
+3 -3
drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
··· 206 206 207 207 tbl_entry.color_space = input_color_space; 208 208 209 - if (color_space >= COLOR_SPACE_YCBCR601) 210 - select = INPUT_CSC_SELECT_ICSC; 211 - else 209 + if (dpp3_should_bypass_post_csc_for_colorspace(color_space)) 212 210 select = INPUT_CSC_SELECT_BYPASS; 211 + else 212 + select = INPUT_CSC_SELECT_ICSC; 213 213 214 214 dpp3_program_post_csc(dpp_base, color_space, select, 215 215 &tbl_entry);
+1
drivers/gpu/drm/amd/display/dc/dsc/dsc.h
··· 115 115 void (*dsc_disconnect)(struct display_stream_compressor *dsc); 116 116 void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); 117 117 void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); 118 + void (*set_fgcg)(struct display_stream_compressor *dsc, bool enable); 118 119 }; 119 120 120 121 #endif
+5 -5
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
··· 41 41 #define FN(reg_name, field_name) \ 42 42 hubbub2->shifts->field_name, hubbub2->masks->field_name 43 43 44 - static void dcn401_init_crb(struct hubbub *hubbub) 44 + void dcn401_init_crb(struct hubbub *hubbub) 45 45 { 46 46 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 47 47 ··· 1110 1110 return true; 1111 1111 } 1112 1112 1113 - static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) 1113 + void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg) 1114 1114 { 1115 1115 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1116 1116 ··· 1147 1147 } 1148 1148 } 1149 1149 1150 - static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) 1150 + void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase) 1151 1151 { 1152 1152 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1153 1153 ··· 1170 1170 } 1171 1171 } 1172 1172 1173 - static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) 1173 + void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst) 1174 1174 { 1175 1175 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1176 1176 ··· 1192 1192 } 1193 1193 } 1194 1194 1195 - static bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) 1195 + bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower) 1196 1196 { 1197 1197 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 1198 1198
+10
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
··· 194 194 const struct dc_dcc_surface_param *input, 195 195 struct dc_surface_dcc_cap *output); 196 196 197 + bool dcn401_program_arbiter( 198 + struct hubbub *hubbub, 199 + struct dml2_display_arb_regs *arb_regs, 200 + bool safe_to_lower); 201 + 197 202 void hubbub401_construct(struct dcn20_hubbub *hubbub2, 198 203 struct dc_context *ctx, 199 204 const struct dcn_hubbub_registers *hubbub_regs, ··· 207 202 int det_size_kb, 208 203 int pixel_chunk_size_kb, 209 204 int config_return_buffer_size_kb); 205 + 206 + void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); 207 + void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); 208 + void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst); 209 + void dcn401_init_crb(struct hubbub *hubbub); 210 210 211 211 #endif
+13 -46
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 660 660 } 661 661 } 662 662 663 - static void 664 - dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) 665 - { 666 - struct dc_link *link = pipe_ctx->stream->link; 667 - struct dc_bios *bios = link->ctx->dc_bios; 668 - struct bp_encoder_control encoder_control = {0}; 669 - 670 - encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; 671 - encoder_control.engine_id = link->link_enc->analog_engine; 672 - encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; 673 - 674 - bios->funcs->encoder_control(bios, &encoder_control); 675 - } 676 - 677 663 void dce110_enable_stream(struct pipe_ctx *pipe_ctx) 678 664 { 679 665 enum dc_lane_count lane_count = ··· 690 704 691 705 tg->funcs->set_early_control(tg, early_control); 692 706 693 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 694 - dce110_dac_encoder_control(pipe_ctx, true); 695 707 } 696 708 697 709 static enum bp_result link_transmitter_control( ··· 1183 1199 dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, 1184 1200 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 1185 1201 } 1186 - 1187 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1188 - dce110_dac_encoder_control(pipe_ctx, false); 1189 1202 } 1190 1203 1191 1204 void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, ··· 1565 1584 return DC_OK; 1566 1585 } 1567 1586 1568 - static void 1569 - dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) 1570 - { 1571 - struct dc_link *link = pipe_ctx->stream->link; 1572 - struct dc_bios *bios = link->ctx->dc_bios; 1573 - struct bp_crtc_source_select crtc_source_select = {0}; 1574 - enum engine_id engine_id = link->link_enc->preferred_engine; 1575 - 1576 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1577 - engine_id = link->link_enc->analog_engine; 1578 - 1579 - crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; 1580 - crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth; 1581 
- crtc_source_select.engine_id = engine_id; 1582 - crtc_source_select.sink_signal = pipe_ctx->stream->signal; 1583 - 1584 - bios->funcs->select_crtc_source(bios, &crtc_source_select); 1585 - } 1586 - 1587 1587 enum dc_status dce110_apply_single_controller_ctx_to_hw( 1588 1588 struct pipe_ctx *pipe_ctx, 1589 1589 struct dc_state *context, ··· 1582 1620 1583 1621 if (hws->funcs.disable_stream_gating) { 1584 1622 hws->funcs.disable_stream_gating(dc, pipe_ctx); 1585 - } 1586 - 1587 - if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { 1588 - dce110_select_crtc_source(pipe_ctx); 1589 1623 } 1590 1624 1591 1625 if (pipe_ctx->stream_res.audio != NULL) { ··· 1663 1705 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 1664 1706 pipe_ctx->stream_res.tg, event_triggers, 2); 1665 1707 1666 - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && 1667 - !dc_is_rgb_signal(pipe_ctx->stream->signal)) 1708 + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) 1668 1709 pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( 1669 1710 pipe_ctx->stream_res.stream_enc, 1670 1711 pipe_ctx->stream_res.tg->inst); ··· 1921 1964 1922 1965 get_edp_streams(context, edp_streams, &edp_stream_num); 1923 1966 1924 - /* Check fastboot support, disable on DCE 6-8 because of blank screens */ 1925 - if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { 1967 + /* Check fastboot support, disable on DCE 6-8-10 because of blank screens */ 1968 + if (edp_num && edp_stream_num && dc->ctx->dce_version > DCE_VERSION_10_0) { 1926 1969 for (i = 0; i < edp_num; i++) { 1927 1970 edp_link = edp_links[i]; 1928 1971 if (edp_link != edp_streams[0]->link) ··· 3261 3304 link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 3262 3305 } 3263 3306 3307 + static void dce110_enable_analog_link_output( 3308 + struct dc_link *link, 3309 + uint32_t pix_clk_100hz) 3310 + { 3311 + link->link_enc->funcs->enable_analog_output( 3312 + link->link_enc, 3313 + pix_clk_100hz); 3314 + } 3315 + 3264 3316 
void dce110_enable_dp_link_output( 3265 3317 struct dc_link *link, 3266 3318 const struct link_resource *link_res, ··· 3407 3441 .enable_lvds_link_output = dce110_enable_lvds_link_output, 3408 3442 .enable_tmds_link_output = dce110_enable_tmds_link_output, 3409 3443 .enable_dp_link_output = dce110_enable_dp_link_output, 3444 + .enable_analog_link_output = dce110_enable_analog_link_output, 3410 3445 .disable_link_output = dce110_disable_link_output, 3411 3446 }; 3412 3447
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 50 50 #include "link_hwss.h" 51 51 #include "dpcd_defs.h" 52 52 #include "dsc.h" 53 + #include "dio/dcn10/dcn10_dio.h" 53 54 #include "dce/dmub_psr.h" 54 55 #include "dc_dmub_srv.h" 55 56 #include "dce/dmub_hw_lock_mgr.h" ··· 1882 1881 1883 1882 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 1884 1883 if (!is_optimized_init_done) 1885 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 1884 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 1885 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); 1886 1886 1887 1887 if (!dc->debug.disable_clock_gate) { 1888 1888 /* enable all DCN clock gating */ 1889 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 1890 - 1891 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 1889 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 1890 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 1892 1891 1893 1892 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 1894 1893 }
+9 -21
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 357 357 358 358 void dcn20_dccg_init(struct dce_hwseq *hws) 359 359 { 360 - /* 361 - * set MICROSECOND_TIME_BASE_DIV 362 - * 100Mhz refclk -> 0x120264 363 - * 27Mhz refclk -> 0x12021b 364 - * 48Mhz refclk -> 0x120230 365 - * 366 - */ 367 - REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264); 360 + struct dc *dc = hws->ctx->dc; 368 361 369 - /* 370 - * set MILLISECOND_TIME_BASE_DIV 371 - * 100Mhz refclk -> 0x1186a0 372 - * 27Mhz refclk -> 0x106978 373 - * 48Mhz refclk -> 0x10bb80 374 - * 375 - */ 376 - REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0); 377 - 378 - /* This value is dependent on the hardware pipeline delay so set once per SOC */ 379 - REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c); 362 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->dccg_init) 363 + dc->res_pool->dccg->funcs->dccg_init(dc->res_pool->dccg); 380 364 } 381 365 382 366 void dcn20_disable_vga( ··· 3139 3155 REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); 3140 3156 3141 3157 dcn10_hubbub_global_timer_enable(dc->res_pool->hubbub, true, 2); 3142 - if (REG(REFCLK_CNTL)) 3143 - REG_WRITE(REFCLK_CNTL, 0); 3158 + 3159 + if (hws->funcs.dccg_init) 3160 + hws->funcs.dccg_init(hws); 3161 + 3162 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->refclk_setup) 3163 + dc->res_pool->dccg->funcs->refclk_setup(dc->res_pool->dccg); 3144 3164 // 3145 3165 3146 3166
+6 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
··· 40 40 #include "clk_mgr.h" 41 41 #include "reg_helper.h" 42 42 #include "dcn10/dcn10_hubbub.h" 43 + #include "dio/dcn10/dcn10_dio.h" 44 + 43 45 44 46 #define CTX \ 45 47 hws->ctx ··· 362 360 } 363 361 364 362 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 365 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 363 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 364 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); 366 365 367 366 if (!dc->debug.disable_clock_gate) { 368 367 /* enable all DCN clock gating */ 369 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 370 - 371 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 368 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 369 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 372 370 373 371 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 374 372 }
+4 -5
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 33 33 #include "vmid.h" 34 34 #include "reg_helper.h" 35 35 #include "hw/clk_mgr.h" 36 + #include "hw/dccg.h" 36 37 #include "dc_dmub_srv.h" 37 38 #include "abm.h" 38 39 #include "link_service.h" ··· 88 87 89 88 bool dcn21_s0i3_golden_init_wa(struct dc *dc) 90 89 { 91 - struct dce_hwseq *hws = dc->hwseq; 92 - uint32_t value = 0; 90 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done) 91 + return !dc->res_pool->dccg->funcs->is_s0i3_golden_init_wa_done(dc->res_pool->dccg); 93 92 94 - value = REG_READ(MICROSECOND_TIME_BASE_DIV); 95 - 96 - return value != 0x00120464; 93 + return false; 97 94 } 98 95 99 96 void dcn21_exit_optimized_pwr_state(
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 53 53 #include "dcn30/dcn30_resource.h" 54 54 #include "link_service.h" 55 55 #include "dc_state_priv.h" 56 + #include "dio/dcn10/dcn10_dio.h" 56 57 57 58 #define TO_DCN_DCCG(dccg)\ 58 59 container_of(dccg, struct dcn_dccg, base) ··· 796 795 } 797 796 798 797 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 799 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 798 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 799 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); 800 800 801 801 if (!dc->debug.disable_clock_gate) { 802 802 /* enable all DCN clock gating */ 803 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 804 - 805 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 803 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 804 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 806 805 807 806 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 808 807 }
+6 -9
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
··· 53 53 #include "dcn30/dcn30_vpg.h" 54 54 #include "dce/dce_i2c_hw.h" 55 55 #include "dce/dmub_abm_lcd.h" 56 + #include "dio/dcn10/dcn10_dio.h" 56 57 57 58 #define DC_LOGGER_INIT(logger) 58 59 ··· 238 237 abms[i]->funcs->abm_init(abms[i], backlight, user_level); 239 238 } 240 239 241 - /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 242 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 243 - 244 - // Set i2c to light sleep until engine is setup 245 - if (dc->debug.enable_mem_low_power.bits.i2c) 246 - REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); 240 + /* Power on DIO memory (AFMT HDMI) and set I2C to light sleep */ 241 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 242 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, dc->debug.enable_mem_low_power.bits.i2c); 247 243 248 244 if (hws->funcs.setup_hpo_hw_control) 249 245 hws->funcs.setup_hpo_hw_control(hws, false); 250 246 251 247 if (!dc->debug.disable_clock_gate) { 252 248 /* enable all DCN clock gating */ 253 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 254 - 255 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 249 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 250 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 256 251 257 252 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 258 253 }
+8 -7
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 52 52 #include "link_service.h" 53 53 #include "../dcn20/dcn20_hwseq.h" 54 54 #include "dc_state_priv.h" 55 + #include "dio/dcn10/dcn10_dio.h" 55 56 56 57 #define DC_LOGGER_INIT(logger) 57 58 ··· 486 485 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 487 486 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 488 487 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 489 - bool result = true; 488 + bool rval, result; 490 489 const struct pwl_params *lut_params = NULL; 491 490 492 491 // 1D LUT ··· 509 508 lut_params = &plane_state->in_shaper_func.pwl; 510 509 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 511 510 // TODO: dpp_base replace 512 - cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 511 + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 513 512 &plane_state->in_shaper_func, 514 513 &dpp_base->shaper_params, true); 515 - lut_params = &dpp_base->shaper_params; 514 + lut_params = rval ? &dpp_base->shaper_params : NULL; 516 515 } 517 516 518 517 mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); ··· 958 957 } 959 958 960 959 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 961 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 960 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 961 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); 962 962 963 963 if (!dc->debug.disable_clock_gate) { 964 964 /* enable all DCN clock gating */ 965 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 966 - 967 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 965 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 966 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 968 967 969 968 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 970 969 }
+6 -7
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 53 53 #include "dcn30/dcn30_vpg.h" 54 54 #include "dce/dce_i2c_hw.h" 55 55 #include "dsc.h" 56 + #include "dio/dcn10/dcn10_dio.h" 56 57 #include "dcn20/dcn20_optc.h" 57 58 #include "dcn30/dcn30_cm_common.h" 58 59 #include "dcn31/dcn31_hwseq.h" ··· 273 272 } 274 273 } 275 274 276 - /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 277 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 278 - 279 - // Set i2c to light sleep until engine is setup 280 - if (dc->debug.enable_mem_low_power.bits.i2c) 281 - REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0); 275 + /* Power on DIO memory (AFMT HDMI) and optionally disable I2C light sleep */ 276 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 277 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, !dc->debug.enable_mem_low_power.bits.i2c); 282 278 283 279 if (hws->funcs.setup_hpo_hw_control) 284 280 hws->funcs.setup_hpo_hw_control(hws, false); ··· 286 288 } 287 289 288 290 if (dc->debug.disable_mem_low_power) { 289 - REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1); 291 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->enable_memory_low_power) 292 + dc->res_pool->dccg->funcs->enable_memory_low_power(dc->res_pool->dccg, false); 290 293 } 291 294 if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) 292 295 dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 39 39 #include "dc_state_priv.h" 40 40 #include "link_enc_cfg.h" 41 41 #include "../hw_sequencer.h" 42 + #include "dio/dcn10/dcn10_dio.h" 42 43 43 44 #define DC_LOGGER_INIT(logger) 44 45 ··· 321 320 } 322 321 323 322 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ 324 - REG_WRITE(DIO_MEM_PWR_CTRL, 0); 323 + if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl) 324 + dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false); 325 325 326 326 if (!dc->debug.disable_clock_gate) { 327 327 /* enable all DCN clock gating */ 328 - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); 329 - 330 - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); 328 + if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating) 329 + dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true); 331 330 332 331 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); 333 332 }
+2
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
··· 1184 1184 const struct link_resource *link_res, 1185 1185 enum clock_source_id clock_source, 1186 1186 uint32_t pixel_clock); 1187 + void (*enable_analog_link_output)(struct dc_link *link, 1188 + uint32_t pixel_clock); 1187 1189 void (*disable_link_output)(struct dc_link *link, 1188 1190 const struct link_resource *link_res, 1189 1191 enum signal_type signal);
+2
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 35 35 #include "hubp.h" 36 36 #include "mpc.h" 37 37 #include "dwb.h" 38 + #include "hw/dio.h" 38 39 #include "mcif_wb.h" 39 40 #include "panel_cntl.h" 40 41 #include "dmub/inc/dmub_cmd.h" ··· 251 250 struct timing_generator *timing_generators[MAX_PIPES]; 252 251 struct stream_encoder *stream_enc[MAX_PIPES * 2]; 253 252 struct hubbub *hubbub; 253 + struct dio *dio; 254 254 struct mpc *mpc; 255 255 struct pp_smu_funcs *pp_smu; 256 256 struct dce_aux *engines[MAX_PIPES];
+4
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
··· 224 224 void (*otg_drop_pixel)(struct dccg *dccg, 225 225 uint32_t otg_inst); 226 226 void (*dccg_init)(struct dccg *dccg); 227 + void (*refclk_setup)(struct dccg *dccg); /* Deprecated - for backward compatibility only */ 228 + void (*allow_clock_gating)(struct dccg *dccg, bool allow); 229 + void (*enable_memory_low_power)(struct dccg *dccg, bool enable); 230 + bool (*is_s0i3_golden_init_wa_done)(struct dccg *dccg); 227 231 void (*set_dpstreamclk_root_clock_gating)( 228 232 struct dccg *dccg, 229 233 int dp_hpo_inst,
+22
drivers/gpu/drm/amd/display/dc/inc/hw/dio.h
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2025 Advanced Micro Devices, Inc. 4 + 5 + #ifndef __DC_DIO_H__ 6 + #define __DC_DIO_H__ 7 + 8 + #include "dc_types.h" 9 + 10 + struct dc_context; 11 + struct dio; 12 + 13 + struct dio_funcs { 14 + void (*mem_pwr_ctrl)(struct dio *dio, bool enable_i2c_light_sleep); 15 + }; 16 + 17 + struct dio { 18 + const struct dio_funcs *funcs; 19 + struct dc_context *ctx; 20 + }; 21 + 22 + #endif /* __DC_DIO_H__ */
+7
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
··· 122 122 VBLANK_SYNCHRONIZABLE 123 123 }; 124 124 125 + enum crc_poly_mode { 126 + CRC_POLY_MODE_16, 127 + CRC_POLY_MODE_32, 128 + CRC_POLY_MODE_MAX, 129 + }; 130 + 125 131 struct crc_params { 126 132 /* Regions used to calculate CRC*/ 127 133 uint16_t windowa_x_start; ··· 150 144 151 145 uint8_t crc_eng_inst; 152 146 bool reset; 147 + enum crc_poly_mode crc_poly_mode; 153 148 }; 154 149 155 150 struct dcn_otg_state {
+2 -1
drivers/gpu/drm/amd/display/dc/link/Makefile
··· 43 43 # hwss 44 44 ############################################################################### 45 45 LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \ 46 - link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o 46 + link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o \ 47 + link_hwss_virtual.o 47 48 48 49 AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \ 49 50 $(LINK_HWSS))
+1 -2
drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
··· 724 724 return false; 725 725 } 726 726 727 - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) 728 - mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); 727 + mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true); 729 728 730 729 current_mode = mpc32_get_shaper_current(mpc, mpcc_id); 731 730
+18 -1
drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
··· 244 244 uint32_t OTG_TRIGB_MANUAL_TRIG; \ 245 245 uint32_t OTG_UPDATE_LOCK; \ 246 246 uint32_t OTG_V_TOTAL_INT_STATUS; \ 247 - uint32_t OTG_VSYNC_NOM_INT_STATUS 247 + uint32_t OTG_VSYNC_NOM_INT_STATUS; \ 248 + uint32_t OTG_CRC0_DATA_R32; \ 249 + uint32_t OTG_CRC0_DATA_G32; \ 250 + uint32_t OTG_CRC0_DATA_B32; \ 251 + uint32_t OTG_CRC1_DATA_R32; \ 252 + uint32_t OTG_CRC1_DATA_G32; \ 253 + uint32_t OTG_CRC1_DATA_B32 248 254 249 255 250 256 struct dcn_optc_registers { ··· 663 657 type OTG_V_COUNT_STOP;\ 664 658 type OTG_V_COUNT_STOP_TIMER; 665 659 660 + #define TG_REG_FIELD_LIST_DCN3_6(type) \ 661 + type OTG_CRC_POLY_SEL; \ 662 + type CRC0_R_CR32; \ 663 + type CRC0_G_Y32; \ 664 + type CRC0_B_CB32; \ 665 + type CRC1_R_CR32; \ 666 + type CRC1_G_Y32; \ 667 + type CRC1_B_CB32; 668 + 666 669 #define TG_REG_FIELD_LIST_DCN401(type) \ 667 670 type OPTC_SEGMENT_WIDTH_LAST;\ 668 671 type OTG_PSTATE_KEEPOUT_START;\ ··· 685 670 TG_REG_FIELD_LIST_DCN2_0(uint8_t) 686 671 TG_REG_FIELD_LIST_DCN3_2(uint8_t) 687 672 TG_REG_FIELD_LIST_DCN3_5(uint8_t) 673 + TG_REG_FIELD_LIST_DCN3_6(uint8_t) 688 674 TG_REG_FIELD_LIST_DCN401(uint8_t) 689 675 }; 690 676 ··· 694 678 TG_REG_FIELD_LIST_DCN2_0(uint32_t) 695 679 TG_REG_FIELD_LIST_DCN3_2(uint32_t) 696 680 TG_REG_FIELD_LIST_DCN3_5(uint32_t) 681 + TG_REG_FIELD_LIST_DCN3_6(uint32_t) 697 682 TG_REG_FIELD_LIST_DCN401(uint32_t) 698 683 }; 699 684
+98 -4
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
··· 180 180 REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); 181 181 } 182 182 183 - static bool optc35_configure_crc(struct timing_generator *optc, 183 + /** 184 + * optc35_get_crc - Capture CRC result per component 185 + * 186 + * @optc: timing_generator instance. 187 + * @idx: index of crc engine to get CRC from 188 + * @r_cr: primary CRC signature for red data. 189 + * @g_y: primary CRC signature for green data. 190 + * @b_cb: primary CRC signature for blue data. 191 + * 192 + * This function reads the CRC signature from the OPTC registers. Notice that 193 + * we have three registers to keep the CRC result per color component (RGB). 194 + * 195 + * For different DCN versions: 196 + * - If CRC32 registers (OTG_CRC0_DATA_R32/G32/B32) are available, read from 197 + * 32-bit CRC registers. DCN 3.6+ supports both CRC-32 and CRC-16 polynomials 198 + * selectable via OTG_CRC_POLY_SEL. 199 + * - Otherwise, read from legacy 16-bit CRC registers (OTG_CRC0_DATA_RG/B) 200 + * which only support CRC-16 polynomial. 201 + * 202 + * Returns: 203 + * If CRC is disabled, return false; otherwise, return true, and the CRC 204 + * results in the parameters. 
205 + */ 206 + static bool optc35_get_crc(struct timing_generator *optc, uint8_t idx, 207 + uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 208 + { 209 + uint32_t field = 0; 210 + struct optc *optc1 = DCN10TG_FROM_TG(optc); 211 + 212 + REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field); 213 + 214 + /* Early return if CRC is not enabled for this CRTC */ 215 + if (!field) 216 + return false; 217 + 218 + if (optc1->tg_mask->CRC0_R_CR32 != 0 && optc1->tg_mask->CRC1_R_CR32 != 0 && 219 + optc1->tg_mask->CRC0_G_Y32 != 0 && optc1->tg_mask->CRC1_G_Y32 != 0 && 220 + optc1->tg_mask->CRC0_B_CB32 != 0 && optc1->tg_mask->CRC1_B_CB32 != 0) { 221 + switch (idx) { 222 + case 0: 223 + /* OTG_CRC0_DATA_R32/G32/B32 has the CRC32 results */ 224 + REG_GET(OTG_CRC0_DATA_R32, 225 + CRC0_R_CR32, r_cr); 226 + REG_GET(OTG_CRC0_DATA_G32, 227 + CRC0_G_Y32, g_y); 228 + REG_GET(OTG_CRC0_DATA_B32, 229 + CRC0_B_CB32, b_cb); 230 + break; 231 + case 1: 232 + /* OTG_CRC1_DATA_R32/G32/B32 has the CRC32 results */ 233 + REG_GET(OTG_CRC1_DATA_R32, 234 + CRC1_R_CR32, r_cr); 235 + REG_GET(OTG_CRC1_DATA_G32, 236 + CRC1_G_Y32, g_y); 237 + REG_GET(OTG_CRC1_DATA_B32, 238 + CRC1_B_CB32, b_cb); 239 + break; 240 + default: 241 + return false; 242 + } 243 + } else { 244 + switch (idx) { 245 + case 0: 246 + /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */ 247 + REG_GET_2(OTG_CRC0_DATA_RG, 248 + CRC0_R_CR, r_cr, 249 + CRC0_G_Y, g_y); 250 + 251 + /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */ 252 + REG_GET(OTG_CRC0_DATA_B, 253 + CRC0_B_CB, b_cb); 254 + break; 255 + case 1: 256 + /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */ 257 + REG_GET_2(OTG_CRC1_DATA_RG, 258 + CRC1_R_CR, r_cr, 259 + CRC1_G_Y, g_y); 260 + 261 + /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */ 262 + REG_GET(OTG_CRC1_DATA_B, 263 + CRC1_B_CB, b_cb); 264 + break; 265 + default: 266 + return false; 267 + } 268 + } 269 + 270 + return true; 271 + } 272 + 273 + bool 
optc35_configure_crc(struct timing_generator *optc, 184 274 const struct crc_params *params) 185 275 { 186 276 struct optc *optc1 = DCN10TG_FROM_TG(optc); ··· 356 266 default: 357 267 return false; 358 268 } 269 + if (optc1->tg_mask->OTG_CRC_POLY_SEL != 0) { 270 + REG_UPDATE(OTG_CRC_CNTL, 271 + OTG_CRC_POLY_SEL, params->crc_poly_mode); 272 + } 359 273 return true; 360 274 } 361 275 ··· 437 343 REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); 438 344 } 439 345 440 - static void optc35_set_long_vtotal( 346 + void optc35_set_long_vtotal( 441 347 struct timing_generator *optc, 442 348 const struct long_vtotal_params *params) 443 349 { ··· 524 430 } 525 431 } 526 432 527 - static void optc35_wait_otg_disable(struct timing_generator *optc) 433 + void optc35_wait_otg_disable(struct timing_generator *optc) 528 434 { 529 435 struct optc *optc1; 530 436 uint32_t is_master_en; ··· 582 488 .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, 583 489 .clear_optc_underflow = optc1_clear_optc_underflow, 584 490 .setup_global_swap_lock = NULL, 585 - .get_crc = optc1_get_crc, 491 + .get_crc = optc35_get_crc, 586 492 .configure_crc = optc35_configure_crc, 587 493 .set_dsc_config = optc3_set_dsc_config, 588 494 .get_dsc_status = optc2_get_dsc_status,
+19
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
··· 74 74 SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\ 75 75 SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh) 76 76 77 + #define OPTC_COMMON_MASK_SH_LIST_DCN3_6(mask_sh)\ 78 + OPTC_COMMON_MASK_SH_LIST_DCN3_5(mask_sh),\ 79 + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_POLY_SEL, mask_sh),\ 80 + SF(OTG_CRC320_OTG_CRC0_DATA_R32, CRC0_R_CR32, mask_sh),\ 81 + SF(OTG_CRC320_OTG_CRC0_DATA_G32, CRC0_G_Y32, mask_sh),\ 82 + SF(OTG_CRC320_OTG_CRC0_DATA_B32, CRC0_B_CB32, mask_sh),\ 83 + SF(OTG_CRC320_OTG_CRC1_DATA_R32, CRC1_R_CR32, mask_sh),\ 84 + SF(OTG_CRC320_OTG_CRC1_DATA_G32, CRC1_G_Y32, mask_sh),\ 85 + SF(OTG_CRC320_OTG_CRC1_DATA_B32, CRC1_B_CB32, mask_sh) 86 + 77 87 void dcn35_timing_generator_init(struct optc *optc1); 78 88 79 89 void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); 80 90 81 91 void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); 92 + 93 + void optc35_set_long_vtotal( 94 + struct timing_generator *optc, 95 + const struct long_vtotal_params *params); 96 + 97 + bool optc35_configure_crc(struct timing_generator *optc, 98 + const struct crc_params *params); 99 + 100 + void optc35_wait_otg_disable(struct timing_generator *optc); 82 101 83 102 #endif /* __DC_OPTC_DCN35_H__ */
+12 -6
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 31 31 #include "resource.h" 32 32 #include "clk_mgr.h" 33 33 #include "include/irq_service_interface.h" 34 - #include "virtual/virtual_stream_encoder.h" 34 + #include "dio/virtual/virtual_stream_encoder.h" 35 35 #include "dce110/dce110_resource.h" 36 36 #include "dce110/dce110_timing_generator.h" 37 37 #include "irq/dce110/irq_service_dce110.h" ··· 226 226 link_regs(4), 227 227 link_regs(5), 228 228 link_regs(6), 229 - { .DAC_ENABLE = mmDAC_ENABLE }, 229 + {0} 230 230 }; 231 231 232 232 #define stream_enc_regs(id)\ ··· 242 242 stream_enc_regs(3), 243 243 stream_enc_regs(4), 244 244 stream_enc_regs(5), 245 - stream_enc_regs(6) 245 + stream_enc_regs(6), 246 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 246 247 }; 247 248 248 249 static const struct dce_stream_encoder_shift se_shift = { ··· 492 491 return NULL; 493 492 494 493 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 495 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 494 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 495 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 496 496 return &enc110->base; 497 497 } 498 498 ··· 640 638 if (!enc110) 641 639 return NULL; 642 640 643 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 641 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 642 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 644 643 dce110_link_encoder_construct(enc110, 645 644 enc_init_data, 646 645 &link_enc_feature, ··· 981 978 struct dc_link *link = stream->link; 982 979 enum engine_id preferred_engine = link->link_enc->preferred_engine; 983 980 984 - if (dc_is_rgb_signal(stream->signal)) 981 + /* Prefer analog engine if the link encoder has one. 982 + * Otherwise, it's an external encoder. 983 + */ 984 + if (dc_is_rgb_signal(stream->signal) && link->link_enc->analog_engine != ENGINE_ID_UNKNOWN) 985 985 preferred_engine = link->link_enc->analog_engine; 986 986 987 987 for (i = 0; i < pool->stream_enc_count; i++) {
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
··· 35 35 #include "dce112/dce112_resource.h" 36 36 37 37 #include "dce110/dce110_resource.h" 38 - #include "virtual/virtual_stream_encoder.h" 38 + #include "dio/virtual/virtual_stream_encoder.h" 39 39 #include "dce120/dce120_timing_generator.h" 40 40 #include "irq/dce120/irq_service_dce120.h" 41 41 #include "dce/dce_opp.h"
+9 -5
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 243 243 link_regs(4), 244 244 link_regs(5), 245 245 {0}, 246 - { .DAC_ENABLE = mmDAC_ENABLE }, 246 + {0} 247 247 }; 248 248 249 249 #define stream_enc_regs(id)\ ··· 258 258 stream_enc_regs(2), 259 259 stream_enc_regs(3), 260 260 stream_enc_regs(4), 261 - stream_enc_regs(5) 261 + stream_enc_regs(5), 262 + {0}, 263 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 262 264 }; 263 265 264 266 static const struct dce_stream_encoder_shift se_shift = { ··· 609 607 return NULL; 610 608 611 609 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 612 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 610 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 611 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 613 612 return &enc110->base; 614 613 } 615 614 ··· 736 733 if (!enc110) 737 734 return NULL; 738 735 739 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 740 - dce110_link_encoder_construct(enc110, 736 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 737 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 738 + dce60_link_encoder_construct(enc110, 741 739 enc_init_data, 742 740 &link_enc_feature, 743 741 &link_enc_regs[ENGINE_ID_DACA],
+7 -4
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 242 242 link_regs(4), 243 243 link_regs(5), 244 244 link_regs(6), 245 - { .DAC_ENABLE = mmDAC_ENABLE }, 245 + {0} 246 246 }; 247 247 248 248 #define stream_enc_regs(id)\ ··· 258 258 stream_enc_regs(3), 259 259 stream_enc_regs(4), 260 260 stream_enc_regs(5), 261 - stream_enc_regs(6) 261 + stream_enc_regs(6), 262 + {SR(DAC_SOURCE_SELECT),} /* DACA */ 262 263 }; 263 264 264 265 static const struct dce_stream_encoder_shift se_shift = { ··· 615 614 return NULL; 616 615 617 616 if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { 618 - dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); 617 + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, 618 + &stream_enc_regs[eng_id], &se_shift, &se_mask); 619 619 return &enc110->base; 620 620 } 621 621 ··· 742 740 if (!enc110) 743 741 return NULL; 744 742 745 - if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { 743 + if (enc_init_data->connector.id == CONNECTOR_ID_VGA && 744 + enc_init_data->analog_engine != ENGINE_ID_UNKNOWN) { 746 745 dce110_link_encoder_construct(enc110, 747 746 enc_init_data, 748 747 &link_enc_feature,
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 48 48 #include "dce/dce_clock_source.h" 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 - #include "virtual/virtual_stream_encoder.h" 51 + #include "dio/virtual/virtual_stream_encoder.h" 52 52 #include "dce110/dce110_resource.h" 53 53 #include "dce112/dce112_resource.h" 54 54 #include "dcn10/dcn10_hubp.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 55 55 #include "dce/dce_clock_source.h" 56 56 #include "dce/dce_audio.h" 57 57 #include "dce/dce_hwseq.h" 58 - #include "virtual/virtual_stream_encoder.h" 58 + #include "dio/virtual/virtual_stream_encoder.h" 59 59 #include "dce110/dce110_resource.h" 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn20/dcn20_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
··· 51 51 #include "dce/dce_clock_source.h" 52 52 #include "dce/dce_audio.h" 53 53 #include "dce/dce_hwseq.h" 54 - #include "virtual/virtual_stream_encoder.h" 54 + #include "dio/virtual/virtual_stream_encoder.h" 55 55 #include "dce110/dce110_resource.h" 56 56 #include "dce/dce_aux.h" 57 57 #include "dce/dce_i2c.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 57 57 #include "dce/dce_clock_source.h" 58 58 #include "dce/dce_audio.h" 59 59 #include "dce/dce_hwseq.h" 60 - #include "virtual/virtual_stream_encoder.h" 60 + #include "dio/virtual/virtual_stream_encoder.h" 61 61 #include "dml/display_mode_vba.h" 62 62 #include "dcn20/dcn20_dccg.h" 63 63 #include "dcn21/dcn21_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 55 55 #include "dce/dce_audio.h" 56 56 #include "dce/dce_hwseq.h" 57 57 #include "clk_mgr.h" 58 - #include "virtual/virtual_stream_encoder.h" 58 + #include "dio/virtual/virtual_stream_encoder.h" 59 59 #include "dce110/dce110_resource.h" 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn30/dcn30_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 54 54 #include "dce/dce_audio.h" 55 55 #include "dce/dce_hwseq.h" 56 56 #include "clk_mgr.h" 57 - #include "virtual/virtual_stream_encoder.h" 57 + #include "dio/virtual/virtual_stream_encoder.h" 58 58 #include "dce110/dce110_resource.h" 59 59 #include "dml/display_mode_vba.h" 60 60 #include "dcn301/dcn301_dccg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 64 64 #include "dce/dce_audio.h" 65 65 #include "dce/dce_hwseq.h" 66 66 #include "clk_mgr.h" 67 - #include "virtual/virtual_stream_encoder.h" 67 + #include "dio/virtual/virtual_stream_encoder.h" 68 68 #include "dce110/dce110_resource.h" 69 69 #include "dml/display_mode_vba.h" 70 70 #include "dml/dcn31/dcn31_fpu.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 66 66 #include "dce/dce_audio.h" 67 67 #include "dce/dce_hwseq.h" 68 68 #include "clk_mgr.h" 69 - #include "virtual/virtual_stream_encoder.h" 69 + #include "dio/virtual/virtual_stream_encoder.h" 70 70 #include "dce110/dce110_resource.h" 71 71 #include "dml/display_mode_vba.h" 72 72 #include "dml/dcn31/dcn31_fpu.h"
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 - #include "virtual/virtual_stream_encoder.h" 66 + #include "dio/virtual/virtual_stream_encoder.h" 67 67 #include "dce110/dce110_resource.h" 68 68 #include "dml/display_mode_vba.h" 69 69 #include "dml/dcn31/dcn31_fpu.h" ··· 1230 1230 /*PHYB is wired off in HW, allow front end to remapping, otherwise needs more changes*/ 1231 1231 1232 1232 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1233 - if (eng_id <= ENGINE_ID_DIGF) { 1234 - vpg_inst = eng_id; 1235 - afmt_inst = eng_id; 1236 - } else 1233 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1237 1234 return NULL; 1235 + 1236 + vpg_inst = eng_id; 1237 + afmt_inst = eng_id; 1238 1238 1239 1239 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1240 1240 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 - #include "virtual/virtual_stream_encoder.h" 66 + #include "dio/virtual/virtual_stream_encoder.h" 67 67 #include "dce110/dce110_resource.h" 68 68 #include "dml/display_mode_vba.h" 69 69 #include "dml/dcn31/dcn31_fpu.h" ··· 1223 1223 int afmt_inst; 1224 1224 1225 1225 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1226 - if (eng_id <= ENGINE_ID_DIGF) { 1227 - vpg_inst = eng_id; 1228 - afmt_inst = eng_id; 1229 - } else 1226 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1230 1227 return NULL; 1228 + 1229 + vpg_inst = eng_id; 1230 + afmt_inst = eng_id; 1231 1231 1232 1232 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1233 1233 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 65 65 #include "dce/dce_audio.h" 66 66 #include "dce/dce_hwseq.h" 67 67 #include "clk_mgr.h" 68 - #include "virtual/virtual_stream_encoder.h" 68 + #include "dio/virtual/virtual_stream_encoder.h" 69 69 #include "dml/display_mode_vba.h" 70 70 #include "dcn32/dcn32_dccg.h" 71 71 #include "dcn10/dcn10_resource.h" ··· 1211 1211 int afmt_inst; 1212 1212 1213 1213 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1214 - if (eng_id <= ENGINE_ID_DIGF) { 1215 - vpg_inst = eng_id; 1216 - afmt_inst = eng_id; 1217 - } else 1214 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1218 1215 return NULL; 1216 + 1217 + vpg_inst = eng_id; 1218 + afmt_inst = eng_id; 1219 1219 1220 1220 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1221 1221 vpg = dcn32_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 68 68 #include "dce/dce_audio.h" 69 69 #include "dce/dce_hwseq.h" 70 70 #include "clk_mgr.h" 71 - #include "virtual/virtual_stream_encoder.h" 71 + #include "dio/virtual/virtual_stream_encoder.h" 72 72 #include "dml/display_mode_vba.h" 73 73 #include "dcn32/dcn32_dccg.h" 74 74 #include "dcn10/dcn10_resource.h" ··· 1192 1192 int afmt_inst; 1193 1193 1194 1194 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1195 - if (eng_id <= ENGINE_ID_DIGF) { 1196 - vpg_inst = eng_id; 1197 - afmt_inst = eng_id; 1198 - } else 1195 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1199 1196 return NULL; 1197 + 1198 + vpg_inst = eng_id; 1199 + afmt_inst = eng_id; 1200 1200 1201 1201 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1202 1202 vpg = dcn321_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 70 70 #include "dce/dce_audio.h" 71 71 #include "dce/dce_hwseq.h" 72 72 #include "clk_mgr.h" 73 - #include "virtual/virtual_stream_encoder.h" 73 + #include "dio/virtual/virtual_stream_encoder.h" 74 74 #include "dce110/dce110_resource.h" 75 75 #include "dml/display_mode_vba.h" 76 76 #include "dcn35/dcn35_dccg.h" ··· 1274 1274 int afmt_inst; 1275 1275 1276 1276 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1277 - if (eng_id <= ENGINE_ID_DIGF) { 1278 - vpg_inst = eng_id; 1279 - afmt_inst = eng_id; 1280 - } else 1277 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1281 1278 return NULL; 1279 + 1280 + vpg_inst = eng_id; 1281 + afmt_inst = eng_id; 1282 1282 1283 1283 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1284 1284 vpg = dcn31_vpg_create(ctx, vpg_inst);
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 - #include "virtual/virtual_stream_encoder.h" 52 + #include "dio/virtual/virtual_stream_encoder.h" 53 53 #include "dce110/dce110_resource.h" 54 54 #include "dml/display_mode_vba.h" 55 55 #include "dcn35/dcn35_dccg.h" ··· 1254 1254 int afmt_inst; 1255 1255 1256 1256 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1257 - if (eng_id <= ENGINE_ID_DIGF) { 1258 - vpg_inst = eng_id; 1259 - afmt_inst = eng_id; 1260 - } else 1257 + if (eng_id < 0 || eng_id >= ARRAY_SIZE(stream_enc_regs)) 1261 1258 return NULL; 1259 + 1260 + vpg_inst = eng_id; 1261 + afmt_inst = eng_id; 1262 1262 1263 1263 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1264 1264 vpg = dcn31_vpg_create(ctx, vpg_inst);
+11 -5
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 49 49 #include "dce/dce_audio.h" 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 - #include "virtual/virtual_stream_encoder.h" 52 + #include "dio/virtual/virtual_stream_encoder.h" 53 53 #include "dce110/dce110_resource.h" 54 54 #include "dml/display_mode_vba.h" 55 55 #include "dcn35/dcn35_dccg.h" ··· 460 460 }; 461 461 462 462 #define optc_regs_init(id)\ 463 - OPTC_COMMON_REG_LIST_DCN3_5_RI(id) 463 + OPTC_COMMON_REG_LIST_DCN3_5_RI(id),\ 464 + SRI_ARR(OTG_CRC0_DATA_R32, OTG_CRC32, id),\ 465 + SRI_ARR(OTG_CRC0_DATA_G32, OTG_CRC32, id),\ 466 + SRI_ARR(OTG_CRC0_DATA_B32, OTG_CRC32, id),\ 467 + SRI_ARR(OTG_CRC1_DATA_R32, OTG_CRC32, id),\ 468 + SRI_ARR(OTG_CRC1_DATA_G32, OTG_CRC32, id),\ 469 + SRI_ARR(OTG_CRC1_DATA_B32, OTG_CRC32, id) 464 470 465 471 static struct dcn_optc_registers optc_regs[4]; 466 472 467 473 static const struct dcn_optc_shift optc_shift = { 468 - OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) 474 + OPTC_COMMON_MASK_SH_LIST_DCN3_6(__SHIFT) 469 475 }; 470 476 471 477 static const struct dcn_optc_mask optc_mask = { 472 - OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) 478 + OPTC_COMMON_MASK_SH_LIST_DCN3_6(_MASK) 473 479 }; 474 480 475 481 #define hubp_regs_init(id)\ ··· 775 769 }; 776 770 777 771 static const struct dc_check_config config_defaults = { 778 - .enable_legacy_fast_update = true, 772 + .enable_legacy_fast_update = false, 779 773 }; 780 774 781 775 static const struct dc_panel_config panel_config_defaults = {
+47 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 21 21 #include "dcn401/dcn401_hubbub.h" 22 22 #include "dcn401/dcn401_mpc.h" 23 23 #include "dcn401/dcn401_hubp.h" 24 + #include "dio/dcn10/dcn10_dio.h" 24 25 #include "irq/dcn401/irq_service_dcn401.h" 25 26 #include "dcn401/dcn401_dpp.h" 26 27 #include "dcn401/dcn401_optc.h" ··· 47 46 #include "dce/dce_audio.h" 48 47 #include "dce/dce_hwseq.h" 49 48 #include "clk_mgr.h" 50 - #include "virtual/virtual_stream_encoder.h" 49 + #include "dio/virtual/virtual_stream_encoder.h" 51 50 #include "dml/display_mode_vba.h" 52 51 #include "dcn401/dcn401_dccg.h" 53 52 #include "dcn10/dcn10_resource.h" ··· 635 634 DCN20_VMID_MASK_SH_LIST(_MASK) 636 635 }; 637 636 637 + #define dio_regs_init() \ 638 + DIO_REG_LIST_DCN10() 639 + 640 + static struct dcn_dio_registers dio_regs; 641 + 642 + #define DIO_MASK_SH_LIST_DCN401(mask_sh)\ 643 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 644 + 645 + static const struct dcn_dio_shift dio_shift = { 646 + DIO_MASK_SH_LIST_DCN401(__SHIFT) 647 + }; 648 + 649 + static const struct dcn_dio_mask dio_mask = { 650 + DIO_MASK_SH_LIST_DCN401(_MASK) 651 + }; 652 + 638 653 static const struct resource_caps res_cap_dcn4_01 = { 639 654 .num_timing_generator = 4, 640 655 .num_opp = 4, ··· 896 879 } 897 880 898 881 return &hubbub2->base; 882 + } 883 + 884 + static struct dio *dcn401_dio_create(struct dc_context *ctx) 885 + { 886 + struct dcn10_dio *dio10 = kzalloc(sizeof(struct dcn10_dio), GFP_KERNEL); 887 + 888 + if (!dio10) 889 + return NULL; 890 + 891 + #undef REG_STRUCT 892 + #define REG_STRUCT dio_regs 893 + dio_regs_init(); 894 + 895 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 896 + 897 + return &dio10->base; 899 898 } 900 899 901 900 static struct hubp *dcn401_hubp_create( ··· 1532 1499 if (pool->base.dccg != NULL) 1533 1500 dcn_dccg_destroy(&pool->base.dccg); 1534 1501 1502 + if (pool->base.dio != NULL) { 1503 + kfree(TO_DCN10_DIO(pool->base.dio)); 1504 + pool->base.dio = NULL; 1505 + } 1506 + 1535 1507 if 
(pool->base.oem_device != NULL) { 1536 1508 struct dc *dc = pool->base.oem_device->ctx->dc; 1537 1509 ··· 2106 2068 if (pool->base.hubbub == NULL) { 2107 2069 BREAK_TO_DEBUGGER(); 2108 2070 dm_error("DC: failed to create hubbub!\n"); 2071 + goto create_fail; 2072 + } 2073 + 2074 + /* DIO */ 2075 + pool->base.dio = dcn401_dio_create(ctx); 2076 + if (pool->base.dio == NULL) { 2077 + BREAK_TO_DEBUGGER(); 2078 + dm_error("DC: failed to create dio!\n"); 2109 2079 goto create_fail; 2110 2080 } 2111 2081
+2 -2
drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c
··· 293 293 }; 294 294 295 295 /* Pre-generated 1DLUT for given setup and sharpness level */ 296 - struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { 296 + static struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = { 297 297 { 298 298 0, 0, 299 299 { ··· 332 332 }, 333 333 }; 334 334 335 - struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { 335 + static struct scale_ratio_to_sharpness_level_adj sharpness_level_adj[NUM_SHARPNESS_ADJ_LEVELS] = { 336 336 {1125, 1000, 0}, 337 337 {11, 10, 1}, 338 338 {1075, 1000, 2},
-30
drivers/gpu/drm/amd/display/dc/virtual/Makefile
··· 1 - # 2 - # Copyright 2017 Advanced Micro Devices, Inc. 3 - # 4 - # Permission is hereby granted, free of charge, to any person obtaining a 5 - # copy of this software and associated documentation files (the "Software"), 6 - # to deal in the Software without restriction, including without limitation 7 - # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - # and/or sell copies of the Software, and to permit persons to whom the 9 - # Software is furnished to do so, subject to the following conditions: 10 - # 11 - # The above copyright notice and this permission notice shall be included in 12 - # all copies or substantial portions of the Software. 13 - # 14 - # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - # THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - # OTHER DEALINGS IN THE SOFTWARE. 21 - # 22 - # 23 - # Makefile for the virtual sub-component of DAL. 24 - # It provides the control and status of HW CRTC block. 25 - 26 - VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o virtual_link_hwss.o 27 - 28 - AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL)) 29 - 30 - AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
-1
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
··· 171 171 kfree(enc); 172 172 return NULL; 173 173 } 174 -
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.h
+25
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 736 736 uint32_t u32All; 737 737 }; 738 738 739 + /** 740 + * Definition of Panel Replay ML Activity Options 741 + */ 742 + enum pr_ml_activity_option { 743 + OPTION_DEFAULT = 0x00, // VESA Option Default (1C) 744 + OPTION_1A = 0x01, // VESA Option 1A 745 + OPTION_1B = 0x02, // VESA Option 1B 746 + OPTION_1C = 0x03, // VESA Option 1C 747 + }; 748 + 739 749 union fw_assisted_mclk_switch_version { 740 750 struct { 741 751 uint8_t minor : 5; ··· 1638 1628 * DESC: Initiates IPS wake sequence. 1639 1629 */ 1640 1630 DMUB_GPINT__IPS_DEBUG_WAKE = 137, 1631 + /** 1632 + * DESC: Do panel power off sequence 1633 + * ARGS: 1 - Power off 1634 + */ 1635 + DMUB_GPINT__PANEL_POWER_OFF_SEQ = 138, 1641 1636 }; 1642 1637 1643 1638 /** ··· 4413 4398 enum dmub_cmd_panel_replay_state_update_subtype { 4414 4399 PR_STATE_UPDATE_COASTING_VTOTAL = 0x1, 4415 4400 PR_STATE_UPDATE_SYNC_MODE = 0x2, 4401 + PR_STATE_UPDATE_RUNTIME_FLAGS = 0x3, 4416 4402 }; 4417 4403 4418 4404 enum dmub_cmd_panel_replay_general_subtype { ··· 6707 6691 struct dmub_cmd_pr_copy_settings_data data; 6708 6692 }; 6709 6693 6694 + union dmub_pr_runtime_flags { 6695 + struct { 6696 + uint32_t disable_abm_optimization : 1; // Disable ABM optimization for PR 6697 + } bitfields; 6698 + uint32_t u32All; 6699 + }; 6700 + 6710 6701 struct dmub_cmd_pr_update_state_data { 6711 6702 /** 6712 6703 * Panel Instance. ··· 6732 6709 */ 6733 6710 uint32_t coasting_vtotal; 6734 6711 uint32_t sync_mode; 6712 + 6713 + union dmub_pr_runtime_flags pr_runtime_flags; 6735 6714 }; 6736 6715 6737 6716 struct dmub_cmd_pr_general_cmd_data {
+5
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 1829 1829 struct gpu_metrics_attr metrics_attrs[]; 1830 1830 }; 1831 1831 1832 + enum amdgpu_xgmi_link_status { 1833 + AMDGPU_XGMI_LINK_INACTIVE = 0, 1834 + AMDGPU_XGMI_LINK_ACTIVE = 1, 1835 + }; 1836 + 1832 1837 #endif
+14 -15
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 243 243 enum amd_pm_state_type state; 244 244 int ret; 245 245 246 - if (strncmp("battery", buf, strlen("battery")) == 0) 246 + if (sysfs_streq(buf, "battery")) 247 247 state = POWER_STATE_TYPE_BATTERY; 248 - else if (strncmp("balanced", buf, strlen("balanced")) == 0) 248 + else if (sysfs_streq(buf, "balanced")) 249 249 state = POWER_STATE_TYPE_BALANCED; 250 - else if (strncmp("performance", buf, strlen("performance")) == 0) 250 + else if (sysfs_streq(buf, "performance")) 251 251 state = POWER_STATE_TYPE_PERFORMANCE; 252 252 else 253 253 return -EINVAL; ··· 363 363 enum amd_dpm_forced_level level; 364 364 int ret = 0; 365 365 366 - if (strncmp("low", buf, strlen("low")) == 0) { 366 + if (sysfs_streq(buf, "low")) 367 367 level = AMD_DPM_FORCED_LEVEL_LOW; 368 - } else if (strncmp("high", buf, strlen("high")) == 0) { 368 + else if (sysfs_streq(buf, "high")) 369 369 level = AMD_DPM_FORCED_LEVEL_HIGH; 370 - } else if (strncmp("auto", buf, strlen("auto")) == 0) { 370 + else if (sysfs_streq(buf, "auto")) 371 371 level = AMD_DPM_FORCED_LEVEL_AUTO; 372 - } else if (strncmp("manual", buf, strlen("manual")) == 0) { 372 + else if (sysfs_streq(buf, "manual")) 373 373 level = AMD_DPM_FORCED_LEVEL_MANUAL; 374 - } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 374 + else if (sysfs_streq(buf, "profile_exit")) 375 375 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 376 - } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 376 + else if (sysfs_streq(buf, "profile_standard")) 377 377 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 378 - } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 378 + else if (sysfs_streq(buf, "profile_min_sclk")) 379 379 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 380 - } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 380 + else if (sysfs_streq(buf, "profile_min_mclk")) 381 381 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 382 - } else if 
(strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 382 + else if (sysfs_streq(buf, "profile_peak")) 383 383 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 384 - } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) { 384 + else if (sysfs_streq(buf, "perf_determinism")) 385 385 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM; 386 - } else { 386 + else 387 387 return -EINVAL; 388 - } 389 388 390 389 ret = amdgpu_pm_get_access(adev); 391 390 if (ret < 0)
+5
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 3464 3464 max_sclk = 60000; 3465 3465 max_mclk = 80000; 3466 3466 } 3467 + if ((adev->pdev->device == 0x666f) && 3468 + (adev->pdev->revision == 0x00)) { 3469 + max_sclk = 80000; 3470 + max_mclk = 95000; 3471 + } 3467 3472 } else if (adev->asic_type == CHIP_OLAND) { 3468 3473 if ((adev->pdev->revision == 0xC7) || 3469 3474 (adev->pdev->revision == 0x80) ||
+17 -9
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 810 810 smu->adev = adev; 811 811 smu->pm_enabled = !!amdgpu_dpm; 812 812 smu->is_apu = false; 813 - smu->smu_baco.state = SMU_BACO_STATE_NONE; 813 + smu->smu_baco.state = SMU_BACO_STATE_EXIT; 814 814 smu->smu_baco.platform_support = false; 815 815 smu->smu_baco.maco_support = false; 816 816 smu->user_dpm_profile.fan_mode = -1; ··· 1355 1355 int i, ret; 1356 1356 1357 1357 smu->pool_size = adev->pm.smu_prv_buffer_size; 1358 - smu_feature_init(smu, SMU_FEATURE_MAX); 1358 + smu_feature_init(smu, SMU_FEATURE_NUM_DEFAULT); 1359 1359 1360 1360 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); 1361 1361 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); ··· 1646 1646 { 1647 1647 struct amdgpu_device *adev = smu->adev; 1648 1648 uint8_t pcie_gen = 0, pcie_width = 0; 1649 - uint64_t features_supported; 1649 + struct smu_feature_bits features_supported; 1650 1650 int ret = 0; 1651 1651 1652 1652 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { ··· 1807 1807 return ret; 1808 1808 } 1809 1809 smu_feature_list_set_bits(smu, SMU_FEATURE_LIST_SUPPORTED, 1810 - (unsigned long *)&features_supported); 1810 + features_supported.bits); 1811 1811 1812 1812 if (!smu_is_dpm_running(smu)) 1813 1813 dev_info(adev->dev, "dpm has been disabled\n"); ··· 2120 2120 int ret = 0; 2121 2121 2122 2122 if ((!adev->in_runpm) && (!adev->in_suspend) && 2123 - (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == 2124 - IP_VERSION(13, 0, 10) && 2125 - !amdgpu_device_has_display_hardware(adev)) 2123 + (!amdgpu_in_reset(adev)) && !smu->is_apu && 2124 + amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(13, 0, 0)) 2126 2125 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); 2127 2126 2128 2127 return ret; ··· 3151 3152 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 3152 3153 *size = 4; 3153 3154 break; 3154 - case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 3155 - ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 3155 + case 
AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: { 3156 + struct smu_feature_bits feature_mask; 3157 + uint32_t features[2]; 3158 + 3159 + /* TBD: need to handle for > 64 bits */ 3160 + ret = smu_feature_get_enabled_mask(smu, &feature_mask); 3161 + if (!ret) { 3162 + smu_feature_bits_to_arr32(&feature_mask, features, 64); 3163 + *(uint64_t *)data = *(uint64_t *)features; 3164 + } 3156 3165 *size = 8; 3157 3166 break; 3167 + } 3158 3168 case AMDGPU_PP_SENSOR_UVD_POWER: 3159 3169 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 3160 3170 *size = 4;
+27 -3
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 471 471 struct smu_power_gate power_gate; 472 472 }; 473 473 474 - #define SMU_FEATURE_MAX (64) 474 + #define SMU_FEATURE_NUM_DEFAULT (64) 475 + #define SMU_FEATURE_MAX (128) 475 476 476 477 struct smu_feature_bits { 477 478 DECLARE_BITMAP(bits, SMU_FEATURE_MAX); 478 479 }; 480 + 481 + /* 482 + * Helpers for initializing smu_feature_bits statically. 483 + * Use SMU_FEATURE_BIT_INIT() which automatically handles array indexing: 484 + * static const struct smu_feature_bits example = { 485 + * .bits = { 486 + * SMU_FEATURE_BIT_INIT(5), 487 + * SMU_FEATURE_BIT_INIT(10), 488 + * SMU_FEATURE_BIT_INIT(65), 489 + * SMU_FEATURE_BIT_INIT(100) 490 + * } 491 + * }; 492 + */ 493 + #define SMU_FEATURE_BITS_ELEM(bit) ((bit) / BITS_PER_LONG) 494 + #define SMU_FEATURE_BITS_POS(bit) ((bit) % BITS_PER_LONG) 495 + #define SMU_FEATURE_BIT_INIT(bit) \ 496 + [SMU_FEATURE_BITS_ELEM(bit)] = (1UL << SMU_FEATURE_BITS_POS(bit)) 479 497 480 498 enum smu_feature_list { 481 499 SMU_FEATURE_LIST_SUPPORTED, ··· 536 518 enum smu_baco_state { 537 519 SMU_BACO_STATE_ENTER = 0, 538 520 SMU_BACO_STATE_EXIT, 539 - SMU_BACO_STATE_NONE, 540 521 }; 541 522 542 523 struct smu_baco_context { ··· 1229 1212 * on the SMU. 1230 1213 * &feature_mask: Enabled feature mask. 1231 1214 */ 1232 - int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask); 1215 + int (*get_enabled_mask)(struct smu_context *smu, 1216 + struct smu_feature_bits *feature_mask); 1233 1217 1234 1218 /** 1235 1219 * @feature_is_enabled: Test if a feature is enabled. ··· 2060 2042 unsigned int nbits) 2061 2043 { 2062 2044 return bitmap_empty(bits->bits, nbits); 2045 + } 2046 + 2047 + static inline bool smu_feature_bits_full(const struct smu_feature_bits *bits, 2048 + unsigned int nbits) 2049 + { 2050 + return bitmap_full(bits->bits, nbits); 2063 2051 } 2064 2052 2065 2053 static inline void smu_feature_bits_copy(struct smu_feature_bits *dst,
-2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v15_0.h
··· 226 226 227 227 int smu_v15_0_set_gfx_power_up_by_imu(struct smu_context *smu); 228 228 229 - int smu_v15_0_set_default_dpm_tables(struct smu_context *smu); 230 - 231 229 int smu_v15_0_get_pptable_from_firmware(struct smu_context *smu, 232 230 void **table, 233 231 uint32_t *size,
+12 -10
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 65 65 #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 66 66 #define SMU_FEATURES_HIGH_SHIFT 32 67 67 68 - #define SMC_DPM_FEATURE ( \ 69 - FEATURE_DPM_PREFETCHER_MASK | \ 70 - FEATURE_DPM_GFXCLK_MASK | \ 71 - FEATURE_DPM_UCLK_MASK | \ 72 - FEATURE_DPM_SOCCLK_MASK | \ 73 - FEATURE_DPM_MP0CLK_MASK | \ 74 - FEATURE_DPM_FCLK_MASK | \ 75 - FEATURE_DPM_XGMI_MASK) 68 + static const struct smu_feature_bits arcturus_dpm_features = { 69 + .bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 72 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 73 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT), 74 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), 75 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT) } 76 + }; 76 77 77 78 #define smnPCIE_ESM_CTRL 0x111003D0 78 79 ··· 1527 1526 static bool arcturus_is_dpm_running(struct smu_context *smu) 1528 1527 { 1529 1528 int ret = 0; 1530 - uint64_t feature_enabled; 1529 + struct smu_feature_bits feature_enabled; 1531 1530 1532 1531 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 1533 1532 if (ret) 1534 1533 return false; 1535 1534 1536 - return !!(feature_enabled & SMC_DPM_FEATURE); 1535 + return smu_feature_bits_test_mask(&feature_enabled, 1536 + arcturus_dpm_features.bits); 1537 1537 } 1538 1538 1539 1539 static int arcturus_dpm_set_vcn_enable(struct smu_context *smu,
+14 -10
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
··· 60 60 61 61 static uint32_t cyan_skillfish_sclk_default; 62 62 63 - #define FEATURE_MASK(feature) (1ULL << feature) 64 - #define SMC_DPM_FEATURE ( \ 65 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 - FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ 67 - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 63 + static const struct smu_feature_bits cyan_skillfish_dpm_features = { 64 + .bits = { 65 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_SOC_DPM_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) 68 + } 69 + }; 68 70 69 71 static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { 70 72 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), ··· 363 361 { 364 362 struct amdgpu_device *adev = smu->adev; 365 363 int ret = 0; 366 - uint64_t feature_enabled; 364 + struct smu_feature_bits feature_enabled; 367 365 368 366 /* we need to re-init after suspend so return false */ 369 367 if (adev->in_suspend) ··· 380 378 cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, 381 379 &cyan_skillfish_sclk_default); 382 380 383 - return !!(feature_enabled & SMC_DPM_FEATURE); 381 + return smu_feature_bits_test_mask(&feature_enabled, 382 + cyan_skillfish_dpm_features.bits); 384 383 } 385 384 386 385 static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, ··· 568 565 return 0; 569 566 } 570 567 571 - static int cyan_skillfish_get_enabled_mask(struct smu_context *smu, 572 - uint64_t *feature_mask) 568 + static int 569 + cyan_skillfish_get_enabled_mask(struct smu_context *smu, 570 + struct smu_feature_bits *feature_mask) 573 571 { 574 572 if (!feature_mask) 575 573 return -EINVAL; 576 - memset(feature_mask, 0xff, sizeof(*feature_mask)); 574 + smu_feature_bits_fill(feature_mask); 577 575 578 576 return 0; 579 577 }
+15 -12
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 58 58 #undef pr_info 59 59 #undef pr_debug 60 60 61 - #define FEATURE_MASK(feature) (1ULL << feature) 62 - #define SMC_DPM_FEATURE ( \ 63 - FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ 64 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 65 - FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \ 66 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 67 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 68 - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \ 69 - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 70 - FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) 61 + static const struct smu_feature_bits navi10_dpm_features = { 62 + .bits = { 63 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFX_PACE_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT) 71 + } 72 + }; 71 73 72 74 #define SMU_11_0_GFX_BUSY_THRESHOLD 15 73 75 ··· 1621 1619 static bool navi10_is_dpm_running(struct smu_context *smu) 1622 1620 { 1623 1621 int ret = 0; 1624 - uint64_t feature_enabled; 1622 + struct smu_feature_bits feature_enabled; 1625 1623 1626 1624 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 1627 1625 if (ret) 1628 1626 return false; 1629 1627 1630 - return !!(feature_enabled & SMC_DPM_FEATURE); 1628 + return smu_feature_bits_test_mask(&feature_enabled, 1629 + navi10_dpm_features.bits); 1631 1630 } 1632 1631 1633 1632 static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+15 -12
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 60 60 #undef pr_info 61 61 #undef pr_debug 62 62 63 - #define FEATURE_MASK(feature) (1ULL << feature) 64 - #define SMC_DPM_FEATURE ( \ 65 - FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ 66 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 67 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 68 - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 69 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 70 - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 71 - FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \ 72 - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 63 + static const struct smu_feature_bits sienna_cichlid_dpm_features = { 64 + .bits = { 65 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_DCEFCLK_BIT), 72 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) 73 + } 74 + }; 73 75 74 76 #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 75 77 ··· 1536 1534 static bool sienna_cichlid_is_dpm_running(struct smu_context *smu) 1537 1535 { 1538 1536 int ret = 0; 1539 - uint64_t feature_enabled; 1537 + struct smu_feature_bits feature_enabled; 1540 1538 1541 1539 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 1542 1540 if (ret) 1543 1541 return false; 1544 1542 1545 - return !!(feature_enabled & SMC_DPM_FEATURE); 1543 + return smu_feature_bits_test_mask(&feature_enabled, 1544 + sienna_cichlid_dpm_features.bits); 1546 1545 } 1547 1546 1548 1547 static int sienna_cichlid_get_fan_speed_rpm(struct smu_context *smu,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 751 751 uint32_t feature_mask[2]; 752 752 753 753 if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || 754 - feature->feature_num < 64) { 754 + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) { 755 755 ret = -EINVAL; 756 756 goto failed; 757 757 }
+16 -13
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 58 58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L 59 59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L 60 60 61 - #define FEATURE_MASK(feature) (1ULL << feature) 62 - #define SMC_DPM_FEATURE ( \ 63 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 64 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 65 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 66 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 67 - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 68 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 69 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 70 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 71 - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 61 + static const struct smu_feature_bits vangogh_dpm_features = { 62 + .bits = { 63 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) 72 + } 73 + }; 72 74 73 75 static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = { 74 76 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), ··· 506 504 { 507 505 struct amdgpu_device *adev = smu->adev; 508 506 int ret = 0; 509 - uint64_t feature_enabled; 507 + struct smu_feature_bits feature_enabled; 510 508 511 509 /* we need to re-init after suspend so return false */ 512 510 if (adev->in_suspend) ··· 517 515 if (ret) 518 516 return false; 519 517 520 - return !!(feature_enabled & SMC_DPM_FEATURE); 518 + return smu_feature_bits_test_mask(&feature_enabled, 519 + vangogh_dpm_features.bits); 521 520 } 522 521 523 522 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+2 -2
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1434 1434 } 1435 1435 1436 1436 static int renoir_get_enabled_mask(struct smu_context *smu, 1437 - uint64_t *feature_mask) 1437 + struct smu_feature_bits *feature_mask) 1438 1438 { 1439 1439 if (!feature_mask) 1440 1440 return -EINVAL; 1441 - memset(feature_mask, 0xff, sizeof(*feature_mask)); 1441 + smu_feature_bits_fill(feature_mask); 1442 1442 1443 1443 return 0; 1444 1444 }
+15 -11
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 61 61 [smu_feature] = {1, (aldebaran_feature)} 62 62 63 63 #define FEATURE_MASK(feature) (1ULL << feature) 64 - #define SMC_DPM_FEATURE ( \ 65 - FEATURE_MASK(FEATURE_DATA_CALCULATIONS) | \ 66 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 67 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 68 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 69 - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 70 - FEATURE_MASK(FEATURE_DPM_LCLK_BIT) | \ 71 - FEATURE_MASK(FEATURE_DPM_XGMI_BIT) | \ 72 - FEATURE_MASK(FEATURE_DPM_VCN_BIT)) 64 + static const struct smu_feature_bits aldebaran_dpm_features = { 65 + .bits = { 66 + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATIONS), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK_BIT), 72 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI_BIT), 73 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN_BIT) 74 + } 75 + }; 73 76 74 77 #define smnPCIE_ESM_CTRL 0x111003D0 75 78 ··· 1398 1395 static bool aldebaran_is_dpm_running(struct smu_context *smu) 1399 1396 { 1400 1397 int ret; 1401 - uint64_t feature_enabled; 1398 + struct smu_feature_bits feature_enabled; 1402 1399 1403 1400 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 1404 1401 if (ret) 1405 1402 return false; 1406 - return !!(feature_enabled & SMC_DPM_FEATURE); 1403 + return smu_feature_bits_test_mask(&feature_enabled, 1404 + aldebaran_dpm_features.bits); 1407 1405 } 1408 1406 1409 1407 static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 762 762 uint32_t feature_mask[2]; 763 763 764 764 if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || 765 - feature->feature_num < 64) 765 + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) 766 766 return -EINVAL; 767 767 768 768 smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
+17 -29
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 59 59 60 60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 61 61 62 - #define FEATURE_MASK(feature) (1ULL << feature) 63 - #define SMC_DPM_FEATURE ( \ 64 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 65 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 66 - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 67 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 68 - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 69 - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 62 + static const struct smu_feature_bits smu_v13_0_0_dpm_features = { 63 + .bits = { 64 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) 70 + } 71 + }; 70 72 71 73 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 72 74 ··· 691 689 static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu) 692 690 { 693 691 int ret = 0; 694 - uint64_t feature_enabled; 692 + struct smu_feature_bits feature_enabled; 695 693 696 694 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 697 695 if (ret) 698 696 return false; 699 697 700 - return !!(feature_enabled & SMC_DPM_FEATURE); 698 + return smu_feature_bits_test_mask(&feature_enabled, 699 + smu_v13_0_0_dpm_features.bits); 701 700 } 702 701 703 702 static int smu_v13_0_0_system_features_control(struct smu_context *smu, ··· 2615 2612 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) 2616 2613 { 2617 2614 struct amdgpu_device *adev = smu->adev; 2618 - u32 smu_version; 2619 - int ret; 2620 2615 2621 2616 /* SRIOV does not support SMU mode1 reset */ 2622 2617 if (amdgpu_sriov_vf(adev)) 2623 - return false; 2624 - 2625 - /* PMFW support is available since 78.41 */ 2626 - ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); 2627 - if (ret) 2628 - return false; 2629 - 2630 - if (smu_version < 0x004e2900) 2631 
2618 return false; 2632 2619 2633 2620 return true; ··· 2768 2775 2769 2776 switch (mp1_state) { 2770 2777 case PP_MP1_STATE_UNLOAD: 2771 - ret = smu_cmn_send_smc_msg_with_param(smu, 2772 - SMU_MSG_PrepareMp1ForUnload, 2773 - 0x55, NULL); 2774 - 2775 - if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) 2776 - ret = smu_v13_0_disable_pmfw_state(smu); 2777 - 2778 + ret = smu_cmn_set_mp1_state(smu, mp1_state); 2778 2779 break; 2779 2780 default: 2780 2781 /* Ignore others */ ··· 2812 2825 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ 2813 2826 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param); 2814 2827 2815 - ret = smu_cmn_send_smc_msg_with_param(smu, 2816 - SMU_MSG_Mode1Reset, param, NULL); 2828 + ret = smu_cmn_send_debug_smc_msg_with_param(smu, 2829 + DEBUGSMC_MSG_Mode1Reset, param); 2830 + 2817 2831 break; 2818 2832 2819 2833 case IP_VERSION(13, 0, 10):
+15 -8
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
··· 52 52 #define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \ 53 53 [smu_feature] = { 1, (smu_13_0_12_feature) } 54 54 55 - #define FEATURE_MASK(feature) (1ULL << feature) 56 - #define SMC_DPM_FEATURE \ 57 - (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ 58 - FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_FCLK)) 55 + static const struct smu_feature_bits smu_v13_0_12_dpm_features = { 56 + .bits = { 57 + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION), 58 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK), 59 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK) 60 + } 61 + }; 59 62 60 63 #define NUM_JPEG_RINGS_FW 10 61 64 #define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \ ··· 202 199 } 203 200 204 201 static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu, 205 - uint64_t *feature_mask) 202 + struct smu_feature_bits *feature_mask) 206 203 { 207 204 int ret; 208 205 209 206 ret = smu_cmn_get_enabled_mask(smu, feature_mask); 210 207 211 208 if (ret == -EIO) { 212 - *feature_mask = 0; 209 + smu_feature_bits_clearall(feature_mask); 213 210 ret = 0; 214 211 } 215 212 ··· 375 372 bool smu_v13_0_12_is_dpm_running(struct smu_context *smu) 376 373 { 377 374 int ret; 378 - uint64_t feature_enabled; 375 + struct smu_feature_bits feature_enabled; 379 376 380 377 ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled); 381 378 382 379 if (ret) 383 380 return false; 384 381 385 - return !!(feature_enabled & SMC_DPM_FEATURE); 382 + return smu_feature_bits_test_mask(&feature_enabled, 383 + smu_v13_0_12_dpm_features.bits); 386 384 } 387 385 388 386 int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, ··· 822 818 } 823 819 idx++; 824 820 } 821 + 822 + xcp_metrics->accumulation_counter = metrics->AccumulationCounter; 823 + xcp_metrics->firmware_timestamp = metrics->Timestamp; 825 824 826 825 return sizeof(*xcp_metrics); 827 826 }
+18 -16
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
··· 52 52 #define mmMP1_SMN_C2PMSG_90 0x029a 53 53 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 1 54 54 55 - #define FEATURE_MASK(feature) (1ULL << feature) 56 - 57 55 #define SMU_13_0_4_UMD_PSTATE_GFXCLK 938 58 56 #define SMU_13_0_4_UMD_PSTATE_SOCCLK 938 59 57 #define SMU_13_0_4_UMD_PSTATE_FCLK 1875 60 58 61 - #define SMC_DPM_FEATURE ( \ 62 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 63 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 64 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 65 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 66 - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 67 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 68 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 69 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ 70 - FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \ 71 - FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ 72 - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 59 + static const struct smu_feature_bits smu_v13_0_4_dpm_features = { 60 + .bits = { 61 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 62 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 63 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) 72 + } 73 + }; 73 74 74 75 static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = { 75 76 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 213 212 static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu) 214 213 { 215 214 int ret = 0; 216 - uint64_t feature_enabled; 215 + struct smu_feature_bits feature_enabled; 217 216 218 217 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 219 218 220 219 if (ret) 221 220 return false; 222 221 223 - return !!(feature_enabled & SMC_DPM_FEATURE); 222 + return 
smu_feature_bits_test_mask(&feature_enabled, 223 + smu_v13_0_4_dpm_features.bits); 224 224 } 225 225 226 226 static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
+16 -13
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
··· 51 51 #define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4) 52 52 #define mmMP1_C2PMSG_33_BASE_IDX 0 53 53 54 - #define FEATURE_MASK(feature) (1ULL << feature) 55 - #define SMC_DPM_FEATURE ( \ 56 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 57 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 58 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 59 - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ 60 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 61 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \ 62 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)| \ 63 - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)| \ 64 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)) 54 + static const struct smu_feature_bits smu_v13_0_5_dpm_features = { 55 + .bits = { 56 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 57 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 58 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 59 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), 60 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 61 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 62 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 63 + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT) 65 + } 66 + }; 65 67 66 68 static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] = { 67 69 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 232 230 static bool smu_v13_0_5_is_dpm_running(struct smu_context *smu) 233 231 { 234 232 int ret = 0; 235 - uint64_t feature_enabled; 233 + struct smu_feature_bits feature_enabled; 236 234 237 235 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 238 236 239 237 if (ret) 240 238 return false; 241 239 242 - return !!(feature_enabled & SMC_DPM_FEATURE); 240 + return smu_feature_bits_test_mask(&feature_enabled, 241 + smu_v13_0_5_dpm_features.bits); 243 242 } 244 243 245 244 static int smu_v13_0_5_mode_reset(struct smu_context *smu, int type)
+32 -24
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 76 76 [smu_feature] = { 1, (smu_13_0_6_feature) } 77 77 78 78 #define FEATURE_MASK(feature) (1ULL << feature) 79 - #define SMC_DPM_FEATURE \ 80 - (FEATURE_MASK(FEATURE_DATA_CALCULATION) | \ 81 - FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) | \ 82 - FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) | \ 83 - FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) | \ 84 - FEATURE_MASK(FEATURE_DPM_VCN)) 79 + static const struct smu_feature_bits smu_v13_0_6_dpm_features = { 80 + .bits = { 81 + SMU_FEATURE_BIT_INIT(FEATURE_DATA_CALCULATION), 82 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK), 83 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK), 84 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK), 85 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK), 86 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LCLK), 87 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_XGMI), 88 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_VCN) 89 + } 90 + }; 85 91 86 92 #define smnPCIE_ESM_CTRL 0x93D0 87 93 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288 ··· 2272 2266 } 2273 2267 2274 2268 static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu, 2275 - uint64_t *feature_mask) 2269 + struct smu_feature_bits *feature_mask) 2276 2270 { 2277 2271 int ret; 2278 2272 2279 2273 ret = smu_cmn_get_enabled_mask(smu, feature_mask); 2280 2274 2281 2275 if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) { 2282 - *feature_mask = 0; 2276 + smu_feature_bits_clearall(feature_mask); 2283 2277 ret = 0; 2284 2278 } 2285 2279 ··· 2289 2283 static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu) 2290 2284 { 2291 2285 int ret; 2292 - uint64_t feature_enabled; 2286 + struct smu_feature_bits feature_enabled; 2293 2287 2294 2288 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) 2295 2289 return smu_v13_0_12_is_dpm_running(smu); ··· 2299 2293 if (ret) 2300 2294 return false; 2301 2295 2302 - return !!(feature_enabled & SMC_DPM_FEATURE); 2296 + return 
smu_feature_bits_test_mask(&feature_enabled, 2297 + smu_v13_0_6_dpm_features.bits); 2303 2298 } 2304 2299 2305 2300 static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu, ··· 2564 2557 const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; 2565 2558 int version = smu_v13_0_6_get_metrics_version(smu); 2566 2559 struct smu_v13_0_6_partition_metrics *xcp_metrics; 2567 - MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; 2560 + struct smu_table_context *smu_table = &smu->smu_table; 2568 2561 struct amdgpu_device *adev = smu->adev; 2569 2562 int ret, inst, i, j, k, idx; 2563 + MetricsTableV0_t *metrics_v0; 2570 2564 MetricsTableV1_t *metrics_v1; 2571 2565 MetricsTableV2_t *metrics_v2; 2572 2566 struct amdgpu_xcp *xcp; ··· 2587 2579 xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table; 2588 2580 smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1); 2589 2581 2590 - metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); 2591 - if (!metrics_v0) 2592 - return -ENOMEM; 2593 - 2594 - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); 2582 + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); 2595 2583 if (ret) 2596 2584 return ret; 2585 + 2586 + metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; 2597 2587 2598 2588 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == 2599 2589 IP_VERSION(13, 0, 12) && ··· 2599 2593 return smu_v13_0_12_get_xcp_metrics(smu, xcp, table, 2600 2594 metrics_v0); 2601 2595 2602 - metrics_v1 = (MetricsTableV1_t *)metrics_v0; 2603 - metrics_v2 = (MetricsTableV2_t *)metrics_v0; 2596 + metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; 2597 + metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; 2604 2598 2605 2599 per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); 2606 2600 ··· 2668 2662 idx++; 2669 2663 } 2670 2664 } 2665 + xcp_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, version); 2666 + xcp_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version); 
2671 2667 2672 2668 return sizeof(*xcp_metrics); 2673 2669 } ··· 2678 2670 { 2679 2671 struct smu_v13_0_6_gpu_metrics *gpu_metrics; 2680 2672 int version = smu_v13_0_6_get_metrics_version(smu); 2681 - MetricsTableV0_t *metrics_v0 __free(kfree) = NULL; 2673 + struct smu_table_context *smu_table = &smu->smu_table; 2682 2674 struct amdgpu_device *adev = smu->adev; 2683 2675 int ret = 0, xcc_id, inst, i, j; 2676 + MetricsTableV0_t *metrics_v0; 2684 2677 MetricsTableV1_t *metrics_v1; 2685 2678 MetricsTableV2_t *metrics_v2; 2686 2679 u16 link_width_level; 2687 2680 u8 num_jpeg_rings; 2688 2681 bool per_inst; 2689 2682 2690 - metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); 2691 - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); 2683 + ret = smu_v13_0_6_get_metrics_table(smu, NULL, false); 2692 2684 if (ret) 2693 2685 return ret; 2694 2686 2695 - metrics_v2 = (MetricsTableV2_t *)metrics_v0; 2687 + metrics_v0 = (MetricsTableV0_t *)smu_table->metrics_table; 2696 2688 gpu_metrics = (struct smu_v13_0_6_gpu_metrics *)smu_driver_table_ptr( 2697 2689 smu, SMU_DRIVER_TABLE_GPU_METRICS); 2698 2690 ··· 2703 2695 goto fill; 2704 2696 } 2705 2697 2706 - metrics_v1 = (MetricsTableV1_t *)metrics_v0; 2707 - metrics_v2 = (MetricsTableV2_t *)metrics_v0; 2698 + metrics_v1 = (MetricsTableV1_t *)smu_table->metrics_table; 2699 + metrics_v2 = (MetricsTableV2_t *)smu_table->metrics_table; 2708 2700 2709 2701 gpu_metrics->temperature_hotspot = 2710 2702 SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
+6 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
··· 140 140 SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \ 141 141 SMU_MTYPE(U64), system_clock_counter); \ 142 142 SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ 143 - SMU_MTYPE(U32), accumulation_counter); \ 143 + SMU_MTYPE(U64), accumulation_counter); \ 144 144 SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ 145 145 SMU_MTYPE(U32), prochot_residency_acc); \ 146 146 SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \ ··· 259 259 SMU_13_0_6_MAX_XCC); \ 260 260 SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \ 261 261 SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \ 262 - SMU_13_0_6_MAX_XCC); 262 + SMU_13_0_6_MAX_XCC); \ 263 + SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \ 264 + SMU_MTYPE(U64), accumulation_counter); \ 265 + SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \ 266 + SMU_MTYPE(U64), firmware_timestamp); 263 267 264 268 DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics, 265 269 SMU_13_0_6_PARTITION_METRICS_FIELDS);
+56 -12
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 59 59 60 60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 61 61 62 - #define FEATURE_MASK(feature) (1ULL << feature) 63 - #define SMC_DPM_FEATURE ( \ 64 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 65 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 66 - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 67 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 68 - FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ 69 - FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)) 62 + static const struct smu_feature_bits smu_v13_0_7_dpm_features = { 63 + .bits = { 64 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_MP0CLK_BIT) 70 + } 71 + }; 70 72 71 73 #define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028 72 74 73 75 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 76 + 77 + #define mmMP1_SMN_C2PMSG_75 0x028b 78 + #define mmMP1_SMN_C2PMSG_75_BASE_IDX 0 79 + 80 + #define mmMP1_SMN_C2PMSG_53 0x0275 81 + #define mmMP1_SMN_C2PMSG_53_BASE_IDX 0 82 + 83 + #define mmMP1_SMN_C2PMSG_54 0x0276 84 + #define mmMP1_SMN_C2PMSG_54_BASE_IDX 0 85 + 86 + #define DEBUGSMC_MSG_Mode1Reset 2 74 87 75 88 #define PP_OD_FEATURE_GFXCLK_FMIN 0 76 89 #define PP_OD_FEATURE_GFXCLK_FMAX 1 ··· 710 697 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) 711 698 { 712 699 int ret = 0; 713 - uint64_t feature_enabled; 700 + struct smu_feature_bits feature_enabled; 714 701 715 702 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 716 703 if (ret) 717 704 return false; 718 705 719 - return !!(feature_enabled & SMC_DPM_FEATURE); 706 + return smu_feature_bits_test_mask(&feature_enabled, 707 + smu_v13_0_7_dpm_features.bits); 720 708 } 721 709 722 710 static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) ··· 2745 2731 return ret; 2746 2732 } 2747 2733 2734 + static int 
smu_v13_0_7_mode1_reset(struct smu_context *smu) 2735 + { 2736 + int ret; 2737 + 2738 + ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); 2739 + if (!ret) { 2740 + /* disable mmio access while doing mode 1 reset*/ 2741 + smu->adev->no_hw_access = true; 2742 + /* ensure no_hw_access is globally visible before any MMIO */ 2743 + smp_mb(); 2744 + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); 2745 + } 2746 + 2747 + return ret; 2748 + } 2749 + 2750 + static void smu_v13_0_7_init_msg_ctl(struct smu_context *smu) 2751 + { 2752 + struct amdgpu_device *adev = smu->adev; 2753 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 2754 + 2755 + smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); 2756 + 2757 + /* Set up debug mailbox registers */ 2758 + ctl->config.debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53); 2759 + ctl->config.debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75); 2760 + ctl->config.debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54); 2761 + ctl->flags |= SMU_MSG_CTL_DEBUG_MAILBOX; 2762 + } 2763 + 2748 2764 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { 2749 2765 .init_allowed_features = smu_v13_0_7_init_allowed_features, 2750 2766 .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, ··· 2836 2792 .baco_enter = smu_v13_0_baco_enter, 2837 2793 .baco_exit = smu_v13_0_baco_exit, 2838 2794 .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, 2839 - .mode1_reset = smu_v13_0_mode1_reset, 2795 + .mode1_reset = smu_v13_0_7_mode1_reset, 2840 2796 .set_mp1_state = smu_v13_0_7_set_mp1_state, 2841 2797 .set_df_cstate = smu_v13_0_7_set_df_cstate, 2842 2798 .gpo_control = smu_v13_0_gpo_control, ··· 2855 2811 smu->pwr_src_map = smu_v13_0_7_pwr_src_map; 2856 2812 smu->workload_map = smu_v13_0_7_workload_map; 2857 2813 smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION; 2858 - smu_v13_0_init_msg_ctl(smu, smu_v13_0_7_message_map); 2814 + smu_v13_0_7_init_msg_ctl(smu); 2859 2815 }
+16 -13
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 55 55 #define SMU_13_0_1_UMD_PSTATE_SOCCLK 678 56 56 #define SMU_13_0_1_UMD_PSTATE_FCLK 1800 57 57 58 - #define FEATURE_MASK(feature) (1ULL << feature) 59 - #define SMC_DPM_FEATURE ( \ 60 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 61 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 62 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 63 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 64 - FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \ 65 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 66 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 67 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 68 - FEATURE_MASK(FEATURE_GFX_DPM_BIT)) 58 + static const struct smu_feature_bits yellow_carp_dpm_features = { 59 + .bits = { 60 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 61 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 62 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 63 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_MP0CLK_DPM_BIT), 65 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 66 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), 67 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 68 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT) 69 + } 70 + }; 69 71 70 72 static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = { 71 73 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 259 257 static bool yellow_carp_is_dpm_running(struct smu_context *smu) 260 258 { 261 259 int ret = 0; 262 - uint64_t feature_enabled; 260 + struct smu_feature_bits feature_enabled; 263 261 264 262 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 265 263 266 264 if (ret) 267 265 return false; 268 266 269 - return !!(feature_enabled & SMC_DPM_FEATURE); 267 + return smu_feature_bits_test_mask(&feature_enabled, 268 + yellow_carp_dpm_features.bits); 270 269 } 271 270 272 271 static int yellow_carp_post_smu_init(struct smu_context *smu)
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 747 747 uint32_t feature_mask[2]; 748 748 749 749 if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || 750 - feature->feature_num < 64) 750 + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) 751 751 return -EINVAL; 752 752 753 753 smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask);
+18 -15
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
··· 72 72 #define SMU_14_0_4_UMD_PSTATE_GFXCLK 938 73 73 #define SMU_14_0_4_UMD_PSTATE_SOCCLK 938 74 74 75 - #define FEATURE_MASK(feature) (1ULL << feature) 76 - #define SMC_DPM_FEATURE ( \ 77 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 78 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 79 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 80 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 81 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 82 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 83 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 84 - FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \ 85 - FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ 86 - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ 87 - FEATURE_MASK(FEATURE_VPE_DPM_BIT)) 75 + static const struct smu_feature_bits smu_v14_0_0_dpm_features = { 76 + .bits = { 77 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 78 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 79 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 80 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 81 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 82 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), 83 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 84 + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), 85 + SMU_FEATURE_BIT_INIT(FEATURE_IPU_DPM_BIT), 86 + SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), 87 + SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT) 88 + } 89 + }; 88 90 89 91 enum smu_mall_pg_config { 90 92 SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, ··· 472 470 static bool smu_v14_0_0_is_dpm_running(struct smu_context *smu) 473 471 { 474 472 int ret = 0; 475 - uint64_t feature_enabled; 473 + struct smu_feature_bits feature_enabled; 476 474 477 475 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 478 476 479 477 if (ret) 480 478 return false; 481 479 482 - return !!(feature_enabled & SMC_DPM_FEATURE); 480 + return smu_feature_bits_test_mask(&feature_enabled, 481 + smu_v14_0_0_dpm_features.bits); 483 482 } 484 483 485 484 static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu,
+10 -9
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 56 56 57 57 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 58 58 59 - #define FEATURE_MASK(feature) (1ULL << feature) 60 - #define SMC_DPM_FEATURE ( \ 61 - FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 62 - FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 63 - FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 64 - FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 65 - FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) 59 + static const struct smu_feature_bits smu_v14_0_2_dpm_features = { 60 + .bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), 61 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_UCLK_BIT), 62 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_LINK_BIT), 63 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_SOCCLK_BIT), 64 + SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT) } 65 + }; 66 66 67 67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 68 68 #define DEBUGSMC_MSG_Mode1Reset 2 ··· 589 589 static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) 590 590 { 591 591 int ret = 0; 592 - uint64_t feature_enabled; 592 + struct smu_feature_bits feature_enabled; 593 593 594 594 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 595 595 if (ret) 596 596 return false; 597 597 598 - return !!(feature_enabled & SMC_DPM_FEATURE); 598 + return smu_feature_bits_test_mask(&feature_enabled, 599 + smu_v14_0_2_dpm_features.bits); 599 600 } 600 601 601 602 static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
+1 -9
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0.c
··· 716 716 uint32_t feature_mask[2]; 717 717 718 718 if (smu_feature_list_is_empty(smu, SMU_FEATURE_LIST_ALLOWED) || 719 - feature->feature_num < 64) 719 + feature->feature_num < SMU_FEATURE_NUM_DEFAULT) 720 720 return -EINVAL; 721 721 722 722 smu_feature_list_to_arr32(smu, SMU_FEATURE_LIST_ALLOWED, feature_mask); ··· 1724 1724 mutex_unlock(&ctl->lock); 1725 1725 1726 1726 return ret; 1727 - } 1728 - 1729 - int smu_v15_0_set_default_dpm_tables(struct smu_context *smu) 1730 - { 1731 - struct smu_table_context *smu_table = &smu->smu_table; 1732 - 1733 - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 1734 - smu_table->clocks_table, false); 1735 1727 } 1736 1728 1737 1729 int smu_v15_0_od_edit_dpm_table(struct smu_context *smu,
+155 -43
drivers/gpu/drm/amd/pm/swsmu/smu15/smu_v15_0_0_ppt.c
··· 52 52 #define mmMP1_SMN_C2PMSG_32 0x0060 53 53 #define mmMP1_SMN_C2PMSG_32_BASE_IDX 1 54 54 55 - /* MALLPowerController message arguments (Defines for the Cache mode control) */ 56 - #define SMU_MALL_PMFW_CONTROL 0 57 - #define SMU_MALL_DRIVER_CONTROL 1 55 + #define mmMP1_SMN_C2PMSG_33 0x0061 56 + #define mmMP1_SMN_C2PMSG_33_BASE_IDX 1 58 57 59 - /* 60 - * MALLPowerState message arguments 61 - * (Defines for the Allocate/Release Cache mode if in driver mode) 62 - */ 63 - #define SMU_MALL_EXIT_PG 0 64 - #define SMU_MALL_ENTER_PG 1 65 - 66 - #define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON 58 + #define mmMP1_SMN_C2PMSG_34 0x0062 59 + #define mmMP1_SMN_C2PMSG_34_BASE_IDX 1 67 60 68 61 #define SMU_15_0_UMD_PSTATE_GFXCLK 700 69 62 #define SMU_15_0_UMD_PSTATE_SOCCLK 678 70 63 #define SMU_15_0_UMD_PSTATE_FCLK 1800 71 64 72 65 73 - #define FEATURE_MASK(feature) (1ULL << feature) 74 - #define SMC_DPM_FEATURE ( \ 75 - FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 76 - FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 77 - FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 78 - FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 79 - FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 80 - FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 81 - FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 82 - FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \ 83 - FEATURE_MASK(FEATURE_NPU_DPM_BIT) | \ 84 - FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ 85 - FEATURE_MASK(FEATURE_VPE_DPM_BIT)) 66 + static const struct smu_feature_bits smu_v15_0_0_dpm_features = { 67 + .bits = { 68 + SMU_FEATURE_BIT_INIT(FEATURE_CCLK_DPM_BIT), 69 + SMU_FEATURE_BIT_INIT(FEATURE_VCN_DPM_BIT), 70 + SMU_FEATURE_BIT_INIT(FEATURE_FCLK_DPM_BIT), 71 + SMU_FEATURE_BIT_INIT(FEATURE_SOCCLK_DPM_BIT), 72 + SMU_FEATURE_BIT_INIT(FEATURE_LCLK_DPM_BIT), 73 + SMU_FEATURE_BIT_INIT(FEATURE_SHUBCLK_DPM_BIT), 74 + SMU_FEATURE_BIT_INIT(FEATURE_DCFCLK_DPM_BIT), 75 + SMU_FEATURE_BIT_INIT(FEATURE_ISP_DPM_BIT), 76 + SMU_FEATURE_BIT_INIT(FEATURE_NPU_DPM_BIT), 77 + 
SMU_FEATURE_BIT_INIT(FEATURE_GFX_DPM_BIT), 78 + SMU_FEATURE_BIT_INIT(FEATURE_VPE_DPM_BIT) 79 + } 80 + }; 86 81 87 82 enum smu_mall_pg_config { 88 83 SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, ··· 233 238 return ret; 234 239 } 235 240 241 + static int smu_v15_0_0_update_table(struct smu_context *smu, 242 + enum smu_table_id table_index, 243 + int argument, 244 + void *table_data, 245 + bool drv2smu) 246 + { 247 + struct smu_table_context *smu_table = &smu->smu_table; 248 + struct amdgpu_device *adev = smu->adev; 249 + struct smu_table *table = &smu_table->driver_table; 250 + int table_id = smu_cmn_to_asic_specific_index(smu, 251 + CMN2ASIC_MAPPING_TABLE, 252 + table_index); 253 + uint64_t address; 254 + uint32_t table_size; 255 + int ret; 256 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 257 + 258 + if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) 259 + return -EINVAL; 260 + 261 + table_size = smu_table->tables[table_index].size; 262 + 263 + if (drv2smu) { 264 + memcpy(table->cpu_addr, table_data, table_size); 265 + /* 266 + * Flush hdp cache: to guard the content seen by 267 + * GPU is consitent with CPU. 268 + */ 269 + amdgpu_hdp_flush(adev, NULL); 270 + } 271 + 272 + address = table->mc_address; 273 + 274 + struct smu_msg_args args = { 275 + .msg = drv2smu ? 
276 + SMU_MSG_TransferTableDram2Smu : 277 + SMU_MSG_TransferTableSmu2Dram, 278 + .num_args = 3, 279 + .num_out_args = 0, 280 + }; 281 + 282 + args.args[0] = table_id; 283 + args.args[1] = (uint32_t)lower_32_bits(address); 284 + args.args[2] = (uint32_t)upper_32_bits(address); 285 + 286 + ret = ctl->ops->send_msg(ctl, &args); 287 + 288 + if (ret) 289 + return ret; 290 + 291 + if (!drv2smu) { 292 + amdgpu_hdp_invalidate(adev, NULL); 293 + memcpy(table_data, table->cpu_addr, table_size); 294 + } 295 + 296 + return 0; 297 + } 298 + 299 + static int smu_v15_0_0_set_default_dpm_tables(struct smu_context *smu) 300 + { 301 + struct smu_table_context *smu_table = &smu->smu_table; 302 + 303 + return smu_v15_0_0_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 304 + smu_table->clocks_table, false); 305 + } 306 + 307 + static int smu_v15_0_0_get_metrics_table(struct smu_context *smu, 308 + void *metrics_table, 309 + bool bypass_cache) 310 + { 311 + struct smu_table_context *smu_table = &smu->smu_table; 312 + uint32_t table_size = 313 + smu_table->tables[SMU_TABLE_SMU_METRICS].size; 314 + int ret; 315 + 316 + if (bypass_cache || 317 + !smu_table->metrics_time || 318 + time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) { 319 + ret = smu_v15_0_0_update_table(smu, 320 + SMU_TABLE_SMU_METRICS, 321 + 0, 322 + smu_table->metrics_table, 323 + false); 324 + if (ret) { 325 + dev_info(smu->adev->dev, "Failed to export SMU15_0_0 metrics table!\n"); 326 + return ret; 327 + } 328 + smu_table->metrics_time = jiffies; 329 + } 330 + 331 + if (metrics_table) 332 + memcpy(metrics_table, smu_table->metrics_table, table_size); 333 + 334 + return 0; 335 + } 336 + 236 337 static int smu_v15_0_0_get_smu_metrics_data(struct smu_context *smu, 237 338 MetricsMember_t member, 238 339 uint32_t *value) ··· 338 247 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 339 248 int ret = 0; 340 249 341 - ret = smu_cmn_get_metrics_table(smu, NULL, false); 250 + ret = 
smu_v15_0_0_get_metrics_table(smu, NULL, false); 342 251 if (ret) 343 252 return ret; 344 253 ··· 532 441 return ret; 533 442 } 534 443 444 + static int smu_v15_0_0_get_enabled_mask(struct smu_context *smu, 445 + struct smu_feature_bits *feature_mask) 446 + { 447 + int ret; 448 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 449 + 450 + if (!feature_mask) 451 + return -EINVAL; 452 + 453 + struct smu_msg_args args = { 454 + .msg = SMU_MSG_GetEnabledSmuFeatures, 455 + .num_args = 0, 456 + .num_out_args = 2, 457 + }; 458 + 459 + ret = ctl->ops->send_msg(ctl, &args); 460 + 461 + if (!ret) 462 + smu_feature_bits_from_arr32(feature_mask, args.out_args, 463 + SMU_FEATURE_NUM_DEFAULT); 464 + 465 + return ret; 466 + } 467 + 535 468 static bool smu_v15_0_0_is_dpm_running(struct smu_context *smu) 536 469 { 537 470 int ret = 0; 538 - uint64_t feature_enabled; 471 + struct smu_feature_bits feature_enabled; 539 472 540 - ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 473 + ret = smu_v15_0_0_get_enabled_mask(smu, &feature_enabled); 541 474 542 475 if (ret) 543 476 return false; 544 477 545 - return !!(feature_enabled & SMC_DPM_FEATURE); 478 + return smu_feature_bits_test_mask(&feature_enabled, 479 + smu_v15_0_0_dpm_features.bits); 546 480 } 547 481 548 482 static int smu_v15_0_0_set_watermarks_table(struct smu_context *smu, ··· 637 521 SmuMetrics_t metrics; 638 522 int ret = 0; 639 523 640 - ret = smu_cmn_get_metrics_table(smu, &metrics, true); 524 + ret = smu_v15_0_0_get_metrics_table(smu, &metrics, false); 641 525 if (ret) 642 526 return ret; 643 527 ··· 1092 976 switch (clk_type) { 1093 977 case SMU_GFXCLK: 1094 978 case SMU_SCLK: 1095 - msg_set_min = SMU_MSG_SetHardMinGfxClk; 979 + msg_set_min = SMU_MSG_SetSoftMinGfxclk; 1096 980 msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 1097 981 break; 1098 982 case SMU_FCLK: 1099 - msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 983 + msg_set_min = SMU_MSG_SetSoftMinFclk; 1100 984 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 1101 985 break; 
1102 986 case SMU_SOCCLK: 1103 - msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 987 + msg_set_min = SMU_MSG_SetSoftMinSocclkByFreq; 1104 988 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 1105 989 break; 1106 990 case SMU_VCLK: 1107 991 case SMU_DCLK: 1108 - msg_set_min = SMU_MSG_SetHardMinVcn0; 1109 - msg_set_max = SMU_MSG_SetSoftMaxVcn0; 1110 - break; 1111 - case SMU_VCLK1: 1112 - case SMU_DCLK1: 1113 - msg_set_min = SMU_MSG_SetHardMinVcn1; 1114 - msg_set_max = SMU_MSG_SetSoftMaxVcn1; 992 + msg_set_min = SMU_MSG_SetSoftMinVcn; 993 + msg_set_max = SMU_MSG_SetSoftMaxVcn; 1115 994 break; 1116 995 default: 1117 996 return -EINVAL; ··· 1423 1312 .system_features_control = smu_v15_0_0_system_features_control, 1424 1313 .dpm_set_vcn_enable = smu_v15_0_set_vcn_enable, 1425 1314 .dpm_set_jpeg_enable = smu_v15_0_set_jpeg_enable, 1426 - .set_default_dpm_table = smu_v15_0_set_default_dpm_tables, 1315 + .set_default_dpm_table = smu_v15_0_0_set_default_dpm_tables, 1427 1316 .read_sensor = smu_v15_0_0_read_sensor, 1428 1317 .is_dpm_running = smu_v15_0_0_is_dpm_running, 1429 1318 .set_watermarks_table = smu_v15_0_0_set_watermarks_table, 1430 1319 .get_gpu_metrics = smu_v15_0_0_get_gpu_metrics, 1431 - .get_enabled_mask = smu_cmn_get_enabled_mask, 1320 + .get_enabled_mask = smu_v15_0_0_get_enabled_mask, 1432 1321 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1433 - .set_driver_table_location = smu_v15_0_set_driver_table_location, 1434 1322 .gfx_off_control = smu_v15_0_gfx_off_control, 1435 1323 .mode2_reset = smu_v15_0_0_mode2_reset, 1436 1324 .get_dpm_ultimate_freq = smu_v15_0_common_get_dpm_ultimate_freq, ··· 1454 1344 ctl->config.msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_30); 1455 1345 ctl->config.resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_31); 1456 1346 ctl->config.arg_regs[0] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_32); 1457 - ctl->config.num_arg_regs = 1; 1347 + ctl->config.arg_regs[1] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_33); 1348 + 
ctl->config.arg_regs[2] = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_34); 1349 + ctl->config.num_arg_regs = 3; 1458 1350 ctl->ops = &smu_msg_v1_ops; 1459 1351 ctl->default_timeout = adev->usec_timeout * 20; 1460 1352 ctl->message_map = smu_v15_0_0_message_map;
+39 -37
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 690 690 } 691 691 692 692 static int __smu_get_enabled_features(struct smu_context *smu, 693 - uint64_t *enabled_features) 693 + struct smu_feature_bits *enabled_features) 694 694 { 695 695 return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features); 696 696 } ··· 699 699 enum smu_feature_mask mask) 700 700 { 701 701 struct amdgpu_device *adev = smu->adev; 702 - uint64_t enabled_features; 702 + struct smu_feature_bits enabled_features; 703 703 int feature_id; 704 704 705 705 if (__smu_get_enabled_features(smu, &enabled_features)) { ··· 712 712 * enabled. Also considering they have no feature_map available, the 713 713 * check here can avoid unwanted feature_map check below. 714 714 */ 715 - if (enabled_features == ULLONG_MAX) 715 + if (smu_feature_bits_full(&enabled_features, 716 + smu->smu_feature.feature_num)) 716 717 return 1; 717 718 718 719 feature_id = smu_cmn_to_asic_specific_index(smu, ··· 722 721 if (feature_id < 0) 723 722 return 0; 724 723 725 - return test_bit(feature_id, (unsigned long *)&enabled_features); 724 + return smu_feature_bits_is_set(&enabled_features, feature_id); 726 725 } 727 726 728 727 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, ··· 764 763 } 765 764 766 765 int smu_cmn_get_enabled_mask(struct smu_context *smu, 767 - uint64_t *feature_mask) 766 + struct smu_feature_bits *feature_mask) 768 767 { 769 - uint32_t *feature_mask_high; 770 - uint32_t *feature_mask_low; 768 + uint32_t features[2]; 771 769 int ret = 0, index = 0; 772 770 773 771 if (!feature_mask) 774 772 return -EINVAL; 775 773 776 - feature_mask_low = &((uint32_t *)feature_mask)[0]; 777 - feature_mask_high = &((uint32_t *)feature_mask)[1]; 778 - 779 774 index = smu_cmn_to_asic_specific_index(smu, 780 775 CMN2ASIC_MAPPING_MSG, 781 776 SMU_MSG_GetEnabledSmuFeatures); 782 777 if (index > 0) { 783 - ret = smu_cmn_send_smc_msg_with_param(smu, 784 - SMU_MSG_GetEnabledSmuFeatures, 785 - 0, 786 - feature_mask_low); 778 + ret = 
smu_cmn_send_smc_msg_with_param( 779 + smu, SMU_MSG_GetEnabledSmuFeatures, 0, &features[0]); 787 780 if (ret) 788 781 return ret; 789 782 790 - ret = smu_cmn_send_smc_msg_with_param(smu, 791 - SMU_MSG_GetEnabledSmuFeatures, 792 - 1, 793 - feature_mask_high); 783 + ret = smu_cmn_send_smc_msg_with_param( 784 + smu, SMU_MSG_GetEnabledSmuFeatures, 1, &features[1]); 794 785 } else { 795 - ret = smu_cmn_send_smc_msg(smu, 796 - SMU_MSG_GetEnabledSmuFeaturesHigh, 797 - feature_mask_high); 786 + ret = smu_cmn_send_smc_msg( 787 + smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &features[1]); 798 788 if (ret) 799 789 return ret; 800 790 801 - ret = smu_cmn_send_smc_msg(smu, 802 - SMU_MSG_GetEnabledSmuFeaturesLow, 803 - feature_mask_low); 791 + ret = smu_cmn_send_smc_msg( 792 + smu, SMU_MSG_GetEnabledSmuFeaturesLow, &features[0]); 804 793 } 794 + 795 + if (!ret) 796 + smu_feature_bits_from_arr32(feature_mask, features, 797 + SMU_FEATURE_NUM_DEFAULT); 805 798 806 799 return ret; 807 800 } ··· 881 886 char *buf) 882 887 { 883 888 int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)]; 884 - uint64_t feature_mask; 889 + struct smu_feature_bits feature_mask; 890 + uint32_t features[2]; 885 891 int i, feature_index; 886 892 uint32_t count = 0; 887 893 size_t size = 0; ··· 890 894 if (__smu_get_enabled_features(smu, &feature_mask)) 891 895 return 0; 892 896 893 - size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", 894 - upper_32_bits(feature_mask), lower_32_bits(feature_mask)); 897 + /* TBD: Need to handle for > 64 bits */ 898 + smu_feature_bits_to_arr32(&feature_mask, features, 64); 899 + size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n", 900 + features[1], features[0]); 895 901 896 902 memset(sort_feature, -1, sizeof(sort_feature)); 897 903 ··· 910 912 size += sysfs_emit_at(buf, size, "%-2s. 
%-20s %-3s : %-s\n", 911 913 "No", "Feature", "Bit", "State"); 912 914 913 - for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) { 915 + for (feature_index = 0; feature_index < smu->smu_feature.feature_num; 916 + feature_index++) { 914 917 if (sort_feature[feature_index] < 0) 915 918 continue; 916 919 917 - size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n", 918 - count++, 919 - smu_get_feature_name(smu, sort_feature[feature_index]), 920 - feature_index, 921 - !!test_bit(feature_index, (unsigned long *)&feature_mask) ? 922 - "enabled" : "disabled"); 920 + size += sysfs_emit_at( 921 + buf, size, "%02d. %-20s (%2d) : %s\n", count++, 922 + smu_get_feature_name(smu, sort_feature[feature_index]), 923 + feature_index, 924 + smu_feature_bits_is_set(&feature_mask, feature_index) ? 925 + "enabled" : 926 + "disabled"); 923 927 } 924 928 925 929 return size; ··· 931 931 uint64_t new_mask) 932 932 { 933 933 int ret = 0; 934 - uint64_t feature_mask; 934 + struct smu_feature_bits feature_mask; 935 + uint64_t feature_mask_u64; 935 936 uint64_t feature_2_enabled = 0; 936 937 uint64_t feature_2_disabled = 0; 937 938 ··· 940 939 if (ret) 941 940 return ret; 942 941 943 - feature_2_enabled = ~feature_mask & new_mask; 944 - feature_2_disabled = feature_mask & ~new_mask; 942 + feature_mask_u64 = *(uint64_t *)feature_mask.bits; 943 + feature_2_enabled = ~feature_mask_u64 & new_mask; 944 + feature_2_disabled = feature_mask_u64 & ~new_mask; 945 945 946 946 if (feature_2_enabled) { 947 947 ret = smu_cmn_feature_update_enable_state(smu,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
··· 141 141 enum smu_clk_type clk_type); 142 142 143 143 int smu_cmn_get_enabled_mask(struct smu_context *smu, 144 - uint64_t *feature_mask); 144 + struct smu_feature_bits *feature_mask); 145 145 146 146 uint64_t smu_cmn_get_indep_throttler_status( 147 147 const unsigned long dep_status,
+1 -1
drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
··· 299 299 300 300 count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); 301 301 if (bank->ecc_type == RAS_ERR_TYPE__UE) { 302 - if (ext_error_code != 0 && ext_error_code != 9) 302 + if (ext_error_code != 0 && ext_error_code != 1 && ext_error_code != 9) 303 303 count = 0ULL; 304 304 ecc->ue_count = count; 305 305 } else if (bank->ecc_type == RAS_ERR_TYPE__CE) {
+1 -1
drivers/gpu/drm/drm_gpusvm.c
··· 1150 1150 addr->dir); 1151 1151 else if (dpagemap && dpagemap->ops->device_unmap) 1152 1152 dpagemap->ops->device_unmap(dpagemap, 1153 - dev, *addr); 1153 + dev, addr); 1154 1154 i += 1 << addr->order; 1155 1155 } 1156 1156
+1 -1
drivers/gpu/drm/drm_pagemap.c
··· 318 318 struct drm_pagemap_zdd *zdd = page->zone_device_data; 319 319 struct drm_pagemap *dpagemap = zdd->dpagemap; 320 320 321 - dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]); 321 + dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]); 322 322 } else { 323 323 dma_unmap_page(dev, pagemap_addr[i].addr, 324 324 PAGE_SIZE << pagemap_addr[i].order, dir);
+1
drivers/gpu/drm/i915/display/intel_acpi.c
··· 96 96 97 97 if (!pkg->package.count) { 98 98 DRM_DEBUG_DRIVER("no connection in _DSM\n"); 99 + ACPI_FREE(pkg); 99 100 return; 100 101 } 101 102
+20 -3
drivers/gpu/drm/i915/display/intel_dp.c
··· 2557 2557 if (min_bpp_x16 <= 0 || min_bpp_x16 > max_bpp_x16) 2558 2558 return false; 2559 2559 2560 + if (dsc_slice_count == 0) 2561 + return false; 2562 + 2560 2563 return is_bw_sufficient_for_dsc_config(intel_dp, 2561 2564 link_clock, lane_count, 2562 2565 mode_clock, mode_hdisplay, ··· 2668 2665 bool dsc, 2669 2666 struct link_config_limits *limits) 2670 2667 { 2668 + struct intel_display *display = to_intel_display(intel_dp); 2671 2669 bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); 2672 2670 struct intel_connector *connector = 2673 2671 to_intel_connector(conn_state->connector); ··· 2681 2677 limits->min_lane_count = intel_dp_min_lane_count(intel_dp); 2682 2678 limits->max_lane_count = intel_dp_max_lane_count(intel_dp); 2683 2679 2684 - limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 : 2685 - intel_dp_min_bpp(crtc_state->output_format); 2680 + limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format); 2686 2681 if (is_mst) { 2687 2682 /* 2688 2683 * FIXME: If all the streams can't fit into the link with their ··· 2695 2692 } else { 2696 2693 limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state, 2697 2694 respect_downstream_limits); 2695 + } 2696 + 2697 + if (!dsc && intel_dp_in_hdr_mode(conn_state)) { 2698 + if (intel_dp_supports_dsc(intel_dp, connector, crtc_state) && 2699 + limits->pipe.max_bpp >= 30) 2700 + limits->pipe.min_bpp = max(limits->pipe.min_bpp, 30); 2701 + else 2702 + drm_dbg_kms(display->drm, 2703 + "[CONNECTOR:%d:%s] Can't force 30 bpp for HDR (pipe bpp: %d-%d DSC-support: %s)\n", 2704 + connector->base.base.id, connector->base.name, 2705 + limits->pipe.min_bpp, limits->pipe.max_bpp, 2706 + str_yes_no(intel_dp_supports_dsc(intel_dp, connector, 2707 + crtc_state))); 2698 2708 } 2699 2709 2700 2710 if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits)) ··· 2841 2825 } 2842 2826 2843 2827 drm_dbg_kms(display->drm, 2844 - "DP lane count %d clock %d bpp input %d compressed " 
FXP_Q4_FMT " link rate required %d available %d\n", 2828 + "DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " HDR %s link rate required %d available %d\n", 2845 2829 pipe_config->lane_count, pipe_config->port_clock, 2846 2830 pipe_config->pipe_bpp, 2847 2831 FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16), 2832 + str_yes_no(intel_dp_in_hdr_mode(conn_state)), 2848 2833 intel_dp_config_required_rate(pipe_config), 2849 2834 intel_dp_max_link_data_rate(intel_dp, 2850 2835 pipe_config->port_clock,
+1 -1
drivers/gpu/drm/i915/display/intel_quirks.c
··· 239 239 { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, 240 240 241 241 /* Dell XPS 13 7390 2-in-1 */ 242 - { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, 242 + { 0x8a52, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, 243 243 }; 244 244 245 245 static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
+5
drivers/gpu/drm/radeon/si_dpm.c
··· 2925 2925 max_sclk = 60000; 2926 2926 max_mclk = 80000; 2927 2927 } 2928 + if ((rdev->pdev->device == 0x666f) && 2929 + (rdev->pdev->revision == 0x00)) { 2930 + max_sclk = 80000; 2931 + max_mclk = 95000; 2932 + } 2928 2933 } else if (rdev->family == CHIP_OLAND) { 2929 2934 if ((rdev->pdev->revision == 0xC7) || 2930 2935 (rdev->pdev->revision == 0x80) ||
+1 -1
drivers/gpu/drm/xe/xe_bo.c
··· 1941 1941 int err = 0; 1942 1942 int idx; 1943 1943 1944 - if (!drm_dev_enter(&xe->drm, &idx)) 1944 + if (xe_device_wedged(xe) || !drm_dev_enter(&xe->drm, &idx)) 1945 1945 return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); 1946 1946 1947 1947 ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+8 -4
drivers/gpu/drm/xe/xe_configfs.h
··· 21 21 bool xe_configfs_media_gt_allowed(struct pci_dev *pdev); 22 22 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev); 23 23 bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev); 24 - u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, 24 + u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, 25 + enum xe_engine_class class, 25 26 const u32 **cs); 26 - u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, 27 + u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, 28 + enum xe_engine_class class, 27 29 const u32 **cs); 28 30 #ifdef CONFIG_PCI_IOV 29 31 unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev); ··· 39 37 static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; } 40 38 static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; } 41 39 static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; } 42 - static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class, 40 + static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, 41 + enum xe_engine_class class, 43 42 const u32 **cs) { return 0; } 44 - static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class, 43 + static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, 44 + enum xe_engine_class class, 45 45 const u32 **cs) { return 0; } 46 46 static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; } 47 47 #endif
+1 -1
drivers/gpu/drm/xe/xe_gt_ccs_mode.c
··· 191 191 struct xe_device *xe = gt_to_xe(gt); 192 192 int err; 193 193 194 - if (!xe_gt_ccs_mode_enabled(gt)) 194 + if (!xe_gt_ccs_mode_enabled(gt) || IS_SRIOV_VF(xe)) 195 195 return 0; 196 196 197 197 err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+3 -3
drivers/gpu/drm/xe/xe_hwmon.c
··· 48 48 CHANNEL_MCTRL, 49 49 CHANNEL_PCIE, 50 50 CHANNEL_VRAM_N, 51 - CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS, 51 + CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS - 1, 52 52 CHANNEL_MAX, 53 53 }; 54 54 ··· 264 264 return BMG_PACKAGE_TEMPERATURE; 265 265 else if (channel == CHANNEL_VRAM) 266 266 return BMG_VRAM_TEMPERATURE; 267 - else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX)) 267 + else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS)) 268 268 return BMG_VRAM_TEMPERATURE_N(channel - CHANNEL_VRAM_N); 269 269 } else if (xe->info.platform == XE_DG2) { 270 270 if (channel == CHANNEL_PKG) ··· 1427 1427 *str = "mctrl"; 1428 1428 else if (channel == CHANNEL_PCIE) 1429 1429 *str = "pcie"; 1430 - else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX)) 1430 + else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS)) 1431 1431 *str = hwmon->temp.vram_label[channel - CHANNEL_VRAM_N]; 1432 1432 return 0; 1433 1433 case hwmon_power:
+5 -5
drivers/gpu/drm/xe/xe_mmio.c
··· 256 256 struct xe_reg reg_udw = { .addr = reg.addr + 0x4 }; 257 257 u32 ldw, udw, oldudw, retries; 258 258 259 - reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); 260 - reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr); 261 - 262 - /* we shouldn't adjust just one register address */ 263 - xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); 259 + /* 260 + * The two dwords of a 64-bit register can never straddle the offset 261 + * adjustment cutoff. 262 + */ 263 + xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7)); 264 264 265 265 oldudw = xe_mmio_read32(mmio, reg_udw); 266 266 for (retries = 5; retries; --retries) {
+1 -1
drivers/gpu/drm/xe/xe_module.h
··· 12 12 struct xe_modparam { 13 13 bool force_execlist; 14 14 bool probe_display; 15 - u32 force_vram_bar_size; 15 + int force_vram_bar_size; 16 16 int guc_log_level; 17 17 char *guc_firmware_path; 18 18 char *huc_firmware_path;
+6
drivers/gpu/drm/xe/xe_pci.c
··· 553 553 struct xe_gt *gt __free(kfree) = NULL; 554 554 int err; 555 555 556 + /* Don't try to read media ver if media GT is not allowed */ 557 + if (type == GMDID_MEDIA && !xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) { 558 + *ver = *revid = 0; 559 + return 0; 560 + } 561 + 556 562 gt = kzalloc(sizeof(*gt), GFP_KERNEL); 557 563 if (!gt) 558 564 return -ENOMEM;
+26 -28
drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c
··· 349 349 350 350 /* no user serviceable parts below */ 351 351 352 - static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid) 352 + static void action_put_kobject(void *arg) 353 + { 354 + struct kobject *kobj = arg; 355 + 356 + kobject_put(kobj); 357 + } 358 + 359 + static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid, 360 + const struct kobj_type *ktype) 353 361 { 354 362 struct xe_sriov_kobj *vkobj; 363 + int err; 355 364 356 365 xe_sriov_pf_assert_vfid(xe, vfid); 357 366 358 367 vkobj = kzalloc(sizeof(*vkobj), GFP_KERNEL); 359 368 if (!vkobj) 360 - return NULL; 369 + return ERR_PTR(-ENOMEM); 361 370 362 371 vkobj->xe = xe; 363 372 vkobj->vfid = vfid; 373 + kobject_init(&vkobj->base, ktype); 374 + 375 + err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, &vkobj->base); 376 + if (err) 377 + return ERR_PTR(err); 378 + 364 379 return &vkobj->base; 365 380 } 366 381 ··· 478 463 xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err)); 479 464 } 480 465 481 - static void action_put_kobject(void *arg) 482 - { 483 - struct kobject *kobj = arg; 484 - 485 - kobject_put(kobj); 486 - } 487 - 488 466 static int pf_setup_root(struct xe_device *xe) 489 467 { 490 468 struct kobject *parent = &xe->drm.dev->kobj; 491 469 struct kobject *root; 492 470 int err; 493 471 494 - root = create_xe_sriov_kobj(xe, PFID); 495 - if (!root) 496 - return pf_sysfs_error(xe, -ENOMEM, "root obj"); 472 + root = create_xe_sriov_kobj(xe, PFID, &xe_sriov_dev_ktype); 473 + if (IS_ERR(root)) 474 + return pf_sysfs_error(xe, PTR_ERR(root), "root obj"); 497 475 498 - err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root); 499 - if (err) 500 - return pf_sysfs_error(xe, err, "root action"); 501 - 502 - err = kobject_init_and_add(root, &xe_sriov_dev_ktype, parent, "sriov_admin"); 476 + err = kobject_add(root, parent, "sriov_admin"); 503 477 if (err) 504 478 return pf_sysfs_error(xe, err, "root init"); 505 
479 ··· 509 505 root = xe->sriov.pf.sysfs.root; 510 506 511 507 for (n = 0; n <= totalvfs; n++) { 512 - kobj = create_xe_sriov_kobj(xe, VFID(n)); 513 - if (!kobj) 514 - return pf_sysfs_error(xe, -ENOMEM, "tree obj"); 515 - 516 - err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root); 517 - if (err) 518 - return pf_sysfs_error(xe, err, "tree action"); 508 + kobj = create_xe_sriov_kobj(xe, VFID(n), &xe_sriov_vf_ktype); 509 + if (IS_ERR(kobj)) 510 + return pf_sysfs_error(xe, PTR_ERR(kobj), "tree obj"); 519 511 520 512 if (n) 521 - err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, 522 - root, "vf%u", n); 513 + err = kobject_add(kobj, root, "vf%u", n); 523 514 else 524 - err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype, 525 - root, "pf"); 515 + err = kobject_add(kobj, root, "pf"); 526 516 if (err) 527 517 return pf_sysfs_error(xe, err, "tree init"); 528 518
+4 -4
drivers/gpu/drm/xe/xe_svm.c
··· 1676 1676 1677 1677 static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap, 1678 1678 struct device *dev, 1679 - struct drm_pagemap_addr addr) 1679 + const struct drm_pagemap_addr *addr) 1680 1680 { 1681 - if (addr.proto != XE_INTERCONNECT_P2P) 1681 + if (addr->proto != XE_INTERCONNECT_P2P) 1682 1682 return; 1683 1683 1684 - dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order, 1685 - addr.dir, DMA_ATTR_SKIP_CPU_SYNC); 1684 + dma_unmap_resource(dev, addr->addr, PAGE_SIZE << addr->order, 1685 + addr->dir, DMA_ATTR_SKIP_CPU_SYNC); 1686 1686 } 1687 1687 1688 1688 static void xe_pagemap_destroy_work(struct work_struct *work)
+6 -1
drivers/gpu/drm/xe/xe_vm_madvise.c
··· 291 291 break; 292 292 case DRM_XE_MEM_RANGE_ATTR_PAT: 293 293 { 294 - u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val); 294 + u16 pat_index, coh_mode; 295 295 296 + if (XE_IOCTL_DBG(xe, args->pat_index.val >= xe->pat.n_entries)) 297 + return false; 298 + 299 + pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries); 300 + coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); 296 301 if (XE_IOCTL_DBG(xe, !coh_mode)) 297 302 return false; 298 303
+8 -10
drivers/gpu/drm/xe/xe_wa.c
··· 548 548 FUNC(xe_rtp_match_first_render_or_compute)), 549 549 XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS)) 550 550 }, 551 - { XE_RTP_NAME("14019988906"), 552 - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), 553 - FUNC(xe_rtp_match_first_render_or_compute)), 554 - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) 555 - }, 556 - { XE_RTP_NAME("14019877138"), 557 - XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), 558 - FUNC(xe_rtp_match_first_render_or_compute)), 559 - XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) 560 - }, 561 551 { XE_RTP_NAME("14020338487"), 562 552 XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), 563 553 FUNC(xe_rtp_match_first_render_or_compute)), ··· 822 832 { XE_RTP_NAME("14020756599"), 823 833 XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)), 824 834 XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS)) 835 + }, 836 + { XE_RTP_NAME("14019988906"), 837 + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), 838 + XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD)) 839 + }, 840 + { XE_RTP_NAME("14019877138"), 841 + XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)), 842 + XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT)) 825 843 }, 826 844 { XE_RTP_NAME("14021490052"), 827 845 XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+1 -1
include/drm/drm_pagemap.h
··· 95 95 */ 96 96 void (*device_unmap)(struct drm_pagemap *dpagemap, 97 97 struct device *dev, 98 - struct drm_pagemap_addr addr); 98 + const struct drm_pagemap_addr *addr); 99 99 100 100 /** 101 101 * @populate_mm: Populate part of the mm with @dpagemap memory,
+1
include/uapi/drm/amdgpu_drm.h
··· 1667 1667 #define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ 1668 1668 #define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ 1669 1669 #define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ 1670 + #define AMDGPU_FAMILY_GC_11_5_4 154 /* GC 11.5.4 */ 1670 1671 #define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */ 1671 1672 1672 1673 #if defined(__cplusplus)