Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-next-2026-04-24' of https://gitlab.freedesktop.org/drm/kernel

Pull drm next fixes from Dave Airlie:
"This is the first of two fixes for the merge PRs, the other is based
on 7.0 branch. This mostly AMD fixes, a couple of weeks of backlog
built up and this weeks. The main complaint I've seen is some boot
warnings around the FP code handling which this should fix. Otherwise
a single rcar-du and a single i915 fix.

amdgpu:
- SMU 14 fixes
- Partition fixes
- SMUIO 15.x fix
- SR-IOV fixes
- JPEG fix
- PSP 15.x fix
- NBIF fix
- Devcoredump fixes
- DPC fix
- RAS fixes
- Aldebaran smu fix
- IP discovery fix
- SDMA 7.1 fix
- Runtime pm fix
- MES 12.1 fix
- DML2 fixes
- DCN 4.2 fixes
- YCbCr fixes
- Freesync fixes
- ISM fixes
- Overlay cursor fix
- DC FP fixes
- UserQ locking fixes
- DC idle state manager fix
- ASPM fix
- GPUVM SVM fix
- DCE 6 fix

amdkfd:
- Fix memory clear handling
- num_of_nodes bounds check fix

i915:
- Fix uninitialized variable in the alignment loop [psr]

rcar-du:
- fix NULL-ptr crash"

* tag 'drm-next-2026-04-24' of https://gitlab.freedesktop.org/drm/kernel: (75 commits)
drm/amdkfd: Add upper bound check for num_of_nodes
drm: rcar-du: Fix crash when no CMM is available
drm/amd/display: Disable 10-bit truncation and dithering on DCE 6.x
drm/amdgpu: OR init_pte_flags into invalid leaf PTE updates
drm/amd: Adjust ASPM support quirk to cover more Intel hosts
drm/amd/display: Undo accidental fix revert in amdgpu_dm_ism.c
drm/i915/psr: Init variable to avoid early exit from et alignment loop
drm/amdgpu: drop userq fence driver refs out of fence process()
drm/amdgpu/userq: unpin and unref doorbell and wptr outside mutex
drm/amdgpu/userq: use pm_runtime_resume_and_get and fix err handling
drm/amdgpu/userq: unmap_helper dont return the queue state
drm/amdgpu/userq: unmap is to be called before freeing doorbell/wptr bo
drm/amdgpu/userq: hold root bo lock in caller of input_va_validate
drm/amdgpu/userq: caller to take reserv lock for vas_list_cleanup
drm/amdgpu/userq: create_mqd does not need userq_mutex
drm/amdgpu/userq: dont lock root bo with userq_mutex held
drm/amdgpu/userq: fix kerneldoc for amdgpu_userq_ensure_ev_fence
drm/amdgpu/userq: clean the VA mapping list for failed queue creation
drm/amdgpu/userq: avoid uneccessary locking in amdgpu_userq_create
drm/amd/display: Fix ISM teardown crash from NULL dc dereference
...

+1380 -535
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
···
         alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
         alloc_flags = 0;
     } else {
-        alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+        alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE |
+                      AMDGPU_GEM_CREATE_VRAM_CLEARED;
         alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
···
     if (dret) {
         amdgpu_connector->detected_by_load = false;
         drm_edid_free(amdgpu_connector->edid);
+        amdgpu_connector->edid = NULL;
         amdgpu_connector_get_edid(connector);
 
         if (!amdgpu_connector->edid) {
···
          */
         if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
             drm_edid_free(amdgpu_connector->edid);
+            amdgpu_connector->edid = NULL;
             ret = connector_status_disconnected;
         } else {
             ret = connector_status_connected;
···
         if (!amdgpu_display_hpd_sense(adev,
                                       amdgpu_connector->hpd.hpd)) {
             drm_edid_free(amdgpu_connector->edid);
+            amdgpu_connector->edid = NULL;
             *status = connector_status_disconnected;
         }
     }
···
     if (dret) {
         amdgpu_connector->detected_by_load = false;
         drm_edid_free(amdgpu_connector->edid);
+        amdgpu_connector->edid = NULL;
         amdgpu_connector_get_edid(connector);
 
         if (!amdgpu_connector->edid) {
···
          */
         if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
             drm_edid_free(amdgpu_connector->edid);
+            amdgpu_connector->edid = NULL;
             ret = connector_status_disconnected;
         } else {
             ret = connector_status_connected;
···
     }
 
     drm_edid_free(amdgpu_connector->edid);
+    amdgpu_connector->edid = NULL;
 
     if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
         (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
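Each hunk above pairs drm_edid_free() with an explicit NULL assignment, so a later detect pass cannot double-free or parse a stale EDID pointer. A minimal userspace sketch of the hazard being closed; struct connector, free_edid() and edid_available() are hypothetical stand-ins, not the driver's types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the connector and EDID helpers. */
struct connector { char *edid; };

static void free_edid(char *edid) { free(edid); } /* NULL-safe, like drm_edid_free() */
static int edid_available(void) { return 0; }     /* simulate a failed re-read */

static void detect(struct connector *c)
{
    free_edid(c->edid);
    c->edid = NULL;   /* the fix: clear the stale pointer before re-reading */

    if (edid_available())
        c->edid = strdup("fresh-edid");
    /* on failure, c->edid is NULL instead of dangling */
}

int main(void)
{
    struct connector c = { .edid = strdup("edid-block") };

    detect(&c);  /* frees the old EDID; the re-read fails */
    detect(&c);  /* safe only because c.edid was cleared; a stale
                  * pointer here would be freed a second time */
    puts("no double free");
    return 0;
}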
+27 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
···
 static const guid_t CRASHDUMP = AMD_CRASHDUMP;
 static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
 
+#define CPER_SIGNATURE_SZ (sizeof(((struct cper_hdr *)0)->signature))
+
 static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
 {
     hdr->record_length += size;
···
 
 static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
 {
-    struct cper_hdr *chdr;
+    char signature[CPER_SIGNATURE_SZ];
 
-    chdr = (struct cper_hdr *)&(ring->ring[pos]);
-    return strcmp(chdr->signature, "CPER") ? false : true;
+    if ((pos << 2) >= ring->ring_size)
+        return false;
+
+    if ((pos << 2) + CPER_SIGNATURE_SZ <= ring->ring_size) {
+        memcpy(signature, &ring->ring[pos], CPER_SIGNATURE_SZ);
+    } else {
+        u32 chunk = ring->ring_size - (pos << 2);
+
+        memcpy(signature, &ring->ring[pos], chunk);
+        memcpy(signature + chunk, ring->ring, CPER_SIGNATURE_SZ - chunk);
+    }
+
+    return !memcmp(signature, "CPER", CPER_SIGNATURE_SZ);
 }
 
 static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
 {
-    struct cper_hdr *chdr;
+    struct cper_hdr chdr;
     u64 p;
     u32 chunk, rec_len = 0;
 
-    chdr = (struct cper_hdr *)&(ring->ring[pos]);
     chunk = ring->ring_size - (pos << 2);
 
-    if (!strcmp(chdr->signature, "CPER")) {
-        rec_len = chdr->record_length;
+    if (amdgpu_cper_is_hdr(ring, pos)) {
+        if (chunk >= sizeof(chdr)) {
+            memcpy(&chdr, &ring->ring[pos], sizeof(chdr));
+        } else {
+            memcpy(&chdr, &ring->ring[pos], chunk);
+            memcpy((u8 *)&chdr + chunk, ring->ring, sizeof(chdr) - chunk);
+        }
+
+        rec_len = chdr.record_length;
         goto calc;
     }
 
···
         goto calc;
 
     for (p = pos + 1; p <= ring->buf_mask; p++) {
-        chdr = (struct cper_hdr *)&(ring->ring[p]);
-        if (!strcmp(chdr->signature, "CPER")) {
+        if (amdgpu_cper_is_hdr(ring, p)) {
             rec_len = (p - pos) << 2;
             goto calc;
         }
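The CPER change stops casting struct cper_hdr directly onto the ring and instead copies the signature and header out with wrap-around-aware memcpy() calls, since a record may straddle the end of the dword ring. A small self-contained sketch of the same two-memcpy pattern; the ring size and record contents are arbitrary demo values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A toy dword ring; the size is an arbitrary assumption for the demo. */
#define RING_DWORDS 8
static uint32_t ring[RING_DWORDS];
#define RING_SIZE ((uint32_t)sizeof(ring)) /* bytes */

/* Read len bytes starting at dword index pos, handling wrap-around,
 * mirroring the two-memcpy pattern the CPER fix uses. */
static void ring_read(void *dst, uint64_t pos, uint32_t len)
{
    uint32_t off = pos << 2;          /* dword index -> byte offset */
    uint32_t chunk = RING_SIZE - off; /* bytes until the ring end */

    if (len <= chunk) {
        memcpy(dst, (uint8_t *)ring + off, len);
    } else {
        memcpy(dst, (uint8_t *)ring + off, chunk);
        memcpy((uint8_t *)dst + chunk, ring, len - chunk); /* wrapped tail */
    }
}

int main(void)
{
    char out[9] = {0};

    /* Write an 8-byte record that starts in the last dword and wraps
     * into the first. */
    memcpy((uint8_t *)ring + RING_SIZE - 4, "CPER", 4);
    memcpy(ring, "hdr!", 4);

    ring_read(out, RING_DWORDS - 1, 8);
    printf("%s\n", out); /* prints "CPERhdr!" */
    return 0;
}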
+6 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
···
     struct amdgpu_device *adev = container_of(work, typeof(*adev), coredump_work);
     struct amdgpu_coredump_info *coredump = adev->coredump;
 
+    if (!coredump)
+        goto end;
+
     /* Do a one-time preparation of the coredump output because
      * repeatingly calling drm_coredump_printer is very slow.
      */
···
     int i, off, idx;
 
     /* No need to generate a new coredump if there's one in progress already. */
-    if (work_pending(&adev->coredump_work))
+    if (work_busy(&adev->coredump_work))
         return;
 
     if (job && job->pasid)
···
 
     coredump->skip_vram_check = skip_vram_check;
     coredump->reset_vram_lost = vram_lost;
-    coredump->pasid = job->pasid;
 
     if (job && job->pasid) {
         struct amdgpu_task_info *ti;
···
         coredump->reset_task_info = *ti;
         amdgpu_vm_put_task_info(ti);
     }
+    coredump->pasid = job->pasid;
     coredump->num_ibs = job->num_ibs;
     for (i = 0; i < job->num_ibs; ++i) {
         coredump->ibs[i].gpu_addr = job->ibs[i].gpu_addr;
···
         coredump->rings[idx].offset = off;
 
         memcpy(&coredump->rings_dw[off], ring->ring, ring->ring_size);
-        off += ring->ring_size;
+        off += ring->ring_size / 4;
         idx++;
     }
     coredump->num_rings = idx;
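The last hunk fixes an indexing-unit bug: rings_dw is an array of u32, but ring->ring_size is in bytes, so advancing the offset by ring_size overshot by a factor of four. A tiny sketch of the byte-to-dword conversion; the buffer sizes here are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t rings_dw[16] = {0};  /* u32-indexed snapshot buffer */
    uint32_t ring_size = 32;      /* ring size in BYTES, like ring->ring_size */
    uint32_t off = 0;

    /* Buggy version: off += ring_size; treats a dword index as a byte
     * offset and overshoots by 4x, walking past rings_dw. */
    off += ring_size / 4;         /* fixed: convert bytes to dwords */

    printf("next snapshot starts at rings_dw[%u] (buffer holds %zu dwords)\n",
           off, sizeof(rings_dw) / sizeof(rings_dw[0]));
    return 0;
}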
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
 #if IS_ENABLED(CONFIG_X86)
     struct cpuinfo_x86 *c = &cpu_data(0);
 
-    if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
-          amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
-        return false;
-
-    if (c->x86 == 6 &&
-        adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+    if (c->x86_vendor == X86_VENDOR_INTEL) {
         switch (c->x86_model) {
         case VFM_MODEL(INTEL_ALDERLAKE):
         case VFM_MODEL(INTEL_ALDERLAKE_L):
         case VFM_MODEL(INTEL_RAPTORLAKE):
         case VFM_MODEL(INTEL_RAPTORLAKE_P):
         case VFM_MODEL(INTEL_RAPTORLAKE_S):
+        case VFM_MODEL(INTEL_TIGERLAKE):
+        case VFM_MODEL(INTEL_TIGERLAKE_L):
             return true;
         default:
             return false;
···
         list_add_tail(&tmp_adev->reset_list, device_list);
         if (adev->shutdown)
             tmp_adev->shutdown = true;
-        if (amdgpu_reset_in_dpc(adev))
-            tmp_adev->pcie_reset_ctx.in_link_reset = true;
     }
     if (!list_is_first(&adev->reset_list, device_list))
         list_rotate_to_front(&adev->reset_list, device_list);
···
         amdgpu_reset_set_dpc_status(adev, true);
 
         mutex_lock(&hive->hive_lock);
+    } else {
+        if (amdgpu_device_bus_status_check(adev))
+            amdgpu_reset_set_dpc_status(adev, true);
     }
     memset(&reset_context, 0, sizeof(reset_context));
     INIT_LIST_HEAD(&device_list);
···
         list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
             tmp_adev->pcie_reset_ctx.in_link_reset = true;
     } else {
+        adev->pcie_reset_ctx.in_link_reset = true;
         set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
     }
 
···
             tmp_adev->pcie_reset_ctx.in_link_reset = false;
             list_add_tail(&tmp_adev->reset_list, &device_list);
         }
-    } else
+    } else {
+        adev->pcie_reset_ctx.in_link_reset = false;
         list_add_tail(&adev->reset_list, &device_list);
-
+    }
     amdgpu_device_sched_resume(&device_list, NULL, NULL);
     amdgpu_device_gpu_resume(adev, &device_list, false);
     amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
···
         *info = &bhdrv2->table_list[table_id];
         break;
     case 1:
+    case 0:
         *info = &bhdr->table_list[table_id];
         break;
     default:
-        dev_err(adev->dev, "Invalid ip discovery table version\n");
+        dev_err(adev->dev, "Invalid ip discovery table version %d\n", bhdr->version_major);
         return -EINVAL;
     }
 
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
···
         (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
     device_remove_file(adev->dev, &dev_attr_current_compute_partition);
 
+    device_remove_file(adev->dev, &dev_attr_compute_partition_mem_alloc_mode);
+
     if (xcp_switch_supported)
         device_remove_file(adev->dev,
                            &dev_attr_available_compute_partition);
+78 -64
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
···
     u64 size;
     int r = 0;
 
+    /* Caller must hold vm->root.bo reservation */
+    dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
+
     user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
     size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
-
-    r = amdgpu_bo_reserve(vm->root.bo, false);
-    if (r)
-        return r;
 
     va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
     if (!va_map) {
···
     if (user_addr >= va_map->start &&
         va_map->last - user_addr + 1 >= size) {
         amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
-        amdgpu_bo_unreserve(vm->root.bo);
         return 0;
     }
 
     r = -EINVAL;
 out_err:
-    amdgpu_bo_unreserve(vm->root.bo);
     return r;
 }
···
     struct amdgpu_bo_va_mapping *mapping;
     bool r;
 
-    if (amdgpu_bo_reserve(vm->root.bo, false))
-        return false;
+    dma_resv_assert_held(vm->root.bo->tbo.base.resv);
 
     mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
     if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
         r = true;
     else
         r = false;
-    amdgpu_bo_unreserve(vm->root.bo);
 
     return r;
 }
···
 {
     struct amdgpu_userq_va_cursor *va_cursor, *tmp;
     struct amdgpu_bo_va_mapping *mapping;
-    int r;
 
-    r = amdgpu_bo_reserve(queue->vm->root.bo, false);
-    if (r)
-        return r;
+    /* Caller must hold vm->root.bo reservation */
+    dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
 
     list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
         mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
         if (!mapping) {
-            r = -EINVAL;
-            goto err;
+            return -EINVAL;
         }
         dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
                 queue, va_cursor->gpu_addr);
         amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
     }
-err:
-    amdgpu_bo_unreserve(queue->vm->root.bo);
-    return r;
+
+    return 0;
 }
 
 static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
···
     /* Wait for mode-1 reset to complete */
     down_read(&adev->reset_domain->sem);
 
-    /* Drop the userq reference. */
-    amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
     uq_funcs->mqd_destroy(queue);
     /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
     xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
···
     queue->fence_drv = NULL;
     queue->userq_mgr = NULL;
     list_del(&queue->userq_va_list);
-    kfree(queue);
 
     up_read(&adev->reset_domain->sem);
 }
 
+/**
+ * amdgpu_userq_ensure_ev_fence - ensure a valid, unsignaled eviction fence exists
+ * @uq_mgr: the usermode queue manager for this process
+ * @evf_mgr: the eviction fence manager to check and rearm
+ *
+ * Ensures that a valid and not yet signaled eviction fence is attached to the
+ * usermode queue before any queue operations proceed. If it is signalled, then
+ * rearm a new eviction fence.
+ */
 void
 amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
                              struct amdgpu_eviction_fence_mgr *evf_mgr)
···
 amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
 {
     struct amdgpu_device *adev = uq_mgr->adev;
+    struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+    struct amdgpu_vm *vm = &fpriv->vm;
+
     int r = 0;
 
     cancel_delayed_work_sync(&uq_mgr->resume_work);
···
     /* Cancel any pending hang detection work and cleanup */
     cancel_delayed_work_sync(&queue->hang_detect_work);
 
+    r = amdgpu_bo_reserve(vm->root.bo, false);
+    if (r) {
+        drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
+        return r;
+    }
+    amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
+    amdgpu_bo_unreserve(vm->root.bo);
+
     mutex_lock(&uq_mgr->userq_mutex);
     queue->hang_detect_fence = NULL;
     amdgpu_userq_wait_for_last_fence(queue);
 
-    r = amdgpu_bo_reserve(queue->db_obj.obj, true);
-    if (!r) {
-        amdgpu_bo_unpin(queue->db_obj.obj);
-        amdgpu_bo_unreserve(queue->db_obj.obj);
-    }
-    amdgpu_bo_unref(&queue->db_obj.obj);
-
-    r = amdgpu_bo_reserve(queue->wptr_obj.obj, true);
-    if (!r) {
-        amdgpu_bo_unpin(queue->wptr_obj.obj);
-        amdgpu_bo_unreserve(queue->wptr_obj.obj);
-    }
-    amdgpu_bo_unref(&queue->wptr_obj.obj);
-
-    atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
 #if defined(CONFIG_DEBUG_FS)
     debugfs_remove_recursive(queue->debugfs_queue);
 #endif
     amdgpu_userq_detect_and_reset_queues(uq_mgr);
     r = amdgpu_userq_unmap_helper(queue);
     /*TODO: It requires a reset for userq hw unmap error*/
-    if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+    if (r) {
         drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
         queue->state = AMDGPU_USERQ_STATE_HUNG;
     }
+
+    atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
     amdgpu_userq_cleanup(queue);
     mutex_unlock(&uq_mgr->userq_mutex);
 
+    amdgpu_bo_reserve(queue->db_obj.obj, true);
+    amdgpu_bo_unpin(queue->db_obj.obj);
+    amdgpu_bo_unreserve(queue->db_obj.obj);
+    amdgpu_bo_unref(&queue->db_obj.obj);
+
+    amdgpu_bo_reserve(queue->wptr_obj.obj, true);
+    amdgpu_bo_unpin(queue->wptr_obj.obj);
+    amdgpu_bo_unreserve(queue->wptr_obj.obj);
+    amdgpu_bo_unref(&queue->wptr_obj.obj);
+    kfree(queue);
 
     pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
···
     if (r)
         return r;
 
-    r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+    r = pm_runtime_resume_and_get(adev_to_drm(adev)->dev);
     if (r < 0) {
-        drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
-        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+        drm_file_err(uq_mgr->file, "pm_runtime_resume_and_get() failed for userqueue create\n");
         return r;
     }
-
-    /*
-     * There could be a situation that we are creating a new queue while
-     * the other queues under this UQ_mgr are suspended. So if there is any
-     * resume work pending, wait for it to get done.
-     *
-     * This will also make sure we have a valid eviction fence ready to be used.
-     */
-    amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
 
     uq_funcs = adev->userq_funcs[args->in.ip_type];
     if (!uq_funcs) {
         drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
                      args->in.ip_type);
         r = -EINVAL;
-        goto unlock;
+        goto err_pm_runtime;
     }
 
     queue = kzalloc_obj(struct amdgpu_usermode_queue);
     if (!queue) {
         drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
         r = -ENOMEM;
-        goto unlock;
+        goto err_pm_runtime;
     }
 
     INIT_LIST_HEAD(&queue->userq_va_list);
···
     db_info.doorbell_offset = args->in.doorbell_offset;
 
     queue->userq_mgr = uq_mgr;
+
     /* Validate the userq virtual address.*/
+    r = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+    if (r)
+        goto free_queue;
+
     if (amdgpu_userq_input_va_validate(adev, queue, args->in.queue_va, args->in.queue_size) ||
         amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
         amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
         r = -EINVAL;
-        goto free_queue;
+        amdgpu_bo_unreserve(fpriv->vm.root.bo);
+        goto clean_mapping;
     }
+    amdgpu_bo_unreserve(fpriv->vm.root.bo);
 
     /* Convert relative doorbell offset into absolute doorbell index */
     index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
     if (index == (uint64_t)-EINVAL) {
         drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
         r = -EINVAL;
-        goto free_queue;
+        goto clean_mapping;
     }
 
     queue->doorbell_index = index;
···
     r = amdgpu_userq_fence_driver_alloc(adev, &queue->fence_drv);
     if (r) {
         drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
-        goto free_queue;
+        goto clean_mapping;
     }
 
     r = uq_funcs->mqd_create(queue, &args->in);
···
         drm_file_err(uq_mgr->file, "Failed to create Queue\n");
         goto clean_fence_driver;
     }
+
+    amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
 
     /* don't map the queue if scheduling is halted */
     if (adev->userq_halt_for_enforce_isolation &&
···
         r = amdgpu_userq_map_helper(queue);
         if (r) {
             drm_file_err(uq_mgr->file, "Failed to map Queue\n");
-            down_read(&adev->reset_domain->sem);
             goto clean_mqd;
         }
     }
···
     if (r) {
         if (!skip_map_queue)
             amdgpu_userq_unmap_helper(queue);
-
         r = -ENOMEM;
-        goto clean_mqd;
+        goto clean_reset_domain;
     }
 
     r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
···
         xa_erase(&uq_mgr->userq_xa, qid);
         if (!skip_map_queue)
             amdgpu_userq_unmap_helper(queue);
-
-        goto clean_mqd;
+        goto clean_reset_domain;
     }
     up_read(&adev->reset_domain->sem);
 
···
     mutex_unlock(&uq_mgr->userq_mutex);
     return 0;
 
-clean_mqd:
-    uq_funcs->mqd_destroy(queue);
+clean_reset_domain:
     up_read(&adev->reset_domain->sem);
+clean_mqd:
+    mutex_unlock(&uq_mgr->userq_mutex);
+    uq_funcs->mqd_destroy(queue);
 clean_fence_driver:
     amdgpu_userq_fence_driver_free(queue);
+clean_mapping:
+    amdgpu_bo_reserve(fpriv->vm.root.bo, true);
+    amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
+    amdgpu_bo_unreserve(fpriv->vm.root.bo);
 free_queue:
     kfree(queue);
-unlock:
-    mutex_unlock(&uq_mgr->userq_mutex);
-
+err_pm_runtime:
+    pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
     return r;
 }
···
 static int
 amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
 {
+    struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+    struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_usermode_queue *queue;
     unsigned long queue_id;
     int ret = 0, r;
+
+    if (amdgpu_bo_reserve(vm->root.bo, false))
+        return false;
 
     mutex_lock(&uq_mgr->userq_mutex);
     /* Resume all the queues for this process */
···
 
     }
     mutex_unlock(&uq_mgr->userq_mutex);
+    amdgpu_bo_unreserve(vm->root.bo);
 
     if (ret)
         drm_file_err(uq_mgr->file,
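A recurring shape in this userq rework: amdgpu_bo_reserve()/unreserve() pairs move out of the helpers and into the callers, and the helpers now only assert the reservation with dma_resv_assert_held(). A userspace sketch of that caller-holds-the-lock convention, with a pthread ERRORCHECK mutex as a hypothetical stand-in for the reservation:

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* root_lock models the VM root BO reservation the userq helpers now
 * expect their caller to hold. */
static pthread_mutex_t root_lock;

/* Userspace stand-in for dma_resv_assert_held(): re-locking an
 * ERRORCHECK mutex from the owning thread fails with EDEADLK,
 * proving the caller already holds it. */
static void assert_held(pthread_mutex_t *m)
{
    int rc = pthread_mutex_lock(m);

    assert(rc == EDEADLK);
    (void)rc;
}

static void validate_va(const char *what)
{
    assert_held(&root_lock); /* precondition, not a lock/unlock pair */
    printf("validated %s under caller's lock\n", what);
}

int main(void)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&root_lock, &attr);

    /* Lock once in the caller, run every helper inside one scope -
     * the shape the userq rework moves to. */
    pthread_mutex_lock(&root_lock);
    validate_va("queue ring");
    validate_va("rptr");
    validate_va("wptr");
    pthread_mutex_unlock(&root_lock);
    return 0;
}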
+33 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
···
     amdgpu_userq_fence_driver_put(userq->fence_drv);
 }
 
+static void
+amdgpu_userq_fence_put_fence_drv_array(struct amdgpu_userq_fence *userq_fence)
+{
+    unsigned long i;
+
+    for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+        amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+    userq_fence->fence_drv_array_count = 0;
+}
+
 void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
 {
     struct amdgpu_userq_fence *userq_fence, *tmp;
+    LIST_HEAD(to_be_signaled);
     struct dma_fence *fence;
     unsigned long flags;
     u64 rptr;
-    int i;
 
     if (!fence_drv)
         return;
···
     spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
     rptr = amdgpu_userq_fence_read(fence_drv);
 
-    list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
-        fence = &userq_fence->base;
-
-        if (rptr < fence->seqno)
+    list_for_each_entry(userq_fence, &fence_drv->fences, link) {
+        if (rptr < userq_fence->base.seqno)
             break;
+    }
 
+    list_cut_before(&to_be_signaled, &fence_drv->fences,
+                    &userq_fence->link);
+    spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+    list_for_each_entry_safe(userq_fence, tmp, &to_be_signaled, link) {
+        fence = &userq_fence->base;
+        list_del_init(&userq_fence->link);
         dma_fence_signal(fence);
-
-        for (i = 0; i < userq_fence->fence_drv_array_count; i++)
-            amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
-
-        list_del(&userq_fence->link);
+        /* Drop fence_drv_array outside fence_list_lock
+         * to avoid the recursion lock.
+         */
+        amdgpu_userq_fence_put_fence_drv_array(userq_fence);
         dma_fence_put(fence);
     }
-    spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
 }
 
 void amdgpu_userq_fence_driver_destroy(struct kref *ref)
···
     struct amdgpu_userq_fence_driver *fence_drv;
     struct dma_fence *fence;
     unsigned long flags;
+    bool signaled = false;
 
     fence_drv = userq->fence_drv;
     if (!fence_drv)
···
 
     /* Check if hardware has already processed the job */
     spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
-    if (!dma_fence_is_signaled(fence))
+    if (!dma_fence_is_signaled(fence)) {
         list_add_tail(&userq_fence->link, &fence_drv->fences);
-    else
+    } else {
+        signaled = true;
         dma_fence_put(fence);
-
+    }
     spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+    if (signaled)
+        amdgpu_userq_fence_put_fence_drv_array(userq_fence);
 
     *f = fence;
 
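The fence-processing fix finds the cut point under the spinlock, splices the completed fences onto a private list with list_cut_before(), and only then signals and drops references outside the lock. A self-contained sketch of that splice-then-process pattern, with a minimal intrusive list standing in for the kernel's list.h:

#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive list, standing in for the kernel's list.h. */
struct node { struct node *prev, *next; int seq; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add_tail(struct node *n, struct node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

/* Move [head->next, pos) onto dst, like the kernel's list_cut_before(). */
static void list_cut_before(struct node *dst, struct node *head, struct node *pos)
{
    if (head->next == pos) { list_init(dst); return; }
    dst->next = head->next;
    dst->next->prev = dst;
    dst->prev = pos->prev;
    dst->prev->next = dst;
    head->next = pos;
    pos->prev = head;
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
    struct node head, done, n[4];
    struct node *p;
    int rptr = 2; /* pretend the hardware completed seqno <= 2 */
    int i;

    list_init(&head);
    for (i = 0; i < 4; i++) { n[i].seq = i + 1; list_add_tail(&n[i], &head); }

    /* Find the first unfinished fence under the lock, splice the
     * finished prefix out, then signal outside the lock - the same
     * shape as the userq fence fix. */
    pthread_mutex_lock(&lock);
    for (p = head.next; p != &head; p = p->next)
        if (rptr < p->seq)
            break;
    list_cut_before(&done, &head, p);
    pthread_mutex_unlock(&lock);

    for (p = done.next; p != &done; p = p->next)
        printf("signal fence %d outside the lock\n", p->seq);
    return 0;
}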
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
···
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_reset.h"
 #include "soc15d.h"
 
 /* Firmware Names */
···
 
     /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
      * restore fw data and clear buffer in amdgpu_vcn_resume() */
-    if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
+    if (in_ras_intr || amdgpu_reset_in_dpc(adev))
         return 0;
 
     return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+11 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
···
  */
 
 #include "amdgpu_vm.h"
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
 #include "amdgpu_object.h"
 #include "amdgpu_trace.h"
 
···
 static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
                                 struct dma_fence **fence)
 {
+    struct amdgpu_device *adev = p->adev;
+
     if (p->needs_flush)
         atomic64_inc(&p->vm->tlb_seq);
 
     mb();
-    amdgpu_device_flush_hdp(p->adev, NULL);
+    /* A reset flushes the HDP anyway, so the flush here can be skipped while a reset is ongoing */
+    if (!down_read_trylock(&adev->reset_domain->sem))
+        return 0;
+
+    amdgpu_device_flush_hdp(adev, NULL);
+    up_read(&adev->reset_domain->sem);
+
     return 0;
 }
 
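The commit path now uses down_read_trylock() on the reset domain: if a reset holds the semaphore for write, the HDP flush is skipped rather than blocked on, since the reset flushes HDP itself. A userspace sketch of the trylock-and-skip shape, with a pthread mutex standing in for the reset semaphore:

#include <pthread.h>
#include <stdio.h>

/* reset_lock stands in for adev->reset_domain->sem; in this toy model
 * it is held for the duration of a GPU reset. */
static pthread_mutex_t reset_lock = PTHREAD_MUTEX_INITIALIZER;

static void commit(void)
{
    /* Same shape as the fix: if the reset path holds the lock, skip
     * the flush - the reset flushes HDP itself. */
    if (pthread_mutex_trylock(&reset_lock) != 0) {
        puts("reset in progress: skip HDP flush");
        return;
    }
    puts("HDP flushed");
    pthread_mutex_unlock(&reset_lock);
}

int main(void)
{
    commit();                         /* idle: flushes */
    pthread_mutex_lock(&reset_lock);  /* simulate an ongoing reset */
    commit();                         /* flush skipped */
    pthread_mutex_unlock(&reset_lock);
    return 0;
}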
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
···
         !(flags & AMDGPU_PTE_VALID) &&
         !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
 
-        /* Workaround for fault priority problem on GMC9 */
-        flags |= AMDGPU_PTE_EXECUTABLE;
+        /* Workaround for fault priority problem on GMC9 and GFX12,
+         * EXECUTABLE for GMC9 fault priority and init_pte_flags
+         * (e.g. AMDGPU_PTE_IS_PTE on GFX12)
+         */
+        flags |= AMDGPU_PTE_EXECUTABLE | adev->gmc.init_pte_flags;
     }
 
     /*
+26 -5
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
···
 #define regPC_CONFIG_CNTL_1 0x194d
 #define regPC_CONFIG_CNTL_1_BASE_IDX 1
 
+#define regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0 0x0030
+#define regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0_BASE_IDX 1
+#define regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0 0x0031
+#define regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0_BASE_IDX 1
+
 #define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
 #define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
 #define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
···
         amdgpu_gfx_off_ctrl(adev, true);
     } else {
         preempt_disable();
-        clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
-        clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
-        clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
-        if (clock_counter_hi_pre != clock_counter_hi_after)
-            clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+        if (amdgpu_ip_version(adev, SMUIO_HWIP, 0) < IP_VERSION(15, 0, 0)) {
+            clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                          regGOLDEN_TSC_COUNT_UPPER);
+            clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                      regGOLDEN_TSC_COUNT_LOWER);
+            clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                            regGOLDEN_TSC_COUNT_UPPER);
+            if (clock_counter_hi_pre != clock_counter_hi_after)
+                clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                          regGOLDEN_TSC_COUNT_LOWER);
+        } else {
+            clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                          regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0);
+            clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                      regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0);
+            clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                            regGOLDEN_TSC_COUNT_UPPER_smu_15_0_0);
+            if (clock_counter_hi_pre != clock_counter_hi_after)
+                clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0,
+                                                          regGOLDEN_TSC_COUNT_LOWER_smu_15_0_0);
+        }
         preempt_enable();
     }
     clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
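Both register banks keep the upper/lower/upper read sequence: if the high half changed while the low half was read, the low read is stale and is redone. A standalone sketch of why that sequence avoids a torn 64-bit counter read; the simulated counter and its start value are demo assumptions:

#include <stdint.h>
#include <stdio.h>

/* A free-running 64-bit counter exposed as two 32-bit registers; each
 * register read advances it to emulate time passing, and the start
 * value is chosen so the low half carries mid-sequence. */
static uint64_t hw = 0xFFFFFFFEull;

static uint32_t reg_read(int hi)
{
    uint64_t snap = hw;

    hw += 1; /* time passes between register reads */
    return hi ? (uint32_t)(snap >> 32) : (uint32_t)snap;
}

int main(void)
{
    uint32_t hi_pre, hi_after, lo;

    /* The hi/lo/hi dance from the hunk above: if the high half changed
     * while we read the low half, the low read is stale - redo it. */
    hi_pre = reg_read(1);
    lo = reg_read(0);
    hi_after = reg_read(1);
    if (hi_pre != hi_after)
        lo = reg_read(0);

    printf("counter = 0x%016llx\n",
           (unsigned long long)lo | ((unsigned long long)hi_after << 32));
    return 0;
}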
+46 -6
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
···
  */
 void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
 {
-    if (!amdgpu_sriov_vf(ring->adev)) {
+    struct amdgpu_device *adev = ring->adev;
+
+    if (!amdgpu_sriov_vf(adev)) {
+        int jpeg_inst = GET_INST(JPEG, ring->me);
+        uint32_t value = 0x80004000; /* default DS14 */
+
         amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
             0, 0, PACKETJ_TYPE0));
-        amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+        /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
+        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+        case IP_VERSION(4, 1, 0):
+            amdgpu_ring_write(ring, 0x69004);
+            value = 0x80010000;
+            break;
+        case IP_VERSION(4, 2, 0):
+            amdgpu_ring_write(ring, 0x60804);
+            if (jpeg_inst & 1)
+                value = 0x80010000;
+            break;
+        default:
+            amdgpu_ring_write(ring, 0x62a04);
+            break;
+        }
 
         amdgpu_ring_write(ring,
             PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
             0, PACKETJ_TYPE0));
-        amdgpu_ring_write(ring, 0x80004000);
+        amdgpu_ring_write(ring, value);
     }
 }
 
···
  */
 void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
-    if (!amdgpu_sriov_vf(ring->adev)) {
+    struct amdgpu_device *adev = ring->adev;
+
+    if (!amdgpu_sriov_vf(adev)) {
+        int jpeg_inst = GET_INST(JPEG, ring->me);
+        uint32_t value = 0x00004000; /* default DS14 */
+
         amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
             0, 0, PACKETJ_TYPE0));
-        amdgpu_ring_write(ring, 0x62a04);
+
+        /* PCTL0__MMHUB_DEEPSLEEP_IB could be different on different mmhub version */
+        switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
+        case IP_VERSION(4, 1, 0):
+            amdgpu_ring_write(ring, 0x69004);
+            value = 0x00010000;
+            break;
+        case IP_VERSION(4, 2, 0):
+            amdgpu_ring_write(ring, 0x60804);
+            if (jpeg_inst & 1)
+                value = 0x00010000;
+            break;
+        default:
+            amdgpu_ring_write(ring, 0x62a04);
+            break;
+        }
 
         amdgpu_ring_write(ring,
             PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
             0, PACKETJ_TYPE0));
-        amdgpu_ring_write(ring, 0x00004000);
+        amdgpu_ring_write(ring, value);
     }
 }
 
+23 -2
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
···
         goto free_mqd;
     }
 
+    r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+    if (r) {
+        kfree(compute_mqd);
+        goto free_mqd;
+    }
     r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va,
                                        2048);
+    amdgpu_bo_unreserve(queue->vm->root.bo);
     if (r) {
         kfree(compute_mqd);
         goto free_mqd;
···
         userq_props->tmz_queue =
             mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
 
-        r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
-                                           shadow_info.shadow_size);
+        r = amdgpu_bo_reserve(queue->vm->root.bo, false);
         if (r) {
             kfree(mqd_gfx_v11);
             goto free_mqd;
         }
+        r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va,
+                                           shadow_info.shadow_size);
+        if (r) {
+            amdgpu_bo_unreserve(queue->vm->root.bo);
+            kfree(mqd_gfx_v11);
+            goto free_mqd;
+        }
+
         r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va,
                                            shadow_info.csa_size);
+        amdgpu_bo_unreserve(queue->vm->root.bo);
         if (r) {
             kfree(mqd_gfx_v11);
             goto free_mqd;
···
             r = -ENOMEM;
             goto free_mqd;
         }
+
+        r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+        if (r) {
+            kfree(mqd_sdma_v11);
+            goto free_mqd;
+        }
         r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va,
                                            32);
+        amdgpu_bo_unreserve(queue->vm->root.bo);
         if (r) {
             kfree(mqd_sdma_v11);
             goto free_mqd;
+4 -4
drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
···
     int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
     int sdma_ring_align = 0x10, compute_ring_align = 0x100;
     uint32_t tmp, xcc_offset;
-    int r = 0, i, wptr = 0;
+    int r = 0, i, j, wptr = 0;
 
     if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
         if (!adev->mes.enable_coop_mode) {
···
             tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
                                regSCRATCH_REG0);
         } else {
-            for (i = 0; i < num_xcc; i++) {
-                if (xcc_id != adev->mes.master_xcc_ids[i])
+            for (j = 0; j < num_xcc; j++) {
+                if (xcc_id != adev->mes.master_xcc_ids[j])
                     continue;
 
-                tmp = RREG32_SOC15(GC, GET_INST(GC, i),
+                tmp = RREG32_SOC15(GC, GET_INST(GC, j),
                                    regSCRATCH_REG0);
                 if (tmp != 0xDEADBEEF)
                     break;
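The fix gives the inner loop its own index j; reusing i clobbered the outer iterator and cut the outer walk short. The shape of the bug, reduced to a few lines:

#include <stdio.h>

int main(void)
{
    int i, j, outer = 0;

    for (i = 0; i < 3; i++) {
        outer++;
        /* The bug reused i here; each inner pass then left i at 4,
         * so the outer loop ran once instead of three times. */
        for (j = 0; j < 4; j++)
            ;
    }
    printf("outer iterations: %d (reusing i would print 1)\n", outer);
    return 0;
}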
+8 -1
drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
···
 #define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL_nbif_4_10_BASE_IDX 3
 #define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10 0x4f0af6
 #define regGDC_S2A0_S2A_DOORBELL_ENTRY_5_CTRL1_nbif_4_10_BASE_IDX 3
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10 0x0021
+#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10_BASE_IDX 2
 
 static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
 {
···
 
 static u32 nbif_v6_3_1_get_rev_id(struct amdgpu_device *adev)
 {
-    u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+    u32 tmp;
+
+    if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 11, 4))
+        tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_nbif_4_10);
+    else
+        tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
 
     tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
     tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
+1
drivers/gpu/drm/amd/amdgpu/psp_v15_0.c
···
 #include "mp/mp_15_0_0_sh_mask.h"
 
 MODULE_FIRMWARE("amdgpu/psp_15_0_0_toc.bin");
+MODULE_FIRMWARE("amdgpu/psp_15_0_0_ta.bin");
 
 static int psp_v15_0_0_init_microcode(struct psp_context *psp)
 {
+12
drivers/gpu/drm/amd/amdgpu/sdma_v7_1.c
···
     struct amdgpu_device *adev = ip_block->adev;
     int r;
 
+    switch (amdgpu_user_queue) {
+    case -1:
+    default:
+        adev->sdma.no_user_submission = true;
+        adev->sdma.disable_uq = true;
+        break;
+    case 0:
+        adev->sdma.no_user_submission = false;
+        adev->sdma.disable_uq = true;
+        break;
+    }
+
     r = amdgpu_sdma_init_microcode(adev, 0, true);
     if (r) {
         DRM_ERROR("Failed to init sdma firmware!\n");
+3
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
···
         goto out_unlock;
     }
 
+    if (args->num_of_nodes > kfd_topology_get_num_devices())
+        return -EINVAL;
+
     /* Fill in process-aperture information for all available
      * nodes, but not more than args->num_of_nodes as that is
      * the amount of memory allocated by user
+1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
···
     return NULL;
 }
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
+uint32_t kfd_topology_get_num_devices(void);
 int kfd_numa_node_to_apic_id(int numa_node_id);
 uint32_t kfd_gpu_node_num(void);
 
+11
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
···
     return res;
 }
 
+uint32_t kfd_topology_get_num_devices(void)
+{
+    uint32_t num_devices;
+
+    down_read(&topology_lock);
+    num_devices = sys_props.num_devices;
+    up_read(&topology_lock);
+
+    return num_devices;
+}
+
 /* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
  * topology. If GPU device is found @idx, then valid kfd_dev pointer is
  * returned through @kdev
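kfd_topology_get_num_devices() takes the topology rwsem so callers get a consistent snapshot, and the chardev hunk earlier uses it to bound the user-supplied num_of_nodes. A userspace sketch of that locked-getter-plus-bounds-check pairing; the pthread rwlock and the -1 error code are demo substitutions for the kernel's rwsem and -EINVAL:

#include <pthread.h>
#include <stdio.h>

/* Toy model of the topology snapshot guarded by a reader/writer lock. */
static pthread_rwlock_t topology_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int num_devices = 2;

static unsigned int get_num_devices(void)
{
    unsigned int n;

    pthread_rwlock_rdlock(&topology_lock);
    n = num_devices; /* consistent snapshot */
    pthread_rwlock_unlock(&topology_lock);
    return n;
}

/* The ioctl-side check the kfd fix adds: reject user counts larger
 * than what the topology actually exposes. */
static int check_user_count(unsigned int user_num_of_nodes)
{
    if (user_num_of_nodes > get_num_devices())
        return -1; /* -EINVAL in the kernel */
    return 0;
}

int main(void)
{
    printf("num_of_nodes=2 -> %d\n", check_user_count(2));   /* 0 */
    printf("num_of_nodes=64 -> %d\n", check_user_count(64)); /* -1 */
    return 0;
}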
+106 -29
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
 #include <drm/drm_utils.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_audio_component.h>
+#include <drm/drm_colorop.h>
 #include <drm/drm_gem_atomic_helper.h>
 
 #include <media/cec-notifier.h>
···
         adev->dm.idle_workqueue = NULL;
     }
 
+    /* Disable ISM before dc_destroy() invalidates dm->dc */
+    scoped_guard(mutex, &adev->dm.dc_lock)
+        amdgpu_dm_ism_disable(&adev->dm);
+
     amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
···
 
     if (sink) {
         if (aconnector->dc_sink) {
-            amdgpu_dm_update_freesync_caps(connector, NULL);
+            amdgpu_dm_update_freesync_caps(connector, NULL, true);
             /*
              * retain and release below are used to
              * bump up refcount for sink because the link doesn't point
···
             aconnector->dc_sink = sink;
             dc_sink_retain(aconnector->dc_sink);
             amdgpu_dm_update_freesync_caps(connector,
-                    aconnector->drm_edid);
+                    aconnector->drm_edid, true);
         } else {
-            amdgpu_dm_update_freesync_caps(connector, NULL);
+            amdgpu_dm_update_freesync_caps(connector, NULL, true);
             if (!aconnector->dc_sink) {
                 aconnector->dc_sink = aconnector->dc_em_sink;
                 dc_sink_retain(aconnector->dc_sink);
···
      * If yes, put it here.
      */
     if (aconnector->dc_sink) {
-        amdgpu_dm_update_freesync_caps(connector, NULL);
+        amdgpu_dm_update_freesync_caps(connector, NULL, true);
         dc_sink_release(aconnector->dc_sink);
     }
 
···
                 "failed to create aconnector->requested_timing\n");
         }
 
-        amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
+        amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid, true);
         update_connector_ext_caps(aconnector);
         dm_set_panel_type(aconnector);
     } else {
         hdmi_cec_unset_edid(aconnector);
         drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
-        amdgpu_dm_update_freesync_caps(connector, NULL);
+        amdgpu_dm_update_freesync_caps(connector, NULL, true);
         aconnector->num_modes = 0;
         dc_sink_release(aconnector->dc_sink);
         aconnector->dc_sink = NULL;
···
          * drm_edid_connector_add_modes() and need to be
          * restored here.
          */
-        amdgpu_dm_update_freesync_caps(connector, drm_edid);
+        amdgpu_dm_update_freesync_caps(connector, drm_edid, false);
     } else {
         amdgpu_dm_connector->num_modes = 0;
     }
···
  */
 
 /**
+ * dm_plane_color_pipeline_active() - Check if a plane's color pipeline is active
+ * @state: DRM atomic state
+ * @plane: DRM plane to check
+ * @use_old: if true, inspect the old colorop states; otherwise the new ones
+ *
+ * A color pipeline may be selected (color_pipeline != NULL) but still be
+ * inactive if every colorop in the chain is bypassed. Only return
+ * true when at least one colorop has bypass == false, meaning the cursor
+ * would be subjected to the transformation in native mode.
+ *
+ * Return: true if the pipeline modifies pixels, false otherwise.
+ */
+static bool dm_plane_color_pipeline_active(struct drm_atomic_state *state,
+                                           struct drm_plane *plane,
+                                           bool use_old)
+{
+    struct drm_colorop *colorop;
+    struct drm_colorop_state *old_colorop_state, *new_colorop_state;
+    int i;
+
+    for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
+        struct drm_colorop_state *cstate = use_old ? old_colorop_state : new_colorop_state;
+
+        if (cstate->colorop->plane != plane)
+            continue;
+        if (!cstate->bypass)
+            return true;
+    }
+    return false;
+}
+
+/**
  * dm_crtc_get_cursor_mode() - Determine the required cursor mode on crtc
  * @adev: amdgpu device
  * @state: DRM atomic state
···
  * the dm_crtc_state.
  *
  * The cursor should be enabled in overlay mode if there exists an underlying
- * plane - on which the cursor may be blended - that is either YUV formatted, or
- * scaled differently from the cursor.
+ * plane - on which the cursor may be blended - that is either YUV formatted,
+ * scaled differently from the cursor, or has a color pipeline active.
 *
 * Since zpos info is required, drm_atomic_normalize_zpos must be called before
 * calling this function.
···
 
     /*
      * Cursor mode can change if a plane's format changes, scale changes, is
-     * enabled/disabled, or z-order changes.
+     * enabled/disabled, z-order changes, or color management properties change.
      */
     for_each_oldnew_plane_in_state(state, plane, old_plane_state, plane_state, i) {
         int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
···
         dm_get_plane_scale(plane_state, &new_scale_w, &new_scale_h);
         dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
         if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+            consider_mode_change = true;
+            break;
+        }
+
+        if (dm_plane_color_pipeline_active(state, plane, true) !=
+            dm_plane_color_pipeline_active(state, plane, false)) {
             consider_mode_change = true;
             break;
         }
···
 
         /* Underlying plane is YUV format - use overlay cursor */
         if (amdgpu_dm_plane_is_video_format(plane_state->fb->format->format)) {
+            *cursor_mode = DM_CURSOR_OVERLAY_MODE;
+            return 0;
+        }
+
+        /* Underlying plane has an active color pipeline - cursor would be transformed */
+        if (dm_plane_color_pipeline_active(state, plane, false)) {
             *cursor_mode = DM_CURSOR_OVERLAY_MODE;
             return 0;
         }
···
             goto fail;
     } else if (required_cursor_mode == DM_CURSOR_OVERLAY_MODE) {
         drm_dbg_driver(crtc->dev,
-                       "[CRTC:%d:%s] Cannot enable native cursor due to scaling or YUV restrictions\n",
+                       "[CRTC:%d:%s] Cannot enable native cursor due to scaling, YUV, or color pipeline restrictions\n",
                        crtc->base.id, crtc->name);
         ret = -EINVAL;
         goto fail;
···
         vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
         vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
         vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
+        vsdb->freesync_mccs_vcp_code = output->amd_vsdb.freesync_mccs_vcp_code;
     } else {
         drm_warn(adev_to_drm(dm->adev), "Unknown EDID CEA parser results\n");
         return false;
···
         vsdb_info->amd_vsdb_version = version;
         vsdb_info->min_refresh_rate_hz = min_rate;
         vsdb_info->max_refresh_rate_hz = max_rate;
+        /* Not enabled on DMCU */
+        vsdb_info->freesync_mccs_vcp_code = 0;
         return true;
     }
     /* not amd vsdb */
···
  *
  * @connector: Connector to query.
  * @drm_edid: DRM EDID from monitor
+ * @do_mccs: Controls whether MCCS (Monitor Control Command Set) over
+ * DDC (Display Data Channel) transactions are performed. When true,
+ * the driver queries the monitor to get or update additional FreeSync
+ * capability information. When false, these transactions are skipped.
 *
 * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
 * track of some of the display information in the internal data struct used by
···
 * FreeSync parameters.
 */
 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
-                                    const struct drm_edid *drm_edid)
+                                    const struct drm_edid *drm_edid, bool do_mccs)
 {
     int i = 0;
     struct amdgpu_dm_connector *amdgpu_dm_connector =
···
 
     } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
         i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
-        if (i >= 0 && vsdb_info.freesync_supported) {
-            amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
-            amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
-            if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
-                freesync_capable = true;
+        if (i >= 0) {
+            amdgpu_dm_connector->vsdb_info = vsdb_info;
+            sink->edid_caps.freesync_vcp_code = vsdb_info.freesync_mccs_vcp_code;
 
-            connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
-            connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+            if (vsdb_info.freesync_supported) {
+                amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+                amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+                if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+                    freesync_capable = true;
+
+                connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+                connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+            }
         }
     }
 
···
 
     if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
         i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
-        if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
-
-            amdgpu_dm_connector->pack_sdp_v1_3 = true;
-            amdgpu_dm_connector->as_type = as_type;
+        if (i >= 0) {
             amdgpu_dm_connector->vsdb_info = vsdb_info;
+            sink->edid_caps.freesync_vcp_code = vsdb_info.freesync_mccs_vcp_code;
 
-            amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
-            amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
-            if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
-                freesync_capable = true;
+            if (vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+                amdgpu_dm_connector->pack_sdp_v1_3 = true;
+                amdgpu_dm_connector->as_type = as_type;
 
-            connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
-            connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+                amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+                amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+                if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+                    freesync_capable = true;
+
+                connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+                connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+            }
         }
     }
+
+    /* Handle MCCS */
+    if (do_mccs)
+        dm_helpers_read_mccs_caps(adev->dm.dc->ctx, amdgpu_dm_connector->dc_link, sink);
+
+    if ((sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+         as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) &&
+        (!sink->edid_caps.freesync_vcp_code ||
+         (sink->edid_caps.freesync_vcp_code && !sink->mccs_caps.freesync_supported)))
+        freesync_capable = false;
+
+    if (do_mccs && sink->mccs_caps.freesync_supported && freesync_capable)
+        dm_helpers_mccs_vcp_set(adev->dm.dc->ctx, amdgpu_dm_connector->dc_link, sink);
 
 update:
     if (dm_con_state)
+6 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
···
     unsigned int max_refresh_rate_hz;
 
     /**
+     * @freesync_mccs_vcp_code: MCCS VCP code for freesync state
+     */
+    unsigned int freesync_mccs_vcp_code;
+
+    /**
      * @replay_mode: Replay supported
      */
     bool replay_mode;
···
                            struct drm_connector *connector);
 
 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
-                                    const struct drm_edid *drm_edid);
+                                    const struct drm_edid *drm_edid, bool do_mccs);
 
 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
 
+5 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
···
 
 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
 {
-    struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+    /*
+     * amdgpu_dm_ism_fini() is intentionally called in amdgpu_dm_fini().
+     * It must be called before dc_destroy() in amdgpu_dm_fini()
+     * to avoid ISM accessing an invalid dc handle once dc is released.
+     */
 
-    amdgpu_dm_ism_fini(&acrtc->ism);
     drm_crtc_cleanup(crtc);
     kfree(crtc);
 }
+241
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 49 49 #include "ddc_service_types.h" 50 50 #include "clk_mgr.h" 51 51 52 + #define MCCS_DEST_ADDR (0x6E >> 1) 53 + #define MCCS_SRC_ADDR 0x51 54 + #define MCCS_LENGTH_OFFSET 0x80 55 + #define MCCS_MAX_DATA_SIZE 0x20 56 + 57 + enum mccs_op_code { 58 + MCCS_OP_CODE_VCP_REQUEST = 0x01, 59 + MCCS_OP_CODE_VCP_REPLY = 0x02, 60 + MCCS_OP_CODE_VCP_SET = 0x03, 61 + MCCS_OP_CODE_VCP_RESET = 0x09, 62 + MCCS_OP_CODE_CAP_REQUEST = 0xF3, 63 + MCCS_OP_CODE_CAP_REPLY = 0xE3 64 + }; 65 + 66 + enum mccs_op_buff_size { 67 + MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST = 5, 68 + MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST = 11, 69 + MCCS_OP_BUFF_SIZE_WR_VCP_SET = 7, 70 + }; 71 + 72 + enum vcp_reply_mask { 73 + FREESYNC_SUPPORTED = 0x1 74 + }; 75 + 76 + union vcp_reply { 77 + struct { 78 + unsigned char src_addr; 79 + unsigned char length; /* Length is offset by MccsLengthOffs = 0x80 */ 80 + unsigned char reply_op_code; /* Should return MCCS_OP_CODE_VCP_REPLY = 0x02 */ 81 + unsigned char result_code; /* 00h No Error, 01h Unsupported VCP Code */ 82 + unsigned char request_code; /* Should return mccs vcp code sent in the vcp request */ 83 + unsigned char type_code; /* VCP type code: 00h Set parameter, 01h Momentary */ 84 + unsigned char max_value[2]; /* 2 bytes returning max value current value */ 85 + unsigned char present_value[2]; /* NOTE: Byte0 is MSB, Byte1 is LSB */ 86 + unsigned char check_sum; 87 + } bytes; 88 + unsigned char raw[11]; 89 + }; 90 + 52 91 static u32 edid_extract_panel_id(struct edid *edid) 53 92 { 54 93 return (u32)edid->mfg_id[0] << 24 | ··· 1439 1400 case DP_BRANCH_DEVICE_ID_0060AD: 1440 1401 case DP_BRANCH_DEVICE_ID_00E04C: 1441 1402 case DP_BRANCH_DEVICE_ID_90CC24: 1403 + case DP_BRANCH_DEVICE_ID_001CF8: 1404 + case DP_BRANCH_DEVICE_ID_001FF2: 1442 1405 ret_val = true; 1443 1406 break; 1444 1407 default: ··· 1480 1439 // TODO 1481 1440 return false; 1482 1441 } 1442 + 1443 + static int mccs_operation_vcp_request(unsigned int vcp_code, struct dc_link *link, 1444 + union vcp_reply *reply) 1445 + { 1446 + const unsigned char retry_interval_ms = 40; 1447 + unsigned char retry = 5; 1448 + struct amdgpu_dm_connector *aconnector = link->priv; 1449 + struct i2c_adapter *ddc; 1450 + struct i2c_msg msg = {0}; 1451 + int ret = 0; 1452 + int idx; 1453 + 1454 + unsigned char wr_data[MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST] = { 1455 + MCCS_SRC_ADDR, /* Byte0 - Src Addr */ 1456 + MCCS_LENGTH_OFFSET + 2, /* Byte1 - Length */ 1457 + MCCS_OP_CODE_VCP_REQUEST, /* Byte2 - MCCS Command */ 1458 + (unsigned char) vcp_code, /* Byte3 - VCP Code */ 1459 + MCCS_DEST_ADDR << 1 /* Byte4 - CheckSum */ 1460 + }; 1461 + 1462 + /* calculate checksum */ 1463 + for (idx = 0; idx < (MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST - 1); idx++) 1464 + wr_data[(MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST-1)] ^= wr_data[idx]; 1465 + 1466 + if (link->aux_mode) 1467 + ddc = &aconnector->dm_dp_aux.aux.ddc; 1468 + else 1469 + ddc = &aconnector->i2c->base; 1470 + 1471 + do { 1472 + msg.addr = MCCS_DEST_ADDR; 1473 + msg.flags = 0; 1474 + msg.len = MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST; 1475 + msg.buf = wr_data; 1476 + 1477 + ret = i2c_transfer(ddc, &msg, 1); 1478 + if (ret != 1) 1479 + goto mccs_retry; 1480 + 1481 + msleep(retry_interval_ms); 1482 + 1483 + msg.addr = MCCS_DEST_ADDR; 1484 + msg.flags = I2C_M_RD; 1485 + msg.len = MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST; 1486 + msg.buf = reply->raw; 1487 + 1488 + ret = i2c_transfer(ddc, &msg, 1); 1489 + 1490 + /* sink might reply with null msg if it can't reply in time */ 1491 + if (ret == 1 && reply->bytes.length > MCCS_LENGTH_OFFSET) 1492 
+ break; 1493 + mccs_retry: 1494 + retry--; 1495 + msleep(retry_interval_ms); 1496 + } while (retry); 1497 + 1498 + if (!retry) { 1499 + drm_dbg_driver(aconnector->base.dev, 1500 + "%s: MCCS VCP request failed after retries", __func__); 1501 + return -EIO; 1502 + } 1503 + 1504 + return 0; 1505 + } 1506 + 1507 + void dm_helpers_read_mccs_caps(struct dc_context *ctx, struct dc_link *link, 1508 + struct dc_sink *sink) 1509 + { 1510 + bool mccs_op = false; 1511 + struct dpcd_caps *dpcd_caps; 1512 + struct drm_device *dev; 1513 + uint16_t freesync_vcp_value = 0; 1514 + union vcp_reply vcp_reply_value = {0}; 1515 + 1516 + if (!ctx) 1517 + return; 1518 + dev = adev_to_drm(ctx->driver_context); 1519 + 1520 + if (!link || !sink) { 1521 + drm_dbg_driver(dev, "%s: link or sink is NULL", __func__); 1522 + return; 1523 + } 1524 + 1525 + sink->mccs_caps.freesync_supported = false; 1526 + dpcd_caps = &link->dpcd_caps; 1527 + 1528 + if (sink->edid_caps.freesync_vcp_code != 0) { 1529 + if (dc_is_dp_signal(link->connector_signal)) { 1530 + if ((dpcd_caps->dpcd_rev.raw >= DPCD_REV_14) && 1531 + (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) && 1532 + dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id) && 1533 + (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true)) 1534 + mccs_op = true; 1535 + 1536 + if ((dpcd_caps->dongle_type != DISPLAY_DONGLE_NONE && 1537 + dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) { 1538 + if (mccs_op == false) 1539 + drm_dbg_driver(dev, "%s: Legacy Pcon support", __func__); 1540 + mccs_op = true; 1541 + } 1542 + 1543 + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 1544 + // Todo: Freesync over MST 1545 + mccs_op = false; 1546 + } 1547 + } 1548 + 1549 + if (dc_is_hdmi_signal(link->connector_signal)) { 1550 + drm_dbg_driver(dev, "%s: Local HDMI sink", __func__); 1551 + mccs_op = true; 1552 + } 1553 + 1554 + if (mccs_op == true) { 1555 + // MCCS VCP request to get VCP value 1556 + if (!mccs_operation_vcp_request(sink->edid_caps.freesync_vcp_code, link, 1557 + &vcp_reply_value)) { 1558 + freesync_vcp_value = vcp_reply_value.bytes.present_value[1]; 1559 + freesync_vcp_value |= (uint16_t) vcp_reply_value.bytes.present_value[0] << 8; 1560 + } 1561 + // If VCP Value bit 0 is 1, freesyncSupport = true 1562 + sink->mccs_caps.freesync_supported = 1563 + (freesync_vcp_value & FREESYNC_SUPPORTED) ? 
true : false; 1564 + } 1565 + } 1566 + } 1567 + 1568 + static int mccs_operation_vcp_set(unsigned int vcp_code, struct dc_link *link, uint16_t value) 1569 + { 1570 + const unsigned char retry_interval_ms = 40; 1571 + unsigned char retry = 5; 1572 + struct amdgpu_dm_connector *aconnector = link->priv; 1573 + struct i2c_adapter *ddc; 1574 + struct i2c_msg msg = {0}; 1575 + int ret = 0; 1576 + int idx; 1577 + 1578 + unsigned char wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET] = { 1579 + MCCS_SRC_ADDR, /* Byte0 - Src Addr */ 1580 + MCCS_LENGTH_OFFSET + 4, /* Byte1 - Length */ 1581 + MCCS_OP_CODE_VCP_SET, /* Byte2 - MCCS Command */ 1582 + (unsigned char)vcp_code, /* Byte3 - VCP Code */ 1583 + (unsigned char)(value >> 8), /* Byte4 - Value High Byte */ 1584 + (unsigned char)(value & 0xFF), /* Byte5 - Value Low Byte */ 1585 + MCCS_DEST_ADDR << 1 /* Byte6 - CheckSum */ 1586 + }; 1587 + 1588 + /* calculate checksum */ 1589 + for (idx = 0; idx < (MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1); idx++) 1590 + wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1] ^= wr_data[idx]; 1591 + 1592 + if (link->aux_mode) 1593 + ddc = &aconnector->dm_dp_aux.aux.ddc; 1594 + else 1595 + ddc = &aconnector->i2c->base; 1596 + 1597 + do { 1598 + msg.addr = MCCS_DEST_ADDR; 1599 + msg.flags = 0; 1600 + msg.len = MCCS_OP_BUFF_SIZE_WR_VCP_SET; 1601 + msg.buf = wr_data; 1602 + 1603 + ret = i2c_transfer(ddc, &msg, 1); 1604 + if (ret == 1) 1605 + break; 1606 + 1607 + retry--; 1608 + msleep(retry_interval_ms); 1609 + } while (retry); 1610 + 1611 + if (!retry) 1612 + return -EIO; 1613 + 1614 + return 0; 1615 + } 1616 + 1617 + void dm_helpers_mccs_vcp_set(struct dc_context *ctx, struct dc_link *link, 1618 + struct dc_sink *sink) 1619 + { 1620 + struct drm_device *dev; 1621 + const uint16_t enable = 0x0101; 1622 + 1623 + if (!ctx) 1624 + return; 1625 + dev = adev_to_drm(ctx->driver_context); 1626 + 1627 + if (!link || !sink) { 1628 + drm_dbg_driver(dev, "%s: link or sink is NULL", __func__); 1629 + return; 1630 + } 1631 + 1632 + if (!sink->mccs_caps.freesync_supported) { 1633 + drm_dbg_driver(dev, "%s: MCCS freesync not supported on this sink", __func__); 1634 + return; 1635 + } 1636 + 1637 + if (mccs_operation_vcp_set(sink->edid_caps.freesync_vcp_code, link, enable)) 1638 + drm_dbg_driver(dev, "%s: Failed to set VCP code %d", __func__, 1639 + sink->edid_caps.freesync_vcp_code); 1640 + } 1641 +
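The XOR loops in the hunk above implement the standard DDC/CI checksum: every byte of the frame, including the destination address as it appears on the wire (0x6E, i.e. MCCS_DEST_ADDR << 1), XORed together. A minimal standalone sketch of the same computation, with illustrative names:

    /* DDC/CI checksum as used above: XOR of the 8-bit destination
     * address with every payload byte preceding the checksum. */
    static unsigned char ddcci_checksum(const unsigned char *payload, int len)
    {
            unsigned char sum = 0x6E;       /* MCCS_DEST_ADDR << 1 */
            int i;

            for (i = 0; i < len; i++)
                    sum ^= payload[i];
            return sum;
    }

For the VCP request frame this yields 0x6E ^ 0x51 ^ 0x82 ^ 0x01 ^ vcp_code, which is exactly what the in-place loop leaves in the final wr_data byte.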
+5 -10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c
··· 270 270 struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism); 271 271 struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev); 272 272 struct amdgpu_display_manager *dm = &adev->dm; 273 - int r; 274 273 275 274 trace_amdgpu_dm_ism_commit(dm->active_vblank_irq_count, 276 275 vblank_enabled, ··· 322 323 */ 323 324 if (!vblank_enabled && dm->active_vblank_irq_count == 0) { 324 325 dc_post_update_surfaces_to_stream(dm->dc); 325 - 326 - r = amdgpu_dpm_pause_power_profile(adev, true); 327 - if (r) 328 - dev_warn(adev->dev, "failed to set default power profile mode\n"); 329 - 330 326 dc_allow_idle_optimizations(dm->dc, true); 331 - 332 - r = amdgpu_dpm_pause_power_profile(adev, false); 333 - if (r) 334 - dev_warn(adev->dev, "failed to restore the power profile mode\n"); 335 327 } 336 328 } 337 329 ··· 462 472 /* ISM transitions must be called with mutex acquired */ 463 473 ASSERT(mutex_is_locked(&dm->dc_lock)); 464 474 475 + /* ISM should not run after dc is destroyed */ 476 + ASSERT(dm->dc); 477 + 465 478 if (!acrtc_state) { 466 479 trace_amdgpu_dm_ism_event(acrtc->crtc_id, "NO_STATE", 467 480 "NO_STATE", "N/A"); ··· 537 544 struct drm_crtc *crtc; 538 545 struct amdgpu_crtc *acrtc; 539 546 struct amdgpu_dm_ism *ism; 547 + 548 + ASSERT(mutex_is_locked(&dm->dc_lock)); 540 549 541 550 drm_for_each_crtc(crtc, dm->ddev) { 542 551 acrtc = to_amdgpu_crtc(crtc);
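The two new asserts encode the ISM caller contract: transitions run with dm->dc_lock held and must not outlive dc teardown. A hypothetical caller shape consistent with those asserts (the entry-point name here is illustrative, not the driver's):

    mutex_lock(&dm->dc_lock);
    if (dm->dc)                             /* ISM must not run after dc is destroyed */
            amdgpu_dm_ism_transition(dm);   /* illustrative name */
    mutex_unlock(&dm->dc_lock);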
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 474 474 475 475 if (aconnector->dc_sink) { 476 476 amdgpu_dm_update_freesync_caps( 477 - connector, aconnector->drm_edid); 477 + connector, aconnector->drm_edid, true); 478 478 479 479 #if defined(CONFIG_DRM_AMD_DC_FP) 480 480 if (!validate_dsc_caps_on_connector(aconnector))
-78
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
··· 611 611 612 612 }; 613 613 614 - static struct wm_table ddr5_wm_table = { 615 - .entries = { 616 - { 617 - .wm_inst = WM_A, 618 - .wm_type = WM_TYPE_PSTATE_CHG, 619 - .pstate_latency_us = 11.72, 620 - .sr_exit_time_us = 28.0, 621 - .sr_enter_plus_exit_time_us = 30.0, 622 - .valid = true, 623 - }, 624 - { 625 - .wm_inst = WM_B, 626 - .wm_type = WM_TYPE_PSTATE_CHG, 627 - .pstate_latency_us = 11.72, 628 - .sr_exit_time_us = 28.0, 629 - .sr_enter_plus_exit_time_us = 30.0, 630 - .valid = true, 631 - }, 632 - { 633 - .wm_inst = WM_C, 634 - .wm_type = WM_TYPE_PSTATE_CHG, 635 - .pstate_latency_us = 11.72, 636 - .sr_exit_time_us = 28.0, 637 - .sr_enter_plus_exit_time_us = 30.0, 638 - .valid = true, 639 - }, 640 - { 641 - .wm_inst = WM_D, 642 - .wm_type = WM_TYPE_PSTATE_CHG, 643 - .pstate_latency_us = 11.72, 644 - .sr_exit_time_us = 28.0, 645 - .sr_enter_plus_exit_time_us = 30.0, 646 - .valid = true, 647 - }, 648 - } 649 - }; 650 - 651 - static struct wm_table lpddr5_wm_table = { 652 - .entries = { 653 - { 654 - .wm_inst = WM_A, 655 - .wm_type = WM_TYPE_PSTATE_CHG, 656 - .pstate_latency_us = 11.65333, 657 - .sr_exit_time_us = 28.0, 658 - .sr_enter_plus_exit_time_us = 30.0, 659 - .valid = true, 660 - }, 661 - { 662 - .wm_inst = WM_B, 663 - .wm_type = WM_TYPE_PSTATE_CHG, 664 - .pstate_latency_us = 11.65333, 665 - .sr_exit_time_us = 28.0, 666 - .sr_enter_plus_exit_time_us = 30.0, 667 - .valid = true, 668 - }, 669 - { 670 - .wm_inst = WM_C, 671 - .wm_type = WM_TYPE_PSTATE_CHG, 672 - .pstate_latency_us = 11.65333, 673 - .sr_exit_time_us = 28.0, 674 - .sr_enter_plus_exit_time_us = 30.0, 675 - .valid = true, 676 - }, 677 - { 678 - .wm_inst = WM_D, 679 - .wm_type = WM_TYPE_PSTATE_CHG, 680 - .pstate_latency_us = 11.65333, 681 - .sr_exit_time_us = 28.0, 682 - .sr_enter_plus_exit_time_us = 30.0, 683 - .valid = true, 684 - }, 685 - } 686 - }; 687 - 688 614 struct dcn42_ss_info_table dcn42_ss_info_table = { 689 615 .ss_divider = 1000, 690 616 .ss_percentage = {0, 0, 375, 375, 375} ··· 1067 1141 if (ctx->dc_bios->integrated_info) { 1068 1142 clk_mgr->base.base.dentist_vco_freq_khz = ctx->dc_bios->integrated_info->dentist_vco_freq; 1069 1143 1070 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 1071 - dcn42_bw_params.wm_table = lpddr5_wm_table; 1072 - else 1073 - dcn42_bw_params.wm_table = ddr5_wm_table; 1074 1144 dcn42_bw_params.vram_type = ctx->dc_bios->integrated_info->memory_type; 1075 1145 dcn42_bw_params.dram_channel_width_bytes = ctx->dc_bios->integrated_info->memory_type == 0x22 ? 8 : 4; 1076 1146 dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;
-2
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 23 23 * 24 24 */ 25 25 26 - #include <linux/array_size.h> 27 - 28 26 #include "dm_services.h" 29 27 #include "core_types.h" 30 28 #include "timing_generator.h"
+6
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 5069 5069 } 5070 5070 } 5071 5071 5072 + if (stream->ctx->dce_version < DCE_VERSION_8_0 && 5073 + stream->timing.display_color_depth >= COLOR_DEPTH_101010) { 5074 + /* DCE 6.x doesn't support 10-bit truncation or dither options. */ 5075 + option = DITHER_OPTION_DISABLE; 5076 + } 5077 + 5072 5078 if (option == DITHER_OPTION_DISABLE) 5073 5079 return; 5074 5080
+8 -5
drivers/gpu/drm/amd/display/dc/dc.h
··· 63 63 struct dcn_optc_reg_state; 64 64 struct dcn_dccg_reg_state; 65 65 66 - #define DC_VER "3.2.376" 66 + #define DC_VER "3.2.378" 67 67 68 68 /** 69 69 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC ··· 562 562 bool frame_update_cmd_version2; 563 563 struct spl_sharpness_range dcn_sharpness_range; 564 564 struct spl_sharpness_range dcn_override_sharpness_range; 565 + bool no_native422_support; 565 566 }; 566 567 567 568 enum visual_confirm { ··· 987 986 * causing an issue or not. 988 987 */ 989 988 struct dc_debug_options { 990 - bool native422_support; 991 989 bool disable_dsc; 992 990 enum visual_confirm visual_confirm; 993 991 int visual_confirm_rect_height; ··· 1061 1061 bool hdmi20_disable; 1062 1062 bool skip_detection_link_training; 1063 1063 uint32_t edid_read_retry_times; 1064 - unsigned int force_odm_combine; //bit vector based on otg inst 1065 - unsigned int seamless_boot_odm_combine; 1066 - unsigned int force_odm_combine_4to1; //bit vector based on otg inst 1064 + 1065 + uint8_t force_odm_combine; //bit vector based on otg inst 1066 + uint8_t seamless_boot_odm_combine; 1067 + uint8_t force_odm_combine_4to1; //bit vector based on otg inst 1068 + 1067 1069 int minimum_z8_residency_time; 1068 1070 int minimum_z10_residency_time; 1069 1071 bool disable_z9_mpc; ··· 2727 2725 struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; 2728 2726 bool converter_disable_audio; 2729 2727 2728 + struct mccs_caps mccs_caps; 2730 2729 struct scdc_caps scdc_caps; 2731 2730 struct dc_sink_dsc_caps dsc_caps; 2732 2731 struct dc_sink_fec_caps fec_caps;
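The three force_odm_combine fields are documented as bit vectors indexed by OTG instance, so eight bits comfortably cover the handful of OTGs any DCN part exposes and uint8_t is a safe shrink. Testing one OTG's override would look along these lines (sketch; otg_inst is illustrative):

    bool force_2to1 = dc->debug.force_odm_combine & BIT(otg_inst);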
+1
drivers/gpu/drm/amd/display/dc/dc_dsc.h
··· 52 52 uint32_t max_target_bpp; 53 53 uint32_t min_target_bpp; 54 54 bool enable_dsc_when_not_needed; 55 + bool ycbcr422_simple; 55 56 }; 56 57 57 58 struct dc_dsc_config_options {
+2 -2
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 162 162 #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) 163 163 164 164 struct dc_stream_debug_options { 165 - char force_odm_combine_segments; 165 + uint8_t force_odm_combine_segments; 166 166 /* 167 167 * When force_odm_combine_segments is non zero, allow dc to 168 168 * temporarily transition to ODM bypass when minimal transition state 169 169 * is required to prevent visual glitches showing on the screen 170 170 */ 171 - char allow_transition_for_forced_odm; 171 + uint8_t allow_transition_for_forced_odm; 172 172 }; 173 173 174 174 #define LUMINANCE_DATA_TABLE_SIZE 10
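Plain char signedness is implementation-defined in C; recent kernels build with -funsigned-char everywhere, but spelling these debug fields as uint8_t makes the unsigned intent explicit, matching the dc.h change above. The classic pitfall the explicit type avoids, in miniature (helper name illustrative):

    char segments = 0x80;       /* -128 wherever char is signed */
    if (segments > 0)           /* false there; always true for uint8_t 0x80 */
            apply_forced_odm(); /* illustrative helper */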
+6
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 205 205 uint32_t audio_latency; 206 206 uint32_t video_latency; 207 207 208 + unsigned char freesync_vcp_code; 209 + 208 210 uint8_t qs_bit; 209 211 uint8_t qy_bit; 210 212 ··· 1313 1311 struct rio { 1314 1312 bool disable_rio; 1315 1313 } rio; 1314 + }; 1315 + 1316 + struct mccs_caps { 1317 + bool freesync_supported; 1316 1318 }; 1317 1319 1318 1320 #define MAX_SINKS_PER_LINK 4
-2
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
··· 23 23 * 24 24 */ 25 25 26 - #include <linux/array_size.h> 27 - 28 26 #include "dm_services.h" 29 27 30 28
+10
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 181 181 struct dc_link *link, 182 182 struct dc_sink *sink); 183 183 184 + void dm_helpers_read_mccs_caps( 185 + struct dc_context *ctx, 186 + struct dc_link *link, 187 + struct dc_sink *sink); 188 + 189 + void dm_helpers_mccs_vcp_set( 190 + struct dc_context *ctx, 191 + struct dc_link *link, 192 + struct dc_sink *sink); 193 + 184 194 bool dm_helpers_dp_handle_test_pattern_request( 185 195 struct dc_context *ctx, 186 196 const struct dc_link *link,
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
··· 2399 2399 return low_pstate_lvl; 2400 2400 } 2401 2401 2402 - void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 2402 + void dcn21_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 2403 2403 { 2404 2404 struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; 2405 2405 struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
··· 78 78 enum dc_validate_mode validate_mode); 79 79 bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum 80 80 dc_validate_mode, display_e2e_pipe_params_st *pipes); 81 - void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); 81 + void dcn21_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); 82 82 83 83 void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params); 84 84
+3 -3
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
··· 587 587 context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det; 588 588 } 589 589 590 - void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 590 + void dcn31_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 591 591 { 592 592 struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; 593 593 struct clk_limit_table *clk_table = &bw_params->clk_table; ··· 665 665 dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31); 666 666 } 667 667 668 - void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 668 + void dcn315_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 669 669 { 670 670 struct clk_limit_table *clk_table = &bw_params->clk_table; 671 671 int i, max_dispclk_mhz = 0, max_dppclk_mhz = 0; ··· 726 726 dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315); 727 727 } 728 728 729 - void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 729 + void dcn316_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 730 730 { 731 731 struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits; 732 732 struct clk_limit_table *clk_table = &bw_params->clk_table;
+3 -3
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
··· 44 44 int pipe_cnt, 45 45 int vlevel); 46 46 47 - void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); 48 - void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); 49 - void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); 47 + void dcn31_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); 48 + void dcn315_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); 49 + void dcn316_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); 50 50 int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc); 51 51 int dcn_get_approx_det_segs_required_for_pstate( 52 52 struct _vcs_dpi_soc_bounding_box_st *soc,
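The _fpu suffix is DC's existing convention for functions that do floating-point math and therefore must be called inside a DC_FP_START()/DC_FP_END() critical section (kernel_fpu_begin()/kernel_fpu_end() underneath); these renames presumably make that requirement visible at the call sites behind the FP boot warnings this pull mentions. Expected call shape:

    DC_FP_START();
    dcn31_update_bw_bounding_box_fpu(dc, bw_params);
    DC_FP_END();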
-37
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 1610 1610 return false; 1611 1611 } 1612 1612 1613 - static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) 1614 - { 1615 - struct dc_crtc_timing patched_crtc_timing; 1616 - uint32_t asic_blank_end = 0; 1617 - uint32_t asic_blank_start = 0; 1618 - uint32_t newVstartup = 0; 1619 - 1620 - patched_crtc_timing = *dc_crtc_timing; 1621 - 1622 - if (patched_crtc_timing.flags.INTERLACE == 1) { 1623 - if (patched_crtc_timing.v_front_porch < 2) 1624 - patched_crtc_timing.v_front_porch = 2; 1625 - } else { 1626 - if (patched_crtc_timing.v_front_porch < 1) 1627 - patched_crtc_timing.v_front_porch = 1; 1628 - } 1629 - 1630 - /* blank_start = frame end - front porch */ 1631 - asic_blank_start = patched_crtc_timing.v_total - 1632 - patched_crtc_timing.v_front_porch; 1633 - 1634 - /* blank_end = blank_start - active */ 1635 - asic_blank_end = asic_blank_start - 1636 - patched_crtc_timing.v_border_bottom - 1637 - patched_crtc_timing.v_addressable - 1638 - patched_crtc_timing.v_border_top; 1639 - 1640 - newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); 1641 - 1642 - *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); 1643 - } 1644 - 1645 1613 static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, 1646 1614 display_e2e_pipe_params_st *pipes, 1647 1615 int pipe_cnt, int vlevel) ··· 1723 1755 context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; 1724 1756 } 1725 1757 } 1726 - 1727 - if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid) 1728 - dcn20_adjust_freesync_v_startup( 1729 - &context->res_ctx.pipe_ctx[i].stream->timing, 1730 - &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start); 1731 1758 1732 1759 pipe_idx++; 1733 1760 }
+1
drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
··· 100 100 DML21 += src/dml2_pmo/dml2_pmo_dcn3.o 101 101 DML21 += src/dml2_pmo/dml2_pmo_factory.o 102 102 DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o 103 + DML21 += src/dml2_pmo/dml2_pmo_dcn42.o 103 104 DML21 += src/dml2_standalone_libraries/lib_float_math.o 104 105 DML21 += dml21_translation_helper.o 105 106 DML21 += dml21_wrapper.o
+17
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
··· 1812 1812 dml_uint_t WritebackLineBufferSize, 1813 1813 dml_float_t DISPCLKDPPCLKVCOSpeed) 1814 1814 { 1815 + (void)WritebackPixelFormat; 1816 + (void)WritebackVRatio; 1815 1817 dml_float_t DISPCLK_H, DISPCLK_V, DISPCLK_HB; 1816 1818 1817 1819 DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 1832 1830 dml_uint_t WritebackSourceHeight, 1833 1831 dml_uint_t HTotal) 1834 1832 { 1833 + (void)WritebackPixelFormat; 1834 + (void)WritebackHRatio; 1835 1835 dml_float_t CalculateWriteBackDelay; 1836 1836 dml_float_t Line_length; 1837 1837 dml_float_t Output_lines_last_notclamped; ··· 1981 1977 dml_float_t *final_flip_bw, 1982 1978 dml_bool_t *ImmediateFlipSupportedForPipe) 1983 1979 { 1980 + (void)HostVMMinPageSize; 1984 1981 dml_float_t min_row_time = 0.0; 1985 1982 dml_uint_t HostVMDynamicLevelsTrips = 0; 1986 1983 dml_float_t TimeForFetchingMetaPTEImmediateFlip = 0; ··· 2123 2118 dml_uint_t *IndependentBlockLuma, 2124 2119 dml_uint_t *IndependentBlockChroma) 2125 2120 { 2121 + (void)SurfaceWidthChroma; 2122 + (void)SurfaceHeightChroma; 2123 + (void)TilingFormat; 2124 + (void)BytePerPixelDETY; 2125 + (void)BytePerPixelDETC; 2126 2126 dml_uint_t DETBufferSizeForDCC = nomDETInKByte * 1024; 2127 2127 2128 2128 dml_uint_t yuv420; ··· 2499 2489 dml_uint_t *DPDE0BytesFrame, 2500 2490 dml_uint_t *MetaPTEBytesFrame) 2501 2491 { 2492 + (void)SourcePixelFormat; 2502 2493 dml_uint_t MPDEBytesFrame; 2503 2494 dml_uint_t DCCMetaSurfaceBytes; 2504 2495 dml_uint_t ExtraDPDEBytesFrame; ··· 3673 3662 dml_float_t TimePerVMRequestVBlank[], 3674 3663 dml_float_t TimePerVMRequestFlip[]) 3675 3664 { 3665 + (void)dpte_row_width_luma_ub; 3666 + (void)dpte_row_width_chroma_ub; 3676 3667 dml_uint_t num_group_per_lower_vm_stage; 3677 3668 dml_uint_t num_req_per_lower_vm_stage; 3678 3669 ··· 3775 3762 static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scratch, 3776 3763 struct CalculateStutterEfficiency_params_st *p) 3777 3764 { 3765 + (void)scratch; 3778 3766 dml_float_t DETBufferingTimeY = 0; 3779 3767 dml_float_t SwathWidthYCriticalSurface = 0; 3780 3768 dml_float_t SwathHeightYCriticalSurface = 0; ··· 4099 4085 static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st *scratch, 4100 4086 struct CalculateSwathAndDETConfiguration_params_st *p) 4101 4087 { 4088 + (void)scratch; 4102 4089 dml_uint_t MaximumSwathHeightY[__DML_NUM_PLANES__]; 4103 4090 dml_uint_t MaximumSwathHeightC[__DML_NUM_PLANES__]; 4104 4091 dml_uint_t RoundedUpMaxSwathSizeBytesY[__DML_NUM_PLANES__]; ··· 4346 4331 dml_uint_t swath_width_luma_ub[], // per-pipe 4347 4332 dml_uint_t swath_width_chroma_ub[]) // per-pipe 4348 4333 { 4334 + (void)BytePerPixY; 4349 4335 enum dml_odm_mode MainSurfaceODMMode; 4350 4336 dml_uint_t surface_width_ub_l; 4351 4337 dml_uint_t surface_height_ub_l; ··· 5045 5029 dml_uint_t *nomDETInKByte, 5046 5030 dml_uint_t *MinCompressedBufferSizeInKByte) 5047 5031 { 5032 + (void)ROBBufferSizeInKByte; 5048 5033 *MaxTotalDETInKByte = ConfigReturnBufferSizeInKByte - ConfigReturnBufferSegmentSizeInKByte; 5049 5034 *nomDETInKByte = (dml_uint_t)(dml_floor((dml_float_t) *MaxTotalDETInKByte / (dml_float_t) MaxNumDPP, ConfigReturnBufferSegmentSizeInKByte)); 5050 5035 *MinCompressedBufferSizeInKByte = ConfigReturnBufferSizeInKByte - *MaxTotalDETInKByte;
+14
drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
··· 178 178 179 179 dml_float_t dml_round(dml_float_t val, dml_bool_t bankers_rounding) 180 180 { 181 + (void)bankers_rounding; 181 182 // if (bankers_rounding) 182 183 // return (dml_float_t) lrint(val); 183 184 // else { ··· 218 217 219 218 void dml_print_data_rq_regs_st(const dml_display_plane_rq_regs_st *rq_regs) 220 219 { 220 + (void)rq_regs; 221 221 dml_print("DML: ===================================== \n"); 222 222 dml_print("DML: DISPLAY_PLANE_RQ_REGS_ST\n"); 223 223 dml_print("DML: chunk_size = 0x%x\n", rq_regs->chunk_size); ··· 250 248 251 249 void dml_print_dlg_regs_st(const dml_display_dlg_regs_st *dlg_regs) 252 250 { 251 + (void)dlg_regs; 253 252 dml_print("DML: ===================================== \n"); 254 253 dml_print("DML: DISPLAY_DLG_REGS_ST \n"); 255 254 dml_print("DML: refcyc_h_blank_end = 0x%x\n", dlg_regs->refcyc_h_blank_end); ··· 302 299 303 300 void dml_print_ttu_regs_st(const dml_display_ttu_regs_st *ttu_regs) 304 301 { 302 + (void)ttu_regs; 305 303 dml_print("DML: ===================================== \n"); 306 304 dml_print("DML: DISPLAY_TTU_REGS_ST \n"); 307 305 dml_print("DML: qos_level_low_wm = 0x%x\n", ttu_regs->qos_level_low_wm); ··· 330 326 331 327 void dml_print_dml_policy(const struct dml_mode_eval_policy_st *policy) 332 328 { 329 + (void)policy; 333 330 dml_print("DML: ===================================== \n"); 334 331 dml_print("DML: DML_MODE_EVAL_POLICY_ST\n"); 335 332 dml_print("DML: Policy: UseUnboundedRequesting = 0x%x\n", policy->UseUnboundedRequesting); ··· 358 353 359 354 void dml_print_mode_support(struct display_mode_lib_st *mode_lib, dml_uint_t j) 360 355 { 356 + (void)j; 357 + (void)mode_lib; 361 358 dml_print("DML: MODE SUPPORT: ===============================================\n"); 362 359 dml_print("DML: MODE SUPPORT: Voltage State %d\n", j); 363 360 dml_print("DML: MODE SUPPORT: Mode Supported : %s\n", mode_lib->ms.support.ModeSupport[j] == true ? 
"Supported" : "NOT Supported"); ··· 533 526 534 527 void dml_print_dml_display_cfg_timing(const struct dml_timing_cfg_st *timing, dml_uint_t num_plane) 535 528 { 529 + (void)timing; 536 530 for (dml_uint_t i = 0; i < num_plane; i++) { 537 531 dml_print("DML: timing_cfg: plane=%d, HTotal = %d\n", i, timing->HTotal[i]); 538 532 dml_print("DML: timing_cfg: plane=%d, VTotal = %d\n", i, timing->VTotal[i]); ··· 550 542 551 543 void dml_print_dml_display_cfg_plane(const struct dml_plane_cfg_st *plane, dml_uint_t num_plane) 552 544 { 545 + (void)plane; 553 546 dml_print("DML: plane_cfg: num_plane = %d\n", num_plane); 554 547 dml_print("DML: plane_cfg: GPUVMEnable = %d\n", plane->GPUVMEnable); 555 548 dml_print("DML: plane_cfg: HostVMEnable = %d\n", plane->HostVMEnable); ··· 599 590 600 591 void dml_print_dml_display_cfg_surface(const struct dml_surface_cfg_st *surface, dml_uint_t num_plane) 601 592 { 593 + (void)surface; 602 594 for (dml_uint_t i = 0; i < num_plane; i++) { 603 595 dml_print("DML: surface_cfg: plane=%d, PitchY = %d\n", i, surface->PitchY[i]); 604 596 dml_print("DML: surface_cfg: plane=%d, SurfaceWidthY = %d\n", i, surface->SurfaceWidthY[i]); ··· 619 609 620 610 void dml_print_dml_display_cfg_hw_resource(const struct dml_hw_resource_st *hw, dml_uint_t num_plane) 621 611 { 612 + (void)hw; 622 613 for (dml_uint_t i = 0; i < num_plane; i++) { 623 614 dml_print("DML: hw_resource: plane=%d, ODMMode = %d\n", i, hw->ODMMode[i]); 624 615 dml_print("DML: hw_resource: plane=%d, DPPPerSurface = %d\n", i, hw->DPPPerSurface[i]); ··· 631 620 632 621 __DML_DLL_EXPORT__ void dml_print_soc_state_bounding_box(const struct soc_state_bounding_box_st *state) 633 622 { 623 + (void)state; 634 624 dml_print("DML: state_bbox: socclk_mhz = %f\n", state->socclk_mhz); 635 625 dml_print("DML: state_bbox: dscclk_mhz = %f\n", state->dscclk_mhz); 636 626 dml_print("DML: state_bbox: phyclk_mhz = %f\n", state->phyclk_mhz); ··· 661 649 662 650 __DML_DLL_EXPORT__ void dml_print_soc_bounding_box(const struct soc_bounding_box_st *soc) 663 651 { 652 + (void)soc; 664 653 dml_print("DML: soc_bbox: dprefclk_mhz = %f\n", soc->dprefclk_mhz); 665 654 dml_print("DML: soc_bbox: xtalclk_mhz = %f\n", soc->xtalclk_mhz); 666 655 dml_print("DML: soc_bbox: pcierefclk_mhz = %f\n", soc->pcierefclk_mhz); ··· 699 686 700 687 __DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg) 701 688 { 689 + (void)clk_cfg; 702 690 dml_print("DML: clk_cfg: 0-use_required, 1-use pipe.clks_cfg, 2-use state bbox\n"); 703 691 dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option); 704 692 dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);
+18 -6
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
··· 389 389 surface->tiling = dml2_sw_64kb_2d; 390 390 } 391 391 392 - static void populate_dml21_dummy_plane_cfg(struct dml2_plane_parameters *plane, const struct dc_stream_state *stream) 392 + static void populate_dml21_dummy_plane_cfg(struct dml2_plane_parameters *plane, 393 + const struct dc_stream_state *stream, 394 + const struct dml2_soc_bb *soc_bb) 393 395 { 394 396 unsigned int width, height; 395 397 ··· 435 433 plane->pixel_format = dml2_444_32; 436 434 437 435 plane->dynamic_meta_data.enable = false; 438 - plane->overrides.gpuvm_min_page_size_kbytes = 256; 436 + plane->overrides.gpuvm_min_page_size_kbytes = soc_bb->gpuvm_min_page_size_kbytes; 437 + plane->overrides.hostvm_min_page_size_kbytes = soc_bb->hostvm_min_page_size_kbytes; 439 438 } 440 439 441 440 static void populate_dml21_surface_config_from_plane_state( ··· 444 441 struct dml2_surface_cfg *surface, 445 442 const struct dc_plane_state *plane_state) 446 443 { 444 + (void)in_dc; 447 445 surface->plane0.pitch = plane_state->plane_size.surface_pitch; 448 446 surface->plane1.pitch = plane_state->plane_size.chroma_pitch; 449 447 surface->plane0.height = plane_state->plane_size.surface_size.height; ··· 507 503 508 504 static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dml_ctx, 509 505 struct dml2_plane_parameters *plane, const struct dc_plane_state *plane_state, 510 - const struct dc_state *context, unsigned int stream_index) 506 + const struct dc_state *context, unsigned int stream_index, const struct dml2_soc_bb *soc_bb) 511 507 { 512 508 const struct scaler_data *scaler_data = get_scaler_data_for_plane(dml_ctx, plane_state, context); 513 509 struct dc_stream_state *stream = context->streams[stream_index]; ··· 651 647 plane->composition.rotation_angle = (enum dml2_rotation_angle) plane_state->rotation; 652 648 plane->stream_index = stream_index; 653 649 654 - plane->overrides.gpuvm_min_page_size_kbytes = 256; 650 + plane->overrides.gpuvm_min_page_size_kbytes = soc_bb->gpuvm_min_page_size_kbytes; 651 + plane->overrides.hostvm_min_page_size_kbytes = soc_bb->hostvm_min_page_size_kbytes; 655 652 656 653 plane->immediate_flip = plane_state->flip_immediate; 657 654 ··· 790 785 if (context->stream_status[stream_index].plane_count == 0) { 791 786 disp_cfg_plane_location = dml_dispcfg->num_planes++; 792 787 populate_dml21_dummy_surface_cfg(&dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->streams[stream_index]); 793 - populate_dml21_dummy_plane_cfg(&dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->streams[stream_index]); 788 + populate_dml21_dummy_plane_cfg( 789 + &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], 790 + context->streams[stream_index], &dml_ctx->v21.dml_init.soc_bb); 794 791 dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; 795 792 } else { 796 793 for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) { ··· 804 797 ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); 805 798 806 799 populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]); 807 - populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index); 800 + 
populate_dml21_plane_config_from_plane_state( 801 + dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], 802 + context->stream_status[stream_index].plane_states[plane_index], 803 + context, stream_index, &dml_ctx->v21.dml_init.soc_bb); 808 804 dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location; 809 805 810 806 if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location])) ··· 883 873 884 874 void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx) 885 875 { 876 + (void)in_dc; 886 877 const struct dml2_display_cfg_programming *programming = in_ctx->v21.mode_programming.programming; 887 878 888 879 unsigned int wm_index; ··· 918 907 struct dml2_per_plane_programming *pln_prog, 919 908 struct dml2_pipe_configuration_descriptor *mcache_pipe_config) 920 909 { 910 + (void)context; 921 911 mcache_pipe_config->plane0.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x; 922 912 mcache_pipe_config->plane0.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width; 923 913
+2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
··· 88 88 struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__], 89 89 int dml_plane_idx) 90 90 { 91 + (void)in_dc; 91 92 unsigned int dml_stream_index; 92 93 unsigned int main_stream_id; 93 94 unsigned int dc_plane_index; ··· 283 282 struct dc_plane_state *main_plane, 284 283 struct dml2_per_plane_programming *plane_programming) 285 284 { 285 + (void)plane_programming; 286 286 struct dc_plane_state *phantom_plane; 287 287 288 288 phantom_plane = dml_ctx->config.svp_pstate.callbacks.create_phantom_plane(dc, context, main_plane);
+2 -2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
··· 58 58 59 59 void dml21_destroy(struct dml2_context *dml2) 60 60 { 61 - vfree(dml2->v21.dml_init.dml2_instance); 62 - vfree(dml2->v21.mode_programming.programming); 61 + DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2->v21.dml_init.dml2_instance)); 62 + DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2->v21.mode_programming.programming)); 63 63 } 64 64 65 65 void dml21_copy(struct dml2_context *dst_dml_ctx,
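vfree() may sleep, while DC's FP sections disable preemption (DC_FP_START() maps to kernel_fpu_begin() on x86), so freeing these large DML allocations from such a context trips the "sleeping function called from invalid context" check; the DC_RUN_WITH_PREEMPTION_ENABLED() wrapper presumably steps out of that state first. The failure mode being avoided, in sketch form:

    DC_FP_START();          /* preemption disabled on x86 */
    vfree(big_buffer);      /* may sleep -> scheduling-while-atomic splat */
    DC_FP_END();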
+2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c
··· 51 51 static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, 52 52 struct dml2_context *in_ctx, unsigned int pipe_cnt) 53 53 { 54 + (void)out_new_hw_state; 55 + (void)pipe_cnt; 54 56 unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0; 55 57 struct dml2_per_plane_programming *pln_prog = NULL; 56 58 struct dml2_per_stream_programming *stream_prog = NULL;
+19 -6
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h
··· 68 68 .qos_type = dml2_qos_param_type_dcn3, 69 69 }; 70 70 71 + /* Default SOC bounding box for DCN42 based on LPDDR5/LPCAMM2 latencies*/ 71 72 static const struct dml2_soc_bb dml2_socbb_dcn42 = { 72 73 .clk_table = { 73 74 .wck_ratio = { ··· 186 185 .qos_type = dml2_qos_param_type_dcn3, 187 186 }, 188 187 188 + /* DCN42 params for LPDDR5/LPCAMM2 */ 189 189 .power_management_parameters = { 190 - .dram_clk_change_blackout_us = 29, 190 + .dram_clk_change_blackout_us = 36, 191 191 .fclk_change_blackout_us = 0, 192 192 .g7_ppt_blackout_us = 0, 193 - .stutter_enter_plus_exit_latency_us = 11, 194 - .stutter_exit_latency_us = 9, 193 + .stutter_enter_plus_exit_latency_us = 14, 194 + .stutter_exit_latency_us = 12, 195 195 .z8_stutter_enter_plus_exit_latency_us = 300, 196 196 .z8_stutter_exit_latency_us = 200, 197 197 }, ··· 205 203 .xtalclk_mhz = 24, 206 204 .pcie_refclk_mhz = 100, 207 205 .dchub_refclk_mhz = 50, 208 - .mall_allocated_for_dcn_mbytes = 64, 206 + .mall_allocated_for_dcn_mbytes = 0, 209 207 .max_outstanding_reqs = 256, 210 208 .fabric_datapath_to_dcn_data_return_bytes = 32, 211 209 .return_bus_width_bytes = 64, 212 210 .hostvm_min_page_size_kbytes = 4, 213 - .gpuvm_min_page_size_kbytes = 256, 211 + .gpuvm_min_page_size_kbytes = 4, 214 212 .gpuvm_max_page_table_levels = 1, 215 213 .hostvm_max_non_cached_page_table_levels = 2, 216 214 .phy_downspread_percent = 0.38, ··· 222 220 .mcache_size_bytes = 2048, 223 221 .mcache_line_size_bytes = 32, 224 222 .max_fclk_for_uclk_dpm_khz = 2200 * 1000, 223 + }; 224 + 225 + /* DCN42 params for DDR5 */ 226 + struct dml2_soc_power_management_parameters dcn42_ddr5_power_management_parameters = { 227 + .dram_clk_change_blackout_us = 36, 228 + .fclk_change_blackout_us = 0, 229 + .g7_ppt_blackout_us = 0, 230 + .stutter_enter_plus_exit_latency_us = 23.5, 231 + .stutter_exit_latency_us = 21.5, 232 + .z8_stutter_enter_plus_exit_latency_us = 300, 233 + .z8_stutter_exit_latency_us = 200, 225 234 }; 226 235 227 236 static const struct dml2_ip_capabilities dml2_dcn42_max_ip_caps = { ··· 247 234 .config_return_buffer_segment_size_in_kbytes = 64, 248 235 .meta_fifo_size_in_kentries = 32, 249 236 .compressed_buffer_segment_size_in_kbytes = 64, 250 - .cursor_buffer_size = 24, 237 + .cursor_buffer_size = 42, 251 238 .max_flip_time_us = 110, 252 239 .max_flip_time_lines = 50, 253 240 .hostvm_mode = 0,
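With the per-memory-type watermark tables dropped from dcn42_clk_mgr.c earlier in this diff, the DDR5 latencies now live here as a separate parameter block while the default bounding box keeps the LPDDR5/LPCAMM2 numbers. Hypothetical wiring (not the actual call site) for a DDR5 part would patch them over during init:

    /* Hypothetical: pick DDR5 blackout/stutter latencies by BIOS memory type. */
    if (ctx->dc_bios->integrated_info->memory_type != LpDdr5MemType)
            soc_bb->power_management_parameters =
                    dcn42_ddr5_power_management_parameters;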
-14
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
··· 26 26 dml2_gfx11_sw_64kb_r_x, 27 27 dml2_gfx11_sw_256kb_d_x, 28 28 dml2_gfx11_sw_256kb_r_x, 29 - 30 - dml2_sw_linear_256b, // GFX10 SW_LINEAR only accepts 256 byte aligned pitch 31 - dml2_gfx10_sw_64kb_r_x, 32 - dml2_gfx102_sw_64kb_s, 33 - dml2_gfx102_sw_64kb_s_t, 34 - dml2_gfx102_sw_64kb_s_x, 35 - dml2_gfx102_sw_64kb_r_x, 36 - 37 - dml2_linear_64elements, // GFX7 LINEAR_ALIGNED accepts pitch alignment of the maximum of 64 elements or 256 bytes 38 - dml2_gfx7_1d_thin, 39 - dml2_gfx7_2d_thin_gen_zero, 40 - dml2_gfx7_2d_thin_gen_one, 41 - dml2_gfx7_2d_thin_arlene, 42 - dml2_gfx7_2d_thin_anubis 43 29 }; 44 30 45 31 enum dml2_source_format_class {
+3 -2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
··· 135 135 .cursor_64bpp_support = true, 136 136 .dynamic_metadata_vm_enabled = false, 137 137 138 - .max_num_hdmi_frl_outputs = 0, 138 + .max_num_hdmi_frl_outputs = 1, 139 139 .max_num_dp2p0_outputs = 2, 140 140 .max_num_dp2p0_streams = 4, 141 141 .imall_supported = 1, ··· 155 155 .min_meta_chunk_size_bytes = 256, 156 156 157 157 .dchub_arb_to_ret_delay = 102, 158 - .hostvm_mode = 1, 158 + .hostvm_mode = 0, 159 159 }; 160 160 161 161 static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params) ··· 281 281 static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main, 282 282 const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream) 283 283 { 284 + (void)main_stream; 284 285 memcpy(phantom, main, sizeof(struct dml2_plane_parameters)); 285 286 286 287 phantom->stream_index = phantom_stream_index;
+29 -8
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
··· 840 840 unsigned int swath_width_luma_ub[], // per-pipe 841 841 unsigned int swath_width_chroma_ub[]) // per-pipe 842 842 { 843 + (void)BytePerPixY; 843 844 enum dml2_odm_mode MainSurfaceODMMode; 844 845 double odm_hactive_factor = 1.0; 845 846 unsigned int req_width_horz_y; ··· 1284 1283 // Output 1285 1284 unsigned int *RequiredSlots) 1286 1285 { 1286 + (void)DSCInputBitPerComponent; 1287 + (void)RequiredSlots; 1287 1288 double MaxLinkBPP; 1288 1289 unsigned int MinDSCBPP; 1289 1290 double MaxDSCBPP; ··· 1925 1922 double *dpte_row_bw, 1926 1923 double *meta_row_bw) 1927 1924 { 1925 + (void)use_one_row_for_frame; 1928 1926 if (!DCCEnable || !mrq_present) { 1929 1927 *meta_row_bw = 0; 1930 1928 } else if (dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha) { ··· 2024 2020 unsigned int *IndependentBlockLuma, 2025 2021 unsigned int *IndependentBlockChroma) 2026 2022 { 2023 + (void)SurfaceWidthChroma; 2024 + (void)SurfaceHeightChroma; 2025 + (void)TilingFormat; 2026 + (void)BytePerPixelDETY; 2027 + (void)BytePerPixelDETC; 2027 2028 unsigned int DETBufferSizeForDCC = nomDETInKByte * 1024; 2028 2029 2029 2030 unsigned int segment_order_horz_contiguous_luma; ··· 2279 2270 struct dml2_core_internal_scratch *scratch, 2280 2271 struct dml2_core_calcs_calculate_mcache_row_bytes_params *p) 2281 2272 { 2273 + (void)scratch; 2282 2274 unsigned int vmpg_bytes = 0; 2283 2275 unsigned int blk_bytes = 0; 2284 2276 float meta_per_mvmpg_per_channel = 0; ··· 3652 3642 unsigned int WritebackSourceHeight, 3653 3643 unsigned int HTotal) 3654 3644 { 3645 + (void)WritebackPixelFormat; 3646 + (void)WritebackHRatio; 3655 3647 double CalculateWriteBackDelay; 3656 3648 double Line_length; 3657 3649 double Output_lines_last_notclamped; ··· 3971 3959 double SurfaceRequiredDISPCLKWithODMCombineThreeToOne, 3972 3960 double SurfaceRequiredDISPCLKWithODMCombineFourToOne) 3973 3961 { 3962 + (void)SurfaceRequiredDISPCLKWithODMCombineFourToOne; 3974 3963 enum dml2_odm_mode MinimumRequiredODMModeForMaxDispClock; 3975 3964 enum dml2_odm_mode MinimumRequiredODMModeForMaxDSCHActive; 3976 3965 enum dml2_odm_mode MinimumRequiredODMModeForMax420HActive; ··· 4473 4460 unsigned int HTotal, 4474 4461 unsigned int WritebackLineBufferSize) 4475 4462 { 4463 + (void)WritebackPixelFormat; 4464 + (void)WritebackVRatio; 4476 4465 double DISPCLK_H, DISPCLK_V, DISPCLK_HB; 4477 4466 4478 4467 DISPCLK_H = PixelClock * math_ceil2((double)WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 4576 4561 unsigned int SurfaceSizeInMALL[], 4577 4562 bool *ExceededMALLSize) 4578 4563 { 4564 + (void)Read256BytesBlockWidthY; 4565 + (void)Read256BytesBlockWidthC; 4566 + (void)Read256BytesBlockHeightY; 4567 + (void)Read256BytesBlockHeightC; 4579 4568 unsigned int TotalSurfaceSizeInMALLForSS = 0; 4580 4569 unsigned int TotalSurfaceSizeInMALLForSubVP = 0; 4581 4570 unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024; ··· 4639 4620 struct dml2_core_internal_scratch *scratch, 4640 4621 struct dml2_core_calcs_calculate_tdlut_setting_params *p) 4641 4622 { 4623 + (void)scratch; 4642 4624 // locals 4643 4625 unsigned int tdlut_bpe = 8; 4644 4626 unsigned int tdlut_width; ··· 6523 6503 double *final_flip_bw, 6524 6504 bool *ImmediateFlipSupportedForPipe) 6525 6505 { 6506 + (void)use_one_row_for_frame_flip; 6526 6507 struct dml2_core_shared_CalculateFlipSchedule_locals *l = &s->CalculateFlipSchedule_locals; 6527 6508 6528 6509 l->dual_plane = dml_is_420(SourcePixelFormat) || SourcePixelFormat == dml2_rgbe_alpha; ··· 7402 7381 
s->tdlut_bytes_per_group, 7403 7382 s->HostVMInefficiencyFactor, 7404 7383 s->HostVMInefficiencyFactorPrefetch, 7405 - mode_lib->soc.hostvm_min_page_size_kbytes, 7384 + mode_lib->soc.hostvm_min_page_size_kbytes * 1024, 7406 7385 mode_lib->soc.qos_parameters.qos_type, 7407 7386 !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable), 7408 7387 mode_lib->soc.max_outstanding_reqs, ··· 7498 7477 CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; 7499 7478 CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; 7500 7479 CalculatePrefetchSchedule_params->VStartup = s->MaximumVStartup[k]; 7501 - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; 7480 + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024; 7502 7481 CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; 7503 7482 CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; 7504 7483 CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required; 7505 7484 CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes; 7506 - CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->ms.UrgLatency; 7507 7485 CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->ms.ExtraLatencyPrefetch; 7508 7486 CalculatePrefetchSchedule_params->TCalc = mode_lib->ms.TimeCalc; 7509 7487 CalculatePrefetchSchedule_params->vm_bytes = mode_lib->ms.vm_bytes[k]; ··· 8985 8965 CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes; 8986 8966 CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthY; 8987 8967 CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthC; 8988 - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; 8968 + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024; 8989 8969 CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes; 8990 8970 CalculateVMRowAndSwath_params->mrq_present = mode_lib->ip.dcn_mrq_present; 8991 8971 ··· 9988 9968 double TimePerVMRequestVBlank[], 9989 9969 double TimePerVMRequestFlip[]) 9990 9970 { 9971 + (void)dpte_row_width_luma_ub; 9972 + (void)dpte_row_width_chroma_ub; 9991 9973 unsigned int num_group_per_lower_vm_stage = 0; 9992 9974 unsigned int num_req_per_lower_vm_stage = 0; 9993 9975 unsigned int num_group_per_lower_vm_stage_flip; ··· 10777 10755 CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->soc.mall_allocated_for_dcn_mbytes; 10778 10756 CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->mp.SwathWidthY; 10779 10757 CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->mp.SwathWidthC; 10780 - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; 10758 + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024; 10781 10759 CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ip.dcc_meta_buffer_size_bytes; 10782 10760 CalculateVMRowAndSwath_params->mrq_present = 
mode_lib->ip.dcn_mrq_present; 10783 10761 ··· 10993 10971 s->tdlut_bytes_per_group, 10994 10972 s->HostVMInefficiencyFactor, 10995 10973 s->HostVMInefficiencyFactorPrefetch, 10996 - mode_lib->soc.hostvm_min_page_size_kbytes, 10974 + mode_lib->soc.hostvm_min_page_size_kbytes * 1024, 10997 10975 mode_lib->soc.qos_parameters.qos_type, 10998 10976 !(display_cfg->overrides.max_outstanding_when_urgent_expected_disable), 10999 10977 mode_lib->soc.max_outstanding_reqs, ··· 11286 11264 CalculatePrefetchSchedule_params->OutputFormat = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].output.output_format; 11287 11265 CalculatePrefetchSchedule_params->MaxInterDCNTileRepeaters = mode_lib->ip.max_inter_dcn_tile_repeaters; 11288 11266 CalculatePrefetchSchedule_params->VStartup = s->MaxVStartupLines[k]; 11289 - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes; 11267 + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->soc.hostvm_min_page_size_kbytes * 1024; 11290 11268 CalculatePrefetchSchedule_params->DynamicMetadataEnable = display_cfg->plane_descriptors[k].dynamic_meta_data.enable; 11291 11269 CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ip.dynamic_metadata_vm_enabled; 11292 11270 CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = display_cfg->plane_descriptors[k].dynamic_meta_data.lines_before_active_required; 11293 11271 CalculatePrefetchSchedule_params->DynamicMetadataTransmittedBytes = display_cfg->plane_descriptors[k].dynamic_meta_data.transmitted_bytes; 11294 - CalculatePrefetchSchedule_params->UrgentLatency = mode_lib->mp.UrgentLatency; 11295 11272 CalculatePrefetchSchedule_params->ExtraLatencyPrefetch = mode_lib->mp.ExtraLatencyPrefetch; 11296 11273 CalculatePrefetchSchedule_params->TCalc = mode_lib->mp.TCalc; 11297 11274 CalculatePrefetchSchedule_params->vm_bytes = mode_lib->mp.vm_bytes[k];
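A unit fix threads through the hunks above: soc.hostvm_min_page_size_kbytes is stored in KiB, while the DML parameters named HostVMMinPageSize are evidently consumed in bytes, hence the * 1024 at every call site. Concretely, the default 4 KiB host VM page becomes 4096 rather than 4:

    CalculatePrefetchSchedule_params->HostVMMinPageSize =
            mode_lib->soc.hostvm_min_page_size_kbytes * 1024;   /* 4 -> 4096 */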
+22 -18
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
··· 269 269 bool global_dram_clock_change_supported; 270 270 bool global_fclk_change_supported; 271 271 bool global_temp_read_or_ppt_supported; 272 + bool fclk_pstate_schedule_admissible; 273 + bool temp_read_pstate_schedule_admissible; 274 + bool ppt_pstate_schedule_admissible; 272 275 bool USRRetrainingSupport; 273 276 bool AvgBandwidthSupport; 274 277 bool UrgVactiveBandwidthSupport; ··· 1066 1063 bool dummy_boolean_array[2][DML2_MAX_PLANES]; 1067 1064 double dummy_single[3]; 1068 1065 double dummy_single_array[DML2_MAX_PLANES]; 1066 + double dummy_double_array[3][DML2_MAX_PLANES]; 1067 + enum dml2_pstate_method dummy_pstate_method_array[DML2_MAX_PLANES]; 1069 1068 struct dml2_core_internal_watermarks dummy_watermark; 1070 1069 double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; 1071 1070 double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; ··· 1726 1721 double ReturnBW; 1727 1722 bool SynchronizeTimings; 1728 1723 bool SynchronizeDRRDisplaysForUCLKPStateChange; 1729 - unsigned int *dpte_group_bytes; 1724 + const unsigned int *dpte_group_bytes; 1730 1725 struct dml2_core_internal_SOCParametersList mmSOCParameters; 1731 1726 unsigned int WritebackChunkSize; 1732 1727 double SOCCLK; 1733 1728 double DCFClkDeepSleep; 1734 - unsigned int *DETBufferSizeY; 1735 - unsigned int *DETBufferSizeC; 1736 - unsigned int *SwathHeightY; 1737 - unsigned int *SwathHeightC; 1738 - unsigned int *SwathWidthY; 1739 - unsigned int *SwathWidthC; 1740 - unsigned int *DPPPerSurface; 1741 - double *BytePerPixelDETY; 1742 - double *BytePerPixelDETC; 1743 - unsigned int *DSTXAfterScaler; 1744 - unsigned int *DSTYAfterScaler; 1729 + const unsigned int *DETBufferSizeY; 1730 + const unsigned int *DETBufferSizeC; 1731 + const unsigned int *SwathHeightY; 1732 + const unsigned int *SwathHeightC; 1733 + const unsigned int *SwathWidthY; 1734 + const unsigned int *SwathWidthC; 1735 + const unsigned int *DPPPerSurface; 1736 + const double *BytePerPixelDETY; 1737 + const double *BytePerPixelDETC; 1738 + const unsigned int *DSTXAfterScaler; 1739 + const unsigned int *DSTYAfterScaler; 1745 1740 bool UnboundedRequestEnabled; 1746 1741 unsigned int CompressedBufferSizeInkByte; 1747 1742 bool max_outstanding_when_urgent_expected; 1748 - unsigned int max_outstanding_requests; 1749 - unsigned int max_request_size_bytes; 1750 - unsigned int *meta_row_height_l; 1751 - unsigned int *meta_row_height_c; 1752 - enum dml2_pstate_method *uclk_pstate_switch_modes; 1743 + const unsigned int max_outstanding_requests; 1744 + const unsigned int max_request_size_bytes; 1745 + const unsigned int *meta_row_height_l; 1746 + const unsigned int *meta_row_height_c; 1747 + const enum dml2_pstate_method *uclk_pstate_switch_modes; 1753 1748 1754 1749 // Output 1755 1750 struct dml2_core_internal_watermarks *Watermark; ··· 1936 1931 bool DynamicMetadataVMEnabled; 1937 1932 unsigned int DynamicMetadataLinesBeforeActiveRequired; 1938 1933 unsigned int DynamicMetadataTransmittedBytes; 1939 - double UrgentLatency; 1940 1934 double ExtraLatencyPrefetch; 1941 1935 double TCalc; 1942 1936 unsigned int vm_bytes;
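Constifying these pointer members documents that the watermark/p-state calculation only reads the per-plane arrays; an accidental write through any of them now fails at compile time instead of corrupting mode-support state. Minimal illustration (names illustrative):

    static unsigned int first_det_size(const unsigned int *det_buffer_size_y)
    {
            /* det_buffer_size_y[0] = 0; would no longer compile */
            return det_buffer_size_y[0];
    }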
+11 -62
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
··· 428 428 429 429 unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) 430 430 { 431 - if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) { 432 - return dml2_core_utils_get_tile_block_size_bytes_backcompat(sw_mode, byte_per_pixel); 433 - } 434 - 435 431 if (sw_mode == dml2_sw_linear) 436 432 return 256; 437 433 else if (sw_mode == dml2_sw_256b_2d) ··· 458 462 }; 459 463 } 460 464 461 - unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) 462 - { 463 - if (sw_mode == dml2_sw_linear_256b) 464 - return 256; 465 - else if (sw_mode == dml2_gfx10_sw_64kb_r_x) 466 - return 65536; 467 - else if (sw_mode == dml2_gfx102_sw_64kb_s) 468 - return 65536; 469 - else if (sw_mode == dml2_gfx102_sw_64kb_s_t) 470 - return 65536; 471 - else if (sw_mode == dml2_gfx102_sw_64kb_s_x) 472 - return 65536; 473 - else if (sw_mode == dml2_gfx102_sw_64kb_r_x) 474 - return 65536; 475 - else if (sw_mode == dml2_linear_64elements) 476 - return 256; 477 - else if (sw_mode == dml2_gfx7_1d_thin) 478 - return 256; 479 - else if (sw_mode == dml2_gfx7_2d_thin_gen_zero) 480 - return (128 * 64 * byte_per_pixel); 481 - else if (sw_mode == dml2_gfx7_2d_thin_gen_one) 482 - return (128 * 128 * byte_per_pixel); 483 - else if (sw_mode == dml2_gfx7_2d_thin_arlene) 484 - return (64 * 32 * byte_per_pixel); 485 - else if (sw_mode == dml2_gfx7_2d_thin_anubis) 486 - return (128 * 128 * byte_per_pixel); 487 - else { 488 - DML_ASSERT(0); 489 - return 256; 490 - }; 491 - } 492 - 493 465 bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) 494 466 { 495 - if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) { 496 - return dml2_core_utils_get_segment_horizontal_contiguous_backcompat(sw_mode, byte_per_pixel); 497 - } else { 498 - return (byte_per_pixel != 2); 499 - } 500 - } 501 - 502 - bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel) 503 - { 504 - return !((byte_per_pixel == 4) && 505 - ((sw_mode == dml2_gfx10_sw_64kb_r_x) || (sw_mode == dml2_gfx102_sw_64kb_s) || (sw_mode == dml2_gfx102_sw_64kb_s_t) || (sw_mode == dml2_gfx102_sw_64kb_s_x))); 467 + return (byte_per_pixel != 2); 506 468 } 507 469 508 470 bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode) 509 471 { 510 - return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements); 472 + return sw_mode == dml2_sw_linear; 511 473 }; 512 474 513 475 ··· 498 544 sw_mode == dml2_gfx11_sw_256kb_d_x || 499 545 sw_mode == dml2_gfx11_sw_256kb_r_x) 500 546 version = 11; 501 - else if (sw_mode == dml2_sw_linear_256b || 502 - sw_mode == dml2_gfx10_sw_64kb_r_x || 503 - sw_mode == dml2_gfx102_sw_64kb_s || 504 - sw_mode == dml2_gfx102_sw_64kb_s_t || 505 - sw_mode == dml2_gfx102_sw_64kb_s_x || 506 - sw_mode == dml2_gfx102_sw_64kb_r_x) 507 - version = 10; 508 - else if (sw_mode == dml2_linear_64elements || 509 - sw_mode == dml2_gfx7_1d_thin || 510 - sw_mode == dml2_gfx7_2d_thin_gen_zero || 511 - sw_mode == dml2_gfx7_2d_thin_gen_one || 512 - sw_mode == dml2_gfx7_2d_thin_arlene || 513 - sw_mode == dml2_gfx7_2d_thin_anubis) 514 - version = 7; 515 547 else { 516 548 DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! 
val=%u\n", sw_mode); 517 549 DML_ASSERT(0); ··· 588 648 static void create_phantom_plane_from_main_plane(struct dml2_plane_parameters *phantom, const struct dml2_plane_parameters *main, 589 649 const struct dml2_stream_parameters *phantom_stream, int phantom_stream_index, const struct dml2_stream_parameters *main_stream) 590 650 { 651 + (void)main_stream; 591 652 memcpy(phantom, main, sizeof(struct dml2_plane_parameters)); 592 653 593 654 phantom->stream_index = phantom_stream_index; ··· 785 844 default: 786 845 return false; 787 846 } 847 + } 848 + 849 + double dml2_core_utils_get_frame_time_us(const struct dml2_stream_parameters *stream) 850 + { 851 + double otg_vline_time_us = (double)stream->timing.h_total / (double)stream->timing.pixel_clock_khz * 1000.0; 852 + double non_vtotal = stream->timing.vblank_nom + stream->timing.v_active; 853 + double frame_time_us = non_vtotal * otg_vline_time_us; 854 + return frame_time_us; 788 855 }
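The new dml2_core_utils_get_frame_time_us() builds frame time from vblank_nom + v_active rather than v_total, presumably so the result stays on the nominal timing when v_total is stretched for variable refresh. Worked through for CEA 1080p60 (illustrative numbers):

    /* h_total = 2200, pixel_clock_khz = 148500, v_active = 1080, vblank_nom = 45:
     *   line  time = 2200 / 148500 * 1000 ~= 14.815 us
     *   frame time = (1080 + 45) * 14.815 ~= 16667 us  (~60 Hz)
     */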
+1 -2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
··· 22 22 bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg); 23 23 unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel); 24 24 bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel); 25 - unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel); 26 - bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel); 27 25 bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan); 28 26 bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode); 29 27 int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode); ··· 39 41 bool dml2_core_utils_is_dp_8b_10b_link_rate(enum dml2_output_link_dp_rate rate); 40 42 bool dml2_core_utils_is_dp_128b_132b_link_rate(enum dml2_output_link_dp_rate rate); 41 43 bool dml2_core_utils_is_odm_split(enum dml2_odm_mode odm_mode); 44 + double dml2_core_utils_get_frame_time_us(const struct dml2_stream_parameters *stream); 42 45 43 46 #endif /* __DML2_CORE_UTILS_H__ */
+1
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
··· 552 552 553 553 static int get_displays_with_fams_mask(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int latency_hiding_requirement_us) 554 554 { 555 + (void)latency_hiding_requirement_us; 555 556 unsigned int i; 556 557 int displays_with_fams_mask = 0x0; 557 558
+2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
··· 8 8 9 9 static bool dummy_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out) 10 10 { 11 + (void)in_out; 11 12 return true; 12 13 } 13 14 14 15 static bool dummy_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out) 15 16 { 17 + (void)in_out; 16 18 return true; 17 19 } 18 20
+1
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
··· 9 9 10 10 static bool dummy_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out) 11 11 { 12 + (void)in_out; 12 13 return true; 13 14 } 14 15
+192
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn42.c
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2026 Advanced Micro Devices, Inc. 4 + 5 + #include "dml2_pmo_dcn42.h" 6 + #include "lib_float_math.h" 7 + #include "dml2_debug.h" 8 + #include "dml2_pmo_dcn4_fams2.h" 9 + 10 + /* 11 + * DCN42 PMO Policy Implementation 12 + * This implementation provides VBlank-only strategies for 1, 2, 3, and 4 display 13 + * configurations, ensuring p-state watermark support in the blank period only. 14 + */ 15 + 16 + static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_1_display[] = { 17 + // VBlank only 18 + { 19 + .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na }, 20 + .allow_state_increase = true, 21 + }, 22 + }; 23 + 24 + static const int dcn42_strategy_list_1_display_size = sizeof(dcn42_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy); 25 + 26 + static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_2_display[] = { 27 + // VBlank only for both displays 28 + { 29 + .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na }, 30 + .allow_state_increase = true, 31 + }, 32 + }; 33 + 34 + static const int dcn42_strategy_list_2_display_size = sizeof(dcn42_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy); 35 + 36 + static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_3_display[] = { 37 + // VBlank only for all three displays 38 + { 39 + .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na }, 40 + .allow_state_increase = true, 41 + }, 42 + }; 43 + 44 + static const int dcn42_strategy_list_3_display_size = sizeof(dcn42_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy); 45 + 46 + static const struct dml2_pmo_pstate_strategy dcn42_strategy_list_4_display[] = { 47 + // VBlank only for all four displays 48 + { 49 + .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank }, 50 + .allow_state_increase = true, 51 + }, 52 + }; 53 + 54 + static const int dcn42_strategy_list_4_display_size = sizeof(dcn42_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy); 55 + 56 + bool pmo_dcn42_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out) 57 + { 58 + const struct dml2_pmo_scratch *s = &in_out->instance->scratch; 59 + const int REQUIRED_RESERVED_TIME = 60 + (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us; 61 + bool p_state_supported = true; 62 + unsigned int stream_index; 63 + 64 + if (in_out->base_display_config->display_config.overrides.all_streams_blanked) 65 + return true; 66 + 67 + if (s->pmo_dcn4.cur_pstate_candidate < 0) 68 + return false; 69 + 70 + for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { 71 + if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) { 72 + if (dcn4_get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < REQUIRED_RESERVED_TIME || 73 + dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > 0) { 74 + p_state_supported = false; 75 + break; 76 + } 77 + } else { 78 + p_state_supported = false; 79 + break; 80 
+ } 81 + } 82 + 83 + return p_state_supported; 84 + } 85 + 86 + bool pmo_dcn42_initialize(struct dml2_pmo_initialize_in_out *in_out) 87 + { 88 + int i = 0; 89 + struct dml2_pmo_instance *pmo = in_out->instance; 90 + 91 + unsigned int base_list_size = 0; 92 + const struct dml2_pmo_pstate_strategy *base_list = NULL; 93 + unsigned int *expanded_list_size = NULL; 94 + struct dml2_pmo_pstate_strategy *expanded_list = NULL; 95 + 96 + DML_LOG_COMP_IF_ENTER(); 97 + 98 + pmo->soc_bb = in_out->soc_bb; 99 + pmo->ip_caps = in_out->ip_caps; 100 + pmo->mpc_combine_limit = 2; 101 + pmo->odm_combine_limit = 4; 102 + pmo->mcg_clock_table_size = in_out->mcg_clock_table_size; 103 + 104 + /* 105 + * DCN42 does not support FAMS features like SubVP and DRR. 106 + * These parameters are initialized to safe values but won't be used 107 + * since our strategies only use VBlank. 108 + */ 109 + pmo->fams_params.v2.subvp.refresh_rate_limit_max = 0; 110 + pmo->fams_params.v2.subvp.refresh_rate_limit_min = 0; 111 + pmo->fams_params.v2.drr.refresh_rate_limit_max = 0; 112 + pmo->fams_params.v2.drr.refresh_rate_limit_min = 0; 113 + 114 + pmo->options = in_out->options; 115 + 116 + /* Generate permutations of p-state configs from base strategy list */ 117 + for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) { 118 + switch (i+1) { 119 + case 1: 120 + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { 121 + base_list = pmo->options->override_strategy_lists[i]; 122 + base_list_size = pmo->options->num_override_strategies_per_list[i]; 123 + } else { 124 + base_list = dcn42_strategy_list_1_display; 125 + base_list_size = dcn42_strategy_list_1_display_size; 126 + } 127 + 128 + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; 129 + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display; 130 + 131 + break; 132 + case 2: 133 + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { 134 + base_list = pmo->options->override_strategy_lists[i]; 135 + base_list_size = pmo->options->num_override_strategies_per_list[i]; 136 + } else { 137 + base_list = dcn42_strategy_list_2_display; 138 + base_list_size = dcn42_strategy_list_2_display_size; 139 + } 140 + 141 + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; 142 + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display; 143 + 144 + break; 145 + case 3: 146 + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { 147 + base_list = pmo->options->override_strategy_lists[i]; 148 + base_list_size = pmo->options->num_override_strategies_per_list[i]; 149 + } else { 150 + base_list = dcn42_strategy_list_3_display; 151 + base_list_size = dcn42_strategy_list_3_display_size; 152 + } 153 + 154 + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; 155 + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display; 156 + 157 + break; 158 + case 4: 159 + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { 160 + base_list = pmo->options->override_strategy_lists[i]; 161 + base_list_size = pmo->options->num_override_strategies_per_list[i]; 162 + } else { 163 + base_list = dcn42_strategy_list_4_display; 164 + base_list_size = dcn42_strategy_list_4_display_size; 165 + } 166 + 167 + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; 168 + expanded_list = 
pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display; 169 + 170 + break; 171 + } 172 + 173 + DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES); 174 + 175 + /* 176 + * Populate list using DCN4 FAMS2 expansion function. 177 + * Since our strategies only contain VBlank methods, the expansion 178 + * will not introduce any FAMS-specific logic. 179 + */ 180 + pmo_dcn4_fams2_expand_base_pstate_strategies( 181 + base_list, 182 + base_list_size, 183 + i + 1, 184 + expanded_list, 185 + expanded_list_size); 186 + } 187 + 188 + DML_LOG_DEBUG("%s exit with true\n", __func__); 189 + DML_LOG_COMP_IF_EXIT(); 190 + 191 + return true; 192 + }
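The new strategy tables above are sized with the sizeof(array)/sizeof(element) idiom, evaluated at compile time, so adding a strategy row never needs a matching count update. A minimal standalone sketch of the same pattern (the enum and struct here are stand-ins, not the kernel's dml2 types):

#include <stdio.h>

enum pstate_method { METHOD_NA, METHOD_VBLANK };

struct pstate_strategy {
        enum pstate_method per_stream_method[4];
        int allow_state_increase;       /* bool in spirit */
};

static const struct pstate_strategy one_display_list[] = {
        /* VBlank on stream 0, remaining slots marked N/A */
        { { METHOD_VBLANK, METHOD_NA, METHOD_NA, METHOD_NA }, 1 },
};

/* Same idiom the file uses; a compile-time constant. */
static const int one_display_list_size =
        sizeof(one_display_list) / sizeof(struct pstate_strategy);

int main(void)
{
        printf("entries: %d\n", one_display_list_size); /* prints 1 */
        return 0;
}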
+17
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn42.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2026 Advanced Micro Devices, Inc. 4 + */ 5 + 6 + #ifndef __DML2_PMO_DCN42_H__ 7 + #define __DML2_PMO_DCN42_H__ 8 + 9 + #include "dml2_internal_shared_types.h" 10 + 11 + struct dml2_pmo_initialize_in_out; 12 + struct dml2_pmo_test_for_pstate_support_in_out; 13 + 14 + bool pmo_dcn42_initialize(struct dml2_pmo_initialize_in_out *in_out); 15 + bool pmo_dcn42_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out); 16 + 17 + #endif /* __DML2_PMO_DCN42_H__ */
+14 -7
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
··· 428 428 struct dml2_pmo_pstate_strategy *expanded_strategy_list, 429 429 unsigned int *num_expanded_strategies) 430 430 { 431 + (void)stream_count; 431 432 if (expanded_strategy_list && num_expanded_strategies) { 432 433 memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy)); 433 434 ··· 521 520 const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS], 522 521 const unsigned int stream_count) 523 522 { 523 + (void)variant_strategy; 524 524 bool valid = true; 525 525 unsigned int i; 526 526 ··· 1182 1180 1183 1181 static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch) 1184 1182 { 1183 + (void)stream_count; 1185 1184 scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy; 1186 1185 scratch->pmo_dcn4.num_pstate_candidates++; 1187 1186 } ··· 1662 1659 return is_config_schedulable(pmo, display_cfg, pstate_strategy); 1663 1660 } 1664 1661 1665 - static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask) 1662 + int dcn4_get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask) 1666 1663 { 1667 1664 unsigned int i; 1668 1665 int min_vactive_margin_us = 0xFFFFFFF; ··· 1850 1847 struct display_configuation_with_meta *display_config, 1851 1848 int stream_index) 1852 1849 { 1850 + (void)display_config; 1853 1851 struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; 1854 1852 struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; 1855 1853 ··· 1907 1903 1908 1904 // Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta 1909 1905 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) { 1910 - if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us)) 1906 + if (dcn4_get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us)) 1911 1907 set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index); 1912 1908 1913 1909 /* FAMS2 meta */ ··· 1994 1990 struct dml2_pmo_instance *pmo, 1995 1991 int plane_mask) 1996 1992 { 1993 + (void)pmo; 1997 1994 unsigned int plane_index; 1998 1995 struct dml2_plane_parameters *plane; 1999 1996 ··· 2182 2177 return success; 2183 2178 } 2184 2179 2185 - static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask) 2180 + int dcn4_get_minimum_reserved_time_us_for_planes( 2181 + const struct display_configuation_with_meta *display_config, 2182 + int plane_mask) 2186 2183 { 2187 2184 int min_time_us = 0xFFFFFF; 2188 2185 unsigned int plane_index = 0; ··· 2224 2217 2225 2218 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive || 2226 2219 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) { 2227 - if (get_vactive_pstate_margin(in_out->base_display_config, 
s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || 2220 + if (dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || 2228 2221 get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) { 2229 2222 p_state_supported = false; 2230 2223 break; 2231 2224 } 2232 2225 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank || 2233 2226 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) { 2234 - if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < 2227 + if (dcn4_get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < 2235 2228 REQUIRED_RESERVED_TIME || 2236 - get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) { 2229 + dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) { 2237 2230 p_state_supported = false; 2238 2231 break; 2239 2232 } ··· 2245 2238 } 2246 2239 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) { 2247 2240 if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) || 2248 - get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) { 2241 + dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) { 2249 2242 p_state_supported = false; 2250 2243 break; 2251 2244 }
+10
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
··· 7 7 8 8 #include "dml2_internal_shared_types.h" 9 9 10 + struct display_configuation_with_meta; 11 + 12 + int dcn4_get_vactive_pstate_margin( 13 + const struct display_configuation_with_meta *display_cfg, 14 + int plane_mask); 15 + 16 + int dcn4_get_minimum_reserved_time_us_for_planes( 17 + const struct display_configuation_with_meta *display_config, 18 + int plane_mask); 19 + 10 20 bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out); 11 21 12 22 bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out);
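The header above only needs struct display_configuation_with_meta as an incomplete type: a forward declaration is enough for prototypes that take pointers, and only the .c files that dereference it need the full definition. A toy single-file illustration of the same decoupling (names are hypothetical):

#include <stdio.h>

/* In a header, an incomplete type suffices for pointer parameters: */
struct widget;
int widget_margin(const struct widget *w);

/* Only the implementation needs the full layout: */
struct widget { int margin_us; };
int widget_margin(const struct widget *w) { return w->margin_us; }

int main(void)
{
        struct widget w = { 42 };

        printf("%d\n", widget_margin(&w));      /* prints 42 */
        return 0;
}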
+3
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
··· 9 9 10 10 static bool dummy_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out) 11 11 { 12 + (void)in_out; 12 13 return false; 13 14 } 14 15 15 16 static bool dummy_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out) 16 17 { 18 + (void)in_out; 17 19 return true; 18 20 } 19 21 20 22 static bool dummy_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out) 21 23 { 24 + (void)in_out; 22 25 return false; 23 26 } 24 27
+1 -1
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
··· 23 23 return arg2; 24 24 if (isNaN(arg2)) 25 25 return arg1; 26 - return arg1 - arg1 * ((int)(arg1 / arg2)); 26 + return arg1 - arg2 * ((int)(arg1 / arg2)); 27 27 } 28 28 29 29 double math_min2(const double arg1, const double arg2)
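The one-character fix above is the whole bug: a truncated-division remainder is a - b * trunc(a / b), and the old code multiplied by arg1 instead of arg2. A standalone check of the difference:

#include <stdio.h>

static double fmod_buggy(double a, double b) { return a - a * ((int)(a / b)); }
static double fmod_fixed(double a, double b) { return a - b * ((int)(a / b)); }

int main(void)
{
        printf("buggy: %f\n", fmod_buggy(7.0, 3.0));    /* -7.000000 */
        printf("fixed: %f\n", fmod_fixed(7.0, 3.0));    /*  1.000000 */
        return 0;
}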
+2
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
··· 17 17 18 18 static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config) 19 19 { 20 + (void)dml; 20 21 memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg)); 21 22 out->stage1.min_clk_index_for_latency = 0; 22 23 } ··· 473 472 static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes, 474 473 enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end) 475 474 { 475 + (void)h_active; 476 476 int i, slice_width; 477 477 const char MAX_SCL_VP_OVERLAP = 3; 478 478 bool success = false;
+9
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
··· 178 178 179 179 static bool validate_pipe_assignment(const struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, const struct dml2_dml_to_dc_pipe_mapping *mapping) 180 180 { 181 + (void)ctx; 182 + (void)disp_cfg; 183 + (void)mapping; 184 + (void)state; 181 185 // int i, j, k; 182 186 // 183 187 // unsigned int plane_id; ··· 296 292 const unsigned int stream_id, 297 293 unsigned int *last_resort_pipe_candidates) 298 294 { 295 + (void)stream_id; 299 296 unsigned int num_last_resort_candidates = 0; 300 297 int i; 301 298 ··· 546 541 struct dc_pipe_mapping_scratch *scratch, 547 542 unsigned int odm_slice_index) 548 543 { 544 + (void)ctx; 549 545 struct pipe_ctx *pipe = NULL; 550 546 int i; 551 547 ··· 573 567 unsigned int odm_slice, 574 568 struct pipe_ctx *top_pipe) 575 569 { 570 + (void)ctx; 571 + (void)plane; 576 572 int i; 577 573 578 574 for (i = 0; i < pipe_pool->num_pipes_assigned_to_plane_for_mpcc_combine; i++) { ··· 730 722 731 723 static void remove_pipes_from_blend_trees(struct dml2_context *ctx, struct dc_state *state, struct dc_plane_pipe_pool *pipe_pool, unsigned int odm_slice) 732 724 { 725 + (void)ctx; 733 726 struct pipe_ctx *pipe; 734 727 int i; 735 728
+6
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
··· 33 33 34 34 void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) 35 35 { 36 + (void)in_dc; 36 37 switch (dml2->v20.dml_core_ctx.project) { 37 38 case dml_project_dcn32: 38 39 case dml_project_dcn321: ··· 245 244 246 245 void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out) 247 246 { 247 + (void)in_dc; 248 248 out->dprefclk_mhz = dml2->config.bbox_overrides.dprefclk_mhz; 249 249 out->xtalclk_mhz = dml2->config.bbox_overrides.xtalclk_mhz; 250 250 out->pcierefclk_mhz = 100; ··· 330 328 void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, 331 329 const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out) 332 330 { 331 + (void)in_dc; 333 332 struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; 334 333 struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; 335 334 int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0}; ··· 785 782 static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location, 786 783 const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2) 787 784 { 785 + (void)pipe; 788 786 unsigned int output_bpc; 789 787 790 788 out->DSCEnable[location] = (enum dml_dsc_enable)in->timing.flags.DSC; ··· 1137 1133 static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, 1138 1134 const struct dc_stream_state *stream, const struct dml_display_cfg_st *dml_dispcfg) 1139 1135 { 1136 + (void)dml_dispcfg; 1140 1137 int i = 0; 1141 1138 int location = -1; 1142 1139 ··· 1178 1173 static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2, const struct dc_plane_state *plane, 1179 1174 const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index) 1180 1175 { 1176 + (void)dml_dispcfg; 1181 1177 unsigned int plane_id; 1182 1178 unsigned int i = 0; 1183 1179 unsigned int location = UINT_MAX;
+1
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
··· 465 465 466 466 static unsigned int find_planes_per_stream_and_stream_count(struct dml2_context *in_ctx, struct dml_display_cfg_st *dml_dispcfg, int *num_of_planes_per_stream) 467 467 { 468 + (void)in_ctx; 468 469 unsigned int plane_index, stream_index = 0, num_of_streams; 469 470 470 471 for (plane_index = 0; plane_index < dml_dispcfg->num_surfaces; plane_index++) {
+11
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
··· 108 108 return true; 109 109 } 110 110 111 + void dml2_destroy(struct dml2_context *dml2) 112 + { 113 + if (!dml2) 114 + return; 115 + 116 + if (dml2->architecture == dml2_architecture_21) 117 + dml21_destroy(dml2); 118 + 119 + DC_RUN_WITH_PREEMPTION_ENABLED(vfree(dml2)); 120 + } 121 + 111 122 void dml2_reinit(const struct dc *in_dc, 112 123 const struct dml2_configuration_options *config, 113 124 struct dml2_context **dml2)
-10
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c
··· 548 548 } 549 549 } 550 550 551 - void dml2_destroy(struct dml2_context *dml2) 552 - { 553 - if (!dml2) 554 - return; 555 - 556 - if (dml2->architecture == dml2_architecture_21) 557 - dml21_destroy(dml2); 558 - vfree(dml2); 559 - } 560 - 561 551 void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2, 562 552 unsigned int *fclk_change_support, unsigned int *dram_clk_change_support) 563 553 {
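Moving dml2_destroy() out of the _fpu.c file lines up with the "boot warnings around the FP code handling" mentioned in the merge message: DC_FP_START() disables preemption on some architectures, and vfree() may sleep, so the free has to run with preemption enabled (hence the DC_RUN_WITH_PREEMPTION_ENABLED wrapper in the new location). A userspace toy with stub macros, purely to make the constraint visible (none of these stand-ins are the kernel implementations):

#include <stdio.h>
#include <stdlib.h>

static int preempt_disabled;
#define DC_FP_START()   do { preempt_disabled = 1; } while (0)
#define DC_FP_END()     do { preempt_disabled = 0; } while (0)

/* vfree() may sleep, so it must only run with preemption enabled. */
static void vfree_like(void *p)
{
        if (preempt_disabled)
                fprintf(stderr, "BUG: sleeping call in atomic context\n");
        free(p);
}

int main(void)
{
        void *ctx = malloc(64);

        DC_FP_START();
        /* ...FPU-only math happens here... */
        DC_FP_END();

        vfree_like(ctx);        /* freed outside the FP region: no warning */
        return 0;
}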
+1
drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
··· 563 563 564 564 void dml_rq_dlg_get_arb_params(struct display_mode_lib_st *mode_lib, dml_display_arb_params_st *arb_param) 565 565 { 566 + (void)mode_lib; 566 567 memset(arb_param, 0, sizeof(*arb_param)); 567 568 arb_param->max_req_outstanding = 256; 568 569 arb_param->min_req_outstanding = 256; // turn off the sat level feature if this set to max
+6 -7
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
··· 680 680 } else { 681 681 build_dsc_enc_caps(dsc, dsc_enc_caps); 682 682 } 683 - 684 - if (dsc->ctx->dc->debug.native422_support) 685 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 686 683 } 687 684 688 685 /* Returns 'false' if no intersection was found for at least one capability. ··· 1097 1100 branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps; 1098 1101 break; 1099 1102 case PIXEL_ENCODING_YCBCR422: 1100 - is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422; 1101 - sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps; 1102 - branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps; 1103 - if (!is_dsc_possible) { 1103 + if (policy.ycbcr422_simple) { 1104 1104 is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422; 1105 1105 dsc_cfg->ycbcr422_simple = is_dsc_possible; 1106 1106 sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps; 1107 + } else { 1108 + is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422; 1109 + sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps; 1110 + branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps; 1107 1111 } 1108 1112 break; 1109 1113 case PIXEL_ENCODING_YCBCR420: ··· 1404 1406 policy->min_target_bpp = 8; 1405 1407 /* DP specs limits to 3 x bpc */ 1406 1408 policy->max_target_bpp = 3 * bpc; 1409 + policy->ycbcr422_simple = true; 1407 1410 break; 1408 1411 case PIXEL_ENCODING_YCBCR420: 1409 1412 /* DP specs limits to 6 */
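The reworked YCBCR422 branch above chooses between simple and native 4:2:2 up front from the policy (the new ycbcr422_simple field, defaulted true for DP) instead of trying native first and falling back. A condensed sketch of that decision with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct caps { bool simple_422, native_422; };

/* Policy picks the mode; the capability bits gate it. */
static bool dsc_422_possible(struct caps c, bool policy_simple, bool *out_simple)
{
        if (policy_simple) {
                *out_simple = c.simple_422;
                return c.simple_422;
        }
        *out_simple = false;
        return c.native_422;
}

int main(void)
{
        struct caps c = { .simple_422 = true, .native_422 = false };
        bool simple;
        bool ok = dsc_422_possible(c, true, &simple);

        printf("possible=%d simple=%d\n", ok, simple);  /* 1 1 */
        return 0;
}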
+1 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
··· 100 100 dsc_enc_caps->color_formats.bits.RGB = 1; 101 101 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 102 102 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 103 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 103 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 104 104 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 105 105 106 106 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+1 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
··· 128 128 dsc_enc_caps->color_formats.bits.RGB = 1; 129 129 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 130 130 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 131 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 131 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 132 132 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 133 133 134 134 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+1 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
··· 78 78 dsc_enc_caps->color_formats.bits.RGB = 1; 79 79 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 80 80 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 81 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 81 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 82 82 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 83 83 84 84 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+7
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 1394 1394 return dcn20_patch_unknown_plane_state(plane_state); 1395 1395 } 1396 1396 1397 + static void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1398 + { 1399 + DC_FP_START(); 1400 + dcn21_update_bw_bounding_box_fpu(dc, bw_params); 1401 + DC_FP_END(); 1402 + } 1403 + 1397 1404 static const struct resource_funcs dcn21_res_pool_funcs = { 1398 1405 .destroy = dcn21_destroy_resource_pool, 1399 1406 .link_enc_create = dcn21_link_encoder_create,
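This wrapper is the standard DC pattern for keeping FPU code fenced: the _fpu function is the only place floats are touched, and callers reach it through a thin non-FPU wrapper that brackets the call with DC_FP_START()/DC_FP_END(). The same shape repeats for the dcn31/dcn315/dcn316 pools below. A toy version with stub guards (stand-ins, not the kernel macros):

#include <stdio.h>

#define DC_FP_START()   puts("fpu: begin")
#define DC_FP_END()     puts("fpu: end")

/* The _fpu function is the only code allowed to use floating point. */
static void update_bw_bounding_box_fpu(int *bw) { *bw = (int)(1.5 * 100.0); }

/* Non-FPU callers go through a wrapper that brackets the call. */
static void update_bw_bounding_box(int *bw)
{
        DC_FP_START();
        update_bw_bounding_box_fpu(bw);
        DC_FP_END();
}

int main(void)
{
        int bw;

        update_bw_bounding_box(&bw);
        printf("bw=%d\n", bw);  /* 150 */
        return 0;
}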
+9
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 1854 1854 .get_dcc_compression_cap = dcn20_get_dcc_compression_cap 1855 1855 }; 1856 1856 1857 + static void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1858 + { 1859 + DC_FP_START(); 1860 + dcn31_update_bw_bounding_box_fpu(dc, bw_params); 1861 + DC_FP_END(); 1862 + } 1863 + 1857 1864 static struct resource_funcs dcn31_res_pool_funcs = { 1858 1865 .destroy = dcn31_destroy_resource_pool, 1859 1866 .link_enc_create = dcn31_link_encoder_create, ··· 2002 1995 /* Use pipe context based otg sync logic */ 2003 1996 dc->config.use_pipe_ctx_sync_logic = true; 2004 1997 dc->config.disable_hbr_audio_dp2 = true; 1998 + 1999 + dc->config.no_native422_support = true; 2005 2000 2006 2001 /* read VBIOS LTTPR caps */ 2007 2002 {
+9
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 1849 1849 .get_dcc_compression_cap = dcn20_get_dcc_compression_cap 1850 1850 }; 1851 1851 1852 + static void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1853 + { 1854 + DC_FP_START(); 1855 + dcn315_update_bw_bounding_box_fpu(dc, bw_params); 1856 + DC_FP_END(); 1857 + } 1858 + 1852 1859 static struct resource_funcs dcn315_res_pool_funcs = { 1853 1860 .destroy = dcn315_destroy_resource_pool, 1854 1861 .link_enc_create = dcn31_link_encoder_create, ··· 1965 1958 dc->caps.color.mpc.ogam_rom_caps.pq = 0; 1966 1959 dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 1967 1960 dc->caps.color.mpc.ocsc = 1; 1961 + 1962 + dc->config.no_native422_support = true; 1968 1963 1969 1964 /* read VBIOS LTTPR caps */ 1970 1965 {
+7
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 1725 1725 .get_dcc_compression_cap = dcn20_get_dcc_compression_cap 1726 1726 }; 1727 1727 1728 + static void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1729 + { 1730 + DC_FP_START(); 1731 + dcn316_update_bw_bounding_box_fpu(dc, bw_params); 1732 + DC_FP_END(); 1733 + } 1734 + 1728 1735 static struct resource_funcs dcn316_res_pool_funcs = { 1729 1736 .destroy = dcn316_destroy_resource_pool, 1730 1737 .link_enc_create = dcn31_link_encoder_create,
+4
drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
··· 155 155 dcn42_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table, &soc_bb->vmin_limit, 156 156 dc->clk_mgr->bw_params); 157 157 } 158 + 159 + if (dc->clk_mgr->bw_params->vram_type == Ddr5MemType) { 160 + soc_bb->power_management_parameters = dcn42_ddr5_power_management_parameters; 161 + } 158 162 } 159 163 160 164 static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+1
drivers/gpu/drm/amd/display/include/ddc_service_types.h
··· 36 36 #define DP_BRANCH_DEVICE_ID_006037 0x006037 37 37 #define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8 38 38 #define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD 39 + #define DP_BRANCH_DEVICE_ID_001FF2 0x001FF2 39 40 #define DP_BRANCH_HW_REV_10 0x10 40 41 #define DP_BRANCH_HW_REV_20 0x20 41 42
+21 -11
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 153 153 * round down the vtotal value to avoid stretching vblank over 154 154 * panel's vtotal boundary. 155 155 */ 156 - v_total = div64_u64(div64_u64(((unsigned long long)( 156 + v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)( 157 157 frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), 158 158 stream->timing.h_total), 1000000); 159 159 } else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) { ··· 161 161 * round up the vtotal value to prevent off-by-one error causing 162 162 * v_total_min to be below the panel's lower bound 163 163 */ 164 - v_total = div64_u64(div64_u64(((unsigned long long)( 164 + v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)( 165 165 frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), 166 166 stream->timing.h_total) + (1000000 - 1), 1000000); 167 167 } else { 168 - v_total = div64_u64(div64_u64(((unsigned long long)( 168 + v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)( 169 169 frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), 170 170 stream->timing.h_total) + 500000, 1000000); 171 171 } ··· 196 196 uint32_t h_total_up_scaled; 197 197 198 198 h_total_up_scaled = stream->timing.h_total * 10000; 199 - v_total = div_u64((unsigned long long)duration_in_us 199 + v_total = (unsigned int)div_u64((unsigned long long)duration_in_us 200 200 * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1), 201 201 h_total_up_scaled); //ceiling for MMax and MMin for MVRR 202 202 } else { 203 - v_total = div64_u64(div64_u64(((unsigned long long)( 203 + v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)( 204 204 duration_in_us) * (stream->timing.pix_clk_100hz / 10)), 205 205 stream->timing.h_total), 1000); 206 206 } ··· 232 232 target_duration_in_us; 233 233 234 234 /* Calculate ratio between new and current frame duration with 3 digit */ 235 - unsigned int frame_duration_ratio = div64_u64(1000000, 235 + uint64_t frame_duration_ratio_u64 = div64_u64(1000000, 236 236 (1000 + div64_u64(((unsigned long long)( 237 237 STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) * 238 238 current_duration_in_us), 239 239 1000000))); 240 + ASSERT(frame_duration_ratio_u64 <= 0xFFFFFFFF); 241 + unsigned int frame_duration_ratio = (unsigned int)frame_duration_ratio_u64; 240 242 241 243 /* Calculate delta between new and current frame duration in us */ 242 - unsigned int frame_duration_delta = div64_u64(((unsigned long long)( 244 + uint64_t frame_duration_delta_u64 = div64_u64(((unsigned long long)( 243 245 current_duration_in_us) * 244 246 (1000 - frame_duration_ratio)), 1000); 247 + ASSERT(frame_duration_delta_u64 <= 0xFFFFFFFF); 248 + unsigned int frame_duration_delta = (unsigned int)frame_duration_delta_u64; 245 249 246 250 /* Adjust frame duration delta based on ratio between current and 247 251 * standard frame duration (frame duration at 60 Hz refresh rate). 
248 252 */ 249 - unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)( 253 + uint64_t ramp_rate_interpolated_u64 = div64_u64(((unsigned long long)( 250 254 frame_duration_delta) * current_duration_in_us), 16666); 255 + ASSERT(ramp_rate_interpolated_u64 <= 0xFFFFFFFF); 256 + unsigned int ramp_rate_interpolated = (unsigned int)ramp_rate_interpolated_u64; 251 257 252 258 /* Going to a higher refresh rate (lower frame duration) */ 253 259 if (ramp_direction_is_up) { ··· 283 277 } 284 278 } 285 279 286 - v_total = div64_u64(div64_u64(((unsigned long long)( 280 + v_total = (unsigned int)div64_u64(div64_u64(((unsigned long long)( 287 281 current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)), 288 282 stream->timing.h_total), 1000); 289 283 ··· 1064 1058 else 1065 1059 in_out_vrr->fixed_refresh_in_uhz = 0; 1066 1060 1067 - refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) - 1068 - div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); 1061 + { 1062 + uint64_t rr_tmp = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) - 1063 + div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); 1064 + ASSERT(rr_tmp <= 0xFFFFFFFF); 1065 + refresh_range = (unsigned int)rr_tmp; 1066 + } 1069 1067 1070 1068 in_out_vrr->supported = true; 1071 1069 }
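The three freesync divisions above encode three rounding modes around the same quotient: bare division floors, adding (denominator - 1) rounds up, and adding half the denominator rounds to nearest. The same arithmetic in plain C:

#include <stdio.h>

static unsigned long long div_floor(unsigned long long n, unsigned long long d)
{ return n / d; }

static unsigned long long div_ceil(unsigned long long n, unsigned long long d)
{ return (n + d - 1) / d; }

static unsigned long long div_nearest(unsigned long long n, unsigned long long d)
{ return (n + d / 2) / d; }

int main(void)
{
        /* 1000000 is the scale freesync divides back out. */
        printf("%llu %llu %llu\n",
               div_floor(2500000, 1000000),     /* 2 */
               div_ceil(2500000, 1000000),      /* 3 */
               div_nearest(2500000, 1000000));  /* 3 */
        return 0;
}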
+34 -15
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
··· 250 250 unsigned int lut_index; 251 251 252 252 table->backlight_thresholds[0] = 0; 253 - table->backlight_offsets[0] = params.backlight_lut_array[0]; 253 + ASSERT(params.backlight_lut_array[0] <= 0xFFFF); 254 + table->backlight_offsets[0] = (uint16_t)params.backlight_lut_array[0]; 254 255 table->backlight_thresholds[num_entries-1] = 0xFFFF; 256 + ASSERT(params.backlight_lut_array[params.backlight_lut_array_size - 1] <= 0xFFFF); 255 257 table->backlight_offsets[num_entries-1] = 256 - params.backlight_lut_array[params.backlight_lut_array_size - 1]; 258 + (uint16_t)params.backlight_lut_array[params.backlight_lut_array_size - 1]; 257 259 258 260 /* Setup all brightness levels between 0% and 100% exclusive 259 261 * Fills brightness-to-backlight transform table. Backlight custom curve ··· 267 265 */ 268 266 for (i = 1; i+1 < num_entries; i++) { 269 267 lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); 268 + 270 269 ASSERT(lut_index < params.backlight_lut_array_size); 271 270 272 - table->backlight_thresholds[i] = 273 - cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)); 274 - table->backlight_offsets[i] = 275 - cpu_to_be16(params.backlight_lut_array[lut_index]); 271 + unsigned int threshold_val = DIV_ROUNDUP((i * 65536), num_entries); 272 + unsigned int offset_val = params.backlight_lut_array[lut_index]; 273 + 274 + ASSERT(threshold_val <= 0xFFFF); 275 + ASSERT(offset_val <= 0xFFFF); 276 + 277 + table->backlight_thresholds[i] = cpu_to_be16((uint16_t)threshold_val); 278 + table->backlight_offsets[i] = cpu_to_be16((uint16_t)offset_val); 276 279 } 277 280 } 278 281 ··· 289 282 unsigned int lut_index; 290 283 291 284 table->backlight_thresholds[0] = 0; 292 - table->backlight_offsets[0] = params.backlight_lut_array[0]; 285 + ASSERT(params.backlight_lut_array[0] <= 0xFFFF); 286 + table->backlight_offsets[0] = (uint16_t)params.backlight_lut_array[0]; 293 287 table->backlight_thresholds[num_entries-1] = 0xFFFF; 288 + ASSERT(params.backlight_lut_array[params.backlight_lut_array_size - 1] <= 0xFFFF); 294 289 table->backlight_offsets[num_entries-1] = 295 - params.backlight_lut_array[params.backlight_lut_array_size - 1]; 290 + (uint16_t)params.backlight_lut_array[params.backlight_lut_array_size - 1]; 296 291 297 292 /* Setup all brightness levels between 0% and 100% exclusive 298 293 * Fills brightness-to-backlight transform table. Backlight custom curve ··· 308 299 lut_index = DIV_ROUNDUP((i * params.backlight_lut_array_size), num_entries); 309 300 ASSERT(lut_index < params.backlight_lut_array_size); 310 301 302 + unsigned int threshold_val = DIV_ROUNDUP((i * 65536), num_entries); 303 + unsigned int offset_val = params.backlight_lut_array[lut_index]; 304 + 305 + ASSERT(threshold_val <= 0xFFFF); 306 + ASSERT(offset_val <= 0xFFFF); 307 + 311 308 table->backlight_thresholds[i] = (big_endian) ? 312 - cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) : 313 - cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries)); 309 + cpu_to_be16((uint16_t)threshold_val) : cpu_to_le16((uint16_t)threshold_val); 314 310 table->backlight_offsets[i] = (big_endian) ? 
315 - cpu_to_be16(params.backlight_lut_array[lut_index]) : 316 - cpu_to_le16(params.backlight_lut_array[lut_index]); 311 + cpu_to_be16((uint16_t)offset_val) : cpu_to_le16((uint16_t)offset_val); 317 312 } 318 313 } 319 314 ··· 753 740 } 754 741 755 742 if (params.backlight_ramping_override) { 743 + 744 + ASSERT(params.backlight_ramping_reduction <= 0xFFFF); 745 + ASSERT(params.backlight_ramping_start <= 0xFFFF); 756 746 for (i = 0; i < NUM_AGGR_LEVEL; i++) { 757 - config.blRampReduction[i] = params.backlight_ramping_reduction; 758 - config.blRampStart[i] = params.backlight_ramping_start; 747 + config.blRampReduction[i] = (uint16_t)params.backlight_ramping_reduction; 748 + config.blRampStart[i] = (uint16_t)params.backlight_ramping_start; 759 749 } 760 750 } else { 761 751 for (i = 0; i < NUM_AGGR_LEVEL; i++) { ··· 1076 1060 bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps) 1077 1061 { 1078 1062 unsigned int data_points_size; 1063 + uint64_t caps_size; 1079 1064 1080 1065 if (config_no >= ARRAY_SIZE(custom_backlight_profiles)) 1081 1066 return false; ··· 1084 1067 data_points_size = custom_backlight_profiles[config_no].num_data_points 1085 1068 * sizeof(custom_backlight_profiles[config_no].data_points[0]); 1086 1069 1087 - caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size; 1070 + caps_size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size; 1071 + ASSERT(caps_size <= 0xFFFF); 1072 + caps->size = (uint16_t)caps_size; 1088 1073 caps->flags = 0; 1089 1074 caps->error_code = 0; 1090 1075 caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;
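The pattern running through the power_helpers changes above is the same in every spot: range-check the wide value once, then narrow to u16 with an explicit cast before the endianness conversion, so nothing is silently truncated. A portable miniature (to_be16 here is an unconditional byte swap, unlike cpu_to_be16(), which is a no-op on big-endian hosts):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t to_be16(uint16_t v) { return (uint16_t)((v << 8) | (v >> 8)); }

/* Check the range once, then narrow explicitly -- a surprise 17-bit
 * value trips the assert instead of being silently truncated. */
static uint16_t narrow_checked(unsigned int v)
{
        assert(v <= 0xFFFF);
        return (uint16_t)v;
}

int main(void)
{
        unsigned int lut_value = 0xABCD;

        printf("0x%04X\n", to_be16(narrow_checked(lut_value)));  /* 0xCDAB */
        return 0;
}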
+6 -2
drivers/gpu/drm/amd/display/modules/vmid/vmid.c
··· 57 57 static void evict_vmids(struct core_vmid *core_vmid) 58 58 { 59 59 int i; 60 - uint16_t ord = dc_get_vmid_use_vector(core_vmid->dc); 60 + int ord_int = dc_get_vmid_use_vector(core_vmid->dc); 61 + 62 + ASSERT(ord_int >= 0 && ord_int <= 0xFFFF); 63 + uint16_t ord = (uint16_t)ord_int; 61 64 62 65 // At this point any positions with value 0 are unused vmids, evict them 63 66 for (i = 1; i < core_vmid->num_vmid; i++) { ··· 123 120 ASSERT(0); 124 121 } 125 122 126 - return vmid; 123 + ASSERT(vmid >= 0 && vmid <= 0xFF); 124 + return (uint8_t)vmid; 127 125 } 128 126 129 127 void mod_vmid_reset(struct mod_vmid *mod_vmid)
+10 -4
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 995 995 return ret; 996 996 997 997 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); 998 - if (ret) 999 - return ret; 998 + if (ret) { 999 + size = ret; 1000 + goto out_pm_put; 1001 + } 1000 1002 1001 1003 if (size == 0) 1002 1004 size = sysfs_emit(buf, "\n"); 1003 1005 1006 + out_pm_put: 1004 1007 amdgpu_pm_put_access(adev); 1005 1008 1006 1009 return size; ··· 3905 3902 return ret; 3906 3903 3907 3904 ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size); 3908 - if (ret) 3909 - return ret; 3905 + if (ret) { 3906 + size = ret; 3907 + goto out_pm_put; 3908 + } 3910 3909 if (size == 0) 3911 3910 size = sysfs_emit(buf, "\n"); 3912 3911 3912 + out_pm_put: 3913 3913 amdgpu_pm_put_access(adev); 3914 3914 3915 3915 return size;
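Both hunks above fix the same leak: on an emit error the function returned immediately and skipped amdgpu_pm_put_access(), so the access reference was never dropped. The fix is the usual single-exit goto cleanup. Its shape, reduced to a toy:

#include <stdio.h>

static int refs;
static int get_access(void) { refs++; return 0; }
static void put_access(void) { refs--; }
static int emit(void) { return -5; /* pretend failure */ }

static int show(void)
{
        int ret, size;

        ret = get_access();
        if (ret)
                return ret;

        ret = emit();
        if (ret) {
                size = ret;     /* propagate the error... */
                goto out_put;   /* ...but still drop the reference */
        }
        size = 42;

out_put:
        put_access();
        return size;
}

int main(void)
{
        int ret = show();

        printf("ret=%d refs=%d\n", ret, refs);  /* ret=-5 refs=0 */
        return 0;
}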
-3
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
··· 3062 3062 smu7_set_private_data_based_on_pptable_v0(hwmgr); 3063 3063 } 3064 3064 3065 - if (result) 3066 - goto fail; 3067 - 3068 3065 data->is_tlu_enabled = false; 3069 3066 3070 3067 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+1
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 584 584 /* Message flags for smu_msg_args */ 585 585 #define SMU_MSG_FLAG_ASYNC BIT(0) /* Async send - skip post-poll */ 586 586 #define SMU_MSG_FLAG_LOCK_HELD BIT(1) /* Caller holds ctl->lock */ 587 + #define SMU_MSG_FLAG_FORCE_READ_ARG BIT(2) /* force read smu arg from pmfw */ 587 588 588 589 /* smu_msg_ctl flags */ 589 590 #define SMU_MSG_CTL_DEBUG_MAILBOX BIT(0) /* Debug mailbox supported */
+3 -3
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 1846 1846 amdgpu_device_load_pci_state(adev->pdev); 1847 1847 1848 1848 dev_dbg(adev->dev, "wait for reset ack\n"); 1849 + ret = -ETIME; 1849 1850 while (ret == -ETIME && timeout) { 1850 1851 ret = smu_msg_wait_response(ctl, 0); 1851 1852 /* Wait a bit more time for getting ACK */ ··· 1856 1855 continue; 1857 1856 } 1858 1857 1859 - if (ret != 1) { 1858 + if (ret != 0) { 1860 1859 dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n", 1861 1860 SMU_RESET_MODE_2, ret); 1862 1861 goto out; ··· 1866 1865 } else { 1867 1866 dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", 1868 1867 smu->smc_fw_version); 1868 + ret = -EOPNOTSUPP; 1869 1869 } 1870 1870 1871 - if (ret == 1) 1872 - ret = 0; 1873 1871 out: 1874 1872 mutex_unlock(&ctl->lock); 1875 1873
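Two fixes in one hunk above: ret must be primed to -ETIME or the while (ret == -ETIME && timeout) loop body never runs and the reset ack is never polled, and success from smu_msg_wait_response() is 0, not 1 (which also lets the ret == 1 remapping at the end go away). The loop shape in miniature:

#include <stdio.h>

#define ETIME 62

static int attempts;
static int wait_response(void) { return ++attempts < 3 ? -ETIME : 0; }

int main(void)
{
        int timeout = 10;
        int ret = -ETIME;       /* prime the loop, or it never executes */

        while (ret == -ETIME && timeout) {
                ret = wait_response();
                if (ret == -ETIME) {
                        timeout--;
                        continue;
                }
        }

        printf("ret=%d attempts=%d\n", ret, attempts);  /* ret=0 attempts=3 */
        return 0;
}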
+59 -11
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 2214 2214 od_table->OverDriveTable.UclkFmax); 2215 2215 } 2216 2216 2217 + #define OD_ERROR_MSG_MAP(msg) \ 2218 + [msg] = #msg 2219 + 2220 + static const char *od_error_message[] = { 2221 + OD_ERROR_MSG_MAP(OD_REQUEST_ADVANCED_NOT_SUPPORTED), 2222 + OD_ERROR_MSG_MAP(OD_UNSUPPORTED_FEATURE), 2223 + OD_ERROR_MSG_MAP(OD_INVALID_FEATURE_COMBO_ERROR), 2224 + OD_ERROR_MSG_MAP(OD_GFXCLK_VF_CURVE_OFFSET_ERROR), 2225 + OD_ERROR_MSG_MAP(OD_VDD_GFX_VMAX_ERROR), 2226 + OD_ERROR_MSG_MAP(OD_VDD_SOC_VMAX_ERROR), 2227 + OD_ERROR_MSG_MAP(OD_PPT_ERROR), 2228 + OD_ERROR_MSG_MAP(OD_FAN_MIN_PWM_ERROR), 2229 + OD_ERROR_MSG_MAP(OD_FAN_ACOUSTIC_TARGET_ERROR), 2230 + OD_ERROR_MSG_MAP(OD_FAN_ACOUSTIC_LIMIT_ERROR), 2231 + OD_ERROR_MSG_MAP(OD_FAN_TARGET_TEMP_ERROR), 2232 + OD_ERROR_MSG_MAP(OD_FAN_ZERO_RPM_STOP_TEMP_ERROR), 2233 + OD_ERROR_MSG_MAP(OD_FAN_CURVE_PWM_ERROR), 2234 + OD_ERROR_MSG_MAP(OD_FAN_CURVE_TEMP_ERROR), 2235 + OD_ERROR_MSG_MAP(OD_FULL_CTRL_GFXCLK_ERROR), 2236 + OD_ERROR_MSG_MAP(OD_FULL_CTRL_UCLK_ERROR), 2237 + OD_ERROR_MSG_MAP(OD_FULL_CTRL_FCLK_ERROR), 2238 + OD_ERROR_MSG_MAP(OD_FULL_CTRL_VDD_GFX_ERROR), 2239 + OD_ERROR_MSG_MAP(OD_FULL_CTRL_VDD_SOC_ERROR), 2240 + OD_ERROR_MSG_MAP(OD_TDC_ERROR), 2241 + OD_ERROR_MSG_MAP(OD_GFXCLK_ERROR), 2242 + OD_ERROR_MSG_MAP(OD_UCLK_ERROR), 2243 + OD_ERROR_MSG_MAP(OD_FCLK_ERROR), 2244 + OD_ERROR_MSG_MAP(OD_OP_TEMP_ERROR), 2245 + OD_ERROR_MSG_MAP(OD_OP_GFX_EDC_ERROR), 2246 + OD_ERROR_MSG_MAP(OD_OP_GFX_PCC_ERROR), 2247 + OD_ERROR_MSG_MAP(OD_POWER_FEATURE_CTRL_ERROR), 2248 + }; 2249 + 2217 2250 static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu, 2218 2251 OverDriveTableExternal_t *od_table) 2219 2252 { 2220 - int ret; 2221 - ret = smu_cmn_update_table(smu, 2222 - SMU_TABLE_OVERDRIVE, 2223 - 0, 2224 - (void *)od_table, 2225 - true); 2226 - if (ret) 2227 - dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); 2253 + uint32_t read_arg = 0; 2254 + int ret, od_error_type; 2255 + 2256 + ret = smu_cmn_update_table_read_arg(smu, 2257 + SMU_TABLE_OVERDRIVE, 2258 + 0, 2259 + (void *)od_table, 2260 + &read_arg, 2261 + true); 2262 + if (ret) { 2263 + dev_err(smu->adev->dev, "Failed to upload overdrive table, ret:%d\n", ret); 2264 + if ((read_arg & 0xff) == TABLE_TRANSFER_FAILED) { 2265 + od_error_type = read_arg >> 16; 2266 + dev_err(smu->adev->dev, "Invalid overdrive table content: %s (%d)\n", 2267 + od_error_type < ARRAY_SIZE(od_error_message) ? 
2268 + od_error_message[od_error_type] : "unknown", 2269 + od_error_type); 2270 + } 2271 + } 2228 2272 2229 2273 return ret; 2230 2274 } ··· 2418 2374 } 2419 2375 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2420 2376 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2377 + od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_LEGACY_BIT); 2421 2378 break; 2422 2379 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: 2423 2380 od_table->OverDriveTable.FanZeroRpmEnable = ··· 2447 2402 od_table->OverDriveTable.FanMinimumPwm = 2448 2403 boot_overdrive_table->OverDriveTable.FanMinimumPwm; 2449 2404 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2450 - od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2405 + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_LEGACY_BIT); 2406 + od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2451 2407 break; 2452 2408 default: 2453 2409 dev_info(adev->dev, "Invalid table index: %ld\n", input); ··· 2618 2572 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2]; 2619 2573 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR; 2620 2574 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2575 + od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_LEGACY_BIT); 2621 2576 break; 2622 2577 2623 2578 case PP_OD_EDIT_ACOUSTIC_LIMIT: ··· 2688 2641 break; 2689 2642 2690 2643 case PP_OD_EDIT_FAN_MINIMUM_PWM: 2691 - if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { 2644 + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_LEGACY_BIT)) { 2692 2645 dev_warn(adev->dev, "Fan curve setting not supported!\n"); 2693 2646 return -ENOTSUPP; 2694 2647 } ··· 2706 2659 2707 2660 od_table->OverDriveTable.FanMinimumPwm = input[0]; 2708 2661 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2709 - od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2662 + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_LEGACY_BIT); 2663 + od_table->OverDriveTable.FeatureCtrlMask &= ~BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2710 2664 break; 2711 2665 2712 2666 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
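The OD_ERROR_MSG_MAP table above is the designated-initializer-plus-stringify idiom: [msg] = #msg files each enum value under its own index with its own name as the string, and any gaps in the enum are simply left NULL. A minimal version with a hypothetical enum:

#include <stdio.h>

enum err { ERR_OK, ERR_PPT = 3, ERR_TDC };

#define ERR_MSG_MAP(e) [e] = #e

static const char *err_name[] = {
        ERR_MSG_MAP(ERR_OK),
        ERR_MSG_MAP(ERR_PPT),
        ERR_MSG_MAP(ERR_TDC),
};

int main(void)
{
        unsigned int code = ERR_PPT;

        /* Bounds-check the index (and NULL-check for enum gaps) first. */
        printf("%s\n",
               code < sizeof(err_name) / sizeof(err_name[0]) && err_name[code]
               ? err_name[code] : "unknown");   /* ERR_PPT */
        return 0;
}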
+25 -12
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 496 496 } 497 497 498 498 /* Read output args */ 499 - if (ret == 0 && args->num_out_args > 0) { 499 + if ((ret == 0 || (args->flags & SMU_MSG_FLAG_FORCE_READ_ARG)) && 500 + args->num_out_args > 0) { 500 501 __smu_msg_v1_read_out_args(ctl, args); 501 502 dev_dbg(adev->dev, "smu send message: %s(%d) resp : 0x%08x", 502 503 smu_get_message_name(smu, args->msg), index, reg); ··· 1061 1060 return 0; 1062 1061 } 1063 1062 1064 - int smu_cmn_update_table(struct smu_context *smu, 1065 - enum smu_table_id table_index, 1066 - int argument, 1067 - void *table_data, 1068 - bool drv2smu) 1063 + int smu_cmn_update_table_read_arg(struct smu_context *smu, 1064 + enum smu_table_id table_index, 1065 + int argument, 1066 + void *table_data, 1067 + uint32_t *read_arg, 1068 + bool drv2smu) 1069 1069 { 1070 - struct smu_table_context *smu_table = &smu->smu_table; 1071 1070 struct amdgpu_device *adev = smu->adev; 1071 + struct smu_table_context *smu_table = &smu->smu_table; 1072 1072 struct smu_table *table = &smu_table->driver_table; 1073 + struct smu_msg_ctl *ctl = &smu->msg_ctl; 1074 + struct smu_msg_args args; 1073 1075 int table_id = smu_cmn_to_asic_specific_index(smu, 1074 1076 CMN2ASIC_MAPPING_TABLE, 1075 1077 table_index); 1076 1078 uint32_t table_size; 1077 1079 int ret = 0; 1080 + 1078 1081 if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) 1079 1082 return -EINVAL; 1080 1083 ··· 1093 1088 amdgpu_hdp_flush(adev, NULL); 1094 1089 } 1095 1090 1096 - ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ? 1097 - SMU_MSG_TransferTableDram2Smu : 1098 - SMU_MSG_TransferTableSmu2Dram, 1099 - table_id | ((argument & 0xFFFF) << 16), 1100 - NULL); 1091 + args.msg = drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram; 1092 + args.args[0] = ((argument & 0xFFFF) << 16) | (table_id & 0xffff); 1093 + args.num_args = 1; 1094 + args.out_args[0] = 0; 1095 + args.num_out_args = read_arg ? 1 : 0; 1096 + args.flags = read_arg ? SMU_MSG_FLAG_FORCE_READ_ARG : 0; 1097 + args.timeout = 0; 1098 + 1099 + ret = ctl->ops->send_msg(ctl, &args); 1100 + 1101 + if (read_arg) 1102 + *read_arg = args.out_args[0]; 1103 + 1101 1104 if (ret) 1102 1105 return ret; 1103 1106
+9 -5
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
··· 102 102 #define SMU_DPM_PCIE_GEN_IDX(gen) smu_cmn_dpm_pcie_gen_idx((gen)) 103 103 #define SMU_DPM_PCIE_WIDTH_IDX(width) smu_cmn_dpm_pcie_width_idx((width)) 104 104 105 + #define smu_cmn_update_table(smu, table_index, argument, table_data, drv2smu) \ 106 + smu_cmn_update_table_read_arg((smu), (table_index), (argument), (table_data), NULL, (drv2smu)) 107 + 105 108 extern const int link_speed[]; 106 109 107 110 /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */ ··· 171 168 uint32_t *if_version, 172 169 uint32_t *smu_version); 173 170 174 - int smu_cmn_update_table(struct smu_context *smu, 175 - enum smu_table_id table_index, 176 - int argument, 177 - void *table_data, 178 - bool drv2smu); 171 + int smu_cmn_update_table_read_arg(struct smu_context *smu, 172 + enum smu_table_id table_index, 173 + int argument, 174 + void *table_data, 175 + uint32_t *read_arg, 176 + bool drv2smu); 179 177 180 178 int smu_cmn_vram_cpy(struct smu_context *smu, void *dst, 181 179 const void *src, size_t len);
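Keeping smu_cmn_update_table() as a macro that forwards to the new _read_arg variant with read_arg = NULL means none of the existing call sites change; only callers that want the firmware argument back opt into the new parameter. The general shape:

#include <stdio.h>

/* New API grows an optional out-parameter. */
static int update_table_read_arg(int id, unsigned int *read_arg)
{
        (void)id;
        if (read_arg)
                *read_arg = 0xABCD;     /* only filled in when requested */
        return 0;
}

/* Old name forwards with NULL; legacy callers recompile unchanged. */
#define update_table(id) update_table_read_arg((id), NULL)

int main(void)
{
        unsigned int arg;

        update_table(1);                /* legacy call site */
        update_table_read_arg(1, &arg); /* new call site */
        printf("0x%X\n", arg);          /* 0xABCD */
        return 0;
}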
+2 -7
drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_virt_ras_cmd.c
··· 517 517 (struct amdgpu_virt_ras_cmd *)ras_mgr->virt_ras_cmd; 518 518 struct vram_blocks_ecc *blks_ecc = &virt_ras->blocks_ecc; 519 519 520 - if (blks_ecc->shared_mem.cpu_addr) { 521 - __set_cmd_auto_update(adev, 522 - RAS_CMD__GET_ALL_BLOCK_ECC_STATUS, 523 - blks_ecc->shared_mem.gpa, 524 - blks_ecc->shared_mem.size, false); 525 - 520 + if (blks_ecc->shared_mem.cpu_addr) 526 521 memset(blks_ecc->shared_mem.cpu_addr, 0, blks_ecc->shared_mem.size); 527 - } 522 + 528 523 memset(blks_ecc, 0, sizeof(*blks_ecc)); 529 524 530 525 return 0;
+1 -1
drivers/gpu/drm/i915/display/intel_psr.c
··· 2981 2981 return ret; 2982 2982 2983 2983 do { 2984 - bool cursor_in_su_area; 2984 + bool cursor_in_su_area = false; 2985 2985 2986 2986 /* 2987 2987 * Adjust su area to cover cursor fully as necessary
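For the i915 fix above: cursor_in_su_area was declared inside the loop without an initializer, so if no code path assigned it, the later test read indeterminate stack data and could end the alignment loop early. Giving it a defined starting value each iteration makes the exit condition deterministic, as in this toy loop:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int grows = 0;

        do {
                bool overlap = false;   /* fresh each pass; garbage here
                                         * would make the exit arbitrary */

                if (grows < 3)
                        overlap = true; /* pretend the cursor still overlaps */
                if (!overlap)
                        break;
                grows++;
        } while (1);

        printf("grew %d times\n", grows);       /* grew 3 times */
        return 0;
}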
+3 -3
drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
··· 513 513 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); 514 514 struct rcar_cmm_config cmm_config = {}; 515 515 516 - if (!rcrtc->cmm->dev) 516 + if (!rcrtc->cmm) 517 517 return; 518 518 519 519 if (drm_lut) ··· 667 667 if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) 668 668 rcar_du_vsp_disable(rcrtc); 669 669 670 - if (rcrtc->cmm->dev) 670 + if (rcrtc->cmm) 671 671 rcar_cmm_disable(rcrtc->cmm->dev); 672 672 673 673 /* ··· 726 726 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state); 727 727 struct rcar_du_device *rcdu = rcrtc->dev; 728 728 729 - if (rcrtc->cmm->dev) 729 + if (rcrtc->cmm) 730 730 rcar_cmm_enable(rcrtc->cmm->dev); 731 731 rcar_du_crtc_get(rcrtc); 732 732
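The rcar-du crash fix above is a classic ordering bug: rcrtc->cmm->dev dereferences cmm before anything is checked, so on SoCs with no CMM attached (cmm == NULL) it faults; the guard has to test the pointer itself. In miniature:

#include <stdio.h>

struct cmm { void *dev; };
struct crtc { struct cmm *cmm; };

static void crtc_setup(struct crtc *c)
{
        if (!c->cmm)    /* was: if (!c->cmm->dev) -- NULL deref when no CMM */
                return;

        puts("configuring CMM");
}

int main(void)
{
        struct crtc no_cmm = { .cmm = NULL };

        crtc_setup(&no_cmm);    /* safely does nothing */
        return 0;
}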