Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'amd-drm-fixes-7.1-2026-04-30' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-7.1-2026-04-30:

amdgpu:
- GFX12 fix for CONFIG_DRM_DEBUG_MM configs
- Fix DC analog support
- Userq fixes
- GART placement fix
- Aldebaran SMU fixes
- AMDGPU_INFO_READ_MMR_REG fix
- UVD 3.1 fix
- GC 6 TCC fix
- Fix root reservation in amdgpu_vm_handle_fault()
- RAS fix
- Module reload fix for APUs
- Fix build for CONFIG_DRM_FBDEV_EMULATION=n
- IGT DWB regression fix
- GC 11.5.4 fix
- VCN user fence fixes
- JPEG user fence fixes
- SMU 13.0.6 fix
- VCN 3/4 IB parser fixes
- NV3x+ dGPU vblank fix
- DCE6/8 fixes for LVDS/eDP panels without an EDID

amdkfd:
- Fix for when CONFIG_HSA_AMD is not set
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260430135619.3929877-1-alexander.deucher@amd.com

+447 -152
+5 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2839 2839 * that checks whether the PSP is running. A solution for those issues 2840 2840 * in the APU is to trigger a GPU reset, but this should be done during 2841 2841 * the unload phase to avoid adding boot latency and screen flicker. 2842 + * GFX V11 has GC block as default off IP. Every time AMDGPU driver sends 2843 + * a request to PMFW to unload MP1, PMFW will put GC in reset and power down 2844 + * the voltage. Hence, skipping reset for APUs with GFX V11 or later. 2842 2845 */ 2843 - if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) { 2846 + if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu && 2847 + amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 0, 0)) { 2844 2848 r = amdgpu_asic_reset(adev); 2845 2849 if (r) 2846 2850 dev_err(adev->dev, "asic reset on %s failed\n", __func__);
+1 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 3090 3090 case IP_VERSION(11, 5, 1): 3091 3091 case IP_VERSION(11, 5, 2): 3092 3092 case IP_VERSION(11, 5, 3): 3093 - adev->family = AMDGPU_FAMILY_GC_11_5_0; 3094 - break; 3095 3093 case IP_VERSION(11, 5, 4): 3096 - adev->family = AMDGPU_FAMILY_GC_11_5_4; 3094 + adev->family = AMDGPU_FAMILY_GC_11_5_0; 3097 3095 break; 3098 3096 case IP_VERSION(12, 0, 0): 3099 3097 case IP_VERSION(12, 0, 1):
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 3158 3158 amdgpu_register_atpx_handler(); 3159 3159 amdgpu_acpi_detect(); 3160 3160 3161 - /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ 3162 - amdgpu_amdkfd_init(); 3161 + /* Ignore KFD init failures when CONFIG_HSA_AMD is not set. */ 3162 + r = amdgpu_amdkfd_init(); 3163 + if (r && r != -ENOENT) 3164 + goto error_fence; 3163 3165 3164 3166 if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) { 3165 3167 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 314 314 mc->gart_start = max_mc_address - mc->gart_size + 1; 315 315 break; 316 316 case AMDGPU_GART_PLACEMENT_LOW: 317 - mc->gart_start = 0; 317 + if (size_bf >= mc->gart_size) 318 + mc->gart_start = 0; 319 + else 320 + mc->gart_start = ALIGN(mc->fb_end, four_gb); 318 321 break; 319 322 case AMDGPU_GART_PLACEMENT_BEST_FIT: 320 323 default:
+24 -33
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 873 873 ? -EFAULT : 0; 874 874 } 875 875 case AMDGPU_INFO_READ_MMR_REG: { 876 - int ret = 0; 877 - unsigned int n, alloc_size; 878 - uint32_t *regs; 879 876 unsigned int se_num = (info->read_mmr_reg.instance >> 880 877 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & 881 878 AMDGPU_INFO_MMR_SE_INDEX_MASK; 882 879 unsigned int sh_num = (info->read_mmr_reg.instance >> 883 880 AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & 884 881 AMDGPU_INFO_MMR_SH_INDEX_MASK; 885 - 886 - if (!down_read_trylock(&adev->reset_domain->sem)) 887 - return -ENOENT; 882 + unsigned int alloc_size; 883 + uint32_t *regs; 884 + int ret; 888 885 889 886 /* set full masks if the userspace set all bits 890 887 * in the bitfields 891 888 */ 892 - if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) { 889 + if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) 893 890 se_num = 0xffffffff; 894 - } else if (se_num >= AMDGPU_GFX_MAX_SE) { 895 - ret = -EINVAL; 896 - goto out; 897 - } 891 + else if (se_num >= AMDGPU_GFX_MAX_SE) 892 + return -EINVAL; 898 893 899 - if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) { 894 + if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 900 895 sh_num = 0xffffffff; 901 - } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) { 902 - ret = -EINVAL; 903 - goto out; 904 - } 896 + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) 897 + return -EINVAL; 905 898 906 - if (info->read_mmr_reg.count > 128) { 907 - ret = -EINVAL; 908 - goto out; 909 - } 899 + if (info->read_mmr_reg.count > 128) 900 + return -EINVAL; 910 901 911 - regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); 912 - if (!regs) { 913 - ret = -ENOMEM; 914 - goto out; 915 - } 902 + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), 903 + GFP_KERNEL); 904 + if (!regs) 905 + return -ENOMEM; 916 906 907 + down_read(&adev->reset_domain->sem); 917 908 alloc_size = info->read_mmr_reg.count * sizeof(*regs); 918 - 919 909 amdgpu_gfx_off_ctrl(adev, false); 910 + ret = 0; 920 911 for (i = 0; i < info->read_mmr_reg.count; i++) { 921 912 if (amdgpu_asic_read_register(adev, se_num, sh_num, 922 913 info->read_mmr_reg.dword_offset + i, 923 914 &regs[i])) { 924 915 DRM_DEBUG_KMS("unallowed offset %#x\n", 925 916 info->read_mmr_reg.dword_offset + i); 926 - kfree(regs); 927 - amdgpu_gfx_off_ctrl(adev, true); 928 917 ret = -EFAULT; 929 - goto out; 918 + break; 930 919 } 931 920 } 932 921 amdgpu_gfx_off_ctrl(adev, true); 933 - n = copy_to_user(out, regs, min(size, alloc_size)); 934 - kfree(regs); 935 - ret = (n ? -EFAULT : 0); 936 - out: 937 922 up_read(&adev->reset_domain->sem); 923 + 924 + if (!ret) { 925 + ret = copy_to_user(out, regs, min(size, alloc_size)) 926 + ? -EFAULT : 0; 927 + } 928 + kfree(regs); 938 929 return ret; 939 930 } 940 931 case AMDGPU_INFO_DEV_INFO: {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 1950 1950 if (!control || amdgpu_bad_page_threshold == 0) 1951 1951 return; 1952 1952 1953 - if (control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) { 1953 + if (control->ras_num_bad_pages > ras->bad_page_cnt_threshold) { 1954 1954 if (amdgpu_dpm_send_rma_reason(adev)) 1955 1955 dev_warn(adev->dev, "Unable to send out-of-band RMA CPER"); 1956 1956 else
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 75 75 unsigned int type, 76 76 uint64_t size_in_page) 77 77 { 78 + if (!size_in_page) 79 + return 0; 80 + 78 81 return ttm_range_man_init(&adev->mman.bdev, type, 79 82 false, size_in_page); 80 83 }
+15 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 205 205 msecs_to_jiffies(timeout_ms)); 206 206 } 207 207 208 + void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell) 209 + { 210 + struct xarray *xa = &adev->userq_doorbell_xa; 211 + struct amdgpu_usermode_queue *queue; 212 + unsigned long flags; 213 + 214 + xa_lock_irqsave(xa, flags); 215 + queue = xa_load(xa, doorbell); 216 + if (queue) 217 + amdgpu_userq_fence_driver_process(queue->fence_drv); 218 + xa_unlock_irqrestore(xa, flags); 219 + } 220 + 208 221 static void amdgpu_userq_init_hang_detect_work(struct amdgpu_usermode_queue *queue) 209 222 { 210 223 INIT_DELAYED_WORK(&queue->hang_detect_work, amdgpu_userq_hang_detect_work); ··· 656 643 #endif 657 644 amdgpu_userq_detect_and_reset_queues(uq_mgr); 658 645 r = amdgpu_userq_unmap_helper(queue); 659 - /*TODO: It requires a reset for userq hw unmap error*/ 660 - if (r) { 661 - drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n"); 662 - queue->state = AMDGPU_USERQ_STATE_HUNG; 663 - } 664 - 665 646 atomic_dec(&uq_mgr->userq_count[queue->queue_type]); 666 647 amdgpu_userq_cleanup(queue); 667 648 mutex_unlock(&uq_mgr->userq_mutex); ··· 1194 1187 bo = range->bo; 1195 1188 ret = amdgpu_ttm_tt_get_user_pages(bo, range); 1196 1189 if (ret) 1197 - goto unlock_all; 1190 + goto free_ranges; 1198 1191 } 1199 1192 1200 1193 invalidated = true; ··· 1221 1214 1222 1215 unlock_all: 1223 1216 drm_exec_fini(&exec); 1217 + free_ranges: 1224 1218 xa_for_each(&xa, tmp_key, range) { 1225 1219 if (!range) 1226 1220 continue;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
··· 156 156 void amdgpu_userq_pre_reset(struct amdgpu_device *adev); 157 157 int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost); 158 158 void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue); 159 + void amdgpu_userq_process_fence_irq(struct amdgpu_device *adev, u32 doorbell); 159 160 160 161 int amdgpu_userq_input_va_validate(struct amdgpu_device *adev, 161 162 struct amdgpu_usermode_queue *queue,
+14 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 3023 3023 3024 3024 is_compute_context = vm->is_compute_context; 3025 3025 3026 - if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, 3027 - node_id, addr >> PAGE_SHIFT, ts, write_fault)) { 3026 + if (is_compute_context) { 3027 + /* Unreserve root since svm_range_restore_pages might try to reserve it. */ 3028 + /* TODO: rework svm_range_restore_pages so that this isn't necessary. */ 3028 3029 amdgpu_bo_unreserve(root); 3030 + 3031 + if (!svm_range_restore_pages(adev, pasid, vmid, 3032 + node_id, addr >> PAGE_SHIFT, ts, write_fault)) { 3033 + amdgpu_bo_unref(&root); 3034 + return true; 3035 + } 3029 3036 amdgpu_bo_unref(&root); 3030 - return true; 3037 + 3038 + /* Re-acquire the VM lock, could be that the VM was freed in between. */ 3039 + vm = amdgpu_vm_lock_by_pasid(adev, &root, pasid); 3040 + if (!vm) 3041 + return false; 3031 3042 } 3032 3043 3033 3044 addr /= AMDGPU_GPU_PAGE_SIZE;
+1 -9
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 6523 6523 DRM_DEBUG("IH: CP EOP\n"); 6524 6524 6525 6525 if (adev->enable_mes && doorbell_offset) { 6526 - struct amdgpu_usermode_queue *queue; 6527 - struct xarray *xa = &adev->userq_doorbell_xa; 6528 - unsigned long flags; 6529 - 6530 - xa_lock_irqsave(xa, flags); 6531 - queue = xa_load(xa, doorbell_offset); 6532 - if (queue) 6533 - amdgpu_userq_fence_driver_process(queue->fence_drv); 6534 - xa_unlock_irqrestore(xa, flags); 6526 + amdgpu_userq_process_fence_irq(adev, doorbell_offset); 6535 6527 } else { 6536 6528 me_id = (entry->ring_id & 0x0c) >> 2; 6537 6529 pipe_id = (entry->ring_id & 0x03) >> 0;
+1 -9
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 4854 4854 DRM_DEBUG("IH: CP EOP\n"); 4855 4855 4856 4856 if (adev->enable_mes && doorbell_offset) { 4857 - struct xarray *xa = &adev->userq_doorbell_xa; 4858 - struct amdgpu_usermode_queue *queue; 4859 - unsigned long flags; 4860 - 4861 - xa_lock_irqsave(xa, flags); 4862 - queue = xa_load(xa, doorbell_offset); 4863 - if (queue) 4864 - amdgpu_userq_fence_driver_process(queue->fence_drv); 4865 - xa_unlock_irqrestore(xa, flags); 4857 + amdgpu_userq_process_fence_irq(adev, doorbell_offset); 4866 4858 } else { 4867 4859 me_id = (entry->ring_id & 0x0c) >> 2; 4868 4860 pipe_id = (entry->ring_id & 0x03) >> 0;
+1 -10
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
··· 3643 3643 DRM_DEBUG("IH: CP EOP\n"); 3644 3644 3645 3645 if (adev->enable_mes && doorbell_offset) { 3646 - struct xarray *xa = &adev->userq_doorbell_xa; 3647 - struct amdgpu_usermode_queue *queue; 3648 - unsigned long flags; 3649 - 3650 - xa_lock_irqsave(xa, flags); 3651 - queue = xa_load(xa, doorbell_offset); 3652 - if (queue) 3653 - amdgpu_userq_fence_driver_process(queue->fence_drv); 3654 - 3655 - xa_unlock_irqrestore(xa, flags); 3646 + amdgpu_userq_process_fence_irq(adev, doorbell_offset); 3656 3647 } else { 3657 3648 me_id = (entry->ring_id & 0x0c) >> 2; 3658 3649 pipe_id = (entry->ring_id & 0x03) >> 0;
+66
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 1571 1571 mutex_unlock(&adev->grbm_idx_mutex); 1572 1572 } 1573 1573 1574 + /** 1575 + * gfx_v6_0_setup_tcc() - setup which TCCs are used 1576 + * 1577 + * @adev: amdgpu_device pointer 1578 + * 1579 + * Verify whether the current GPU has any TCCs disabled, 1580 + * which can happen when the GPU is harvested and some 1581 + * memory channels are disabled, reducing the memory bus width. 1582 + * For example, on the Radeon HD 7870 XT (Tahiti LE). 1583 + * 1584 + * If some TCCs are disabled, we need to make sure that 1585 + * the disabled TCCs are not used, and the remaining TCCs 1586 + * are used optimally. 1587 + * 1588 + * TCP_CHAN_STEER_LO/HI control which TCC is used by TCP channels. 1589 + * TCP_ADDR_CONFIG.NUM_TCC_BANKS controls how many channels are used. 1590 + * 1591 + * For optimal performance: 1592 + * - Rely on the CHAN_STEER from the golden registers table, 1593 + * only skip disabled TCCs but keep the mapping order. 1594 + * - Limit NUM_TCC_BANKS to number of active TCCs to avoid thrashing, 1595 + * which performs better than using the same TCC twice. 1596 + */ 1597 + static void gfx_v6_0_setup_tcc(struct amdgpu_device *adev) 1598 + { 1599 + u32 i, tcc, tcp_addr_config, num_active_tcc = 0; 1600 + u64 chan_steer, patched_chan_steer = 0; 1601 + const u32 num_max_tcc = adev->gfx.config.max_texture_channel_caches; 1602 + const u32 dis_tcc_mask = 1603 + amdgpu_gfx_create_bitmask(num_max_tcc) & 1604 + (REG_GET_FIELD(RREG32(mmCGTS_TCC_DISABLE), 1605 + CGTS_TCC_DISABLE, TCC_DISABLE) | 1606 + REG_GET_FIELD(RREG32(mmCGTS_USER_TCC_DISABLE), 1607 + CGTS_USER_TCC_DISABLE, TCC_DISABLE)); 1608 + 1609 + /* When no TCC is disabled, the golden registers table already has optimal TCC setup */ 1610 + if (!dis_tcc_mask) 1611 + return; 1612 + 1613 + /* Each 4-bit nibble contains the index of a TCC used by all TCPs */ 1614 + chan_steer = RREG32(mmTCP_CHAN_STEER_LO) | ((u64)RREG32(mmTCP_CHAN_STEER_HI) << 32ull); 1615 + 1616 + /* Patch the TCP to TCC mapping to skip disabled TCCs */ 1617 + for (i = 0; i < num_max_tcc; ++i) { 1618 + tcc = (chan_steer >> (u64)(4 * i)) & 0xf; 1619 + 1620 + if (!((1 << tcc) & dis_tcc_mask)) { 1621 + /* Copy enabled TCC indices to the patched register value. */ 1622 + patched_chan_steer |= (u64)tcc << (u64)(4 * num_active_tcc); 1623 + ++num_active_tcc; 1624 + } 1625 + } 1626 + 1627 + WARN_ON(num_active_tcc != num_max_tcc - hweight32(dis_tcc_mask)); 1628 + 1629 + /* Patch number of TCCs used by TCPs */ 1630 + tcp_addr_config = REG_SET_FIELD(RREG32(mmTCP_ADDR_CONFIG), 1631 + TCP_ADDR_CONFIG, NUM_TCC_BANKS, 1632 + num_active_tcc - 1); 1633 + 1634 + WREG32(mmTCP_ADDR_CONFIG, tcp_addr_config); 1635 + WREG32(mmTCP_CHAN_STEER_HI, upper_32_bits(patched_chan_steer)); 1636 + WREG32(mmTCP_CHAN_STEER_LO, lower_32_bits(patched_chan_steer)); 1637 + } 1638 + 1574 1639 static void gfx_v6_0_config_init(struct amdgpu_device *adev) 1575 1640 { 1576 1641 adev->gfx.config.double_offchip_lds_buf = 0; ··· 1794 1729 gfx_v6_0_tiling_mode_table_init(adev); 1795 1730 1796 1731 gfx_v6_0_setup_rb(adev); 1732 + gfx_v6_0_setup_tcc(adev); 1797 1733 1798 1734 gfx_v6_0_setup_spi(adev); 1799 1735
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 802 802 static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { 803 803 .type = AMDGPU_RING_TYPE_VCN_JPEG, 804 804 .align_mask = 0xf, 805 + .no_user_fence = true, 805 806 .get_rptr = jpeg_v2_0_dec_ring_get_rptr, 806 807 .get_wptr = jpeg_v2_0_dec_ring_get_wptr, 807 808 .set_wptr = jpeg_v2_0_dec_ring_set_wptr,
+2
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 693 693 static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = { 694 694 .type = AMDGPU_RING_TYPE_VCN_JPEG, 695 695 .align_mask = 0xf, 696 + .no_user_fence = true, 696 697 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 697 698 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 698 699 .set_wptr = jpeg_v2_5_dec_ring_set_wptr, ··· 725 724 static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { 726 725 .type = AMDGPU_RING_TYPE_VCN_JPEG, 727 726 .align_mask = 0xf, 727 + .no_user_fence = true, 728 728 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 729 729 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 730 730 .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 594 594 static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = { 595 595 .type = AMDGPU_RING_TYPE_VCN_JPEG, 596 596 .align_mask = 0xf, 597 + .no_user_fence = true, 597 598 .get_rptr = jpeg_v3_0_dec_ring_get_rptr, 598 599 .get_wptr = jpeg_v3_0_dec_ring_get_wptr, 599 600 .set_wptr = jpeg_v3_0_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 759 759 static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = { 760 760 .type = AMDGPU_RING_TYPE_VCN_JPEG, 761 761 .align_mask = 0xf, 762 + .no_user_fence = true, 762 763 .get_rptr = jpeg_v4_0_dec_ring_get_rptr, 763 764 .get_wptr = jpeg_v4_0_dec_ring_get_wptr, 764 765 .set_wptr = jpeg_v4_0_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 1219 1219 static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = { 1220 1220 .type = AMDGPU_RING_TYPE_VCN_JPEG, 1221 1221 .align_mask = 0xf, 1222 + .no_user_fence = true, 1222 1223 .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, 1223 1224 .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, 1224 1225 .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
··· 804 804 static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { 805 805 .type = AMDGPU_RING_TYPE_VCN_JPEG, 806 806 .align_mask = 0xf, 807 + .no_user_fence = true, 807 808 .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr, 808 809 .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr, 809 810 .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
··· 680 680 static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { 681 681 .type = AMDGPU_RING_TYPE_VCN_JPEG, 682 682 .align_mask = 0xf, 683 + .no_user_fence = true, 683 684 .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, 684 685 .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, 685 686 .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
··· 884 884 static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = { 885 885 .type = AMDGPU_RING_TYPE_VCN_JPEG, 886 886 .align_mask = 0xf, 887 + .no_user_fence = true, 887 888 .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr, 888 889 .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr, 889 890 .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_2.c
··· 703 703 static const struct amdgpu_ring_funcs jpeg_v5_0_2_dec_ring_vm_funcs = { 704 704 .type = AMDGPU_RING_TYPE_VCN_JPEG, 705 705 .align_mask = 0xf, 706 + .no_user_fence = true, 706 707 .get_rptr = jpeg_v5_0_2_dec_ring_get_rptr, 707 708 .get_wptr = jpeg_v5_0_2_dec_ring_get_wptr, 708 709 .set_wptr = jpeg_v5_0_2_dec_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c
··· 661 661 static const struct amdgpu_ring_funcs jpeg_v5_3_0_dec_ring_vm_funcs = { 662 662 .type = AMDGPU_RING_TYPE_VCN_JPEG, 663 663 .align_mask = 0xf, 664 + .no_user_fence = true, 664 665 .get_rptr = jpeg_v5_3_0_dec_ring_get_rptr, 665 666 .get_wptr = jpeg_v5_3_0_dec_ring_get_wptr, 666 667 .set_wptr = jpeg_v5_3_0_dec_ring_set_wptr,
+1 -10
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 1662 1662 u32 doorbell_offset = entry->src_data[0]; 1663 1663 1664 1664 if (adev->enable_mes && doorbell_offset) { 1665 - struct amdgpu_usermode_queue *queue; 1666 - struct xarray *xa = &adev->userq_doorbell_xa; 1667 - unsigned long flags; 1668 - 1669 1665 doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; 1670 - 1671 - xa_lock_irqsave(xa, flags); 1672 - queue = xa_load(xa, doorbell_offset); 1673 - if (queue) 1674 - amdgpu_userq_fence_driver_process(queue->fence_drv); 1675 - xa_unlock_irqrestore(xa, flags); 1666 + amdgpu_userq_process_fence_irq(adev, doorbell_offset); 1676 1667 } 1677 1668 1678 1669 return 0;
+1 -10
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
··· 1594 1594 u32 doorbell_offset = entry->src_data[0]; 1595 1595 1596 1596 if (adev->enable_mes && doorbell_offset) { 1597 - struct xarray *xa = &adev->userq_doorbell_xa; 1598 - struct amdgpu_usermode_queue *queue; 1599 - unsigned long flags; 1600 - 1601 1597 doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; 1602 - 1603 - xa_lock_irqsave(xa, flags); 1604 - queue = xa_load(xa, doorbell_offset); 1605 - if (queue) 1606 - amdgpu_userq_fence_driver_process(queue->fence_drv); 1607 - xa_unlock_irqrestore(xa, flags); 1598 + amdgpu_userq_process_fence_irq(adev, doorbell_offset); 1608 1599 } 1609 1600 1610 1601 return 0;
+10
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
··· 242 242 uint64_t addr; 243 243 uint32_t size; 244 244 245 + /* When the keyselect is already set, don't perturb it. */ 246 + if (RREG32(mmUVD_FW_START)) 247 + return; 248 + 245 249 /* program the VCPU memory controller bits 0-27 */ 246 250 addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; 247 251 size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; ··· 287 283 { 288 284 int i; 289 285 uint32_t keysel = adev->uvd.keyselect; 286 + 287 + if (RREG32(mmUVD_FW_START) & UVD_FW_STATUS__PASS_MASK) { 288 + dev_dbg(adev->dev, "UVD keyselect already set: 0x%x (on CPU: 0x%x)\n", 289 + RREG32(mmUVD_FW_START), adev->uvd.keyselect); 290 + return 0; 291 + } 290 292 291 293 WREG32(mmUVD_FW_START, keysel); 292 294
+2
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
··· 2113 2113 static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { 2114 2114 .type = AMDGPU_RING_TYPE_VCN_DEC, 2115 2115 .align_mask = 0xf, 2116 + .no_user_fence = true, 2116 2117 .secure_submission_supported = true, 2117 2118 .get_rptr = vcn_v2_0_dec_ring_get_rptr, 2118 2119 .get_wptr = vcn_v2_0_dec_ring_get_wptr, ··· 2146 2145 .type = AMDGPU_RING_TYPE_VCN_ENC, 2147 2146 .align_mask = 0x3f, 2148 2147 .nop = VCN_ENC_CMD_NO_OP, 2148 + .no_user_fence = true, 2149 2149 .get_rptr = vcn_v2_0_enc_ring_get_rptr, 2150 2150 .get_wptr = vcn_v2_0_enc_ring_get_wptr, 2151 2151 .set_wptr = vcn_v2_0_enc_ring_set_wptr,
+2
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 1778 1778 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { 1779 1779 .type = AMDGPU_RING_TYPE_VCN_DEC, 1780 1780 .align_mask = 0xf, 1781 + .no_user_fence = true, 1781 1782 .secure_submission_supported = true, 1782 1783 .get_rptr = vcn_v2_5_dec_ring_get_rptr, 1783 1784 .get_wptr = vcn_v2_5_dec_ring_get_wptr, ··· 1880 1879 .type = AMDGPU_RING_TYPE_VCN_ENC, 1881 1880 .align_mask = 0x3f, 1882 1881 .nop = VCN_ENC_CMD_NO_OP, 1882 + .no_user_fence = true, 1883 1883 .get_rptr = vcn_v2_5_enc_ring_get_rptr, 1884 1884 .get_wptr = vcn_v2_5_enc_ring_get_wptr, 1885 1885 .set_wptr = vcn_v2_5_enc_ring_set_wptr,
+6 -1
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 1856 1856 .type = AMDGPU_RING_TYPE_VCN_DEC, 1857 1857 .align_mask = 0x3f, 1858 1858 .nop = VCN_DEC_SW_CMD_NO_OP, 1859 + .no_user_fence = true, 1859 1860 .secure_submission_supported = true, 1860 1861 .get_rptr = vcn_v3_0_dec_ring_get_rptr, 1861 1862 .get_wptr = vcn_v3_0_dec_ring_get_wptr, ··· 1973 1972 1974 1973 for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { 1975 1974 uint32_t offset, size, *create; 1975 + uint64_t buf_end; 1976 1976 1977 1977 if (msg[0] != RDECODE_MESSAGE_CREATE) 1978 1978 continue; ··· 1981 1979 offset = msg[1]; 1982 1980 size = msg[2]; 1983 1981 1984 - if (size < 4 || offset + size > end - addr) { 1982 + if (size < 4 || check_add_overflow(offset, size, &buf_end) || 1983 + buf_end > end - addr) { 1985 1984 DRM_ERROR("VCN message buffer exceeds BO bounds!\n"); 1986 1985 r = -EINVAL; 1987 1986 goto out; ··· 2039 2036 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { 2040 2037 .type = AMDGPU_RING_TYPE_VCN_DEC, 2041 2038 .align_mask = 0xf, 2039 + .no_user_fence = true, 2042 2040 .secure_submission_supported = true, 2043 2041 .get_rptr = vcn_v3_0_dec_ring_get_rptr, 2044 2042 .get_wptr = vcn_v3_0_dec_ring_get_wptr, ··· 2142 2138 .type = AMDGPU_RING_TYPE_VCN_ENC, 2143 2139 .align_mask = 0x3f, 2144 2140 .nop = VCN_ENC_CMD_NO_OP, 2141 + .no_user_fence = true, 2145 2142 .get_rptr = vcn_v3_0_enc_ring_get_rptr, 2146 2143 .get_wptr = vcn_v3_0_enc_ring_get_wptr, 2147 2144 .set_wptr = vcn_v3_0_enc_ring_set_wptr,
+4 -1
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
··· 1889 1889 1890 1890 for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { 1891 1891 uint32_t offset, size, *create; 1892 + uint64_t buf_end; 1892 1893 1893 1894 if (msg[0] != RDECODE_MESSAGE_CREATE) 1894 1895 continue; ··· 1897 1896 offset = msg[1]; 1898 1897 size = msg[2]; 1899 1898 1900 - if (size < 4 || offset + size > end - addr) { 1899 + if (size < 4 || check_add_overflow(offset, size, &buf_end) || 1900 + buf_end > end - addr) { 1901 1901 DRM_ERROR("VCN message buffer exceeds BO bounds!\n"); 1902 1902 r = -EINVAL; 1903 1903 goto out; ··· 1996 1994 .type = AMDGPU_RING_TYPE_VCN_ENC, 1997 1995 .align_mask = 0x3f, 1998 1996 .nop = VCN_ENC_CMD_NO_OP, 1997 + .no_user_fence = true, 1999 1998 .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata), 2000 1999 .get_rptr = vcn_v4_0_unified_ring_get_rptr, 2001 2000 .get_wptr = vcn_v4_0_unified_ring_get_wptr,
+1
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 1775 1775 .type = AMDGPU_RING_TYPE_VCN_ENC, 1776 1776 .align_mask = 0x3f, 1777 1777 .nop = VCN_ENC_CMD_NO_OP, 1778 + .no_user_fence = true, 1778 1779 .get_rptr = vcn_v4_0_3_unified_ring_get_rptr, 1779 1780 .get_wptr = vcn_v4_0_3_unified_ring_get_wptr, 1780 1781 .set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
··· 1483 1483 .type = AMDGPU_RING_TYPE_VCN_ENC, 1484 1484 .align_mask = 0x3f, 1485 1485 .nop = VCN_ENC_CMD_NO_OP, 1486 + .no_user_fence = true, 1486 1487 .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, 1487 1488 .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, 1488 1489 .set_wptr = vcn_v4_0_5_unified_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
··· 1207 1207 .type = AMDGPU_RING_TYPE_VCN_ENC, 1208 1208 .align_mask = 0x3f, 1209 1209 .nop = VCN_ENC_CMD_NO_OP, 1210 + .no_user_fence = true, 1210 1211 .get_rptr = vcn_v5_0_0_unified_ring_get_rptr, 1211 1212 .get_wptr = vcn_v5_0_0_unified_ring_get_wptr, 1212 1213 .set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
··· 1419 1419 .type = AMDGPU_RING_TYPE_VCN_ENC, 1420 1420 .align_mask = 0x3f, 1421 1421 .nop = VCN_ENC_CMD_NO_OP, 1422 + .no_user_fence = true, 1422 1423 .get_rptr = vcn_v5_0_1_unified_ring_get_rptr, 1423 1424 .get_wptr = vcn_v5_0_1_unified_ring_get_wptr, 1424 1425 .set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
+1
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_2.c
··· 994 994 .type = AMDGPU_RING_TYPE_VCN_ENC, 995 995 .align_mask = 0x3f, 996 996 .nop = VCN_ENC_CMD_NO_OP, 997 + .no_user_fence = true, 997 998 .get_rptr = vcn_v5_0_2_unified_ring_get_rptr, 998 999 .get_wptr = vcn_v5_0_2_unified_ring_get_wptr, 999 1000 .set_wptr = vcn_v5_0_2_unified_ring_set_wptr,
+24 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 25 25 #include <linux/err.h> 26 26 #include <linux/fs.h> 27 27 #include <linux/file.h> 28 + #include <linux/overflow.h> 28 29 #include <linux/sched.h> 29 30 #include <linux/slab.h> 30 31 #include <linux/uaccess.h> ··· 1696 1695 return kfd_smi_event_open(pdd->dev, &args->anon_fd); 1697 1696 } 1698 1697 1698 + static int kfd_ioctl_svm_validate(void *kdata, unsigned int usize) 1699 + { 1700 + struct kfd_ioctl_svm_args *args = kdata; 1701 + size_t expected = struct_size(args, attrs, args->nattr); 1702 + 1703 + if (expected == SIZE_MAX || usize < expected) 1704 + return -EINVAL; 1705 + return 0; 1706 + } 1707 + 1699 1708 #if IS_ENABLED(CONFIG_HSA_AMD_SVM) 1700 1709 1701 1710 static int kfd_ioctl_set_xnack_mode(struct file *filep, ··· 3220 3209 3221 3210 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 3222 3211 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ 3223 - .cmd_drv = 0, .name = #ioctl} 3212 + .validate = NULL, .cmd_drv = 0, .name = #ioctl} 3213 + 3214 + #define AMDKFD_IOCTL_DEF_V(ioctl, _func, _validate, _flags) \ 3215 + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ 3216 + .validate = _validate, .cmd_drv = 0, .name = #ioctl} 3224 3217 3225 3218 /** Ioctl table */ 3226 3219 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { ··· 3321 3306 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS, 3322 3307 kfd_ioctl_smi_events, 0), 3323 3308 3324 - AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0), 3309 + AMDKFD_IOCTL_DEF_V(AMDKFD_IOC_SVM, kfd_ioctl_svm, 3310 + kfd_ioctl_svm_validate, 0), 3325 3311 3326 3312 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE, 3327 3313 kfd_ioctl_set_xnack_mode, 0), ··· 3445 3429 } 3446 3430 } else if (cmd & IOC_OUT) { 3447 3431 memset(kdata, 0, usize); 3432 + } 3433 + 3434 + if (ioctl->validate) { 3435 + retcode = ioctl->validate(kdata, usize); 3436 + if (retcode) 3437 + goto err_i1; 3448 3438 } 3449 3439 3450 3440 retcode = func(filep, process, kdata);
+3
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 1047 1047 typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, 1048 1048 void *data); 1049 1049 1050 + typedef int amdkfd_ioctl_validate_t(void *kdata, unsigned int usize); 1051 + 1050 1052 struct amdkfd_ioctl_desc { 1051 1053 unsigned int cmd; 1052 1054 int flags; 1053 1055 amdkfd_ioctl_t *func; 1056 + amdkfd_ioctl_validate_t *validate; 1054 1057 unsigned int cmd_drv; 1055 1058 const char *name; 1056 1059 };
+11
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1366 1366 1367 1367 pr_debug("CPU[0x%llx 0x%llx] -> GPU[0x%llx 0x%llx]\n", start, last, 1368 1368 gpu_start, gpu_end); 1369 + 1370 + if (!amdgpu_vm_ready(vm)) { 1371 + pr_debug("VM not ready, canceling unmap\n"); 1372 + return -EINVAL; 1373 + } 1374 + 1369 1375 return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, gpu_start, 1370 1376 gpu_end, init_pte_value, 0, 0, NULL, NULL, 1371 1377 fence); ··· 1448 1442 1449 1443 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms, 1450 1444 last_start, last_start + npages - 1, readonly); 1445 + 1446 + if (!amdgpu_vm_ready(vm)) { 1447 + pr_debug("VM not ready, canceling map\n"); 1448 + return -EINVAL; 1449 + } 1451 1450 1452 1451 for (i = offset; i < offset + npages; i++) { 1453 1452 uint64_t gpu_start;
+20 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1903 1903 goto error; 1904 1904 } 1905 1905 1906 - init_data.asic_id.chip_family = adev->family; 1906 + /* special handling for early revisions of GC 11.5.4 */ 1907 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 4)) 1908 + init_data.asic_id.chip_family = AMDGPU_FAMILY_GC_11_5_4; 1909 + else 1910 + init_data.asic_id.chip_family = adev->family; 1907 1911 1908 1912 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1909 1913 init_data.asic_id.hw_internal_rev = adev->external_rev_id; ··· 9408 9404 if (acrtc_state) { 9409 9405 timing = &acrtc_state->stream->timing; 9410 9406 9411 - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 9412 - IP_VERSION(3, 5, 0) || 9413 - !(adev->flags & AMD_IS_APU)) { 9407 + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= 9408 + IP_VERSION(3, 2, 0) && 9409 + !(adev->flags & AMD_IS_APU)) { 9410 + /* 9411 + * DGPUs NV3x and newer that support idle optimizations 9412 + * experience intermittent flip-done timeouts on cursor 9413 + * updates. Restore 5s offdelay behavior for now. 9414 + * 9415 + * Discussion on the issue: 9416 + * https://lore.kernel.org/amd-gfx/20260217191632.1243826-1-sysdadmin@m1k.cloud/ 9417 + */ 9418 + config.offdelay_ms = 5000; 9419 + config.disable_immediate = false; 9420 + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 9421 + IP_VERSION(3, 5, 0)) { 9414 9422 /* 9415 9423 * Older HW and DGPU have issues with instant off; 9416 9424 * use a 2 frame offdelay.
+44
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 1032 1032 return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); 1033 1033 } 1034 1034 1035 + static const struct drm_edid * 1036 + dm_helpers_read_vbios_hardcoded_edid(struct dc_link *link, struct amdgpu_dm_connector *aconnector) 1037 + { 1038 + struct dc_bios *bios = link->ctx->dc_bios; 1039 + struct embedded_panel_info info; 1040 + const struct drm_edid *edid; 1041 + enum bp_result r; 1042 + 1043 + if (!dc_is_embedded_signal(link->connector_signal) || 1044 + !bios->funcs->get_embedded_panel_info) 1045 + return NULL; 1046 + 1047 + memset(&info, 0, sizeof(info)); 1048 + r = bios->funcs->get_embedded_panel_info(bios, &info); 1049 + 1050 + if (r != BP_RESULT_OK) { 1051 + dm_error("Error when reading embedded panel info: %u\n", r); 1052 + return NULL; 1053 + } 1054 + 1055 + if (!info.fake_edid || !info.fake_edid_size) { 1056 + dm_error("Embedded panel info doesn't contain an EDID\n"); 1057 + return NULL; 1058 + } 1059 + 1060 + edid = drm_edid_alloc(info.fake_edid, info.fake_edid_size); 1061 + 1062 + if (!drm_edid_valid(edid)) { 1063 + dm_error("EDID from embedded panel info is invalid\n"); 1064 + drm_edid_free(edid); 1065 + return NULL; 1066 + } 1067 + 1068 + aconnector->base.display_info.width_mm = info.panel_width_mm; 1069 + aconnector->base.display_info.height_mm = info.panel_height_mm; 1070 + 1071 + return edid; 1072 + } 1073 + 1035 1074 void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps) 1036 1075 { 1037 1076 edid_caps->scdc_present = hdmi->scdc.supported; ··· 1091 1052 1092 1053 if (link->aux_mode) 1093 1054 ddc = &aconnector->dm_dp_aux.aux.ddc; 1055 + else if (link->ddc_hw_inst == GPIO_DDC_LINE_UNKNOWN && 1056 + dc_is_embedded_signal(link->connector_signal)) 1057 + ddc = NULL; 1094 1058 else 1095 1059 ddc = &aconnector->i2c->base; 1096 1060 ··· 1107 1065 drm_edid = dm_helpers_read_acpi_edid(aconnector); 1108 1066 if (drm_edid) 1109 1067 drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name); 1068 + else if (!ddc) 1069 + drm_edid = dm_helpers_read_vbios_hardcoded_edid(link, aconnector); 1110 1070 else 1111 1071 drm_edid = drm_edid_read_ddc(connector, ddc); 1112 1072 drm_edid_connector_update(connector, drm_edid);
+72 -1
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
··· 794 794 795 795 static enum bp_result bios_parser_dac_load_detection( 796 796 struct dc_bios *dcb, 797 - enum engine_id engine_id) 797 + enum engine_id engine_id, 798 + struct graphics_object_id ext_enc_id) 798 799 { 799 800 struct bios_parser *bp = BP_FROM_DCB(dcb); 800 801 struct dc_context *ctx = dcb->ctx; 801 802 struct bp_load_detection_parameters bp_params = {0}; 803 + struct bp_external_encoder_control ext_cntl = {0}; 802 804 enum bp_result bp_result = BP_RESULT_UNSUPPORTED; 803 805 uint32_t bios_0_scratch; 804 806 uint32_t device_id_mask = 0; ··· 826 824 827 825 bp_params.engine_id = engine_id; 828 826 bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params); 827 + } else if (ext_enc_id.id) { 828 + if (!bp->cmd_tbl.external_encoder_control) 829 + return BP_RESULT_UNSUPPORTED; 830 + 831 + ext_cntl.action = EXTERNAL_ENCODER_CONTROL_DAC_LOAD_DETECT; 832 + ext_cntl.encoder_id = ext_enc_id; 833 + bp_result = bp->cmd_tbl.external_encoder_control(bp, &ext_cntl); 829 834 } 830 835 831 836 if (bp_result != BP_RESULT_OK) ··· 1313 1304 return BP_RESULT_FAILURE; 1314 1305 } 1315 1306 1307 + static enum bp_result get_embedded_panel_extra_info( 1308 + struct bios_parser *bp, 1309 + struct embedded_panel_info *info, 1310 + const uint32_t table_offset) 1311 + { 1312 + uint8_t *record = bios_get_image(&bp->base, table_offset, 1); 1313 + ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1314 + ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1315 + 1316 + while (*record != ATOM_RECORD_END_TYPE) { 1317 + switch (*record) { 1318 + case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1319 + record += sizeof(ATOM_PATCH_RECORD_MODE); 1320 + break; 1321 + case LCD_RTS_RECORD_TYPE: 1322 + record += sizeof(ATOM_LCD_RTS_RECORD); 1323 + break; 1324 + case LCD_CAP_RECORD_TYPE: 1325 + record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); 1326 + break; 1327 + case LCD_FAKE_EDID_PATCH_RECORD_TYPE: 1328 + fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; 1329 + if 
(fake_edid_record->ucFakeEDIDLength) { 1330 + if (fake_edid_record->ucFakeEDIDLength == 128) 1331 + info->fake_edid_size = 1332 + fake_edid_record->ucFakeEDIDLength; 1333 + else 1334 + info->fake_edid_size = 1335 + fake_edid_record->ucFakeEDIDLength * 128; 1336 + 1337 + info->fake_edid = fake_edid_record->ucFakeEDIDString; 1338 + 1339 + record += struct_size(fake_edid_record, 1340 + ucFakeEDIDString, 1341 + info->fake_edid_size); 1342 + } else { 1343 + /* empty fake edid record must be 3 bytes long */ 1344 + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1; 1345 + } 1346 + break; 1347 + case LCD_PANEL_RESOLUTION_RECORD_TYPE: 1348 + panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; 1349 + info->panel_width_mm = panel_res_record->usHSize; 1350 + info->panel_height_mm = panel_res_record->usVSize; 1351 + record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); 1352 + break; 1353 + default: 1354 + return BP_RESULT_BADBIOSTABLE; 1355 + } 1356 + } 1357 + 1358 + return BP_RESULT_OK; 1359 + } 1360 + 1316 1361 static enum bp_result get_embedded_panel_info_v1_2( 1317 1362 struct bios_parser *bp, 1318 1363 struct embedded_panel_info *info) ··· 1482 1419 1483 1420 if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc) 1484 1421 info->lcd_timing.misc_info.API_ENABLED = true; 1422 + 1423 + if (lvds->usExtInfoTableOffset) 1424 + return get_embedded_panel_extra_info(bp, info, 1425 + le16_to_cpu(lvds->usExtInfoTableOffset) + DATA_TABLES(LCD_Info)); 1485 1426 1486 1427 return BP_RESULT_OK; 1487 1428 } ··· 1611 1544 info->lcd_timing.misc_info.GREY_LEVEL = 1612 1545 (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL & 1613 1546 lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT; 1547 + 1548 + if (lvds->usExtInfoTableOffset) 1549 + return get_embedded_panel_extra_info(bp, info, 1550 + le16_to_cpu(lvds->usExtInfoTableOffset) + DATA_TABLES(LCD_Info)); 1614 1551 1615 1552 return BP_RESULT_OK; 1616 1553 }
+1 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 1682 1682 struct dc_link_training_overrides preferred_training_settings; 1683 1683 struct dp_audio_test_data audio_test_data; 1684 1684 1685 - uint8_t ddc_hw_inst; 1685 + enum gpio_ddc_line ddc_hw_inst; 1686 1686 1687 1687 uint8_t hpd_src; 1688 1688
+2 -1
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
··· 102 102 struct bp_external_encoder_control *cntl); 103 103 enum bp_result (*dac_load_detection)( 104 104 struct dc_bios *bios, 105 - enum engine_id engine_id); 105 + enum engine_id engine_id, 106 + struct graphics_object_id ext_enc_id); 106 107 enum bp_result (*transmitter_control)( 107 108 struct dc_bios *bios, 108 109 struct bp_transmitter_control *cntl);
+2 -2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
··· 40 40 #define FN(reg_name, field_name) \ 41 41 mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name 42 42 43 - #define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8 44 - #define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40 43 + #define MCIF_ADDR(addr) ((uint32_t)((((unsigned long long)(addr) & 0xffffffffffULL) + 0xFEULL) >> 8)) 44 + #define MCIF_ADDR_HIGH(addr) ((uint32_t)(((unsigned long long)(addr)) >> 40)) 45 45 46 46 /* wbif programming guide: 47 47 * 1. set up wbif parameter:
+3
drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
··· 646 646 enum gpio_ddc_line dal_ddc_get_line( 647 647 const struct ddc *ddc) 648 648 { 649 + if (!ddc) 650 + return GPIO_DDC_LINE_UNKNOWN; 651 + 649 652 return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data); 650 653 } 651 654
+59 -35
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 665 665 } 666 666 667 667 static void 668 - dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) 668 + dce110_external_encoder_control(enum bp_external_encoder_control_action action, 669 + struct dc_link *link, 670 + struct dc_crtc_timing *timing) 669 671 { 670 - struct dc_link *link = pipe_ctx->stream->link; 672 + struct dc *dc = link->ctx->dc; 671 673 struct dc_bios *bios = link->ctx->dc_bios; 672 - struct bp_encoder_control encoder_control = {0}; 674 + const struct dc_link_settings *link_settings = &link->cur_link_settings; 675 + enum bp_result bp_result = BP_RESULT_OK; 676 + struct bp_external_encoder_control ext_cntl = { 677 + .action = action, 678 + .connector_obj_id = link->link_enc->connector, 679 + .encoder_id = link->ext_enc_id, 680 + .lanes_number = link_settings->lane_count, 681 + .link_rate = link_settings->link_rate, 673 682 674 - encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; 675 - encoder_control.engine_id = link->link_enc->analog_engine; 676 - encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; 677 - bios->funcs->encoder_control(bios, &encoder_control); 683 + /* Use signal type of the real link encoder, ie. DP */ 684 + .signal = link->connector_signal, 685 + 686 + /* We don't know the timing yet when executing the SETUP action, 687 + * so use a reasonably high default value. It seems that ENABLE 688 + * can change the actual pixel clock but doesn't work with higher 689 + * pixel clocks than what SETUP was called with. 690 + */ 691 + .pixel_clock = timing ? timing->pix_clk_100hz / 10 : 300000, 692 + .color_depth = timing ? 
timing->display_color_depth : COLOR_DEPTH_888, 693 + }; 694 + DC_LOGGER_INIT(dc->ctx); 695 + 696 + bp_result = bios->funcs->external_encoder_control(bios, &ext_cntl); 697 + 698 + if (bp_result != BP_RESULT_OK) 699 + DC_LOG_ERROR("Failed to execute external encoder action: 0x%x\n", action); 700 + } 701 + 702 + static void 703 + dce110_prepare_ddc(struct dc_link *link) 704 + { 705 + if (link->ext_enc_id.id) 706 + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DDC_SETUP, link, NULL); 678 707 } 679 708 680 709 static bool ··· 713 684 struct link_encoder *link_enc = link->link_enc; 714 685 enum bp_result bp_result; 715 686 716 - bp_result = bios->funcs->dac_load_detection(bios, link_enc->analog_engine); 687 + bp_result = bios->funcs->dac_load_detection( 688 + bios, link_enc->analog_engine, link->ext_enc_id); 717 689 return bp_result == BP_RESULT_OK; 718 690 } 719 691 ··· 730 700 uint32_t early_control = 0; 731 701 struct timing_generator *tg = pipe_ctx->stream_res.tg; 732 702 733 - link_hwss->setup_stream_attribute(pipe_ctx); 734 703 link_hwss->setup_stream_encoder(pipe_ctx); 735 704 736 705 dc->hwss.update_info_frame(pipe_ctx); ··· 748 719 749 720 tg->funcs->set_early_control(tg, early_control); 750 721 751 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 752 - dce110_dac_encoder_control(pipe_ctx, true); 722 + if (link->ext_enc_id.id) 723 + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_ENABLE, link, timing); 753 724 } 754 725 755 726 static enum bp_result link_transmitter_control( ··· 1248 1219 link_enc->transmitter - TRANSMITTER_UNIPHY_A); 1249 1220 } 1250 1221 1251 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1252 - dce110_dac_encoder_control(pipe_ctx, false); 1222 + if (link->ext_enc_id.id) 1223 + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DISABLE, link, NULL); 1253 1224 } 1254 1225 1255 1226 void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, ··· 1632 1603 1633 1604 return DC_OK; 1634 1605 } 1635 - static void 1636 - 
dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) 1637 - { 1638 - struct dc_link *link = pipe_ctx->stream->link; 1639 - struct dc_bios *bios = link->ctx->dc_bios; 1640 - struct bp_crtc_source_select crtc_source_select = {0}; 1641 - enum engine_id engine_id = link->link_enc->preferred_engine; 1642 - 1643 - if (dc_is_rgb_signal(pipe_ctx->stream->signal)) 1644 - engine_id = link->link_enc->analog_engine; 1645 - crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; 1646 - crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth; 1647 - crtc_source_select.engine_id = engine_id; 1648 - crtc_source_select.sink_signal = pipe_ctx->stream->signal; 1649 - bios->funcs->select_crtc_source(bios, &crtc_source_select); 1650 - } 1651 1606 1652 1607 enum dc_status dce110_apply_single_controller_ctx_to_hw( 1653 1608 struct pipe_ctx *pipe_ctx, ··· 1650 1637 1651 1638 if (hws->funcs.disable_stream_gating) { 1652 1639 hws->funcs.disable_stream_gating(dc, pipe_ctx); 1653 - } 1654 - 1655 - if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { 1656 - dce110_select_crtc_source(pipe_ctx); 1657 1640 } 1658 1641 1659 1642 if (pipe_ctx->stream_res.audio != NULL) { ··· 1731 1722 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 1732 1723 pipe_ctx->stream_res.tg, event_triggers, 2); 1733 1724 1734 - if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && 1735 - !dc_is_rgb_signal(pipe_ctx->stream->signal)) 1725 + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) 1736 1726 pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( 1737 1727 pipe_ctx->stream_res.stream_enc, 1738 1728 pipe_ctx->stream_res.tg->inst); ··· 3384 3376 link->phy_state.symclk_state = SYMCLK_ON_TX_ON; 3385 3377 } 3386 3378 3379 + static void dce110_enable_analog_link_output( 3380 + struct dc_link *link, 3381 + uint32_t pix_clk_100hz) 3382 + { 3383 + link->link_enc->funcs->enable_analog_output( 3384 + link->link_enc, 3385 + pix_clk_100hz); 3386 + } 3387 + 3387 
3388 void dce110_enable_dp_link_output( 3388 3389 struct dc_link *link, 3389 3390 const struct link_resource *link_res, ··· 3438 3421 &pipes[i].pll_settings); 3439 3422 } 3440 3423 } 3424 + } 3425 + 3426 + if (link->ext_enc_id.id) { 3427 + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_INIT, link, NULL); 3428 + dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_SETUP, link, NULL); 3441 3429 } 3442 3430 3443 3431 if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) { ··· 3535 3513 .enable_lvds_link_output = dce110_enable_lvds_link_output, 3536 3514 .enable_tmds_link_output = dce110_enable_tmds_link_output, 3537 3515 .enable_dp_link_output = dce110_enable_dp_link_output, 3516 + .enable_analog_link_output = dce110_enable_analog_link_output, 3538 3517 .disable_link_output = dce110_disable_link_output, 3539 3518 .dac_load_detect = dce110_dac_load_detect, 3519 + .prepare_ddc = dce110_prepare_ddc, 3540 3520 }; 3541 3521 3542 3522 static const struct hwseq_private_funcs dce110_private_funcs = {
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 753 753 enc_init_data, 754 754 &link_enc_feature, 755 755 &link_enc_regs[link_regs_id], 756 - &link_enc_aux_regs[enc_init_data->channel - 1], 756 + enc_init_data->channel == CHANNEL_ID_UNKNOWN ? 757 + NULL : &link_enc_aux_regs[enc_init_data->channel - 1], 757 758 enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ? 758 759 NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]); 759 760 return &enc110->base;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 760 760 enc_init_data, 761 761 &link_enc_feature, 762 762 &link_enc_regs[link_regs_id], 763 - &link_enc_aux_regs[enc_init_data->channel - 1], 763 + enc_init_data->channel == CHANNEL_ID_UNKNOWN ? 764 + NULL : &link_enc_aux_regs[enc_init_data->channel - 1], 764 765 enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs) ? 765 766 NULL : &link_enc_hpd_regs[enc_init_data->hpd_source]); 766 767 return &enc110->base;
+4
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
··· 153 153 uint32_t drr_enabled; 154 154 uint32_t min_drr_refresh_rate; 155 155 bool realtek_eDPToLVDS; 156 + uint16_t panel_width_mm; 157 + uint16_t panel_height_mm; 158 + uint16_t fake_edid_size; 159 + const uint8_t *fake_edid; 156 160 }; 157 161 158 162 struct dc_firmware_info {
+1
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 425 425 dpm_table->dpm_levels[0].enabled = true; 426 426 dpm_table->dpm_levels[1].value = pptable->GfxclkFmax; 427 427 dpm_table->dpm_levels[1].enabled = true; 428 + dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED; 428 429 } else { 429 430 dpm_table->count = 1; 430 431 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 1129 1129 /* gfxclk dpm table setup */ 1130 1130 dpm_table = &dpm_context->dpm_tables.gfx_table; 1131 1131 dpm_table->clk_type = SMU_GFXCLK; 1132 + dpm_table->flags = SMU_DPM_TABLE_FINE_GRAINED; 1132 1133 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 1133 1134 /* In the case of gfxclk, only fine-grained dpm is honored. 1134 1135 * Get min/max values from FW.
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1370 1370 level_index = 1; 1371 1371 } 1372 1372 1373 - if (!is_fine_grained) { 1373 + if (!is_fine_grained || count == 1) { 1374 1374 for (i = 0; i < count; i++) { 1375 1375 freq_match = !is_deep_sleep && 1376 1376 smu_cmn_freqs_match(
+6
include/drm/drm_fb_helper.h
··· 273 273 int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper); 274 274 bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper, 275 275 const struct drm_gem_object *obj); 276 + #else 277 + static inline bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper, 278 + const struct drm_gem_object *obj) 279 + { 280 + return false; 281 + } 276 282 #endif 277 283 278 284 #endif