Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-next-2021-05-10' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Bit later than usual, I queued them all up on Friday then promptly
forgot to write the pull request email. This is mainly amdgpu fixes,
with some radeon/msm/fbdev and one i915 gvt fix thrown in.

amdgpu:
- MPO hang workaround
- Fix for concurrent VM flushes on vega/navi
- dcefclk is not adjustable on navi1x and newer
- MST HPD debugfs fix
- Suspend/resume fixes
- Register VGA clients late in case driver fails to load
- Fix GEM leak in user framebuffer create
- Add support for polaris12 with 32 bit memory interface
- Fix duplicate cursor issue when using overlay
- Fix corruption with tiled surfaces on VCN3
- Add BO size and stride check to fix BO size verification

radeon:
- Fix off-by-one in power state parsing
- Fix possible memory leak in power state parsing

msm:
- NULL ptr dereference fix

fbdev:
- procfs disabled warning fix

i915:
- gvt: Fix a possible division by zero in vgpu display rate
calculation"

* tag 'drm-next-2021-05-10' of git://anongit.freedesktop.org/drm/drm:
drm/amdgpu: Use device specific BO size & stride check.
drm/amdgpu: Init GFX10_ADDR_CONFIG for VCN v3 in DPG mode.
drm/amd/pm: initialize variable
drm/radeon: Avoid power table parsing memory leaks
drm/radeon: Fix off-by-one power_state index heap overwrite
drm/amd/display: Fix two cursor duplication when using overlay
drm/amdgpu: add new MC firmware for Polaris12 32bit ASIC
fbmem: Mark proc_fb_seq_ops as __maybe_unused
drm/msm/dpu: Delete bonkers code
drm/i915/gvt: Prevent divided by zero when calculating refresh rate
amdgpu: fix GEM obj leak in amdgpu_display_user_framebuffer_create
drm/amdgpu: Register VGA clients after init can no longer fail
drm/amdgpu: Handling of amdgpu_device_resume return value for graceful teardown
drm/amdgpu: fix r initial values
drm/amd/display: fix wrong statement in mst hpd debugfs
amdgpu/pm: set pp_dpm_dcefclk to readonly on NAVI10 and newer gpus
amdgpu/pm: Prevent force of DCEFCLK on NAVI10 and SIENNA_CICHLID
drm/amdgpu: fix concurrent VM flushes on Vega/Navi v2
drm/amd/display: Reject non-zero src_y and src_x for video planes

19 files changed: +332 -81
+13 -15
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);

-	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
-	/* this will fail for cards that aren't VGA class devices, just
-	 * ignore it */
-	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
-		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
-
-	if (amdgpu_device_supports_px(ddev)) {
-		px = true;
-		vga_switcheroo_register_client(adev->pdev,
-					       &amdgpu_switcheroo_ops, px);
-		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
-	}
-
 	if (amdgpu_emu_mode == 1) {
 		/* post the asic on emulation mode */
 		emu_soc_asic_init(adev);
···
 	if (amdgpu_device_cache_pci_state(adev->pdev))
 		pci_restore_state(pdev);

+	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+
+	if (amdgpu_device_supports_px(ddev)) {
+		px = true;
+		vga_switcheroo_register_client(adev->pdev,
+					       &amdgpu_switcheroo_ops, px);
+		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+	}
+
 	if (adev->gmc.xgmi.pending_reset)
 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
···

 failed:
 	amdgpu_vf_error_trans_all(adev);
-	if (px)
-		vga_switcheroo_fini_domain_pm_ops(adev->dev);

 failed_unmap:
 	iounmap(adev->rmmio);
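The reordering above is the whole of "Register VGA clients after init can no longer fail": vga_client_register() and vga_switcheroo_register_client() publish the device to other subsystems, so calling them before a fallible init forced every failure path to unregister again (the deleted vga_switcheroo_fini_domain_pm_ops() call under the failed: label). A minimal sketch of the pattern, with made-up helpers rather than the amdgpu functions:

	struct dev { int hw_ready; };

	static int fallible_hw_init(struct dev *d) { d->hw_ready = 1; return 0; }
	static void register_vga_client(struct dev *d) { (void)d; }
	static void register_switcheroo(struct dev *d) { (void)d; }

	/* Publish side effects other subsystems can see only after the last
	 * point that can fail, so no error path has to unregister anything. */
	static int device_init(struct dev *d)
	{
		int ret = fallible_hw_init(d);
		if (ret)
			return ret;	/* nothing published yet, nothing to unwind */

		register_vga_client(d);
		register_switcheroo(d);
		return 0;
	}

	int main(void) { struct dev d; return device_init(&d); }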
+177 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
···
 	return 0;
 }

+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+				 unsigned int *width, unsigned int *height)
+{
+	unsigned int cpp_log2 = ilog2(cpp);
+	unsigned int pixel_log2 = block_log2 - cpp_log2;
+	unsigned int width_log2 = (pixel_log2 + 1) / 2;
+	unsigned int height_log2 = pixel_log2 - width_log2;
+
+	*width = 1 << width_log2;
+	*height = 1 << height_log2;
+}
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+				       bool pipe_aligned)
+{
+	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+	switch (ver) {
+	case AMD_FMT_MOD_TILE_VER_GFX9: {
+		/*
+		 * TODO: for pipe aligned we may need to check the alignment of the
+		 * total size of the surface, which may need to be bigger than the
+		 * natural alignment due to some HW workarounds
+		 */
+		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+	}
+	case AMD_FMT_MOD_TILE_VER_GFX10:
+	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+			++pipes_log2;
+
+		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+	}
+	default:
+		return 0;
+	}
+}
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+				       const struct drm_format_info *format,
+				       unsigned int block_width, unsigned int block_height,
+				       unsigned int block_size_log2)
+{
+	unsigned int width = rfb->base.width /
+		((plane && plane < format->num_planes) ? format->hsub : 1);
+	unsigned int height = rfb->base.height /
+		((plane && plane < format->num_planes) ? format->vsub : 1);
+	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+	unsigned int block_pitch = block_width * cpp;
+	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+	unsigned int block_size = 1 << block_size_log2;
+	uint64_t size;
+
+	if (rfb->base.pitches[plane] % block_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
+			    rfb->base.pitches[plane], plane, block_pitch);
+		return -EINVAL;
+	}
+	if (rfb->base.pitches[plane] < min_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is less than minimum pitch %d\n",
+			    rfb->base.pitches[plane], plane, min_pitch);
+		return -EINVAL;
+	}
+
+	/* Force at least natural alignment. */
+	if (rfb->base.offsets[plane] % block_size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+			    rfb->base.offsets[plane], plane, block_size);
+		return -EINVAL;
+	}
+
+	size = rfb->base.offsets[plane] +
+		(uint64_t)rfb->base.pitches[plane] / block_pitch *
+		block_size * DIV_ROUND_UP(height, block_height);
+
+	if (rfb->base.obj[0]->size < size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+			    rfb->base.obj[0]->size, size, plane);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+	uint64_t modifier = rfb->base.modifier;
+	int ret;
+	unsigned int i, block_width, block_height, block_size_log2;
+
+	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+		return 0;
+
+	for (i = 0; i < format_info->num_planes; ++i) {
+		if (modifier == DRM_FORMAT_MOD_LINEAR) {
+			block_width = 256 / format_info->cpp[i];
+			block_height = 1;
+			block_size_log2 = 8;
+		} else {
+			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+			switch ((swizzle & ~3) + 1) {
+			case DC_SW_256B_S:
+				block_size_log2 = 8;
+				break;
+			case DC_SW_4KB_S:
+			case DC_SW_4KB_S_X:
+				block_size_log2 = 12;
+				break;
+			case DC_SW_64KB_S:
+			case DC_SW_64KB_S_T:
+			case DC_SW_64KB_S_X:
+				block_size_log2 = 16;
+				break;
+			default:
+				drm_dbg_kms(rfb->base.dev,
+					    "Swizzle mode with unknown block size: %d\n", swizzle);
+				return -EINVAL;
+			}
+
+			get_block_dimensions(block_size_log2, format_info->cpp[i],
+					     &block_width, &block_height);
+		}
+
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	if (AMD_FMT_MOD_GET(DCC, modifier)) {
+		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+			block_size_log2 = get_dcc_block_size(modifier, false, false);
+			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+					     &block_width, &block_height);
+			ret = amdgpu_display_verify_plane(rfb, i, format_info,
+							  block_width, block_height,
+							  block_size_log2);
+			if (ret)
+				return ret;
+
+			++i;
+			block_size_log2 = get_dcc_block_size(modifier, true, true);
+		} else {
+			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+		}
+		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+				     &block_width, &block_height);
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
 				      uint64_t *tiling_flags, bool *tmz_surface)
 {
···
 	int ret;

 	rfb->base.obj[0] = obj;
-
-	/* Verify that bo size can fit the fb size. */
-	ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
-					 &amdgpu_fb_funcs);
+	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
 	if (ret)
 		goto err;
 	/* Verify that the modifier is supported. */
···
 		}
 	}

-	for (i = 1; i < rfb->base.format->num_planes; ++i) {
+	ret = amdgpu_display_verify_sizes(rfb);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < rfb->base.format->num_planes; ++i) {
 		drm_gem_object_get(rfb->base.obj[0]);
-		drm_gem_object_put(rfb->base.obj[i]);
 		rfb->base.obj[i] = rfb->base.obj[0];
 	}

···
 	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
 	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+		drm_gem_object_put(obj);
 		return ERR_PTR(-EINVAL);
 	}

···
 		}
 	}
 }
-	return r;
+	return 0;
 }

 int amdgpu_display_resume_helper(struct amdgpu_device *adev)
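To make the arithmetic in the new amdgpu_display_verify_plane() concrete, here is a small userspace sketch (illustrative only; the two macros are reimplemented, not the kernel's) that reproduces the pitch and size checks for a linear 1920x1080 ARGB8888 framebuffer, where a linear "block" is 256 bytes wide and one row tall:

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN(x, a)        (((x) + (a) - 1) / (a) * (a))
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Linear 1920x1080 ARGB8888: 4 bytes per pixel. */
		unsigned int width = 1920, height = 1080, cpp = 4;
		unsigned int block_width = 256 / cpp;	/* 64 pixels */
		unsigned int block_height = 1;
		unsigned int block_size = 1 << 8;	/* 256 bytes */

		unsigned int block_pitch = block_width * cpp;		/* 256 */
		unsigned int min_pitch = ALIGN(width * cpp, block_pitch); /* 7680 */
		unsigned int pitch = 7680;	/* as supplied by userspace */
		uint64_t offset = 0;

		/* Same rejects the kernel applies before accepting the BO. */
		if (pitch % block_pitch || pitch < min_pitch)
			return 1;

		uint64_t size = offset + (uint64_t)pitch / block_pitch *
				block_size * DIV_ROUND_UP(height, block_height);

		/* 7680 / 256 * 256 * 1080 = 8294400 bytes required. */
		printf("BO must be at least %llu bytes\n", (unsigned long long)size);
		return 0;
	}

A BO smaller than 8294400 bytes would now fail framebuffer creation with -EINVAL instead of being scanned out past its end.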
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
···
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
+	if (ret)
+		return ret;
+
 	if (amdgpu_device_supports_px(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
+11 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
···
 	/* Check if we have an idle VMID */
 	i = 0;
 	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+		/* Don't use per engine and per process VMID at the same time */
+		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+			NULL : ring;
+
+		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
 		if (!fences[i])
 			break;
 		++i;
···
 	if (updates && (*id)->flushed_updates &&
 	    updates->context == (*id)->flushed_updates->context &&
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
-	        updates = NULL;
+		updates = NULL;

 	if ((*id)->owner != vm->immediate.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
···
 	    ((*id)->last_flush->context != fence_context &&
 	     !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
+
+		/* Don't use per engine and per process VMID at the same time */
+		if (adev->vm_manager.concurrent_flush)
+			ring = NULL;

 		/* to prevent one context starved by another context */
 		(*id)->pd_gpu_addr = 0;
···
 	if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
 		needs_flush = true;

-	/* Concurrent flushes are only possible starting with Vega10 and
-	 * are broken on Navi10 and Navi14.
-	 */
-	if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
-			    adev->asic_type == CHIP_NAVI10 ||
-			    adev->asic_type == CHIP_NAVI14))
+	if (needs_flush && !adev->vm_manager.concurrent_flush)
 		continue;

 	/* Good, we can use this VMID. Remember this submission as
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
 {
 	unsigned i;

+	/* Concurrent flushes are only possible starting with Vega10 and
+	 * are broken on Navi10 and Navi14.
+	 */
+	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+					      adev->asic_type == CHIP_NAVI10 ||
+					      adev->asic_type == CHIP_NAVI14);
 	amdgpu_vmid_mgr_init(adev);

 	adev->vm_manager.fence_context =
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
···
 	/* Handling of VMIDs */
 	struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
 	unsigned int first_kfd_vmid;
+	bool concurrent_flush;

 	/* Handling of VM fences */
 	u64 fence_context;
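Taken together, the amdgpu_ids.c, amdgpu_vm.c and amdgpu_vm.h hunks replace a per-callsite ASIC check with a single concurrent_flush flag computed once at VM manager init. A standalone sketch of the centralized predicate (the enum here is an illustrative stand-in ordered like amdgpu's chip enum, not the real one):

	#include <stdbool.h>
	#include <stdio.h>

	enum chip { CHIP_POLARIS12, CHIP_VEGA10, CHIP_NAVI10,
		    CHIP_NAVI14, CHIP_SIENNA_CICHLID };

	/* Evaluated once, as amdgpu_vm_manager_init() now does: concurrent
	 * flushes need Vega10 or newer and are broken on Navi10/Navi14. */
	static bool concurrent_flush_supported(enum chip asic)
	{
		return !(asic < CHIP_VEGA10 ||
			 asic == CHIP_NAVI10 ||
			 asic == CHIP_NAVI14);
	}

	int main(void)
	{
		printf("polaris12: %d\n", concurrent_flush_supported(CHIP_POLARIS12)); /* 0 */
		printf("navi10:    %d\n", concurrent_flush_supported(CHIP_NAVI10));    /* 0 */
		printf("sienna:    %d\n", concurrent_flush_supported(CHIP_SIENNA_CICHLID)); /* 1 */
		return 0;
	}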
+10 -3
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
···
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
···
 		chip_name = "polaris10";
 		break;
 	case CHIP_POLARIS12:
-		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
+		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
 			chip_name = "polaris12_k";
-		else
-			chip_name = "polaris12";
+		} else {
+			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
+			/* Polaris12 32bit ASIC needs a special MC firmware */
+			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
+				chip_name = "polaris12_32";
+			else
+				chip_name = "polaris12";
+		}
 		break;
 	case CHIP_FIJI:
 	case CHIP_CARRIZO:
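The detection above uses a classic index/data register pair: write an index, read a strap value back that identifies the memory interface. A mock of that handshake (a plain array stands in for MMIO; the WREG32/RREG32 names, the firmware names and the 0x05b4dc40 strap come from the hunk, everything else is made up):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mmio[2] = { 0, 0x05b4dc40 };	/* [1] = pre-seeded data reg */

	static void wreg32(unsigned reg, uint32_t val) { mmio[reg] = val; }
	static uint32_t rreg32(unsigned reg) { return mmio[reg]; }

	int main(void)
	{
		wreg32(0, 159);	/* stand-in for ixMC_IO_DEBUG_UP_159 */

		/* Polaris12 with a 32-bit memory interface needs special MC firmware. */
		const char *fw = rreg32(1) == 0x05b4dc40
			? "amdgpu/polaris12_32_mc.bin"
			: "amdgpu/polaris12_mc.bin";
		printf("%s\n", fw);
		return 0;
	}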
+4
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
···
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
+
+	/* VCN global tiling registers */
+	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }

 static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
+68
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
 	scaling_info->src_rect.x = state->src_x >> 16;
 	scaling_info->src_rect.y = state->src_y >> 16;

+	/*
+	 * For reasons we don't (yet) fully understand a non-zero
+	 * src_y coordinate into an NV12 buffer can cause a
+	 * system hang. To avoid hangs (and maybe be overly cautious)
+	 * let's reject both non-zero src_x and src_y.
+	 *
+	 * We currently know of only one use-case to reproduce a
+	 * scenario with non-zero src_x and src_y for NV12, which
+	 * is to gesture the YouTube Android app into full screen
+	 * on ChromeOS.
+	 */
+	if (state->fb &&
+	    state->fb->format->format == DRM_FORMAT_NV12 &&
+	    (scaling_info->src_rect.x != 0 ||
+	     scaling_info->src_rect.y != 0))
+		return -EINVAL;
+
 	scaling_info->src_rect.width = state->src_w >> 16;
 	if (scaling_info->src_rect.width == 0)
 		return -EINVAL;
···
 }
 #endif

+static int validate_overlay(struct drm_atomic_state *state)
+{
+	int i;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+	/* Check if primary plane is contained inside overlay */
+	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+				return 0;
+
+			overlay_state = new_plane_state;
+			continue;
+		}
+	}
+
+	/* check if we're making changes to the overlay plane */
+	if (!overlay_state)
+		return 0;
+
+	/* check if overlay plane is enabled */
+	if (!overlay_state->crtc)
+		return 0;
+
+	/* find the primary plane for the CRTC that the overlay is enabled on */
+	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+	if (IS_ERR(primary_state))
+		return PTR_ERR(primary_state);
+
+	/* check if primary plane is enabled */
+	if (!primary_state->crtc)
+		return 0;
+
+	/* Perform the bounds check to ensure the overlay plane covers the primary */
+	if (primary_state->crtc_x < overlay_state->crtc_x ||
+	    primary_state->crtc_y < overlay_state->crtc_y ||
+	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
  * @dev: The DRM device
···
 		if (ret)
 			goto fail;
 	}
+
+	ret = validate_overlay(state);
+	if (ret)
+		goto fail;

 	/* Add new/modified planes */
 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
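The bounds check at the heart of validate_overlay() is plain rectangle containment: the primary plane must lie entirely inside the overlay, since the hardware cursor only exists on the top plane. A self-contained sketch of the same test (illustrative struct, not DRM's plane state):

	#include <stdbool.h>
	#include <stdio.h>

	struct rect { int x, y, w, h; };

	/* True when pri lies entirely inside ovl. */
	static bool covers(struct rect ovl, struct rect pri)
	{
		return pri.x >= ovl.x && pri.y >= ovl.y &&
		       pri.x + pri.w <= ovl.x + ovl.w &&
		       pri.y + pri.h <= ovl.y + ovl.h;
	}

	int main(void)
	{
		struct rect overlay = { 0, 0, 1920, 1080 };
		struct rect primary = { 0, 0, 1920, 1080 };

		printf("full-screen overlay: %d\n", covers(overlay, primary)); /* 1 */

		overlay.w = 1280;	/* overlay shrinks: atomic check now fails */
		printf("partial overlay:     %d\n", covers(overlay, primary)); /* 0 */
		return 0;
	}

When the overlay stops covering the primary, the atomic check now rejects the configuration instead of letting the cursor appear twice.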
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
···
 		if (!aconnector->dc_link)
 			continue;

-		if (!(aconnector->port && &aconnector->mst_port->mst_mgr))
+		if (!aconnector->mst_port)
 			continue;

 		link = aconnector->dc_link;
+9 -1
drivers/gpu/drm/amd/pm/amdgpu_pm.c
···
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-	struct pp_states_info data;
+	struct pp_states_info data = {0};
 	enum amd_pm_state_type pm = 0;
 	int i = 0, ret = 0;
···
 	if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
 	    DEVICE_ATTR_IS(pp_dpm_socclk) ||
 	    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+	}
+
+	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+		/* SMU MP1 does not support dcefclk level setting */
+		if (asic_type >= CHIP_NAVI10) {
 			dev_attr->attr.mode &= ~S_IWUGO;
 			dev_attr->store = NULL;
 		}
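S_IWUGO is the owner|group|other write mask, so clearing it flips the sysfs attribute to read-only while leaving the read bits intact. The same mask arithmetic with userspace constants from <sys/stat.h>:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		mode_t mode = 0644;	/* rw-r--r-- */

		/* The kernel spells this ~S_IWUGO. */
		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
		printf("%04o\n", (unsigned)mode);	/* 0444: read-only */
		return 0;
	}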
+4 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
···
 	case SMU_SOCCLK:
 	case SMU_MCLK:
 	case SMU_UCLK:
-	case SMU_DCEFCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
···
 		if (ret)
 			return size;
 		break;
+	case SMU_DCEFCLK:
+		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
+		break;
+
 	default:
 		break;
 	}
+3 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
···
 	case SMU_SOCCLK:
 	case SMU_MCLK:
 	case SMU_UCLK:
-	case SMU_DCEFCLK:
 	case SMU_FCLK:
 		/* There is only 2 levels for fine grained DPM */
 		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
···
 		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
 		if (ret)
 			goto forec_level_out;
+		break;
+	case SMU_DCEFCLK:
+		dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
 		break;
 	default:
 		break;
+3 -3
drivers/gpu/drm/i915/gvt/handlers.c
···
 	link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));

 	/* Get H/V total from transcoder timing */
-	htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT) + 1;
-	vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT) + 1;
+	htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+	vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);

 	if (dp_br && link_n && htotal && vtotal) {
 		u64 pixel_clk = 0;
···
 		pixel_clk *= MSEC_PER_SEC;

 		/* Calcuate refresh rate by (pixel_clk / (h_total * v_total)) */
-		new_rate = DIV64_U64_ROUND_CLOSEST(pixel_clk, div64_u64(mul_u32_u32(htotal, vtotal), MSEC_PER_SEC));
+		new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));

 		if (*old_rate != new_rate)
 			*old_rate = new_rate;
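The division by zero came from the old inner division: div64_u64(htotal * vtotal, MSEC_PER_SEC) truncates to 0 whenever the product is below 1000, which happens with an unprogrammed transcoder whose raw HTOTAL/VTOTAL fields are 0 (and the old "+ 1" made the htotal && vtotal guard useless). The rewrite checks the raw fields and keeps MSEC_PER_SEC in the numerator. A sketch of both forms with plain C in place of the div64 helpers:

	#include <stdint.h>
	#include <stdio.h>

	#define MSEC_PER_SEC 1000ULL

	int main(void)
	{
		/* Raw register fields of an unprogrammed transcoder. */
		uint64_t htotal = 0, vtotal = 0;
		uint64_t pixel_clk = 270000 * MSEC_PER_SEC;

		/* Old form: (0+1)*(0+1) = 1, and 1/1000 truncates to 0,
		 * so pixel_clk / 0 -- the crash being fixed here. */
		uint64_t old_den = ((htotal + 1) * (vtotal + 1)) / MSEC_PER_SEC;
		printf("old denominator: %llu\n", (unsigned long long)old_den);

		/* New form: bail out on zero raw fields; otherwise scale the
		 * numerator so nothing truncates to zero before dividing. */
		if (htotal && vtotal) {
			uint64_t rate = pixel_clk * MSEC_PER_SEC /
					((htotal + 1) * (vtotal + 1));
			printf("refresh: %llu\n", (unsigned long long)rate);
		} else {
			printf("transcoder not programmed, skip\n");
		}
		return 0;
	}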
-10
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
···
 	if (unlikely(!cstate->num_mixers))
 		return;

-	/*
-	 * For planes without commit update, drm framework will not add
-	 * those planes to current state since hardware update is not
-	 * required. However, if those planes were power collapsed since
-	 * last commit cycle, driver has to restore the hardware state
-	 * of those planes explicitly here prior to plane flush.
-	 */
-	drm_atomic_crtc_for_each_plane(plane, crtc)
-		dpu_plane_restore(plane, state);
-
 	/* update performance setting before crtc kickoff */
 	dpu_core_perf_crtc_update(crtc, 1, false);
-16
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
···
 	}
 }

-void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state)
-{
-	struct dpu_plane *pdpu;
-
-	if (!plane || !plane->state) {
-		DPU_ERROR("invalid plane\n");
-		return;
-	}
-
-	pdpu = to_dpu_plane(plane);
-
-	DPU_DEBUG_PLANE(pdpu, "\n");
-
-	dpu_plane_atomic_update(plane, state);
-}
-
 static void dpu_plane_destroy(struct drm_plane *plane)
 {
 	struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
-6
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
···
 		   u32 *flush_sspp);

 /**
- * dpu_plane_restore - restore hw state if previously power collapsed
- * @plane: Pointer to drm plane structure
- */
-void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state);
-
-/**
  * dpu_plane_flush - final plane operations before commit flush
  * @plane: Pointer to drm plane structure
  */
+18 -8
drivers/gpu/drm/radeon/radeon_atombios.c
···
 		return state_index;
 	/* last mode is usually default, array is low to high */
 	for (i = 0; i < num_modes; i++) {
-		rdev->pm.power_state[state_index].clock_info =
-			kcalloc(1, sizeof(struct radeon_pm_clock_info),
-				GFP_KERNEL);
+		/* avoid memory leaks from invalid modes or unknown frev. */
+		if (!rdev->pm.power_state[state_index].clock_info) {
+			rdev->pm.power_state[state_index].clock_info =
+				kzalloc(sizeof(struct radeon_pm_clock_info),
+					GFP_KERNEL);
+		}
 		if (!rdev->pm.power_state[state_index].clock_info)
-			return state_index;
+			goto out;
 		rdev->pm.power_state[state_index].num_clock_modes = 1;
 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
 		switch (frev) {
···
 			break;
 		}
 	}
+out:
+	/* free any unused clock_info allocation. */
+	if (state_index && state_index < num_modes) {
+		kfree(rdev->pm.power_state[state_index].clock_info);
+		rdev->pm.power_state[state_index].clock_info = NULL;
+	}
+
 	/* last mode is usually default */
-	if (rdev->pm.default_power_state_index == -1) {
+	if (state_index && rdev->pm.default_power_state_index == -1) {
 		rdev->pm.power_state[state_index - 1].type =
 			POWER_STATE_TYPE_DEFAULT;
 		rdev->pm.default_power_state_index = state_index - 1;
 		rdev->pm.power_state[state_index - 1].default_clock_mode =
 			&rdev->pm.power_state[state_index - 1].clock_info[0];
-		rdev->pm.power_state[state_index].flags &=
+		rdev->pm.power_state[state_index - 1].flags &=
 			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-		rdev->pm.power_state[state_index].misc = 0;
-		rdev->pm.power_state[state_index].misc2 = 0;
+		rdev->pm.power_state[state_index - 1].misc = 0;
+		rdev->pm.power_state[state_index - 1].misc2 = 0;
 	}
 	return state_index;
 }
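Two bugs are fixed above: the loop kcalloc'd a fresh clock_info into the same power_state slot on every iteration, leaking the previous buffer whenever a mode was invalid and state_index did not advance; and the default-state fixup indexed power_state[state_index], one past the last parsed state, instead of [state_index - 1]. The allocate-once half of the fix, as a sketch with illustrative structs:

	#include <stdlib.h>

	struct clock_info { int mode; };
	struct power_state { struct clock_info *clock_info; };

	/* Reuse the slot's buffer if a previous iteration already allocated
	 * one for a mode that turned out to be invalid, instead of leaking
	 * it by allocating unconditionally. */
	static int alloc_clock_info(struct power_state *ps)
	{
		if (!ps->clock_info)
			ps->clock_info = calloc(1, sizeof(*ps->clock_info));
		return ps->clock_info ? 0 : -1;
	}

	int main(void)
	{
		struct power_state ps = { 0 };
		int ret = alloc_clock_info(&ps);	/* allocates */
		ret |= alloc_clock_info(&ps);		/* reuses, no leak */
		free(ps.clock_info);
		return ret;
	}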
+1 -1
drivers/video/fbdev/core/fbmem.c
···
 	return 0;
 }

-static const struct seq_operations proc_fb_seq_ops = {
+static const struct __maybe_unused seq_operations proc_fb_seq_ops = {
 	.start = fb_seq_start,
 	.next  = fb_seq_next,
 	.stop  = fb_seq_stop,
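proc_fb_seq_ops is only referenced when CONFIG_PROC_FS is enabled, so marking it __maybe_unused (the kernel's wrapper around the compiler's unused attribute) silences the -Wunused-const-variable warning in !PROC_FS builds. A minimal illustration; compile with -Wunused-const-variable to see the difference:

	/* The first table warns when nothing references it; the second does not. */
	static const int table_warns[] = { 1, 2, 3 };
	static const int table_quiet[] __attribute__((__unused__)) = { 1, 2, 3 };

	int main(void) { return 0; }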