Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-fixes-2025-10-31' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Simona Vetter:
"Looks like stochastics conspired to make this one a bit bigger, but
nothing scary at all. Also first examples of the new Link: tags, yay!

Next week Dave should be back.

Drivers:
- mediatek: UAF in unbind, fixes -rc2 boot regression
- radeon: devm conversion fixes
- amdgpu: VPE idle handler, re-enable DM idle optimization, DCN3,
  SMU, vblank, HDR eDP, powerplay fixes for fiji/iceland
- msm: bunch of gem error path fixes, gmu fw parsing fix, dpu fixes
- intel: fix dmc/dc6 asserts on ADL-S
- xe: fix xe_validation_guard(), wake device handling around gt reset
- ast: fix display output on AST2300
- etnaviv: fix gpu flush
- imx: fix parallel bridge handling
- nouveau: scheduler locking fix
- panel: fixes for kingdisplay-kd097d04 and sitronix-st7789v

Core Changes:
- CI: disable broken sanity job
- sysfb: fix NULL pointer access
- sched: fix SIGKILL handling, locking for race condition
- dma_fence: better timeline name for signalled fences"

* tag 'drm-fixes-2025-10-31' of https://gitlab.freedesktop.org/drm/kernel: (44 commits)
drm/ast: Clear preserved bits from register output value
drm/imx: parallel-display: add the bridge before attaching it
drm/imx: parallel-display: convert to devm_drm_bridge_alloc() API
drm/panel: kingdisplay-kd097d04: Disable EoTp
drm/panel: sitronix-st7789v: fix sync flags for t28cp45tn89
drm/xe: Do not wake device during a GT reset
drm/xe: Fix uninitialized return value from xe_validation_guard()
drm/msm/dpu: Fix adjusted mode clock check for 3d merge
drm/msm/dpu: Disable broken YUV on QSEED2 hardware
drm/msm/dpu: Require linear modifier for writeback framebuffers
drm/msm/dpu: Fix pixel extension sub-sampling
drm/msm/dpu: Disable scaling for unsupported scaler types
drm/msm/dpu: Propagate error from dpu_assign_plane_resources
drm/msm/dpu: Fix allocation of RGB SSPPs without scaling
drm/msm: dsi: fix PLL init in bonded mode
drm/i915/dmc: Clear HRR EVT_CTL/HTP to zero on ADL-S
drm/amd/display: Fix incorrect return of vblank enable on unconfigured crtc
drm/amd/display: Add HDR workaround for a specific eDP
drm/amdgpu: fix SPDX header on cyan_skillfish_reg_init.c
drm/amdgpu: fix SPDX header on irqsrcs_vcn_5_0.h
...

+234 -128
+1 -1
drivers/dma-buf/dma-fence.c
···
                  "RCU protection is required for safe access to returned string");

         if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-                return fence->ops->get_driver_name(fence);
+                return fence->ops->get_timeline_name(fence);
         else
                 return "signaled-timeline";
 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
···
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
···
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *
+30 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
···
         return 0;
 }

+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+        switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+        case IP_VERSION(6, 1, 1):
+                return adev->pm.fw_version < 0x0a640500;
+        default:
+                return false;
+        }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+        struct amdgpu_vpe *vpe = &adev->vpe;
+
+        if (!adev->pm.dpm_enabled)
+                return 0;
+
+        return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
 static void vpe_idle_work_handler(struct work_struct *work)
 {
         struct amdgpu_device *adev =
···
         unsigned int fences = 0;

         fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+        if (fences)
+                goto reschedule;

-        if (fences == 0)
-                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
-        else
-                schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+        if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+                goto reschedule;
+
+        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+        return;
+
+reschedule:
+        schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
 }

 static int vpe_common_init(struct amdgpu_vpe *vpe)
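The idle handler's new flow is easiest to see in isolation: power-gate only when no fences are pending and, on firmware that needs it, only once the engine has dropped to DPM level 0. A minimal standalone C sketch of that decision (struct and field names here are illustrative stand-ins, not the kernel's):

/* Sketch of the idle-gating decision as a standalone function. */
#include <stdbool.h>
#include <stdio.h>

struct fake_vpe {
        unsigned int pending_fences; /* stand-in for amdgpu_fence_count_emitted() */
        bool needs_dpm0;             /* stand-in for vpe_need_dpm0_at_power_down() */
        int dpm_level;               /* stand-in for vpe_get_dpm_level() */
};

/* Returns true if it is safe to power-gate now, false to re-arm the timer. */
static bool vpe_may_power_gate(const struct fake_vpe *v)
{
        if (v->pending_fences)
                return false;                   /* work in flight: reschedule */
        if (v->needs_dpm0 && v->dpm_level != 0)
                return false;                   /* affected fw: wait for DPM0 first */
        return true;
}

int main(void)
{
        struct fake_vpe v = { .pending_fences = 0, .needs_dpm0 = true, .dpm_level = 2 };

        printf("gate now? %s\n", vpe_may_power_gate(&v) ? "yes" : "no"); /* no */
        v.dpm_level = 0;
        printf("gate now? %s\n", vpe_may_power_gate(&v) ? "yes" : "no"); /* yes */
        return 0;
}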
+1 -1
drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
···
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
 /*
  * Copyright 2018 Advanced Micro Devices, Inc.
  *
+18 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
···
         struct vblank_control_work *vblank_work =
                 container_of(work, struct vblank_control_work, work);
         struct amdgpu_display_manager *dm = vblank_work->dm;
+        struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+        int r;

         mutex_lock(&dm->dc_lock);

···
         if (dm->active_vblank_irq_count == 0) {
                 dc_post_update_surfaces_to_stream(dm->dc);
+
+                r = amdgpu_dpm_pause_power_profile(adev, true);
+                if (r)
+                        dev_warn(adev->dev, "failed to set default power profile mode\n");
+
                 dc_allow_idle_optimizations(dm->dc, true);
+
+                r = amdgpu_dpm_pause_power_profile(adev, false);
+                if (r)
+                        dev_warn(adev->dev, "failed to restore the power profile mode\n");
         }

         mutex_unlock(&dm->dc_lock);
···
         int irq_type;
         int rc = 0;

-        if (acrtc->otg_inst == -1)
-                goto skip;
+        if (enable && !acrtc->base.enabled) {
+                drm_dbg_vbl(crtc->dev,
+                            "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+                            acrtc->crtc_id, acrtc->base.enabled);
+                return -EINVAL;
+        }

         irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

···
                 return rc;
         }
 #endif
-skip:
+
         if (amdgpu_in_reset(adev))
                 return 0;
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
···
                 edid_caps->panel_patch.remove_sink_ext_caps = true;
                 break;
         case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+        case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
                 drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
                 edid_caps->panel_patch.disable_colorimetry = true;
                 break;
-3
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
···
                         dpp_base->ctx->dc->optimized_required = true;
                         dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
                 }
-        } else {
-                REG_SET(CM_MEM_PWR_CTRL, 0,
-                        BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
         }
 }
+1 -1
drivers/gpu/drm/amd/include/amd_cper.h
···
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
 /*
  * Copyright 2025 Advanced Micro Devices, Inc.
  *
+1 -1
drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
···
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */

 /*
  * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+1 -1
drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
···
         table->VoltageResponseTime = 0;
         table->PhaseResponseTime = 0;
         table->MemoryThermThrottleEnable = 1;
-        table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+        table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
         table->PCIeGenInterval = 1;
         table->VRConfig = 0;
+1 -1
drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
···
         table->VoltageResponseTime = 0;
         table->PhaseResponseTime = 0;
         table->MemoryThermThrottleEnable = 1;
-        table->PCIeBootLinkLevel = 0;
+        table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
         table->PCIeGenInterval = 1;

         result = iceland_populate_smc_svi2_config(hwmgr, table);
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
···
                                                table_index);
         uint32_t table_size;
         int ret = 0;
-        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+        if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
                 return -EINVAL;

         table_size = smu_table->tables[table_index].size;
+4 -4
drivers/gpu/drm/ast/ast_drv.h
···
         __ast_write8(addr, reg + 1, val);
 }

-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
                                          u8 val)
 {
-        u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+        u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);

-        tmp |= val;
-        __ast_write8_i(addr, reg, index, tmp);
+        val &= ~preserve_mask;
+        __ast_write8_i(addr, reg, index, tmp | val);
 }

 static inline u32 ast_read32(struct ast_device *ast, u32 reg)
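The AST fix is a classic read-modify-write subtlety: the caller's value must not be allowed to touch the preserved bit range. A standalone sketch of the corrected helper logic (plain C, no MMIO; the kernel helpers operate on indexed VGA registers):

/* Corrected read-modify-write: bits set in preserve_mask are kept
 * from the register, all other bits come from the caller's value. */
#include <assert.h>
#include <stdint.h>

static uint8_t masked_write(uint8_t current_reg, uint8_t preserve_mask, uint8_t val)
{
        uint8_t tmp = current_reg & preserve_mask; /* keep only the preserved bits */

        val &= ~preserve_mask;  /* the fix: drop preserved bits from the caller's value */
        return tmp | val;
}

int main(void)
{
        /* Register reads 0x00, bits 0xf0 must be preserved, caller passes
         * 0x8f: the old OR-only code wrote 0x8f, corrupting bit 7 of the
         * preserved range; the fixed version writes 0x0f. */
        assert(masked_write(0x00, 0xf0, 0x8f) == 0x0f);
        return 0;
}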
+1 -1
drivers/gpu/drm/ci/gitlab-ci.yml
···
     GIT_STRATEGY: none
   script:
     # ci-fairy check-commits --junit-xml=check-commits.xml
-    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
     - |
       set -eu
       image_tags=(
+6 -2
drivers/gpu/drm/drm_gem_atomic_helper.c
···
 void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
                                   struct drm_shadow_plane_state *shadow_plane_state)
 {
-        __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
-        drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+        if (shadow_plane_state) {
+                __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+                drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+        } else {
+                __drm_atomic_helper_plane_reset(plane, NULL);
+        }
 }
 EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
···
         u32 link_target, link_dwords;
         bool switch_context = gpu->exec_state != exec_state;
         bool switch_mmu_context = gpu->mmu_context != mmu_context;
-        unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+        unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
         bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
         bool has_blt = !!(gpu->identity.minor_features5 &
                           chipMinorFeatures5_BLT_ENGINE);
+54 -1
drivers/gpu/drm/i915/display/intel_dmc.c
···
                REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
 }

+static bool fixup_dmc_evt(struct intel_display *display,
+                          enum intel_dmc_id dmc_id,
+                          i915_reg_t reg_ctl, u32 *data_ctl,
+                          i915_reg_t reg_htp, u32 *data_htp)
+{
+        if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+                return false;
+
+        if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+                return false;
+
+        /* make sure reg_ctl and reg_htp are for the same event */
+        if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+            i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+                return false;
+
+        /*
+         * On ADL-S the HRR event handler is not restored after DC6.
+         * Clear it to zero from the beginning to avoid mismatches later.
+         */
+        if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+            is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+                *data_ctl = 0;
+                *data_htp = 0;
+                return true;
+        }
+
+        return false;
+}
+
 static bool disable_dmc_evt(struct intel_display *display,
                             enum intel_dmc_id dmc_id,
                             i915_reg_t reg, u32 data)
···
         for (i = 0; i < mmio_count; i++) {
                 dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
                 dmc_info->mmiodata[i] = mmiodata[i];
+        }

+        for (i = 0; i < mmio_count - 1; i++) {
+                u32 orig_mmiodata[2] = {
+                        dmc_info->mmiodata[i],
+                        dmc_info->mmiodata[i+1],
+                };
+
+                if (!fixup_dmc_evt(display, dmc_id,
+                                   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+                                   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+                        continue;
+
+                drm_dbg_kms(display->drm,
+                            " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+                            i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+                            orig_mmiodata[0], dmc_info->mmiodata[i]);
+                drm_dbg_kms(display->drm,
+                            " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+                            i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+                            orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+        }
+
+        for (i = 0; i < mmio_count; i++) {
                 drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
-                            i, mmioaddr[i], mmiodata[i],
+                            i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
                             is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
                             is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
                             disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
+9 -9
drivers/gpu/drm/imx/ipuv3/parallel-display.c
···
 struct imx_parallel_display_encoder {
         struct drm_encoder encoder;
-        struct drm_bridge bridge;
-        struct imx_parallel_display *pd;
 };

 struct imx_parallel_display {
         struct device *dev;
         u32 bus_format;
         struct drm_bridge *next_bridge;
+        struct drm_bridge bridge;
 };

 static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
 {
-        return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
+        return container_of(b, struct imx_parallel_display, bridge);
 }

 static const u32 imx_pd_bus_fmts[] = {
···
         if (IS_ERR(imxpd_encoder))
                 return PTR_ERR(imxpd_encoder);

-        imxpd_encoder->pd = imxpd;
         encoder = &imxpd_encoder->encoder;
-        bridge = &imxpd_encoder->bridge;
+        bridge = &imxpd->bridge;

         ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
         if (ret)
                 return ret;

-        bridge->funcs = &imx_pd_bridge_funcs;
         drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);

         connector = drm_bridge_connector_init(drm, encoder);
···
         u32 bus_format = 0;
         const char *fmt;

-        imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
-        if (!imxpd)
-                return -ENOMEM;
+        imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+                                      &imx_pd_bridge_funcs);
+        if (IS_ERR(imxpd))
+                return PTR_ERR(imxpd);

         /* port@1 is the output port */
         imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
···
         imxpd->dev = dev;

         platform_set_drvdata(pdev, imxpd);
+
+        devm_drm_bridge_add(dev, &imxpd->bridge);

         return component_add(dev, &imx_pd_ops);
 }
-10
drivers/gpu/drm/mediatek/mtk_drm_drv.c
···
         for (i = 0; i < private->data->mmsys_dev_num; i++)
                 private->all_drm_private[i]->drm = NULL;
 err_put_dev:
-        for (i = 0; i < private->data->mmsys_dev_num; i++) {
-                /* For device_find_child in mtk_drm_get_all_priv() */
-                put_device(private->all_drm_private[i]->dev);
-        }
         put_device(private->mutex_dev);
         return ret;
 }
···
 static void mtk_drm_unbind(struct device *dev)
 {
         struct mtk_drm_private *private = dev_get_drvdata(dev);
-        int i;

         /* for multi mmsys dev, unregister drm dev in mmsys master */
         if (private->drm_master) {
                 drm_dev_unregister(private->drm);
                 mtk_drm_kms_deinit(private->drm);
                 drm_dev_put(private->drm);
-
-                for (i = 0; i < private->data->mmsys_dev_num; i++) {
-                        /* For device_find_child in mtk_drm_get_all_priv() */
-                        put_device(private->all_drm_private[i]->dev);
-                }
                 put_device(private->mutex_dev);
         }
         private->mtk_drm_bound = false;
+4 -1
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
···
         return true;
 }

+#define NEXT_BLK(blk) \
+        ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 {
         struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
···
         for (blk = (const struct block_header *) fw_image->data;
              (const u8*) blk < fw_image->data + fw_image->size;
-             blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+             blk = NEXT_BLK(blk)) {
                 if (blk->size == 0)
                         continue;
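The GMU firmware parsing fix replaces word-indexed iteration with byte-accurate advancing: indexing a u32 data[] array by size >> 2 silently rounds a payload size down to a multiple of 4, so the iterator lands short of the next header. A small sketch of the difference (the 6-byte payload is a made-up example):

/* Compare the old word-indexed advance with the byte-accurate one. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct block_header {
        uint32_t type;
        uint32_t size;   /* payload size in bytes */
        uint32_t data[]; /* payload follows the header */
};

int main(void)
{
        uint32_t size = 6; /* payload not a multiple of 4 bytes */

        /* old: step into u32 data[] by (size >> 2) words, losing 2 bytes */
        size_t old_next = offsetof(struct block_header, data) + (size >> 2) * 4;
        /* new: header size plus payload size in bytes, as NEXT_BLK() does */
        size_t new_next = sizeof(struct block_header) + size;

        printf("old: %zu, new: %zu\n", old_next, new_next); /* old: 12, new: 14 */
        return 0;
}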
-7
drivers/gpu/drm/msm/adreno/adreno_gpu.c
···
         return 0;
 }

-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
-        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
-        return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
 int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
                      uint32_t param, uint64_t *value, uint32_t *len)
 {
+3
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
···
         adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
                                                             dpu_kms->perf.perf_cfg);

+        if (dpu_kms->catalog->caps->has_3d_merge)
+                adjusted_mode_clk /= 2;
+
         /*
          * The given mode, adjusted for the perf clock factor, should not exceed
          * the max core clock rate
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
···
                 .base = 0x200, .len = 0xa0,}, \
         .csc_blk = {.name = "csc", \
                 .base = 0x320, .len = 0x100,}, \
-        .format_list = plane_formats_yuv, \
-        .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+        .format_list = plane_formats, \
+        .num_formats = ARRAY_SIZE(plane_formats), \
         .rotation_cfg = NULL, \
 }
+8 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
···
         int i;

         for (i = 0; i < DPU_MAX_PLANES; i++) {
+                uint32_t w = src_w, h = src_h;
+
                 if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
-                        src_w /= chroma_subsmpl_h;
-                        src_h /= chroma_subsmpl_v;
+                        w /= chroma_subsmpl_h;
+                        h /= chroma_subsmpl_v;
                 }

-                pixel_ext->num_ext_pxls_top[i] = src_h;
-                pixel_ext->num_ext_pxls_left[i] = src_w;
+                pixel_ext->num_ext_pxls_top[i] = h;
+                pixel_ext->num_ext_pxls_left[i] = w;
         }
 }
···
          * We already have verified scaling against platform limitations.
          * Now check if the SSPP supports scaling at all.
          */
-        if (!sblk->scaler_blk.len &&
+        if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
             ((drm_rect_width(&new_plane_state->src) >> 16 !=
               drm_rect_width(&new_plane_state->dst)) ||
              (drm_rect_height(&new_plane_state->src) >> 16 !=
···
                                          state, plane_state,
                                          prev_adjacent_plane_state);
                 if (ret)
-                        break;
+                        return ret;

                 prev_adjacent_plane_state = plane_state;
         }
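The pixel-extension fix works by dividing per-iteration copies instead of the shared source dimensions, so plane indices after the chroma planes still see the full luma size. A standalone sketch (plane indices and the 1920x1080 NV12 numbers are illustrative, not the kernel's):

/* Per-iteration copies prevent the chroma division from compounding. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PLANES 4
enum { COMP_0, COMP_1_2, COMP_2, COMP_3 }; /* hypothetical component indices */

int main(void)
{
        uint32_t src_w = 1920, src_h = 1080;
        uint32_t subsmpl_h = 2, subsmpl_v = 2;  /* e.g. 4:2:0 chroma */
        uint32_t left[MAX_PLANES], top[MAX_PLANES];

        for (int i = 0; i < MAX_PLANES; i++) {
                uint32_t w = src_w, h = src_h;  /* per-iteration copies (the fix) */

                if (i == COMP_1_2 || i == COMP_2) {
                        w /= subsmpl_h;
                        h /= subsmpl_v;
                }
                left[i] = w;
                top[i] = h;
        }

        /* With the old in-place division, plane 3 would have seen 480x270. */
        printf("plane 3: %ux%u\n", (unsigned)left[3], (unsigned)top[3]); /* 1920x1080 */
        return 0;
}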
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
···

         if (!reqs->scale && !reqs->yuv)
                 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
-        if (!hw_sspp && reqs->scale)
+        if (!hw_sspp && !reqs->yuv)
                 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
         if (!hw_sspp)
                 hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
+3
drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
···
                 DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
                           fb->width, dpu_wb_conn->maxlinewidth);
                 return -EINVAL;
+        } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+                DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+                return -EINVAL;
         }

         return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
-1
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
···
         struct msm_dsi_dphy_timing timing;
         const struct msm_dsi_phy_cfg *cfg;
         void *tuning_cfg;
-        void *pll_data;

         enum msm_dsi_phy_usecase usecase;
         bool regulator_ldo_mode;
+2 -16
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
···
         u32 data;

         spin_lock_irqsave(&pll->pll_enable_lock, flags);
-        if (pll->pll_enable_cnt++) {
-                spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
-                WARN_ON(pll->pll_enable_cnt == INT_MAX);
-                return;
-        }
+        pll->pll_enable_cnt++;
+        WARN_ON(pll->pll_enable_cnt == INT_MAX);

         data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
         data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
···
         spin_lock_init(&pll_7nm->pll_enable_lock);

         pll_7nm->phy = phy;
-        phy->pll_data = pll_7nm;

         ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
         if (ret) {
···
         u32 const delay_us = 5;
         u32 const timeout_us = 1000;
         struct msm_dsi_dphy_timing *timing = &phy->timing;
-        struct dsi_pll_7nm *pll = phy->pll_data;
         void __iomem *base = phy->base;
         bool less_than_1500_mhz;
-        unsigned long flags;
         u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
         u32 glbl_pemph_ctrl_0;
         u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
···
                 glbl_rescode_bot_ctrl = 0x3c;
         }

-        spin_lock_irqsave(&pll->pll_enable_lock, flags);
-        pll->pll_enable_cnt = 1;
         /* de-assert digital and pll power down */
         data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
                DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
         writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-        spin_unlock_irqrestore(&pll->pll_enable_lock, flags);

         /* Assert PLL core reset */
         writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
···
 static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
 {
-        struct dsi_pll_7nm *pll = phy->pll_data;
         void __iomem *base = phy->base;
-        unsigned long flags;
         u32 data;

         DBG("");
···
         writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
         writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);

-        spin_lock_irqsave(&pll->pll_enable_lock, flags);
-        pll->pll_enable_cnt = 0;
         /* Turn off all PHY blocks */
         writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
-        spin_unlock_irqrestore(&pll->pll_enable_lock, flags);

         /* make sure phy is turned off */
         wmb();
+7 -3
drivers/gpu/drm/msm/msm_gem.c
···
                 put_pages(obj);
         }

-        if (obj->resv != &obj->_resv) {
+        /*
+         * In error paths, we could end up here before msm_gem_new_handle()
+         * has changed obj->resv to point to the shared resv. In this case,
+         * we don't want to drop a ref to the shared r_obj that we haven't
+         * taken yet.
+         */
+        if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
                 struct drm_gem_object *r_obj =
                         container_of(obj->resv, struct drm_gem_object, _resv);
-
-                WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));

                 /* Drop reference we hold to shared resv obj: */
                 drm_gem_object_put(r_obj);
+5 -4
drivers/gpu/drm/msm/msm_gem_submit.c
···
                                    submit->user_fence,
                                    DMA_RESV_USAGE_BOOKKEEP,
                                    DMA_RESV_USAGE_BOOKKEEP);
+
+                last_fence = vm->last_fence;
+                vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+                dma_fence_put(last_fence);
+
                 return;
         }
···
                 dma_resv_add_fence(obj->resv, submit->user_fence,
                                    DMA_RESV_USAGE_READ);
         }
-
-        last_fence = vm->last_fence;
-        vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
-        dma_fence_put(last_fence);
 }

 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+7 -1
drivers/gpu/drm/msm/msm_gem_vma.c
···
 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
 {
         struct drm_device *dev = job->vm->drm;
+        struct msm_drm_private *priv = dev->dev_private;
         int i = job->nr_ops++;
         int ret = 0;
···
         default:
                 ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
                 break;
+        }
+
+        if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+            !adreno_smmu_has_prr(priv->gpu)) {
+                ret = UERR(EINVAL, dev, "PRR not supported\n");
         }

         return ret;
···
          * Maybe we could allow just UNMAP ops? OTOH userspace should just
          * immediately close the device file and all will be torn down.
          */
-        if (to_msm_vm(ctx->vm)->unusable)
+        if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
                 return UERR(EPIPE, dev, "context is unusable");

         /*
+11
drivers/gpu/drm/msm/msm_gpu.h
···
         return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
 }

+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+        struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+        if (!adreno_smmu)
+                return false;
+
+        return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
 /* It turns out that all targets use the same ringbuffer size */
 #define MSM_GPU_RINGBUFFER_SZ SZ_32K
 #define MSM_GPU_RINGBUFFER_BLKSIZE 32
+5
drivers/gpu/drm/msm/msm_iommu.c
···
         ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
         if (ret != p->count) {
+                kfree(p->pages);
+                p->pages = NULL;
                 p->count = ret;
                 return -ENOMEM;
         }
···
 {
         struct kmem_cache *pt_cache = get_pt_cache(mmu);
         uint32_t remaining_pt_count = p->count - p->ptr;
+
+        if (!p->pages)
+                return;

         if (p->count > 0)
                 trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
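The prealloc fix pairs a free-and-NULL on the failure path with a NULL check in the cleanup path, so a failed init can never lead to a use-after-free or double free. A userspace sketch of the pattern (malloc/free stand in for the kmem_cache bulk API):

/* Free-and-NULL on failure; NULL-tolerant destructor. */
#include <stdlib.h>

struct prealloc {
        void **pages;
        unsigned count;
};

/* simulate a bulk allocation that can fail partway through */
static int prealloc_init(struct prealloc *p, unsigned n, int simulate_failure)
{
        p->pages = calloc(n, sizeof(*p->pages));
        if (!p->pages)
                return -1;
        p->count = n;
        if (simulate_failure) {
                free(p->pages);
                p->pages = NULL;   /* the fix: leave no dangling pointer behind */
                return -1;
        }
        return 0;
}

static void prealloc_cleanup(struct prealloc *p)
{
        if (!p->pages)             /* the fix: tolerate a failed init */
                return;
        free(p->pages);
        p->pages = NULL;
}

int main(void)
{
        struct prealloc p;

        prealloc_init(&p, 8, 1);   /* fails */
        prealloc_cleanup(&p);      /* safe: no double free */
        return 0;
}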
+12 -2
drivers/gpu/drm/nouveau/nouveau_sched.c
···
         return 0;
 }

+static bool
+nouveau_sched_job_list_empty(struct nouveau_sched *sched)
+{
+        bool empty;
+
+        spin_lock(&sched->job.list.lock);
+        empty = list_empty(&sched->job.list.head);
+        spin_unlock(&sched->job.list.lock);
+
+        return empty;
+}

 static void
 nouveau_sched_fini(struct nouveau_sched *sched)
···
         struct drm_gpu_scheduler *drm_sched = &sched->base;
         struct drm_sched_entity *entity = &sched->entity;

-        rmb(); /* for list_empty to work without lock */
-        wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+        wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));

         drm_sched_entity_fini(entity);
         drm_sched_fini(drm_sched);
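The teardown fix replaces a barrier plus an unlocked list_empty() with a check taken under the same spinlock that writers hold, which is what makes the re-evaluated wait_event() condition race-free. A userspace analogue using pthreads (the kernel code uses a spinlock and a wait queue instead):

/* Evaluate the wait condition under the writers' lock. */
#include <pthread.h>
#include <stdbool.h>

struct job_list {
        pthread_mutex_t lock;
        int nr_jobs;            /* stand-in for the linked list */
};

static bool job_list_empty(struct job_list *l)
{
        bool empty;

        pthread_mutex_lock(&l->lock);
        empty = (l->nr_jobs == 0);
        pthread_mutex_unlock(&l->lock);
        return empty;
}

int main(void)
{
        struct job_list l = { PTHREAD_MUTEX_INITIALIZER, 0 };

        return job_list_empty(&l) ? 0 : 1;
}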
+1 -1
drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
···
         dsi->lanes = 4;
         dsi->format = MIPI_DSI_FMT_RGB888;
         dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
-                          MIPI_DSI_MODE_LPM;
+                          MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;

         kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
                                            &kingdisplay_panel_funcs,
+6 -1
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
···
         .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };

+/*
+ * The mode data for this panel has been reverse engineered without access
+ * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
+ * other panels results in garbage data on the display.
+ */
 static const struct drm_display_mode t28cp45tn89_mode = {
         .clock = 6008,
         .hdisplay = 240,
···
         .vtotal = 320 + 8 + 4 + 4,
         .width_mm = 43,
         .height_mm = 57,
-        .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+        .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
 };

 static const struct drm_display_mode et028013dma_mode = {
+4 -21
drivers/gpu/drm/radeon/radeon_drv.c
···

         ret = pci_enable_device(pdev);
         if (ret)
-                goto err_free;
+                return ret;

         pci_set_drvdata(pdev, ddev);

         ret = radeon_driver_load_kms(ddev, flags);
         if (ret)
-                goto err_agp;
+                goto err;

         ret = drm_dev_register(ddev, flags);
         if (ret)
-                goto err_agp;
+                goto err;

         if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
                 format = drm_format_info(DRM_FORMAT_C8);
···
         return 0;

-err_agp:
+err:
         pci_disable_device(pdev);
-err_free:
-        drm_dev_put(ddev);
         return ret;
-}
-
-static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
-        struct drm_device *dev = pci_get_drvdata(pdev);
-
-        drm_put_dev(dev);
 }

 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-        /* if we are running in a VM, make sure the device
-         * torn down properly on reboot/shutdown
-         */
-        if (radeon_device_is_virtual())
-                radeon_pci_remove(pdev);
-
 #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
         /*
          * Some adapters need to be suspended before a
···
         .name = DRIVER_NAME,
         .id_table = pciidlist,
         .probe = radeon_pci_probe,
-        .remove = radeon_pci_remove,
         .shutdown = radeon_pci_shutdown,
         .driver.pm = &radeon_pm_ops,
 };
-1
drivers/gpu/drm/radeon/radeon_kms.c
···
         rdev->agp = NULL;

 done_free:
-        kfree(rdev);
         dev->dev_private = NULL;
 }
+4 -2
drivers/gpu/drm/scheduler/sched_entity.c
···
         entity->guilty = guilty;
         entity->num_sched_list = num_sched_list;
         entity->priority = priority;
+        entity->last_user = current->group_leader;
         /*
          * It's perfectly valid to initialize an entity without having a valid
          * scheduler attached. It's just not valid to use the scheduler before it
···
         /* For a killed process disallow further enqueueing of jobs. */
         last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
-        if ((!last_user || last_user == current->group_leader) &&
+        if (last_user == current->group_leader &&
             (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                 drm_sched_entity_kill(entity);
···
                 drm_sched_rq_remove_entity(entity->rq, entity);
                 entity->rq = rq;
         }
-        spin_unlock(&entity->lock);

         if (entity->num_sched_list == 1)
                 entity->sched_list = NULL;
+
+        spin_unlock(&entity->lock);
 }

 /**
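The entity fix initializes last_user to the creating thread group, so the cmpxchg in the flush path only kills the entity when the exiting process really was the last submitter. A sketch of that handshake with C11 atomics (the PF_EXITING/SIGKILL checks are omitted; this is an analogue, not the scheduler's code):

/* Only the thread group that owns the entity may kill it on exit. */
#include <stdatomic.h>
#include <stdbool.h>

struct entity {
        _Atomic(void *) last_user;
};

/* entity_init(): record the creator up front (part of the fix). */
static void entity_init(struct entity *e, void *group_leader)
{
        atomic_store(&e->last_user, group_leader);
}

/* Returns true if the exiting group_leader was the last submitter. */
static bool entity_flush_should_kill(struct entity *e, void *group_leader)
{
        void *expected = group_leader;

        /* cmpxchg: claim the slot only if we still own it */
        if (atomic_compare_exchange_strong(&e->last_user, &expected, NULL))
                return true;    /* we were the last user */
        return false;           /* someone else submitted since; leave it alone */
}

int main(void)
{
        struct entity e;
        int me;                 /* stand-in for current->group_leader */

        entity_init(&e, &me);
        return entity_flush_should_kill(&e, &me) ? 0 : 1;
}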
+12 -7
drivers/gpu/drm/xe/xe_gt.c
···
         unsigned int fw_ref;
         int err;

-        if (xe_device_wedged(gt_to_xe(gt)))
-                return -ECANCELED;
+        if (xe_device_wedged(gt_to_xe(gt))) {
+                err = -ECANCELED;
+                goto err_pm_put;
+        }

         /* We only support GT resets with GuC submission */
-        if (!xe_device_uc_enabled(gt_to_xe(gt)))
-                return -ENODEV;
+        if (!xe_device_uc_enabled(gt_to_xe(gt))) {
+                err = -ENODEV;
+                goto err_pm_put;
+        }

         xe_gt_info(gt, "reset started\n");

         err = gt_wait_reset_unblock(gt);
         if (!err)
                 xe_gt_warn(gt, "reset block failed to get lifted");

-        xe_pm_runtime_get(gt_to_xe(gt));

         if (xe_fault_inject_gt_reset()) {
                 err = -ECANCELED;
···
                 xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

         xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
         xe_pm_runtime_put(gt_to_xe(gt));

         return err;
···
                 return;

         xe_gt_info(gt, "reset queued\n");
-        queue_work(gt->ordered_wq, &gt->reset.worker);
+        xe_pm_runtime_get_noresume(gt_to_xe(gt));
+        if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+                xe_pm_runtime_put(gt_to_xe(gt));
 }

 void xe_gt_suspend_prepare(struct xe_gt *gt)
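The reset fix moves runtime-PM reference management to the queueing side: take the reference before queue_work() and return it immediately if the work was already pending, so the single put at the end of the worker always balances. A toy model of that accounting (a plain counter stands in for runtime PM; names are illustrative):

/* Balanced get/put around a maybe-queued work item. */
#include <stdbool.h>
#include <stdio.h>

static int pm_refcount;

static void pm_get(void) { pm_refcount++; }
static void pm_put(void) { pm_refcount--; }

/* returns false if the work was already pending, like queue_work() */
static bool queue_work_stub(bool already_pending) { return !already_pending; }

static void queue_reset(bool already_pending)
{
        pm_get();
        if (!queue_work_stub(already_pending))
                pm_put();       /* the pending work already holds the reference */
}

static void reset_worker(void)
{
        /* ... perform the reset; every exit path funnels to the one put ... */
        pm_put();
}

int main(void)
{
        queue_reset(false);     /* queued: reference held for the worker */
        reset_worker();
        queue_reset(true);      /* already pending: reference dropped at once */
        printf("refcount: %d\n", pm_refcount); /* 0: balanced */
        return 0;
}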
+4 -4
drivers/gpu/drm/xe/xe_validation.h
···
  */
 DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
              if (_T) xe_validation_ctx_fini(_T);,
-             ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
-               _ret ? NULL : _ctx; }),
+             ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+               *_ret ? NULL : _ctx; }),
              struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
-             struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+             struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
 static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
 {return *_T; }
 #define class_xe_validation_is_conditional true
···
  * exhaustive eviction.
  */
 #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
-        scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
+        scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \
         drm_exec_until_all_locked(_exec)

 #endif
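The xe_validation_guard() fix is a by-value versus by-pointer out-parameter bug: assigning to a value parameter inside the class constructor updated a private copy, so the caller's error variable stayed uninitialized. A minimal illustration of the bug class (do_init() and the -22 value are made up for the example):

/* A status written through a by-value parameter is lost;
 * the fix passes a pointer so the caller's variable is written. */
#include <stdio.h>

static int do_init(void) { return -22; /* pretend init failed (-EINVAL) */ }

static void ctor_broken(int ret)  { ret = do_init(); (void)ret; }
static void ctor_fixed(int *ret)  { *ret = do_init(); }

int main(void)
{
        int ret = 0;

        ctor_broken(ret);
        printf("broken: ret = %d\n", ret);  /* still 0: the error is lost */

        ctor_fixed(&ret);
        printf("fixed:  ret = %d\n", ret);  /* -22: the error is visible */
        return 0;
}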