Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-misc-fixes-2026-02-26' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Several fixes for:
- amdxdna: Fixes for a deadlock, a NULL pointer dereference, a suspend
failure, a hang, an out-of-bounds access, a buffer overflow, input
sanitization issues, and other minor problems.
- dw-dp: An error handling fix
- ethosu: A binary shift overflow fix
- imx: An error handling fix
- logicvc: A dt node reference leak fix
- nouveau: A WARN_ON removal
- samsung-dsim: A memory leak fix
- sharp-memory: A NULL pointer dereference fix
- vmwgfx: A reference count and error handling fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patch.msgid.link/20260226-heretic-stimulating-swine-6a2f27@penduick

+176 -106
+19 -13
drivers/accel/amdxdna/aie2_ctx.c
··· 23 23 #include "amdxdna_pci_drv.h" 24 24 #include "amdxdna_pm.h" 25 25 26 - static bool force_cmdlist; 26 + static bool force_cmdlist = true; 27 27 module_param(force_cmdlist, bool, 0600); 28 - MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default false)"); 28 + MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default true)"); 29 29 30 30 #define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */ 31 31 ··· 53 53 { 54 54 drm_sched_stop(&hwctx->priv->sched, bad_job); 55 55 aie2_destroy_context(xdna->dev_handle, hwctx); 56 + drm_sched_start(&hwctx->priv->sched, 0); 56 57 } 57 58 58 59 static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx) ··· 81 80 } 82 81 83 82 out: 84 - drm_sched_start(&hwctx->priv->sched, 0); 85 83 XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret); 86 84 return ret; 87 85 } ··· 297 297 struct dma_fence *fence; 298 298 int ret; 299 299 300 - if (!hwctx->priv->mbox_chann) 300 + ret = amdxdna_pm_resume_get(hwctx->client->xdna); 301 + if (ret) 301 302 return NULL; 302 303 303 - if (!mmget_not_zero(job->mm)) 304 + if (!hwctx->priv->mbox_chann) { 305 + amdxdna_pm_suspend_put(hwctx->client->xdna); 306 + return NULL; 307 + } 308 + 309 + if (!mmget_not_zero(job->mm)) { 310 + amdxdna_pm_suspend_put(hwctx->client->xdna); 304 311 return ERR_PTR(-ESRCH); 312 + } 305 313 306 314 kref_get(&job->refcnt); 307 315 fence = dma_fence_get(job->fence); 308 - 309 - ret = amdxdna_pm_resume_get(hwctx->client->xdna); 310 - if (ret) 311 - goto out; 312 316 313 317 if (job->drv_cmd) { 314 318 switch (job->drv_cmd->opcode) { ··· 501 497 502 498 if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) { 503 499 ret = aie2_destroy_context(xdna->dev_handle, hwctx); 504 - if (ret) 500 + if (ret && ret != -ENODEV) 505 501 XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret); 506 502 } else { 507 503 ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx); ··· 633 629 goto free_entity; 634 630 } 635 631 636 - 
ret = amdxdna_pm_resume_get(xdna); 632 + ret = amdxdna_pm_resume_get_locked(xdna); 637 633 if (ret) 638 634 goto free_col_list; 639 635 ··· 764 760 if (!hwctx->cus) 765 761 return -ENOMEM; 766 762 767 - ret = amdxdna_pm_resume_get(xdna); 763 + ret = amdxdna_pm_resume_get_locked(xdna); 768 764 if (ret) 769 765 goto free_cus; 770 766 ··· 1074 1070 1075 1071 ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP, 1076 1072 true, MAX_SCHEDULE_TIMEOUT); 1077 - if (!ret || ret == -ERESTARTSYS) 1073 + if (!ret) 1078 1074 XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret); 1075 + else if (ret == -ERESTARTSYS) 1076 + XDNA_DBG(xdna, "Wait for bo interrupted by signal"); 1079 1077 }
+10 -5
drivers/accel/amdxdna/aie2_message.c
··· 216 216 217 217 req.context_id = id; 218 218 ret = aie2_send_mgmt_msg_wait(ndev, &msg); 219 - if (ret) 219 + if (ret && ret != -ENODEV) 220 220 XDNA_WARN(xdna, "Destroy context failed, ret %d", ret); 221 + else if (ret == -ENODEV) 222 + XDNA_DBG(xdna, "Destroy context: device already stopped"); 221 223 222 224 return ret; 223 225 } ··· 319 317 { 320 318 struct amdxdna_dev *xdna = ndev->xdna; 321 319 int ret; 320 + 321 + if (!hwctx->priv->mbox_chann) 322 + return 0; 322 323 323 324 xdna_mailbox_stop_channel(hwctx->priv->mbox_chann); 324 325 ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id); ··· 699 694 u32 cmd_len; 700 695 void *cmd; 701 696 702 - memset(npu_slot, 0, sizeof(*npu_slot)); 703 697 cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 704 698 if (*size < sizeof(*npu_slot) + cmd_len) 705 699 return -EINVAL; 706 700 701 + memset(npu_slot, 0, sizeof(*npu_slot)); 707 702 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 708 703 if (npu_slot->cu_idx == INVALID_CU_IDX) 709 704 return -EINVAL; ··· 724 719 u32 cmd_len; 725 720 u32 arg_sz; 726 721 727 - memset(npu_slot, 0, sizeof(*npu_slot)); 728 722 sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 729 723 arg_sz = cmd_len - sizeof(*sn); 730 724 if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 732 728 if (*size < sizeof(*npu_slot) + arg_sz) 733 729 return -EINVAL; 734 730 731 + memset(npu_slot, 0, sizeof(*npu_slot)); 735 732 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 736 733 if (npu_slot->cu_idx == INVALID_CU_IDX) 737 734 return -EINVAL; ··· 756 751 u32 cmd_len; 757 752 u32 arg_sz; 758 753 759 - memset(npu_slot, 0, sizeof(*npu_slot)); 760 754 pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 761 755 arg_sz = cmd_len - sizeof(*pd); 762 756 if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 764 760 if (*size < sizeof(*npu_slot) + arg_sz) 765 761 return -EINVAL; 766 762 763 + memset(npu_slot, 0, sizeof(*npu_slot)); 767 764 npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); 768 765 if 
(npu_slot->cu_idx == INVALID_CU_IDX) 769 766 return -EINVAL; ··· 792 787 u32 cmd_len; 793 788 u32 arg_sz; 794 789 795 - memset(npu_slot, 0, sizeof(*npu_slot)); 796 790 pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); 797 791 arg_sz = cmd_len - sizeof(*pd); 798 792 if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) ··· 800 796 if (*size < sizeof(*npu_slot) + arg_sz) 801 797 return -EINVAL; 802 798 799 + memset(npu_slot, 0, sizeof(*npu_slot)); 803 800 npu_slot->type = EXEC_NPU_TYPE_ELF; 804 801 npu_slot->inst_buf_addr = pd->inst_buf; 805 802 npu_slot->save_buf_addr = pd->save_buf;
+29 -11
drivers/accel/amdxdna/aie2_pci.c
··· 32 32 module_param(aie2_max_col, uint, 0600); 33 33 MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used"); 34 34 35 + static char *npu_fw[] = { 36 + "npu_7.sbin", 37 + "npu.sbin" 38 + }; 39 + 35 40 /* 36 41 * The management mailbox channel is allocated by firmware. 37 42 * The related register and ring buffer information is on SRAM BAR. ··· 328 323 return; 329 324 } 330 325 326 + aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL); 331 327 aie2_mgmt_fw_fini(ndev); 332 328 xdna_mailbox_stop_channel(ndev->mgmt_chann); 333 329 xdna_mailbox_destroy_channel(ndev->mgmt_chann); ··· 412 406 goto stop_psp; 413 407 } 414 408 415 - ret = aie2_pm_init(ndev); 416 - if (ret) { 417 - XDNA_ERR(xdna, "failed to init pm, ret %d", ret); 418 - goto destroy_mgmt_chann; 419 - } 420 - 421 409 ret = aie2_mgmt_fw_init(ndev); 422 410 if (ret) { 423 411 XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret); 412 + goto destroy_mgmt_chann; 413 + } 414 + 415 + ret = aie2_pm_init(ndev); 416 + if (ret) { 417 + XDNA_ERR(xdna, "failed to init pm, ret %d", ret); 424 418 goto destroy_mgmt_chann; 425 419 } 426 420 ··· 457 451 { 458 452 struct amdxdna_client *client; 459 453 460 - guard(mutex)(&xdna->dev_lock); 461 454 list_for_each_entry(client, &xdna->client_list, node) 462 455 aie2_hwctx_suspend(client); 463 456 ··· 494 489 struct psp_config psp_conf; 495 490 const struct firmware *fw; 496 491 unsigned long bars = 0; 492 + char *fw_full_path; 497 493 int i, nvec, ret; 498 494 499 495 if (!hypervisor_is_type(X86_HYPER_NATIVE)) { ··· 509 503 ndev->priv = xdna->dev_info->dev_priv; 510 504 ndev->xdna = xdna; 511 505 512 - ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev); 506 + for (i = 0; i < ARRAY_SIZE(npu_fw); i++) { 507 + fw_full_path = kasprintf(GFP_KERNEL, "%s%s", ndev->priv->fw_path, npu_fw[i]); 508 + if (!fw_full_path) 509 + return -ENOMEM; 510 + 511 + ret = firmware_request_nowarn(&fw, fw_full_path, &pdev->dev); 512 + kfree(fw_full_path); 513 + if (!ret) { 514 + 
XDNA_INFO(xdna, "Load firmware %s%s", ndev->priv->fw_path, npu_fw[i]); 515 + break; 516 + } 517 + } 518 + 513 519 if (ret) { 514 520 XDNA_ERR(xdna, "failed to request_firmware %s, ret %d", 515 521 ndev->priv->fw_path, ret); ··· 969 951 if (!drm_dev_enter(&xdna->ddev, &idx)) 970 952 return -ENODEV; 971 953 972 - ret = amdxdna_pm_resume_get(xdna); 954 + ret = amdxdna_pm_resume_get_locked(xdna); 973 955 if (ret) 974 956 goto dev_exit; 975 957 ··· 1062 1044 if (!drm_dev_enter(&xdna->ddev, &idx)) 1063 1045 return -ENODEV; 1064 1046 1065 - ret = amdxdna_pm_resume_get(xdna); 1047 + ret = amdxdna_pm_resume_get_locked(xdna); 1066 1048 if (ret) 1067 1049 goto dev_exit; 1068 1050 ··· 1152 1134 if (!drm_dev_enter(&xdna->ddev, &idx)) 1153 1135 return -ENODEV; 1154 1136 1155 - ret = amdxdna_pm_resume_get(xdna); 1137 + ret = amdxdna_pm_resume_get_locked(xdna); 1156 1138 if (ret) 1157 1139 goto dev_exit; 1158 1140
+1 -1
drivers/accel/amdxdna/aie2_pm.c
··· 31 31 { 32 32 int ret; 33 33 34 - ret = amdxdna_pm_resume_get(ndev->xdna); 34 + ret = amdxdna_pm_resume_get_locked(ndev->xdna); 35 35 if (ret) 36 36 return ret; 37 37
+11 -13
drivers/accel/amdxdna/amdxdna_ctx.c
··· 104 104 105 105 if (size) { 106 106 count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header); 107 - if (unlikely(count <= num_masks)) { 107 + if (unlikely(count <= num_masks || 108 + count * sizeof(u32) + 109 + offsetof(struct amdxdna_cmd, data[0]) > 110 + abo->mem.size)) { 108 111 *size = 0; 109 112 return NULL; 110 113 } ··· 269 266 struct amdxdna_drm_config_hwctx *args = data; 270 267 struct amdxdna_dev *xdna = to_xdna_dev(dev); 271 268 struct amdxdna_hwctx *hwctx; 272 - int ret, idx; 273 269 u32 buf_size; 274 270 void *buf; 271 + int ret; 275 272 u64 val; 276 273 277 274 if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad))) ··· 313 310 return -EINVAL; 314 311 } 315 312 316 - mutex_lock(&xdna->dev_lock); 317 - idx = srcu_read_lock(&client->hwctx_srcu); 313 + guard(mutex)(&xdna->dev_lock); 318 314 hwctx = xa_load(&client->hwctx_xa, args->handle); 319 315 if (!hwctx) { 320 316 XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle); 321 317 ret = -EINVAL; 322 - goto unlock_srcu; 318 + goto free_buf; 323 319 } 324 320 325 321 ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size); 326 322 327 - unlock_srcu: 328 - srcu_read_unlock(&client->hwctx_srcu, idx); 329 - mutex_unlock(&xdna->dev_lock); 323 + free_buf: 330 324 kfree(buf); 331 325 return ret; 332 326 } ··· 334 334 struct amdxdna_hwctx *hwctx; 335 335 struct amdxdna_gem_obj *abo; 336 336 struct drm_gem_object *gobj; 337 - int ret, idx; 337 + int ret; 338 338 339 339 if (!xdna->dev_info->ops->hwctx_sync_debug_bo) 340 340 return -EOPNOTSUPP; ··· 345 345 346 346 abo = to_xdna_obj(gobj); 347 347 guard(mutex)(&xdna->dev_lock); 348 - idx = srcu_read_lock(&client->hwctx_srcu); 349 348 hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx); 350 349 if (!hwctx) { 351 350 ret = -EINVAL; 352 - goto unlock_srcu; 351 + goto put_obj; 353 352 } 354 353 355 354 ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl); 356 355 357 - unlock_srcu: 358 - 
srcu_read_unlock(&client->hwctx_srcu, idx); 356 + put_obj: 359 357 drm_gem_object_put(gobj); 360 358 return ret; 361 359 }
+19 -19
drivers/accel/amdxdna/amdxdna_gem.c
··· 21 21 #include "amdxdna_pci_drv.h" 22 22 #include "amdxdna_ubuf.h" 23 23 24 - #define XDNA_MAX_CMD_BO_SIZE SZ_32K 25 - 26 24 MODULE_IMPORT_NS("DMA_BUF"); 27 25 28 26 static int ··· 743 745 { 744 746 struct amdxdna_dev *xdna = to_xdna_dev(dev); 745 747 struct amdxdna_gem_obj *abo; 746 - int ret; 747 - 748 - if (args->size > XDNA_MAX_CMD_BO_SIZE) { 749 - XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size); 750 - return ERR_PTR(-EINVAL); 751 - } 752 748 753 749 if (args->size < sizeof(struct amdxdna_cmd)) { 754 750 XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size); ··· 756 764 abo->type = AMDXDNA_BO_CMD; 757 765 abo->client = filp->driver_priv; 758 766 759 - ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 760 - if (ret) { 761 - XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); 762 - goto release_obj; 763 - } 764 - 765 767 return abo; 766 - 767 - release_obj: 768 - drm_gem_object_put(to_gobj(abo)); 769 - return ERR_PTR(ret); 770 768 } 771 769 772 770 int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ··· 853 871 struct amdxdna_dev *xdna = client->xdna; 854 872 struct amdxdna_gem_obj *abo; 855 873 struct drm_gem_object *gobj; 874 + int ret; 856 875 857 876 gobj = drm_gem_object_lookup(client->filp, bo_hdl); 858 877 if (!gobj) { ··· 862 879 } 863 880 864 881 abo = to_xdna_obj(gobj); 865 - if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type) 882 + if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type) 883 + goto put_obj; 884 + 885 + if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva) 866 886 return abo; 867 887 888 + if (abo->mem.size > SZ_32K) { 889 + XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size); 890 + goto put_obj; 891 + } 892 + 893 + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); 894 + if (ret) { 895 + XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); 896 + goto put_obj; 897 + } 898 + 899 + return abo; 900 + 901 + put_obj: 868 902 drm_gem_object_put(gobj); 869 903 return NULL; 870 904 
}
+3
drivers/accel/amdxdna/amdxdna_pci_drv.c
··· 23 23 MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); 24 24 MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin"); 25 25 MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin"); 26 + MODULE_FIRMWARE("amdnpu/1502_00/npu_7.sbin"); 27 + MODULE_FIRMWARE("amdnpu/17f0_10/npu_7.sbin"); 28 + MODULE_FIRMWARE("amdnpu/17f0_11/npu_7.sbin"); 26 29 27 30 /* 28 31 * 0.0: Initial version
+2
drivers/accel/amdxdna/amdxdna_pm.c
··· 16 16 struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 17 17 int ret = -EOPNOTSUPP; 18 18 19 + guard(mutex)(&xdna->dev_lock); 19 20 if (xdna->dev_info->ops->suspend) 20 21 ret = xdna->dev_info->ops->suspend(xdna); 21 22 ··· 29 28 struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); 30 29 int ret = -EOPNOTSUPP; 31 30 31 + guard(mutex)(&xdna->dev_lock); 32 32 if (xdna->dev_info->ops->resume) 33 33 ret = xdna->dev_info->ops->resume(xdna); 34 34
+11
drivers/accel/amdxdna/amdxdna_pm.h
··· 15 15 void amdxdna_pm_init(struct amdxdna_dev *xdna); 16 16 void amdxdna_pm_fini(struct amdxdna_dev *xdna); 17 17 18 + static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna) 19 + { 20 + int ret; 21 + 22 + mutex_unlock(&xdna->dev_lock); 23 + ret = amdxdna_pm_resume_get(xdna); 24 + mutex_lock(&xdna->dev_lock); 25 + 26 + return ret; 27 + } 28 + 18 29 #endif /* _AMDXDNA_PM_H_ */
+5 -1
drivers/accel/amdxdna/amdxdna_ubuf.c
··· 7 7 #include <drm/drm_device.h> 8 8 #include <drm/drm_print.h> 9 9 #include <linux/dma-buf.h> 10 + #include <linux/overflow.h> 10 11 #include <linux/pagemap.h> 11 12 #include <linux/vmalloc.h> 12 13 ··· 177 176 goto free_ent; 178 177 } 179 178 180 - exp_info.size += va_ent[i].len; 179 + if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) { 180 + ret = -EINVAL; 181 + goto free_ent; 182 + } 181 183 } 182 184 183 185 ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+1 -1
drivers/accel/amdxdna/npu1_regs.c
··· 72 72 }; 73 73 74 74 static const struct amdxdna_dev_priv npu1_dev_priv = { 75 - .fw_path = "amdnpu/1502_00/npu.sbin", 75 + .fw_path = "amdnpu/1502_00/", 76 76 .rt_config = npu1_default_rt_cfg, 77 77 .dpm_clk_tbl = npu1_dpm_clk_table, 78 78 .fw_feature_tbl = npu1_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu4_regs.c
··· 98 98 }; 99 99 100 100 static const struct amdxdna_dev_priv npu4_dev_priv = { 101 - .fw_path = "amdnpu/17f0_10/npu.sbin", 101 + .fw_path = "amdnpu/17f0_10/", 102 102 .rt_config = npu4_default_rt_cfg, 103 103 .dpm_clk_tbl = npu4_dpm_clk_table, 104 104 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu5_regs.c
··· 63 63 #define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE 64 64 65 65 static const struct amdxdna_dev_priv npu5_dev_priv = { 66 - .fw_path = "amdnpu/17f0_11/npu.sbin", 66 + .fw_path = "amdnpu/17f0_11/", 67 67 .rt_config = npu4_default_rt_cfg, 68 68 .dpm_clk_tbl = npu4_dpm_clk_table, 69 69 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/amdxdna/npu6_regs.c
··· 63 63 #define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE 64 64 65 65 static const struct amdxdna_dev_priv npu6_dev_priv = { 66 - .fw_path = "amdnpu/17f0_10/npu.sbin", 66 + .fw_path = "amdnpu/17f0_10/", 67 67 .rt_config = npu4_default_rt_cfg, 68 68 .dpm_clk_tbl = npu4_dpm_clk_table, 69 69 .fw_feature_tbl = npu4_fw_feature_table,
+1 -1
drivers/accel/ethosu/ethosu_gem.c
··· 154 154 155 155 static u64 cmd_to_addr(u32 *cmd) 156 156 { 157 - return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1]; 157 + return (((u64)cmd[0] & 0xff0000) << 16) | cmd[1]; 158 158 } 159 159 160 160 static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
+14 -9
drivers/gpu/drm/bridge/samsung-dsim.c
··· 1881 1881 return 0; 1882 1882 } 1883 1883 1884 + static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) 1885 + { 1886 + if (dsi->te_gpio) { 1887 + free_irq(gpiod_to_irq(dsi->te_gpio), dsi); 1888 + gpiod_put(dsi->te_gpio); 1889 + } 1890 + } 1891 + 1884 1892 static int samsung_dsim_host_attach(struct mipi_dsi_host *host, 1885 1893 struct mipi_dsi_device *device) 1886 1894 { ··· 1969 1961 if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) { 1970 1962 ret = samsung_dsim_register_te_irq(dsi, &device->dev); 1971 1963 if (ret) 1972 - return ret; 1964 + goto err_remove_bridge; 1973 1965 } 1974 1966 1975 1967 // The next bridge can be used by host_ops->attach ··· 1990 1982 err_release_next_bridge: 1991 1983 drm_bridge_put(dsi->bridge.next_bridge); 1992 1984 dsi->bridge.next_bridge = NULL; 1993 - return ret; 1994 - } 1995 1985 1996 - static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) 1997 - { 1998 - if (dsi->te_gpio) { 1999 - free_irq(gpiod_to_irq(dsi->te_gpio), dsi); 2000 - gpiod_put(dsi->te_gpio); 2001 - } 1986 + if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) 1987 + samsung_dsim_unregister_te_irq(dsi); 1988 + err_remove_bridge: 1989 + drm_bridge_remove(&dsi->bridge); 1990 + return ret; 2002 1991 } 2003 1992 2004 1993 static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
+3 -1
drivers/gpu/drm/bridge/synopsys/dw-dp.c
··· 2049 2049 bridge->type = DRM_MODE_CONNECTOR_DisplayPort; 2050 2050 bridge->ycbcr_420_allowed = true; 2051 2051 2052 - devm_drm_bridge_add(dev, bridge); 2052 + ret = devm_drm_bridge_add(dev, bridge); 2053 + if (ret) 2054 + return ERR_PTR(ret); 2053 2055 2054 2056 dp->aux.dev = dev; 2055 2057 dp->aux.drm_dev = encoder->dev;
+4 -2
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1415 1415 { 1416 1416 struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); 1417 1417 struct device_node *np = pdata->dev->of_node; 1418 + const struct i2c_client *client = to_i2c_client(pdata->dev); 1418 1419 int ret; 1419 1420 1420 1421 pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0); ··· 1434 1433 ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; 1435 1434 1436 1435 if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) { 1437 - pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT | 1438 - DRM_BRIDGE_OP_HPD; 1436 + pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; 1437 + if (client->irq) 1438 + pdata->bridge.ops |= DRM_BRIDGE_OP_HPD; 1439 1439 /* 1440 1440 * If comms were already enabled they would have been enabled 1441 1441 * with the wrong value of HPD_DISABLE. Update it now. Comms
+2 -1
drivers/gpu/drm/drm_client_modeset.c
··· 930 930 mutex_unlock(&client->modeset_mutex); 931 931 out: 932 932 kfree(crtcs); 933 - modes_destroy(dev, modes, connector_count); 933 + if (modes) 934 + modes_destroy(dev, modes, connector_count); 934 935 kfree(modes); 935 936 kfree(offsets); 936 937 kfree(enabled);
+5 -5
drivers/gpu/drm/drm_gpusvm.c
··· 1338 1338 EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid); 1339 1339 1340 1340 /** 1341 - * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked 1341 + * drm_gpusvm_pages_valid_unlocked() - GPU SVM pages valid unlocked 1342 1342 * @gpusvm: Pointer to the GPU SVM structure 1343 - * @range: Pointer to the GPU SVM range structure 1343 + * @svm_pages: Pointer to the GPU SVM pages structure 1344 1344 * 1345 - * This function determines if a GPU SVM range pages are valid. Expected be 1346 - * called without holding gpusvm->notifier_lock. 1345 + * This function determines if a GPU SVM pages are valid. Expected be called 1346 + * without holding gpusvm->notifier_lock. 1347 1347 * 1348 - * Return: True if GPU SVM range has valid pages, False otherwise 1348 + * Return: True if GPU SVM pages are valid, False otherwise 1349 1349 */ 1350 1350 static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm, 1351 1351 struct drm_gpusvm_pages *svm_pages)
+3 -1
drivers/gpu/drm/imx/ipuv3/parallel-display.c
··· 256 256 257 257 platform_set_drvdata(pdev, imxpd); 258 258 259 - devm_drm_bridge_add(dev, &imxpd->bridge); 259 + ret = devm_drm_bridge_add(dev, &imxpd->bridge); 260 + if (ret) 261 + return ret; 260 262 261 263 return component_add(dev, &imx_pd_ops); 262 264 }
+2 -2
drivers/gpu/drm/logicvc/logicvc_drm.c
··· 92 92 struct device *dev = drm_dev->dev; 93 93 struct device_node *of_node = dev->of_node; 94 94 struct logicvc_drm_config *config = &logicvc->config; 95 - struct device_node *layers_node; 96 95 int ret; 97 96 98 97 logicvc_of_property_parse_bool(of_node, LOGICVC_OF_PROPERTY_DITHERING, ··· 127 128 if (ret) 128 129 return ret; 129 130 130 - layers_node = of_get_child_by_name(of_node, "layers"); 131 + struct device_node *layers_node __free(device_node) = 132 + of_get_child_by_name(of_node, "layers"); 131 133 if (!layers_node) { 132 134 drm_err(drm_dev, "Missing non-optional layers node\n"); 133 135 return -EINVAL;
+6 -6
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
··· 737 737 if (!obj) 738 738 goto done; 739 739 740 - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 741 - WARN_ON(obj->buffer.length != 4)) 740 + if (obj->type != ACPI_TYPE_BUFFER || 741 + obj->buffer.length != 4) 742 742 goto done; 743 743 744 744 caps->status = 0; ··· 773 773 if (!obj) 774 774 goto done; 775 775 776 - if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || 777 - WARN_ON(obj->buffer.length != 4)) 776 + if (obj->type != ACPI_TYPE_BUFFER || 777 + obj->buffer.length != 4) 778 778 goto done; 779 779 780 780 jt->status = 0; ··· 861 861 862 862 _DOD = output.pointer; 863 863 864 - if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) || 865 - WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList))) 864 + if (_DOD->type != ACPI_TYPE_PACKAGE || 865 + _DOD->package.count > ARRAY_SIZE(dod->acpiIdList)) 866 866 return; 867 867 868 868 for (int i = 0; i < _DOD->package.count; i++) {
+2 -2
drivers/gpu/drm/tiny/sharp-memory.c
··· 541 541 542 542 smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver, 543 543 struct sharp_memory_device, drm); 544 - if (!smd) 545 - return -ENOMEM; 544 + if (IS_ERR(smd)) 545 + return PTR_ERR(smd); 546 546 547 547 spi_set_drvdata(spi, smd); 548 548
+4
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 105 105 * @handle: DMA address handle for the command buffer space if @using_mob is 106 106 * false. Immutable. 107 107 * @size: The size of the command buffer space. Immutable. 108 + * @id: Monotonically increasing ID of the last cmdbuf submitted. 108 109 * @num_contexts: Number of contexts actually enabled. 109 110 */ 110 111 struct vmw_cmdbuf_man { ··· 133 132 bool has_pool; 134 133 dma_addr_t handle; 135 134 size_t size; 135 + u64 id; 136 136 u32 num_contexts; 137 137 }; 138 138 ··· 304 302 { 305 303 struct vmw_cmdbuf_man *man = header->man; 306 304 u32 val; 305 + 306 + header->cb_header->id = man->id++; 307 307 308 308 val = upper_32_bits(header->handle); 309 309 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1143 1143 ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1144 1144 if (ret != 0) { 1145 1145 drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n"); 1146 - return PTR_ERR(vmw_bo); 1146 + return ret; 1147 1147 } 1148 1148 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB); 1149 1149 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo); ··· 1199 1199 ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo); 1200 1200 if (ret != 0) { 1201 1201 drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n"); 1202 - return PTR_ERR(vmw_bo); 1202 + return ret; 1203 1203 } 1204 1204 vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM, 1205 1205 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+8 -1
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
··· 260 260 return ret; 261 261 } 262 262 263 + static void vmw_bo_dirty_free(struct kref *kref) 264 + { 265 + struct vmw_bo_dirty *dirty = container_of(kref, struct vmw_bo_dirty, ref_count); 266 + 267 + kvfree(dirty); 268 + } 269 + 263 270 /** 264 271 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object 265 272 * @vbo: The buffer object ··· 281 274 { 282 275 struct vmw_bo_dirty *dirty = vbo->dirty; 283 276 284 - if (dirty && kref_put(&dirty->ref_count, (void *)kvfree)) 277 + if (dirty && kref_put(&dirty->ref_count, vmw_bo_dirty_free)) 285 278 vbo->dirty = NULL; 286 279 } 287 280
+6 -6
include/uapi/drm/drm_fourcc.h
··· 401 401 * implementation can multiply the values by 2^6=64. For that reason the padding 402 402 * must only contain zeros. 403 403 * index 0 = Y plane, [15:0] z:Y [6:10] little endian 404 - * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian 405 - * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian 404 + * index 1 = Cb plane, [15:0] z:Cb [6:10] little endian 405 + * index 2 = Cr plane, [15:0] z:Cr [6:10] little endian 406 406 */ 407 407 #define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ 408 408 #define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ ··· 414 414 * implementation can multiply the values by 2^4=16. For that reason the padding 415 415 * must only contain zeros. 416 416 * index 0 = Y plane, [15:0] z:Y [4:12] little endian 417 - * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian 418 - * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian 417 + * index 1 = Cb plane, [15:0] z:Cb [4:12] little endian 418 + * index 2 = Cr plane, [15:0] z:Cr [4:12] little endian 419 419 */ 420 420 #define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ 421 421 #define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ ··· 424 424 /* 425 425 * 3 plane YCbCr 426 426 * index 0 = Y plane, [15:0] Y little endian 427 - * index 1 = Cr plane, [15:0] Cr little endian 428 - * index 2 = Cb plane, [15:0] Cb little endian 427 + * index 1 = Cb plane, [15:0] Cb little endian 428 + * index 2 = Cr plane, [15:0] Cr little endian 429 429 */ 430 430 #define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */ 431 431 #define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */