Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-xe-next-fixes-2026-02-19' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

UAPI Changes:
- drm/xe: Prevent VFs from exposing the CCS mode sysfs file (Nareshkumar)

Cross-subsystem Changes:
- drm/pagemap: pass pagemap_addr by reference (Arnd)

Driver Changes:
- SRIOV related fixes (Michal, Piotr)
- PAT cache fix (Jia)
- MMIO read fix (Shuicheng)
- W/a fixes (Roper)
- Adjust type of xe_modparam.force_vram_bar_size (Shuicheng)
- Wedge mode fix (Raag)
- HWMon fix (Karthik)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/aZeR6CXDRbeudIVR@intel.com

+72 -61
+1 -1
drivers/gpu/drm/drm_gpusvm.c
···
                                                            addr->dir);
                else if (dpagemap && dpagemap->ops->device_unmap)
                        dpagemap->ops->device_unmap(dpagemap,
-                                                   dev, *addr);
+                                                   dev, addr);
                i += 1 << addr->order;
        }
 
+1 -1
drivers/gpu/drm/drm_pagemap.c
···
                struct drm_pagemap_zdd *zdd = page->zone_device_data;
                struct drm_pagemap *dpagemap = zdd->dpagemap;
 
-               dpagemap->ops->device_unmap(dpagemap, dev, pagemap_addr[i]);
+               dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]);
        } else {
                dma_unmap_page(dev, pagemap_addr[i].addr,
                               PAGE_SIZE << pagemap_addr[i].order, dir);
+1 -1
drivers/gpu/drm/xe/xe_bo.c
···
        int err = 0;
        int idx;
 
-       if (!drm_dev_enter(&xe->drm, &idx))
+       if (xe_device_wedged(xe) || !drm_dev_enter(&xe->drm, &idx))
                return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 
        ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+8 -4
drivers/gpu/drm/xe/xe_configfs.h
···
 bool xe_configfs_media_gt_allowed(struct pci_dev *pdev);
 u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
 bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
-u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+                                       enum xe_engine_class class,
                                        const u32 **cs);
-u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
+                                        enum xe_engine_class class,
                                         const u32 **cs);
 #ifdef CONFIG_PCI_IOV
 unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev);
···
 static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; }
 static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
 static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
-static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+                                                     enum xe_engine_class class,
                                                      const u32 **cs) { return 0; }
-static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
+                                                      enum xe_engine_class class,
                                                       const u32 **cs) { return 0; }
 static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; }
 #endif
+1 -1
drivers/gpu/drm/xe/xe_gt_ccs_mode.c
···
        struct xe_device *xe = gt_to_xe(gt);
        int err;
 
-       if (!xe_gt_ccs_mode_enabled(gt))
+       if (!xe_gt_ccs_mode_enabled(gt) || IS_SRIOV_VF(xe))
                return 0;
 
        err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
+3 -3
drivers/gpu/drm/xe/xe_hwmon.c
···
        CHANNEL_MCTRL,
        CHANNEL_PCIE,
        CHANNEL_VRAM_N,
-       CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS,
+       CHANNEL_VRAM_N_MAX = CHANNEL_VRAM_N + MAX_VRAM_CHANNELS - 1,
        CHANNEL_MAX,
 };
 
···
                        return BMG_PACKAGE_TEMPERATURE;
                else if (channel == CHANNEL_VRAM)
                        return BMG_VRAM_TEMPERATURE;
-               else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX))
+               else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS))
                        return BMG_VRAM_TEMPERATURE_N(channel - CHANNEL_VRAM_N);
        } else if (xe->info.platform == XE_DG2) {
                if (channel == CHANNEL_PKG)
···
                        *str = "mctrl";
                else if (channel == CHANNEL_PCIE)
                        *str = "pcie";
-               else if (in_range(channel, CHANNEL_VRAM_N, CHANNEL_VRAM_N_MAX))
+               else if (in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS))
                        *str = hwmon->temp.vram_label[channel - CHANNEL_VRAM_N];
                return 0;
        case hwmon_power:
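
Both in_range() fixes above rely on the kernel helper taking a length, not an end value: in_range(val, start, len) is true when start <= val < start + len, so passing the CHANNEL_VRAM_N_MAX enumerator as the third argument made the check cover far more channels than intended. A minimal sketch of the corrected usage, assuming only the in_range() helper from <linux/minmax.h>; the channel numbering below is illustrative and not taken from xe_hwmon.c:

#include <linux/minmax.h>

#define MAX_VRAM_CHANNELS      4
#define CHANNEL_VRAM_N         7       /* hypothetical first per-stack VRAM channel */

/* VRAM channels are CHANNEL_VRAM_N .. CHANNEL_VRAM_N + MAX_VRAM_CHANNELS - 1. */
static bool is_vram_temp_channel(int channel)
{
        return in_range(channel, CHANNEL_VRAM_N, MAX_VRAM_CHANNELS);
}
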
+5 -5
drivers/gpu/drm/xe/xe_mmio.c
···
        struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
        u32 ldw, udw, oldudw, retries;
 
-       reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
-       reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
-
-       /* we shouldn't adjust just one register address */
-       xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
+       /*
+        * The two dwords of a 64-bit register can never straddle the offset
+        * adjustment cutoff.
+        */
+       xe_tile_assert(mmio->tile, !in_range(mmio->adj_limit, reg.addr + 1, 7));
 
        oldudw = xe_mmio_read32(mmio, reg_udw);
        for (retries = 5; retries; --retries) {
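
Using the same in_range(val, start, len) semantics, the new assertion reads as: the adjustment cutoff must not fall anywhere in reg.addr + 1 .. reg.addr + 7, i.e. inside the 8-byte span of the 64-bit register past its first byte, which would leave the two 32-bit halves adjusted inconsistently. A small sketch of that window check, with mmio->adj_limit taken from the diff above and everything else illustrative:

/*
 * A 64-bit register read as two dwords occupies offsets [addr, addr + 7].
 * If the address-adjustment cutoff lands strictly inside that span, the
 * lower and upper dword would be treated differently.
 */
static bool reg64_straddles_limit(u32 addr, u32 adj_limit)
{
        return in_range(adj_limit, addr + 1, 7);        /* addr + 1 .. addr + 7 */
}
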
+1 -1
drivers/gpu/drm/xe/xe_module.h
···
 struct xe_modparam {
        bool force_execlist;
        bool probe_display;
-       u32 force_vram_bar_size;
+       int force_vram_bar_size;
        int guc_log_level;
        char *guc_firmware_path;
        char *huc_firmware_path;
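
One likely reason the field type matters: module_param_named() type-checks the backing variable against the declared parameter type at build time, so the struct field and the type specifier used in xe_module.c have to agree. A hedged sketch of that pairing; the parameter name, permissions, and description below are illustrative, not copied from the driver:

#include <linux/moduleparam.h>

static int force_vram_bar_size; /* stand-in for xe_modparam.force_vram_bar_size */

/* The param_check step warns at build time if the variable's type and "int" disagree. */
module_param_named(vram_bar_size, force_vram_bar_size, int, 0600);
MODULE_PARM_DESC(vram_bar_size, "Forced VRAM BAR size in MiB (illustrative)");
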
+6
drivers/gpu/drm/xe/xe_pci.c
···
        struct xe_gt *gt __free(kfree) = NULL;
        int err;
 
+       /* Don't try to read media ver if media GT is not allowed */
+       if (type == GMDID_MEDIA && !xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) {
+               *ver = *revid = 0;
+               return 0;
+       }
+
        gt = kzalloc(sizeof(*gt), GFP_KERNEL);
        if (!gt)
                return -ENOMEM;
+26 -28
drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c
···
 
 /* no user serviceable parts below */
 
-static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid)
+static void action_put_kobject(void *arg)
+{
+       struct kobject *kobj = arg;
+
+       kobject_put(kobj);
+}
+
+static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid,
+                                            const struct kobj_type *ktype)
 {
        struct xe_sriov_kobj *vkobj;
+       int err;
 
        xe_sriov_pf_assert_vfid(xe, vfid);
 
        vkobj = kzalloc(sizeof(*vkobj), GFP_KERNEL);
        if (!vkobj)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        vkobj->xe = xe;
        vkobj->vfid = vfid;
+       kobject_init(&vkobj->base, ktype);
+
+       err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, &vkobj->base);
+       if (err)
+               return ERR_PTR(err);
+
        return &vkobj->base;
 }
 
···
        xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err));
 }
 
-static void action_put_kobject(void *arg)
-{
-       struct kobject *kobj = arg;
-
-       kobject_put(kobj);
-}
-
 static int pf_setup_root(struct xe_device *xe)
 {
        struct kobject *parent = &xe->drm.dev->kobj;
        struct kobject *root;
        int err;
 
-       root = create_xe_sriov_kobj(xe, PFID);
-       if (!root)
-               return pf_sysfs_error(xe, -ENOMEM, "root obj");
+       root = create_xe_sriov_kobj(xe, PFID, &xe_sriov_dev_ktype);
+       if (IS_ERR(root))
+               return pf_sysfs_error(xe, PTR_ERR(root), "root obj");
 
-       err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root);
-       if (err)
-               return pf_sysfs_error(xe, err, "root action");
-
-       err = kobject_init_and_add(root, &xe_sriov_dev_ktype, parent, "sriov_admin");
+       err = kobject_add(root, parent, "sriov_admin");
        if (err)
                return pf_sysfs_error(xe, err, "root init");
 
···
        root = xe->sriov.pf.sysfs.root;
 
        for (n = 0; n <= totalvfs; n++) {
-               kobj = create_xe_sriov_kobj(xe, VFID(n));
-               if (!kobj)
-                       return pf_sysfs_error(xe, -ENOMEM, "tree obj");
-
-               err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root);
-               if (err)
-                       return pf_sysfs_error(xe, err, "tree action");
+               kobj = create_xe_sriov_kobj(xe, VFID(n), &xe_sriov_vf_ktype);
+               if (IS_ERR(kobj))
+                       return pf_sysfs_error(xe, PTR_ERR(kobj), "tree obj");
 
                if (n)
-                       err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype,
-                                                  root, "vf%u", n);
+                       err = kobject_add(kobj, root, "vf%u", n);
                else
-                       err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype,
-                                                  root, "pf");
+                       err = kobject_add(kobj, root, "pf");
                if (err)
                        return pf_sysfs_error(xe, err, "tree init");
 
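
The sysfs rework follows the two-step kobject pattern: kobject_init() inside the allocator, a devm-managed kobject_put() registered right away, and kobject_add() only once cleanup is guaranteed, replacing the previous kobject_init_and_add() calls. It also means the per-VF loop no longer registers its put action against root instead of the newly created kobj, as the removed lines above did. A minimal sketch of the pattern using only the generic kobject/devm API; the names and placeholder ktype are not the driver's, and the ktype's release() is assumed to free the object:

static void example_put_kobject(void *arg)
{
        kobject_put(arg);
}

static struct kobject *example_create_kobj(struct device *dev,
                                           const struct kobj_type *ktype)
{
        struct kobject *kobj;
        int err;

        kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
        if (!kobj)
                return ERR_PTR(-ENOMEM);

        /* From here on, kobject_put() releases the object via ktype->release(). */
        kobject_init(kobj, ktype);

        err = devm_add_action_or_reset(dev, example_put_kobject, kobj);
        if (err)
                return ERR_PTR(err);    /* _or_reset() already dropped the reference */

        return kobj;
}

/*
 * Caller, once teardown is in place:
 *      err = kobject_add(kobj, parent, "name");
 */
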
+4 -4
drivers/gpu/drm/xe/xe_svm.c
···
 
 static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap,
                                         struct device *dev,
-                                        struct drm_pagemap_addr addr)
+                                        const struct drm_pagemap_addr *addr)
 {
-       if (addr.proto != XE_INTERCONNECT_P2P)
+       if (addr->proto != XE_INTERCONNECT_P2P)
                return;
 
-       dma_unmap_resource(dev, addr.addr, PAGE_SIZE << addr.order,
-                          addr.dir, DMA_ATTR_SKIP_CPU_SYNC);
+       dma_unmap_resource(dev, addr->addr, PAGE_SIZE << addr->order,
+                          addr->dir, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static void xe_pagemap_destroy_work(struct work_struct *work)
+6 -1
drivers/gpu/drm/xe/xe_vm_madvise.c
···
                break;
        case DRM_XE_MEM_RANGE_ATTR_PAT:
        {
-               u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val);
+               u16 pat_index, coh_mode;
 
+               if (XE_IOCTL_DBG(xe, args->pat_index.val >= xe->pat.n_entries))
+                       return false;
+
+               pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries);
+               coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
                if (XE_IOCTL_DBG(xe, !coh_mode))
                        return false;
 
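
The added pat_index handling is the usual bounds-check-then-clamp pattern for untrusted ioctl input: reject an out-of-range index, then pass it through array_index_nospec() so the following table lookup cannot be used as a speculative out-of-bounds read. A self-contained sketch of the pattern; the table and its size are illustrative, not the xe PAT table:

#include <linux/nospec.h>

static int lookup_user_index(const int *table, unsigned int n_entries,
                             unsigned int user_idx)
{
        if (user_idx >= n_entries)
                return -EINVAL;

        /* Clamp the index under speculation as well as architecturally. */
        user_idx = array_index_nospec(user_idx, n_entries);

        return table[user_idx];
}
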
+8 -10
drivers/gpu/drm/xe/xe_wa.c
···
                       FUNC(xe_rtp_match_first_render_or_compute)),
          XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
        },
-       { XE_RTP_NAME("14019988906"),
-         XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
-                      FUNC(xe_rtp_match_first_render_or_compute)),
-         XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
-       },
-       { XE_RTP_NAME("14019877138"),
-         XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
-                      FUNC(xe_rtp_match_first_render_or_compute)),
-         XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
-       },
        { XE_RTP_NAME("14020338487"),
          XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
                       FUNC(xe_rtp_match_first_render_or_compute)),
···
        { XE_RTP_NAME("14020756599"),
          XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
          XE_RTP_ACTIONS(SET(WM_CHICKEN3, HIZ_PLANE_COMPRESSION_DIS))
+       },
+       { XE_RTP_NAME("14019988906"),
+         XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
+         XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
+       },
+       { XE_RTP_NAME("14019877138"),
+         XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
+         XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
        },
        { XE_RTP_NAME("14021490052"),
          XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+1 -1
include/drm/drm_pagemap.h
···
         */
        void (*device_unmap)(struct drm_pagemap *dpagemap,
                             struct device *dev,
-                            struct drm_pagemap_addr addr);
+                            const struct drm_pagemap_addr *addr);
 
        /**
         * @populate_mm: Populate part of the mm with @dpagemap memory,