Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-xe-next-fixes-2026-02-05' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

- Fix CFI violation in debugfs access (Daniele)
- Kernel-doc fixes (Chaitanya, Shuicheng)
- Disable D3Cold for BMG only on specific platforms (Karthik)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/aYStaLZVJWwKCDZt@intel.com

+20 -11
+1 -1
drivers/gpu/drm/xe/abi/guc_scheduler_abi.h
@@ -8,7 +8,7 @@
 
 #include <linux/types.h>
 
-/**
+/*
  * Generic defines required for registration with and submissions to the GuC
  * scheduler. Includes engine class/instance defines and context attributes
  * (id, priority, etc)
+4 -2
drivers/gpu/drm/xe/xe_guc.c
@@ -1661,7 +1661,7 @@
 	xe_guc_submit_unpause(guc);
 }
 
-void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
+int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
 	u32 status;
@@ -1672,7 +1672,7 @@
 	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
 		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
 		if (!fw_ref.domains)
-			return;
+			return -EIO;
 
 		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
 
@@ -1698,6 +1698,8 @@
 
 	drm_puts(p, "\n");
 	xe_guc_submit_print(guc, p);
+
+	return 0;
 }
 
 /**
+1 -1
drivers/gpu/drm/xe/xe_guc.h
@@ -53,7 +53,7 @@
 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val);
 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir);
 void xe_guc_sanitize(struct xe_guc *guc);
-void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
+int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
 int xe_guc_reset_prepare(struct xe_guc *guc);
 void xe_guc_reset_wait(struct xe_guc *guc);
 void xe_guc_stop_prepare(struct xe_guc *guc);
+1 -1
drivers/gpu/drm/xe/xe_migrate.c
@@ -1254,7 +1254,7 @@
 }
 
 /**
- * xe_get_migrate_exec_queue() - Get the execution queue from migrate context.
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
  * @migrate: Migrate context.
  *
  * Return: Pointer to execution queue on success, error on failure
+10 -3
drivers/gpu/drm/xe/xe_pm.c
@@ -8,6 +8,7 @@
 
 #include <linux/fault-inject.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
+#include <linux/dmi.h>
 
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_placement.h>
@@ -367,9 +368,14 @@
 
 static u32 vram_threshold_value(struct xe_device *xe)
 {
-	/* FIXME: D3Cold temporarily disabled by default on BMG */
-	if (xe->info.platform == XE_BATTLEMAGE)
-		return 0;
+	if (xe->info.platform == XE_BATTLEMAGE) {
+		const char *product_name;
+
+		product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+		if (product_name && strstr(product_name, "NUC13RNG")) {
+			drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n");
+			return 0;
+		}
+	}
 
 	return DEFAULT_VRAM_THRESHOLD;
 }
+1 -1
drivers/gpu/drm/xe/xe_query.c
@@ -490,7 +490,7 @@
 
 	if (copy_to_user(*ptr, topo, sizeof(*topo)))
 		return -EFAULT;
-	*ptr += sizeof(topo);
+	*ptr += sizeof(*topo);
 
 	if (copy_to_user(*ptr, mask, mask_size))
 		return -EFAULT;
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -112,7 +112,7 @@
 }
 
 /**
- * xe_gt_tlb_inval_init - Initialize TLB invalidation state
+ * xe_gt_tlb_inval_init_early() - Initialize TLB invalidation state
  * @gt: GT structure
  *
  * Initialize TLB invalidation state, purely software initialization, should
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval_job.c
@@ -198,7 +198,7 @@
 }
 
 /**
- * xe_tlb_inval_alloc_dep() - TLB invalidation job alloc dependency
+ * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
  * @job: TLB invalidation job to alloc dependency for
  *
  * Allocate storage for a dependency in the TLB invalidation fence. This