Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/i915: switch to use kernel standard error injection

Switch error injection testing from i915_inject_probe_failure
to ALLOW_ERROR_INJECTION. Calls to i915_inject_probe_failure
are removed, and the same functions are instead annotated with
ALLOW_ERROR_INJECTION.

The functions below are dropped from testing since I could not
hit them at module bind time, so testing them would only make
the tests fail. Including them would require finding a way to
trigger a reset in i915 that exercises these paths:

intel_gvt_init
intel_wopcm_init
intel_uc_fw_upload
intel_gt_init with expected -EIO (-EINVAL is tested)
lrc_init_wa_ctx
intel_huc_auth
guc_check_version_range
intel_uc_fw_fetch
uc_fw_xfer
__intel_uc_reset_hw
guc_enable_communication
uc_init_wopcm
..and all stages of __force_fw_fetch_failures

Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Mika Kahola <mika.kahola@intel.com>
Link: https://patch.msgid.link/20251216080754.221974-2-juhapekka.heikkila@gmail.com

authored by

Juha-Pekka Heikkila and committed by
Mika Kahola
5e2e6b59 d400dad6

+17 -199
+2 -12
drivers/gpu/drm/i915/display/intel_connector.c
··· 156 156 int intel_connector_register(struct drm_connector *_connector) 157 157 { 158 158 struct intel_connector *connector = to_intel_connector(_connector); 159 - struct drm_i915_private *i915 = to_i915(_connector->dev); 160 159 int ret; 161 160 162 161 ret = intel_panel_register(connector); 163 162 if (ret) 164 - goto err; 165 - 166 - if (i915_inject_probe_failure(i915)) { 167 - ret = -EFAULT; 168 - goto err_panel; 169 - } 163 + return ret; 170 164 171 165 intel_connector_debugfs_add(connector); 172 166 173 167 return 0; 174 - 175 - err_panel: 176 - intel_panel_unregister(connector); 177 - err: 178 - return ret; 179 168 } 169 + ALLOW_ERROR_INJECTION(intel_connector_register, ERRNO); 180 170 181 171 void intel_connector_unregister(struct drm_connector *_connector) 182 172 {
+1 -4
drivers/gpu/drm/i915/display/intel_display_driver.c
··· 199 199 /* part #1: call before irq install */ 200 200 int intel_display_driver_probe_noirq(struct intel_display *display) 201 201 { 202 - struct drm_i915_private *i915 = to_i915(display->drm); 203 202 int ret; 204 - 205 - if (i915_inject_probe_failure(i915)) 206 - return -ENODEV; 207 203 208 204 if (HAS_DISPLAY(display)) { 209 205 ret = drm_vblank_init(display->drm, ··· 313 317 314 318 return ret; 315 319 } 320 + ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO); 316 321 317 322 static void set_display_access(struct intel_display *display, 318 323 bool any_task_allowed,
+1 -3
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 963 963 drm_WARN_ON(&i915->drm, engine_mask & 964 964 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); 965 965 966 - if (i915_inject_probe_failure(i915)) 967 - return -ENODEV; 968 - 969 966 for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) { 970 967 setup_logical_ids(gt, logical_ids, class); 971 968 ··· 1004 1007 intel_engines_free(gt); 1005 1008 return err; 1006 1009 } 1010 + ALLOW_ERROR_INJECTION(intel_engines_init_mmio, ERRNO); 1007 1011 1008 1012 void intel_engine_init_execlists(struct intel_engine_cs *engine) 1009 1013 {
+1 -8
drivers/gpu/drm/i915/gt/intel_gt.c
··· 686 686 { 687 687 int err; 688 688 689 - err = i915_inject_probe_error(gt->i915, -ENODEV); 690 - if (err) 691 - return err; 692 - 693 689 intel_gt_init_workarounds(gt); 694 690 695 691 /* ··· 736 740 if (err) 737 741 goto err_gt; 738 742 739 - err = i915_inject_probe_error(gt->i915, -EIO); 740 - if (err) 741 - goto err_gt; 742 - 743 743 intel_uc_init_late(&gt->uc); 744 744 745 745 intel_migrate_init(&gt->migrate, gt); ··· 758 766 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); 759 767 return err; 760 768 } 769 + ALLOW_ERROR_INJECTION(intel_gt_init, ERRNO); 761 770 762 771 void intel_gt_driver_remove(struct intel_gt *gt) 763 772 {
+1 -4
drivers/gpu/drm/i915/gt/intel_gt_print.h
··· 36 36 37 37 #define gt_probe_error(_gt, _fmt, ...) \ 38 38 do { \ 39 - if (i915_error_injected()) \ 40 - gt_dbg(_gt, _fmt, ##__VA_ARGS__); \ 41 - else \ 42 - gt_err(_gt, _fmt, ##__VA_ARGS__); \ 39 + gt_err(_gt, _fmt, ##__VA_ARGS__); \ 43 40 } while (0) 44 41 45 42 #define gt_WARN(_gt, _condition, _fmt, ...) \
-4
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 1911 1911 __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); 1912 1912 __i915_gem_object_release_map(wa_ctx->vma->obj); 1913 1913 1914 - /* Verify that we can handle failure to setup the wa_ctx */ 1915 - if (!err) 1916 - err = i915_inject_probe_error(engine->i915, -ENODEV); 1917 - 1918 1914 err_unpin: 1919 1915 if (err) 1920 1916 i915_vma_unpin(wa_ctx->vma);
-3
drivers/gpu/drm/i915/gt/intel_wopcm.c
··· 253 253 GEM_BUG_ON(huc_fw_size >= wopcm_size); 254 254 GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm_size); 255 255 256 - if (i915_inject_probe_failure(i915)) 257 - return; 258 - 259 256 if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) { 260 257 drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n", 261 258 guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
+1 -7
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
··· 266 266 u32 *cmds; 267 267 int err; 268 268 269 - err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO); 270 - if (err) 271 - return err; 272 - 273 269 GEM_BUG_ON(ct->vma); 274 270 275 271 blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE; ··· 302 306 303 307 return 0; 304 308 } 309 + ALLOW_ERROR_INJECTION(intel_guc_ct_init, ERRNO); 305 310 306 311 /** 307 312 * intel_guc_ct_fini - Fini buffer-based communication ··· 1389 1392 struct intel_guc *guc = ct_to_guc(ct); 1390 1393 1391 1394 if (ct->dead_ct_reported) 1392 - return; 1393 - 1394 - if (i915_error_injected()) 1395 1395 return; 1396 1396 1397 1397 ct->dead_ct_reported = true;
-4
drivers/gpu/drm/i915/gt/uc/intel_huc.c
··· 541 541 if (intel_huc_is_authenticated(huc, type)) 542 542 return -EEXIST; 543 543 544 - ret = i915_inject_probe_error(gt->i915, -ENXIO); 545 - if (ret) 546 - goto fail; 547 - 548 544 switch (type) { 549 545 case INTEL_HUC_AUTH_BY_GUC: 550 546 ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
+1 -16
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 60 60 int ret; 61 61 u32 guc_status; 62 62 63 - ret = i915_inject_probe_error(gt->i915, -ENXIO); 64 - if (ret) 65 - return ret; 66 - 67 63 ret = intel_reset_guc(gt); 68 64 if (ret) { 69 65 gt_err(gt, "Failed to reset GuC, ret = %d\n", ret); ··· 216 220 static int guc_enable_communication(struct intel_guc *guc) 217 221 { 218 222 struct intel_gt *gt = guc_to_gt(guc); 219 - struct drm_i915_private *i915 = gt->i915; 220 223 int ret; 221 224 222 225 GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); 223 - 224 - ret = i915_inject_probe_error(i915, -ENXIO); 225 - if (ret) 226 - return ret; 227 226 228 227 ret = intel_guc_ct_enable(&guc->ct); 229 228 if (ret) ··· 314 323 if (!intel_uc_uses_guc(uc)) 315 324 return 0; 316 325 317 - if (i915_inject_probe_failure(uc_to_gt(uc)->i915)) 318 - return -ENOMEM; 319 - 320 326 ret = intel_guc_init(guc); 321 327 if (ret) 322 328 return ret; ··· 326 338 327 339 return 0; 328 340 } 341 + ALLOW_ERROR_INJECTION(__uc_init, ERRNO); 329 342 330 343 static void __uc_fini(struct intel_uc *uc) 331 344 { ··· 369 380 GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); 370 381 GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); 371 382 GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); 372 - 373 - err = i915_inject_probe_error(gt->i915, -ENXIO); 374 - if (err) 375 - return err; 376 383 377 384 mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; 378 385 err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
+2 -58
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 542 542 INTEL_UC_FIRMWARE_NOT_SUPPORTED); 543 543 } 544 544 545 - static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e) 546 - { 547 - struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915; 548 - bool user = e == -EINVAL; 549 - 550 - if (i915_inject_probe_error(i915, e)) { 551 - /* non-existing blob */ 552 - uc_fw->file_selected.path = "<invalid>"; 553 - uc_fw->user_overridden = user; 554 - } else if (i915_inject_probe_error(i915, e)) { 555 - /* require next major version */ 556 - uc_fw->file_wanted.ver.major += 1; 557 - uc_fw->file_wanted.ver.minor = 0; 558 - uc_fw->user_overridden = user; 559 - } else if (i915_inject_probe_error(i915, e)) { 560 - /* require next minor version */ 561 - uc_fw->file_wanted.ver.minor += 1; 562 - uc_fw->user_overridden = user; 563 - } else if (uc_fw->file_wanted.ver.major && 564 - i915_inject_probe_error(i915, e)) { 565 - /* require prev major version */ 566 - uc_fw->file_wanted.ver.major -= 1; 567 - uc_fw->file_wanted.ver.minor = 0; 568 - uc_fw->user_overridden = user; 569 - } else if (uc_fw->file_wanted.ver.minor && 570 - i915_inject_probe_error(i915, e)) { 571 - /* require prev minor version - hey, this should work! 
*/ 572 - uc_fw->file_wanted.ver.minor -= 1; 573 - uc_fw->user_overridden = user; 574 - } else if (user && i915_inject_probe_error(i915, e)) { 575 - /* officially unsupported platform */ 576 - uc_fw->file_wanted.ver.major = 0; 577 - uc_fw->file_wanted.ver.minor = 0; 578 - uc_fw->user_overridden = true; 579 - } 580 - } 581 - 582 545 static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value) 583 546 { 584 547 /* Get version numbers from the CSS header */ ··· 729 766 return -EINVAL; 730 767 } 731 768 732 - return i915_inject_probe_error(gt->i915, -EINVAL); 769 + return 0; 733 770 } 734 771 735 772 static int check_fw_header(struct intel_gt *gt, ··· 867 904 868 905 GEM_BUG_ON(!gt->wopcm.size); 869 906 GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw)); 870 - 871 - err = i915_inject_probe_error(i915, -ENXIO); 872 - if (err) 873 - goto fail; 874 - 875 - __force_fw_fetch_failures(uc_fw, -EINVAL); 876 - __force_fw_fetch_failures(uc_fw, -ESTALE); 877 907 878 908 err = try_firmware_load(uc_fw, &fw); 879 909 memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); ··· 1044 1088 u64 offset; 1045 1089 int ret; 1046 1090 1047 - ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT); 1048 - if (ret) 1049 - return ret; 1050 - 1051 1091 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1052 1092 1053 1093 /* Set the source address for the uCode */ ··· 1107 1155 */ 1108 1156 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) 1109 1157 { 1110 - struct intel_gt *gt = __uc_fw_to_gt(uc_fw); 1111 1158 int err; 1112 1159 1113 1160 /* make sure the status was cleared the last time we reset the uc */ 1114 1161 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); 1115 - 1116 - err = i915_inject_probe_error(gt->i915, -ENOEXEC); 1117 - if (err) 1118 - return err; 1119 1162 1120 1163 if (!intel_uc_fw_is_loadable(uc_fw)) 1121 1164 return -ENOEXEC; ··· 1144 1197 size_t copied; 1145 1198 void *vaddr; 1146 1199 int err; 1147 - 1148 - err = 
i915_inject_probe_error(gt->i915, -ENXIO); 1149 - if (err) 1150 - return err; 1151 1200 1152 1201 if (!uc_fw_need_rsa_in_memory(uc_fw)) 1153 1202 return 0; ··· 1186 1243 i915_vma_unpin_and_release(&vma, 0); 1187 1244 return err; 1188 1245 } 1246 + ALLOW_ERROR_INJECTION(uc_fw_rsa_data_create, ERRNO); 1189 1247 1190 1248 static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw) 1191 1249 {
+3 -9
drivers/gpu/drm/i915/i915_driver.c
··· 227 227 struct intel_display *display = dev_priv->display; 228 228 int ret = 0; 229 229 230 - if (i915_inject_probe_failure(dev_priv)) 231 - return -ENODEV; 232 - 233 230 intel_device_info_runtime_init_early(dev_priv); 234 231 235 232 intel_step_init(dev_priv); ··· 276 279 i915_workqueues_cleanup(dev_priv); 277 280 return ret; 278 281 } 282 + ALLOW_ERROR_INJECTION(i915_driver_early_probe, ERRNO); 279 283 280 284 /** 281 285 * i915_driver_late_release - cleanup the setup done in ··· 319 321 struct intel_gt *gt; 320 322 int ret, i; 321 323 322 - if (i915_inject_probe_failure(dev_priv)) 323 - return -ENODEV; 324 - 325 324 ret = i915_gmch_bridge_setup(dev_priv); 326 325 if (ret < 0) 327 326 return ret; ··· 356 361 357 362 return ret; 358 363 } 364 + ALLOW_ERROR_INJECTION(i915_driver_mmio_probe, ERRNO); 359 365 360 366 /** 361 367 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() ··· 463 467 struct intel_display *display = dev_priv->display; 464 468 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 465 469 int ret; 466 - 467 - if (i915_inject_probe_failure(dev_priv)) 468 - return -ENODEV; 469 470 470 471 if (HAS_PPGTT(dev_priv)) { 471 472 if (intel_vgpu_active(dev_priv) && ··· 591 598 i915_perf_fini(dev_priv); 592 599 return ret; 593 600 } 601 + ALLOW_ERROR_INJECTION(i915_driver_hw_probe, ERRNO); 594 602 595 603 /** 596 604 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
-5
drivers/gpu/drm/i915/i915_params.c
··· 110 110 i915_param_named_unsafe(gsc_firmware_path, charp, 0400, 111 111 "GSC firmware path to use instead of the default one"); 112 112 113 - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) 114 - i915_param_named_unsafe(inject_probe_failure, uint, 0400, 115 - "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 116 - #endif 117 - 118 113 #if IS_ENABLED(CONFIG_DRM_I915_GVT) 119 114 i915_param_named(enable_gvt, bool, 0400, 120 115 "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
-1
drivers/gpu/drm/i915/i915_params.h
··· 55 55 param(bool, memtest, false, 0400) \ 56 56 param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ 57 57 param(unsigned int, reset, 3, 0600) \ 58 - param(unsigned int, inject_probe_failure, 0, 0) \ 59 58 param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ 60 59 param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \ 61 60 param(unsigned int, lmem_size, 0, 0400) \
+1 -5
drivers/gpu/drm/i915/i915_pci.c
··· 999 999 if (err) 1000 1000 return err; 1001 1001 1002 - if (i915_inject_probe_failure(pdev_to_i915(pdev))) { 1003 - i915_pci_remove(pdev); 1004 - return -ENODEV; 1005 - } 1006 - 1007 1002 err = i915_live_selftests(pdev); 1008 1003 if (err) { 1009 1004 i915_pci_remove(pdev); ··· 1013 1018 1014 1019 return 0; 1015 1020 } 1021 + ALLOW_ERROR_INJECTION(i915_pci_probe, ERRNO); 1016 1022 1017 1023 static void i915_pci_shutdown(struct pci_dev *pdev) 1018 1024 {
+1 -29
drivers/gpu/drm/i915/i915_utils.c
··· 17 17 drm_notice(&i915->drm, "CI tainted: %#x by %pS\n", 18 18 taint, __builtin_return_address(0)); 19 19 20 - /* Failures that occur during fault injection testing are expected */ 21 - if (!i915_error_injected()) 22 - __add_taint_for_CI(taint); 20 + __add_taint_for_CI(taint); 23 21 } 24 - 25 - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) 26 - static unsigned int i915_probe_fail_count; 27 - 28 - int __i915_inject_probe_error(struct drm_i915_private *i915, int err, 29 - const char *func, int line) 30 - { 31 - if (i915_probe_fail_count >= i915_modparams.inject_probe_failure) 32 - return 0; 33 - 34 - if (++i915_probe_fail_count < i915_modparams.inject_probe_failure) 35 - return 0; 36 - 37 - drm_info(&i915->drm, "Injecting failure %d at checkpoint %u [%s:%d]\n", 38 - err, i915_modparams.inject_probe_failure, func, line); 39 - 40 - i915_modparams.inject_probe_failure = 0; 41 - return err; 42 - } 43 - 44 - bool i915_error_injected(void) 45 - { 46 - return i915_probe_fail_count && !i915_modparams.inject_probe_failure; 47 - } 48 - 49 - #endif 50 22 51 23 bool i915_vtd_active(struct drm_i915_private *i915) 52 24 {
+1 -21
drivers/gpu/drm/i915/i915_utils.h
··· 43 43 __stringify(x), (long)(x)) 44 44 #endif 45 45 46 - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) 47 - 48 - int __i915_inject_probe_error(struct drm_i915_private *i915, int err, 49 - const char *func, int line); 50 - #define i915_inject_probe_error(_i915, _err) \ 51 - __i915_inject_probe_error((_i915), (_err), __func__, __LINE__) 52 - bool i915_error_injected(void); 53 - 54 - #else 55 - 56 - #define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; }) 57 - #define i915_error_injected() false 58 - 59 - #endif 60 - 61 - #define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV) 62 - 63 46 #define i915_probe_error(i915, fmt, ...) ({ \ 64 - if (i915_error_injected()) \ 65 - drm_dbg(&(i915)->drm, fmt, ##__VA_ARGS__); \ 66 - else \ 67 - drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \ 47 + drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \ 68 48 }) 69 49 70 50 #ifndef fetch_and_zero
-3
drivers/gpu/drm/i915/intel_gvt.c
··· 238 238 */ 239 239 int intel_gvt_init(struct drm_i915_private *dev_priv) 240 240 { 241 - if (i915_inject_probe_failure(dev_priv)) 242 - return -ENODEV; 243 - 244 241 mutex_lock(&intel_gvt_mutex); 245 242 list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices); 246 243 if (intel_gvt_ops)
+1 -3
drivers/gpu/drm/i915/intel_uncore.c
··· 2072 2072 GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); 2073 2073 GEM_BUG_ON(uncore->fw_domain[domain_id]); 2074 2074 2075 - if (i915_inject_probe_failure(uncore->i915)) 2076 - return -ENOMEM; 2077 - 2078 2075 d = kzalloc(sizeof(*d), GFP_KERNEL); 2079 2076 if (!d) 2080 2077 return -ENOMEM; ··· 2115 2118 2116 2119 return 0; 2117 2120 } 2121 + ALLOW_ERROR_INJECTION(__fw_domain_init, ERRNO); 2118 2122 2119 2123 static void fw_domain_fini(struct intel_uncore *uncore, 2120 2124 enum forcewake_domain_id domain_id)