Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amd: Convert DRM_*() to drm_*()

The drm_*() macros include the device which is helpful for debugging
issues in multi-GPU systems.

Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Mario Limonciello (AMD) and committed by
Alex Deucher
e2917298 5fd4fef3

+209 -176
+4 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
··· 744 744 745 745 size = *(u16 *) info->buffer.pointer; 746 746 if (size < 3) { 747 - DRM_INFO("ATCS buffer is too small: %zu\n", size); 747 + drm_info(adev_to_drm(adev), 748 + "ATCS buffer is too small: %zu\n", size); 748 749 kfree(info); 749 750 return -EINVAL; 750 751 } ··· 804 803 805 804 info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params); 806 805 if (!info) { 807 - DRM_ERROR("ATCS PSC update failed\n"); 806 + drm_err(adev_to_drm(adev), "ATCS PSC call failed\n"); 808 807 return -EIO; 809 808 } 810 809 ··· 1121 1120 1122 1121 xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info), 1123 1122 GFP_KERNEL); 1124 - if (!xcc_info) { 1125 - DRM_ERROR("Failed to allocate memory for xcc info\n"); 1123 + if (!xcc_info) 1126 1124 return -ENOMEM; 1127 - } 1128 1125 1129 1126 INIT_LIST_HEAD(&xcc_info->list); 1130 1127 xcc_info->handle = acpi_device_handle(acpi_dev);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 683 683 ret = amdgpu_ib_schedule(ring, 1, ib, job, &f); 684 684 685 685 if (ret) { 686 - DRM_ERROR("amdgpu: failed to schedule IB.\n"); 686 + drm_err(adev_to_drm(adev), "failed to schedule IB.\n"); 687 687 goto err_ib_sched; 688 688 } 689 689
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
··· 349 349 strscpy(fw_name, "amdgpu/vega20_smc.bin"); 350 350 break; 351 351 default: 352 - DRM_ERROR("SMC firmware not supported\n"); 352 + drm_err(adev_to_drm(adev), "SMC firmware not supported\n"); 353 353 return -EINVAL; 354 354 } 355 355 ··· 357 357 AMDGPU_UCODE_REQUIRED, 358 358 "%s", fw_name); 359 359 if (err) { 360 - DRM_ERROR("Failed to load firmware \"%s\"", fw_name); 360 + drm_err(adev_to_drm(adev), 361 + "Failed to load firmware \"%s\"\n", fw_name); 361 362 amdgpu_ucode_release(&adev->pm.fw); 362 363 return err; 363 364 } ··· 403 402 kmalloc(sizeof(*cgs_device), GFP_KERNEL); 404 403 405 404 if (!cgs_device) { 406 - DRM_ERROR("Couldn't allocate CGS device structure\n"); 405 + drm_err(adev_to_drm(adev), "Couldn't allocate CGS device structure\n"); 407 406 return NULL; 408 407 } 409 408
+22 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 877 877 amdgpu_connector_get_edid(connector); 878 878 879 879 if (!amdgpu_connector->edid) { 880 - DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 881 - connector->name); 880 + drm_err(connector->dev, 881 + "%s: probed a monitor but no|invalid EDID\n", 882 + connector->name); 882 883 ret = connector_status_connected; 883 884 } else { 884 885 amdgpu_connector->use_digital = ··· 1057 1056 amdgpu_connector_get_edid(connector); 1058 1057 1059 1058 if (!amdgpu_connector->edid) { 1060 - DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 1059 + drm_err(adev_to_drm(adev), "%s: probed a monitor but no|invalid EDID\n", 1061 1060 connector->name); 1062 1061 ret = connector_status_connected; 1063 1062 broken_edid = true; /* defer use_digital to later */ ··· 1668 1667 if (router->ddc_valid || router->cd_valid) { 1669 1668 amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info); 1670 1669 if (!amdgpu_connector->router_bus) 1671 - DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); 1670 + drm_err(adev_to_drm(adev), 1671 + "Failed to assign router i2c bus! Check dmesg for i2c errors.\n"); 1672 1672 } 1673 1673 1674 1674 if (is_dp_bridge) { ··· 1683 1681 has_aux = true; 1684 1682 ddc = &amdgpu_connector->ddc_bus->adapter; 1685 1683 } else { 1686 - DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1684 + drm_err(adev_to_drm(adev), 1685 + "DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1687 1686 } 1688 1687 } 1689 1688 switch (connector_type) { ··· 1778 1775 if (i2c_bus->valid) { 1779 1776 amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); 1780 1777 if (!amdgpu_connector->ddc_bus) 1781 - DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1778 + drm_err(adev_to_drm(adev), 1779 + "VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1782 1780 else 1783 1781 ddc = &amdgpu_connector->ddc_bus->adapter; 1784 1782 } ··· 1804 1800 if (i2c_bus->valid) { 1805 1801 amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); 1806 1802 if (!amdgpu_connector->ddc_bus) 1807 - DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1803 + drm_err(adev_to_drm(adev), 1804 + "DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1808 1805 else 1809 1806 ddc = &amdgpu_connector->ddc_bus->adapter; 1810 1807 } ··· 1835 1830 if (i2c_bus->valid) { 1836 1831 amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); 1837 1832 if (!amdgpu_connector->ddc_bus) 1838 - DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1833 + drm_err(adev_to_drm(adev), 1834 + "DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1839 1835 else 1840 1836 ddc = &amdgpu_connector->ddc_bus->adapter; 1841 1837 } ··· 1892 1886 if (i2c_bus->valid) { 1893 1887 amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); 1894 1888 if (!amdgpu_connector->ddc_bus) 1895 - DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1889 + drm_err(adev_to_drm(adev), 1890 + "HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1896 1891 else 1897 1892 ddc = &amdgpu_connector->ddc_bus->adapter; 1898 1893 } ··· 1944 1937 has_aux = true; 1945 1938 ddc = &amdgpu_connector->ddc_bus->adapter; 1946 1939 } else { 1947 - DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1940 + drm_err(adev_to_drm(adev), 1941 + "DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1948 1942 } 1949 1943 } 1950 1944 drm_connector_init_with_ddc(dev, &amdgpu_connector->base, ··· 1993 1985 has_aux = true; 1994 1986 ddc = &amdgpu_connector->ddc_bus->adapter; 1995 1987 } else { 1996 - DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1988 + drm_err(adev_to_drm(adev), 1989 + "eDP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1997 1990 } 1998 1991 } 1999 1992 drm_connector_init_with_ddc(dev, &amdgpu_connector->base, ··· 2017 2008 if (i2c_bus->valid) { 2018 2009 amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus); 2019 2010 if (!amdgpu_connector->ddc_bus) 2020 - DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 2011 + drm_err(adev_to_drm(adev), 2012 + "LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 2021 2013 else 2022 2014 ddc = &amdgpu_connector->ddc_bus->adapter; 2023 2015 }
+8 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
··· 76 76 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 77 77 drm_exec_retry_on_contention(&exec); 78 78 if (unlikely(r)) { 79 - DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); 79 + drm_err(adev_to_drm(adev), 80 + "failed to reserve CSA,PD BOs: err=%d\n", r); 80 81 goto error; 81 82 } 82 83 } ··· 93 92 AMDGPU_PTE_EXECUTABLE); 94 93 95 94 if (r) { 96 - DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); 95 + drm_err(adev_to_drm(adev), 96 + "failed to do bo_map on static CSA, err=%d\n", r); 97 97 amdgpu_vm_bo_del(adev, *bo_va); 98 98 goto error; 99 99 } ··· 118 116 r = drm_exec_lock_obj(&exec, &bo->tbo.base); 119 117 drm_exec_retry_on_contention(&exec); 120 118 if (unlikely(r)) { 121 - DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); 119 + drm_err(adev_to_drm(adev), 120 + "failed to reserve CSA,PD BOs: err=%d\n", r); 122 121 goto error; 123 122 } 124 123 } 125 124 126 125 r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr); 127 126 if (r) { 128 - DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r); 127 + drm_err(adev_to_drm(adev), 128 + "failed to do bo_unmap on static CSA, err=%d\n", r); 129 129 goto error; 130 130 } 131 131
+9 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 438 438 struct drm_sched_entity *ctx_entity; 439 439 440 440 if (hw_ip >= AMDGPU_HW_IP_NUM) { 441 - DRM_ERROR("unknown HW IP type: %d\n", hw_ip); 441 + drm_err(adev_to_drm(ctx->mgr->adev), 442 + "unknown HW IP type: %d\n", hw_ip); 442 443 return -EINVAL; 443 444 } 444 445 445 446 /* Right now all IPs have only one instance - multiple rings. */ 446 447 if (instance != 0) { 447 - DRM_DEBUG("invalid ip instance: %d\n", instance); 448 + drm_dbg(adev_to_drm(ctx->mgr->adev), 449 + "invalid ip instance: %d\n", instance); 448 450 return -EINVAL; 449 451 } 450 452 451 453 if (ring >= amdgpu_ctx_num_entities[hw_ip]) { 452 - DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring); 454 + drm_dbg(adev_to_drm(ctx->mgr->adev), 455 + "invalid ring: %d %d\n", hw_ip, ring); 453 456 return -EINVAL; 454 457 } 455 458 ··· 877 874 878 875 r = dma_fence_wait(other, true); 879 876 if (r < 0 && r != -ERESTARTSYS) 880 - DRM_ERROR("Error (%ld) waiting for fence!\n", r); 877 + drm_err(adev_to_drm(ctx->mgr->adev), 878 + "AMDGPU: Error waiting for fence in ctx %p\n", ctx); 881 879 882 880 dma_fence_put(other); 883 881 return r; ··· 933 929 934 930 idr_for_each_entry(idp, ctx, id) { 935 931 if (kref_read(&ctx->refcount) != 1) { 936 - DRM_ERROR("ctx %p is still alive\n", ctx); 932 + drm_err(adev_to_drm(mgr->adev), "ctx %p is still alive\n", ctx); 937 933 continue; 938 934 } 939 935
+7 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1921 1921 /* preempt the IB */ 1922 1922 r = amdgpu_ring_preempt_ib(ring); 1923 1923 if (r) { 1924 - DRM_WARN("failed to preempt ring %d\n", ring->idx); 1924 + drm_warn(adev_to_drm(adev), "failed to preempt ring %d\n", ring->idx); 1925 1925 goto failure; 1926 1926 } 1927 1927 ··· 1929 1929 1930 1930 if (atomic_read(&ring->fence_drv.last_seq) != 1931 1931 ring->fence_drv.sync_seq) { 1932 - DRM_INFO("ring %d was preempted\n", ring->idx); 1932 + drm_info(adev_to_drm(adev), "ring %d was preempted\n", ring->idx); 1933 1933 1934 1934 amdgpu_ib_preempt_mark_partial_job(ring); 1935 1935 ··· 2016 2016 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, 2017 2017 &fops_ib_preempt); 2018 2018 if (IS_ERR(ent)) { 2019 - DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); 2019 + drm_err(adev_to_drm(adev), 2020 + "unable to create amdgpu_preempt_ib debugsfs file\n"); 2020 2021 return PTR_ERR(ent); 2021 2022 } 2022 2023 2023 2024 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, 2024 2025 &fops_sclk_set); 2025 2026 if (IS_ERR(ent)) { 2026 - DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); 2027 + drm_err(adev_to_drm(adev), 2028 + "unable to create amdgpu_set_sclk debugsfs file\n"); 2027 2029 return PTR_ERR(ent); 2028 2030 } 2029 2031 ··· 2038 2036 2039 2037 r = amdgpu_debugfs_regs_init(adev); 2040 2038 if (r) 2041 - DRM_ERROR("registering register debugfs failed (%d).\n", r); 2039 + drm_err(adev_to_drm(adev), "registering register debugfs failed (%d).\n", r); 2042 2040 2043 2041 amdgpu_debugfs_firmware_init(adev); 2044 2042 amdgpu_ta_if_debugfs_init(adev);
+1 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
··· 334 334 struct drm_sched_job *s_job; 335 335 336 336 coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); 337 - 338 - if (!coredump) { 339 - DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); 337 + if (!coredump) 340 338 return; 341 - } 342 339 343 340 coredump->skip_vram_check = skip_vram_check; 344 341 coredump->reset_vram_lost = vram_lost;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 6225 6225 !amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) 6226 6226 dev_info( 6227 6227 tmp_adev->dev, 6228 - "GPU reset(%d) failed with error %d \n", 6228 + "GPU reset(%d) failed with error %d\n", 6229 6229 atomic_read( 6230 6230 &tmp_adev->gpu_reset_counter), 6231 6231 tmp_adev->asic_reset_res);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2402 2402 supports_atomic = true; 2403 2403 2404 2404 if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { 2405 - DRM_INFO("This hardware requires experimental hardware support.\n" 2405 + dev_info(&pdev->dev, "This hardware requires experimental hardware support.\n" 2406 2406 "See modparam exp_hw_support\n"); 2407 2407 return -ENODEV; 2408 2408 } ··· 2449 2449 retry_init: 2450 2450 ret = drm_dev_register(ddev, flags); 2451 2451 if (ret == -EAGAIN && ++retry <= 3) { 2452 - DRM_INFO("retry init %d\n", retry); 2452 + drm_info(adev_to_drm(adev), "retry init %d\n", retry); 2453 2453 /* Don't request EX mode too frequently which is attacking */ 2454 2454 msleep(5000); 2455 2455 goto retry_init;
+6 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
··· 62 62 struct FW_ATT_RECORD fw_att_record = {0}; 63 63 64 64 if (size < sizeof(struct FW_ATT_RECORD)) { 65 - DRM_WARN("FW attestation input buffer not enough memory"); 65 + drm_warn(adev_to_drm(adev), "FW attestation input buffer not enough memory"); 66 66 return -EINVAL; 67 67 } 68 68 69 69 if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) { 70 - DRM_WARN("FW attestation out of bounds"); 70 + drm_warn(adev_to_drm(adev), "FW attestation out of bounds"); 71 71 return 0; 72 72 } 73 73 74 74 if (psp_get_fw_attestation_records_addr(&adev->psp, &records_addr)) { 75 - DRM_WARN("Failed to get FW attestation record address"); 75 + drm_warn(adev_to_drm(adev), "Failed to get FW attestation record address"); 76 76 return -EINVAL; 77 77 } 78 78 ··· 86 86 false); 87 87 88 88 if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) { 89 - DRM_WARN("Invalid FW attestation cookie"); 89 + drm_warn(adev_to_drm(adev), "Invalid FW attestation cookie"); 90 90 return -EINVAL; 91 91 } 92 92 93 - DRM_INFO("FW attestation version = 0x%X", fw_att_hdr.AttDbVersion); 93 + drm_info(adev_to_drm(adev), "FW attestation version = 0x%X", 94 + fw_att_hdr.AttDbVersion); 94 95 } 95 96 96 97 amdgpu_device_vram_access(adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
··· 476 476 /* Compute table size */ 477 477 adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE; 478 478 adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE; 479 - DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", 479 + drm_info(adev_to_drm(adev), "GART: num cpu pages %u, num gpu pages %u\n", 480 480 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); 481 481 482 482 return 0;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 125 125 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); 126 126 127 127 if (ret < 3) { 128 - DRM_ERROR("amdgpu: could not parse disable_cu\n"); 128 + drm_err(adev_to_drm(adev), "could not parse disable_cu\n"); 129 129 return; 130 130 } 131 131
+6 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 92 92 return; 93 93 94 94 if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD)) 95 - DRM_WARN("smart shift update failed\n"); 95 + drm_warn(dev, "smart shift update failed\n"); 96 96 97 97 amdgpu_acpi_fini(adev); 98 98 amdgpu_device_fini_hw(adev); ··· 105 105 mutex_lock(&mgpu_info.mutex); 106 106 107 107 if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) { 108 - DRM_ERROR("Cannot register more gpu instance\n"); 108 + drm_err(adev_to_drm(adev), "Cannot register more gpu instance\n"); 109 109 mutex_unlock(&mgpu_info.mutex); 110 110 return; 111 111 } ··· 162 162 dev_dbg(dev->dev, "Error during ACPI methods call\n"); 163 163 164 164 if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD)) 165 - DRM_WARN("smart shift update failed\n"); 165 + drm_warn(dev, "smart shift update failed\n"); 166 166 167 167 out: 168 168 if (r) ··· 1502 1502 1503 1503 r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev); 1504 1504 if (r) 1505 - DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n"); 1505 + drm_warn(adev_to_drm(adev), 1506 + "Failed to init usermode queue manager (%d), use legacy workload submission only\n", 1507 + r); 1506 1508 1507 1509 r = amdgpu_eviction_fence_init(&fpriv->evf_mgr); 1508 1510 if (r)
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1081 1081 adev->gmc.aper_size); 1082 1082 } 1083 1083 1084 - DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", 1084 + drm_info(adev_to_drm(adev), "Detected VRAM RAM=%lluM, BAR=%lluM\n", 1085 1085 adev->gmc.mc_vram_size >> 20, 1086 1086 (unsigned long long)adev->gmc.aper_size >> 20); 1087 - DRM_INFO("RAM width %dbits %s\n", 1087 + drm_info(adev_to_drm(adev), "RAM width %dbits %s\n", 1088 1088 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]); 1089 1089 return amdgpu_ttm_init(adev); 1090 1090 }
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 4422 4422 return 0; 4423 4423 4424 4424 if (amdgpu_ras_query_error_status(adev, &info) != 0) 4425 - DRM_WARN("RAS init harvest failure"); 4425 + drm_warn(adev_to_drm(adev), "RAS init query failure"); 4426 4426 4427 4427 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 4428 - DRM_WARN("RAS init harvest reset failure"); 4428 + drm_warn(adev_to_drm(adev), "RAS init harvest reset failure"); 4429 4429 4430 4430 return 0; 4431 4431 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 2200 2200 AMDGPU_GEM_DOMAIN_GTT, 2201 2201 &adev->mman.sdma_access_bo, NULL, 2202 2202 &adev->mman.sdma_access_ptr)) 2203 - DRM_WARN("Debug VRAM access will use slowpath MM access\n"); 2203 + drm_warn(adev_to_drm(adev), 2204 + "Debug VRAM access will use slowpath MM access\n"); 2204 2205 2205 2206 return 0; 2206 2207 }
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 279 279 280 280 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; 281 281 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 282 - DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n", 282 + drm_info(adev_to_drm(adev), "Found UVD firmware Version: %u.%u Family ID: %u\n", 283 283 version_major, version_minor, family_id); 284 284 285 285 /* ··· 306 306 dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 307 307 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f; 308 308 enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3; 309 - DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n", 309 + drm_info(adev_to_drm(adev), "Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n", 310 310 enc_major, enc_minor, dec_minor, family_id); 311 311 312 312 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; ··· 467 467 int amdgpu_uvd_suspend(struct amdgpu_device *adev) 468 468 { 469 469 if (amdgpu_ras_intr_triggered()) 470 - DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); 470 + drm_warn(adev_to_drm(adev), 471 + "UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); 471 472 472 473 return 0; 473 474 }
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 53 53 ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer, 54 54 output->period_ns); 55 55 if (ret_overrun != 1) 56 - DRM_WARN("%s: vblank timer overrun\n", __func__); 56 + drm_warn(amdgpu_crtc->base.dev, 57 + "%s: vblank timer overrun count: %llu\n", 58 + __func__, ret_overrun); 57 59 58 60 ret = drm_crtc_handle_vblank(crtc); 59 61 /* Don't queue timer again when vblank is disabled. */
+3 -3
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
··· 218 218 bd->props.power = BACKLIGHT_POWER_ON; 219 219 backlight_update_status(bd); 220 220 221 - DRM_INFO("amdgpu atom DIG backlight initialized\n"); 221 + drm_info(adev_to_drm(adev), "ATOM DIG backlight initialized\n"); 222 222 223 223 return; 224 224 ··· 256 256 backlight_device_unregister(bd); 257 257 kfree(pdata); 258 258 259 - DRM_INFO("amdgpu atom LVDS backlight unloaded\n"); 259 + drm_info(adev_to_drm(adev), "ATOM LVDS backlight unloaded\n"); 260 260 } 261 261 } 262 262 ··· 1724 1724 uint32_t bios_0_scratch; 1725 1725 1726 1726 if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) { 1727 - DRM_DEBUG_KMS("detect returned false \n"); 1727 + DRM_DEBUG_KMS("detect returned false\n"); 1728 1728 return connector_status_unknown; 1729 1729 } 1730 1730
+4 -4
drivers/gpu/drm/amd/amdgpu/cik.c
··· 1552 1552 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 1553 1553 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { 1554 1554 if (current_data_rate == 2) { 1555 - DRM_INFO("PCIE gen 3 link speeds already enabled\n"); 1555 + drm_info(adev_to_drm(adev), "PCIE gen 3 link speeds already enabled\n"); 1556 1556 return; 1557 1557 } 1558 - DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1558 + drm_info(adev_to_drm(adev), "enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1559 1559 } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { 1560 1560 if (current_data_rate == 1) { 1561 - DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 1561 + drm_info(adev_to_drm(adev), "PCIE gen 2 link speeds already enabled\n"); 1562 1562 return; 1563 1563 } 1564 - DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1564 + drm_info(adev_to_drm(adev), "enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); 1565 1565 } 1566 1566 1567 1567 if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 4170 4170 } 4171 4171 4172 4172 if (!adev->gfx.cp_fw_write_wait) 4173 - DRM_WARN_ONCE("CP firmware version too old, please update!"); 4173 + drm_warn_once(adev_to_drm(adev), "CP firmware version too old, please update!"); 4174 4174 } 4175 4175 4176 4176 static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) ··· 9385 9385 struct amdgpu_irq_src *source, 9386 9386 struct amdgpu_iv_entry *entry) 9387 9387 { 9388 - DRM_ERROR("Illegal opcode in command stream \n"); 9388 + DRM_ERROR("Illegal opcode in command stream\n"); 9389 9389 gfx_v10_0_handle_priv_fault(adev, entry); 9390 9390 return 0; 9391 9391 }
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 4796 4796 adev->gfx.is_poweron = true; 4797 4797 4798 4798 if(get_gb_addr_config(adev)) 4799 - DRM_WARN("Invalid gb_addr_config !\n"); 4799 + drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n"); 4800 4800 4801 4801 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 4802 4802 adev->gfx.rs64_enable) ··· 6669 6669 struct amdgpu_irq_src *source, 6670 6670 struct amdgpu_iv_entry *entry) 6671 6671 { 6672 - DRM_ERROR("Illegal opcode in command stream \n"); 6672 + DRM_ERROR("Illegal opcode in command stream\n"); 6673 6673 gfx_v11_0_handle_priv_fault(adev, entry); 6674 6674 return 0; 6675 6675 }
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 3657 3657 adev->gfx.is_poweron = true; 3658 3658 3659 3659 if (get_gb_addr_config(adev)) 3660 - DRM_WARN("Invalid gb_addr_config !\n"); 3660 + drm_warn(adev_to_drm(adev), "Invalid gb_addr_config !\n"); 3661 3661 3662 3662 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 3663 3663 gfx_v12_0_config_gfx_rs64(adev); ··· 5046 5046 struct amdgpu_irq_src *source, 5047 5047 struct amdgpu_iv_entry *entry) 5048 5048 { 5049 - DRM_ERROR("Illegal opcode in command stream \n"); 5049 + DRM_ERROR("Illegal opcode in command stream\n"); 5050 5050 gfx_v12_0_handle_priv_fault(adev, entry); 5051 5051 return 0; 5052 5052 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 3002 3002 static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev, 3003 3003 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 3004 3004 { 3005 - DRM_INFO("Not implemented\n"); 3005 + drm_info(adev_to_drm(adev), "Not implemented\n"); 3006 3006 } 3007 3007 3008 3008 static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
+4 -4
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1048 1048 if (adev->gfx.ce_feature_version >= 46 && 1049 1049 adev->gfx.pfp_feature_version >= 46) { 1050 1050 adev->virt.chained_ib_support = true; 1051 - DRM_INFO("Chained IB support enabled!\n"); 1051 + drm_info(adev_to_drm(adev), "Chained IB support enabled!\n"); 1052 1052 } else 1053 1053 adev->virt.chained_ib_support = false; 1054 1054 ··· 3820 3820 gfx_v8_0_select_se_sh(adev, 0xffffffff, 3821 3821 0xffffffff, 0xffffffff, 0); 3822 3822 mutex_unlock(&adev->grbm_idx_mutex); 3823 - DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 3823 + drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n", 3824 3824 i, j); 3825 3825 return; 3826 3826 } ··· 6669 6669 6670 6670 switch (enc) { 6671 6671 case 0: 6672 - DRM_INFO("SQ general purpose intr detected:" 6672 + drm_info(adev_to_drm(adev), "SQ general purpose intr detected:" 6673 6673 "se_id %d, immed_overflow %d, host_reg_overflow %d," 6674 6674 "host_cmd_overflow %d, cmd_timestamp %d," 6675 6675 "reg_timestamp %d, thread_trace_buff_full %d," ··· 6711 6711 else 6712 6712 sprintf(type, "EDC/ECC error"); 6713 6713 6714 - DRM_INFO( 6714 + drm_info(adev_to_drm(adev), 6715 6715 "SQ %s detected: " 6716 6716 "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d " 6717 6717 "trap %s, sq_ed_info.source %s.\n",
+8 -7
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1301 1301 (adev->gfx.mec_feature_version < 46) || 1302 1302 (adev->gfx.pfp_fw_version < 0x000000b7) || 1303 1303 (adev->gfx.pfp_feature_version < 46))) 1304 - DRM_WARN_ONCE("CP firmware version too old, please update!"); 1304 + drm_warn_once(adev_to_drm(adev), 1305 + "CP firmware version too old, please update!"); 1305 1306 1306 1307 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1307 1308 case IP_VERSION(9, 0, 1): ··· 2041 2040 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 2042 2041 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; 2043 2042 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; 2044 - DRM_INFO("fix gfx.config for vega12\n"); 2043 + drm_info(adev_to_drm(adev), "fix gfx.config for vega12\n"); 2045 2044 break; 2046 2045 case IP_VERSION(9, 4, 0): 2047 2046 adev->gfx.ras = &gfx_v9_0_ras; ··· 2729 2728 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 2730 2729 0xffffffff, 0xffffffff, 0); 2731 2730 mutex_unlock(&adev->grbm_idx_mutex); 2732 - DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 2731 + drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n", 2733 2732 i, j); 2734 2733 return; 2735 2734 } ··· 3152 3151 /* RLC_GPM_GENERAL_6 : RLC Ucode version */ 3153 3152 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6); 3154 3153 if(rlc_ucode_ver == 0x108) { 3155 - DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", 3154 + drm_info(adev_to_drm(adev), "Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i\n", 3156 3155 rlc_ucode_ver, adev->gfx.rlc_fw_version); 3157 3156 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, 3158 3157 * default is 0x9C4 to create a 100us interval */ ··· 3335 3334 */ 3336 3335 if (adev->flags & AMD_IS_APU && 3337 3336 adev->in_s3 && !pm_resume_via_firmware()) { 3338 - DRM_INFO("Will skip the CSB packet resubmit\n"); 3337 + drm_info(adev_to_drm(adev), "Will skip the CSB packet resubmit\n"); 3339 3338 return 0; 3340 3339 } 3341 3340 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); 3342 3341 if (r) { 3343 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3342 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 3344 3343 return r; 3345 3344 } ··· 5735 5734 5736 5735 if (i >= adev->usec_timeout) { 5737 5736 r = -EINVAL; 5738 - DRM_WARN("ring %d timeout to preempt ib\n", ring->idx); 5737 + drm_warn(adev_to_drm(adev), "ring %d timeout to preempt ib\n", ring->idx); 5739 5738 } 5740 5739 5741 5740 /*reset the CP_VMID_PREEMPT after trailing fence*/
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 1482 1482 0xffffffff, 1483 1483 0xffffffff, xcc_id); 1484 1484 mutex_unlock(&adev->grbm_idx_mutex); 1485 - DRM_INFO("Timeout wait for RLC serdes %u,%u\n", 1485 + drm_info(adev_to_drm(adev), "Timeout wait for RLC serdes %u,%u\n", 1486 1486 i, j); 1487 1487 return; 1488 1488 } ··· 1583 1583 rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6); 1584 1584 if (rlc_ucode_ver == 0x108) { 1585 1585 dev_info(adev->dev, 1586 - "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", 1586 + "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i\n", 1587 1587 rlc_ucode_ver, adev->gfx.rlc_fw_version); 1588 1588 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, 1589 1589 * default is 0x9C4 to create a 100us interval */
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
··· 649 649 } 650 650 651 651 if (!time) 652 - DRM_WARN("failed to wait for GRBM(EA) idle\n"); 652 + drm_warn(adev_to_drm(adev), "failed to wait for GRBM(EA) idle\n"); 653 653 } 654 654 655 655 const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 957 957 if (!adev->in_s0ix) 958 958 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); 959 959 960 - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 960 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 961 961 (unsigned int)(adev->gmc.gart_size >> 20), 962 962 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 963 963
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 925 925 adev->mmhub.funcs->set_fault_enable_default(adev, value); 926 926 gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); 927 927 928 - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 928 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 929 929 (unsigned int)(adev->gmc.gart_size >> 20), 930 930 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 931 931
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 976 976 adev->mmhub.funcs->set_fault_enable_default(adev, value); 977 977 adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); 978 978 979 - dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", 979 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 980 980 (unsigned)(adev->gmc.gart_size >> 20), 981 981 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 982 982
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 555 555 gmc_v6_0_set_fault_enable_default(adev, true); 556 556 557 557 gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0); 558 - dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", 558 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 559 559 (unsigned int)(adev->gmc.gart_size >> 20), 560 560 (unsigned long long)table_addr); 561 561 return 0;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 705 705 } 706 706 707 707 gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); 708 - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 708 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 709 709 (unsigned int)(adev->gmc.gart_size >> 20), 710 710 (unsigned long long)table_addr); 711 711 return 0;
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 560 560 tmp = RREG32(mmCONFIG_MEMSIZE); 561 561 /* some boards may have garbage in the upper 16 bits */ 562 562 if (tmp & 0xffff0000) { 563 - DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); 563 + drm_info(adev_to_drm(adev), "Probably bad vram size: 0x%08x\n", tmp); 564 564 if (tmp & 0xffff) 565 565 tmp &= 0xffff; 566 566 } ··· 939 939 gmc_v8_0_set_fault_enable_default(adev, true); 940 940 941 941 gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0); 942 - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 942 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n", 943 943 (unsigned int)(adev->gmc.gart_size >> 20), 944 944 (unsigned long long)table_addr); 945 945 return 0;
+6 -6
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1137 1137 */ 1138 1138 mtype_local = MTYPE_RW; 1139 1139 if (amdgpu_mtype_local == 1) { 1140 - DRM_INFO_ONCE("Using MTYPE_NC for local memory\n"); 1140 + drm_info_once(adev_to_drm(adev), "Using MTYPE_NC for local memory\n"); 1141 1141 mtype_local = MTYPE_NC; 1142 1142 } else if (amdgpu_mtype_local == 2) { 1143 - DRM_INFO_ONCE("Using MTYPE_CC for local memory\n"); 1143 + drm_info_once(adev_to_drm(adev), "Using MTYPE_CC for local memory\n"); 1144 1144 mtype_local = MTYPE_CC; 1145 1145 } else { 1146 - DRM_INFO_ONCE("Using MTYPE_RW for local memory\n"); 1146 + drm_info_once(adev_to_drm(adev), "Using MTYPE_RW for local memory\n"); 1147 1147 } 1148 1148 is_local = (!is_vram && (adev->flags & AMD_IS_APU) && 1149 1149 num_possible_nodes() <= 1) || ··· 2131 2131 if (r) 2132 2132 return r; 2133 2133 2134 - DRM_INFO("PCIE GART of %uM enabled.\n", 2134 + drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled.\n", 2135 2135 (unsigned int)(adev->gmc.gart_size >> 20)); 2136 2136 if (adev->gmc.pdb0_bo) 2137 - DRM_INFO("PDB0 located at 0x%016llX\n", 2137 + drm_info(adev_to_drm(adev), "PDB0 located at 0x%016llX\n", 2138 2138 (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo)); 2139 - DRM_INFO("PTB located at 0x%016llX\n", 2139 + drm_info(adev_to_drm(adev), "PTB located at 0x%016llX\n", 2140 2140 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); 2141 2141 2142 2142 return 0;
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 240 240 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 241 241 242 242 if (r) { 243 - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); 243 + drm_err(adev_to_drm(adev), "failed to disable JPEG power gating\n"); 244 244 return r; 245 245 } 246 246 }
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 662 662 break; 663 663 664 664 default: 665 - DRM_ERROR("unsupported misc op (%d) \n", input->op); 665 + drm_err(adev_to_drm(mes->adev), "unsupported misc op (%d)\n", input->op); 666 666 return -EINVAL; 667 667 } 668 668
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
··· 699 699 break; 700 700 701 701 default: 702 - DRM_ERROR("unsupported misc op (%d) \n", input->op); 702 + DRM_ERROR("unsupported misc op (%d)\n", input->op); 703 703 return -EINVAL; 704 704 } 705 705
+3 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 96 96 timeout -= 5; 97 97 } while (timeout > 1); 98 98 99 - dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec \n", NV_MAILBOX_POLL_ACK_TIMEDOUT); 99 + dev_err(adev->dev, 100 + "Doesn't get TRN_MSG_ACK from pf in %d msec\n", 101 + NV_MAILBOX_POLL_ACK_TIMEDOUT); 100 102 101 103 return -ETIME; 102 104 }
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
··· 140 140 141 141 static int psp_v10_0_mode1_reset(struct psp_context *psp) 142 142 { 143 - DRM_INFO("psp mode 1 reset not supported now! \n"); 143 + drm_info(adev_to_drm(psp->adev), "psp mode 1 reset not supported now!\n"); 144 144 return -EINVAL; 145 145 } 146 146
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 412 412 MBOX_TOS_READY_MASK, 0); 413 413 414 414 if (ret) { 415 - DRM_INFO("psp is not working correctly before mode1 reset!\n"); 415 + drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n"); 416 416 return -EINVAL; 417 417 } 418 418
+3 -3
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
··· 225 225 MBOX_TOS_READY_MASK, 0); 226 226 227 227 if (ret) { 228 - DRM_INFO("psp is not working correctly before mode1 reset!\n"); 228 + drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n"); 229 229 return -EINVAL; 230 230 } 231 231 ··· 240 240 0); 241 241 242 242 if (ret) { 243 - DRM_INFO("psp mode 1 reset failed!\n"); 243 + drm_info(adev_to_drm(adev), "psp mode 1 reset failed!\n"); 244 244 return -EINVAL; 245 245 } 246 246 247 - DRM_INFO("psp mode1 reset succeed \n"); 247 + drm_info(adev_to_drm(adev), "psp mode1 reset succeed\n"); 248 248 249 249 return 0; 250 250 }
+3 -3
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
··· 315 315 ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0); 316 316 317 317 if (ret) { 318 - DRM_INFO("psp is not working correctly before mode1 reset!\n"); 318 + drm_info(adev_to_drm(adev), "psp is not working correctly before mode1 reset!\n"); 319 319 return -EINVAL; 320 320 } 321 321 ··· 329 329 ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0); 330 330 331 331 if (ret) { 332 - DRM_INFO("psp mode 1 reset failed!\n"); 332 + drm_info(adev_to_drm(adev), "psp mode 1 reset failed!\n"); 333 333 return -EINVAL; 334 334 } 335 335 336 - DRM_INFO("psp mode1 reset succeed \n"); 336 + drm_info(adev_to_drm(adev), "psp mode1 reset succeed\n"); 337 337 338 338 return 0; 339 339 }
+4 -4
drivers/gpu/drm/amd/amdgpu/si.c
··· 2255 2255 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2256 2256 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) { 2257 2257 if (current_data_rate == 2) { 2258 - DRM_INFO("PCIE gen 3 link speeds already enabled\n"); 2258 + drm_info(adev_to_drm(adev), "PCIE gen 3 link speeds already enabled\n"); 2259 2259 return; 2260 2260 } 2261 - DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 2261 + drm_info(adev_to_drm(adev), "enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); 2262 2262 } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) { 2263 2263 if (current_data_rate == 1) { 2264 - DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 2264 + drm_info(adev_to_drm(adev), "PCIE gen 2 link speeds already enabled\n"); 2265 2265 return; 2266 2266 } 2267 - DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); 2267 + drm_info(adev_to_drm(adev), "enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n"); 2268 2268 } 2269 2269 2270 2270 if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
+1 -1
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 584 584 585 585 static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block) 586 586 { 587 - DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n"); 587 + drm_info(adev_to_drm(ip_block->adev), "si_dma_soft_reset --- not implemented !!!!!!!\n"); 588 588 return 0; 589 589 } 590 590
+5 -3
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
··· 196 196 197 197 if (REG_GET_FIELD(reg, CKSVII2C_IC_INTR_STAT, R_TX_ABRT) == 1) { 198 198 reg_c_tx_abrt_source = RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_TX_ABRT_SOURCE); 199 - DRM_INFO("TX was terminated, IC_TX_ABRT_SOURCE val is:%x", reg_c_tx_abrt_source); 199 + drm_info(adev_to_drm(adev), 200 + "TX was terminated, IC_TX_ABRT_SOURCE val is:%x", 201 + reg_c_tx_abrt_source); 200 202 201 203 /* Check for stop due to NACK */ 202 204 if (REG_GET_FIELD(reg_c_tx_abrt_source, ··· 771 769 uint8_t data[6] = {0xf, 0, 0xde, 0xad, 0xbe, 0xef}; 772 770 773 771 774 - DRM_INFO("Begin"); 772 + drm_info(adev_to_drm(adev), "Begin"); 775 773 776 774 if (!smu_v11_0_i2c_bus_lock(control)) { 777 775 DRM_ERROR("Failed to lock the bus!."); ··· 790 788 smu_v11_0_i2c_bus_unlock(control); 791 789 792 790 793 - DRM_INFO("End"); 791 + drm_info(adev_to_drm(adev), "End"); 794 792 return true; 795 793 } 796 794 #endif
+5 -4
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
··· 267 267 268 268 if ((adev->asic_type == CHIP_ARCTURUS) && 269 269 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 270 - DRM_WARN("Fail to disable DF-Cstate.\n"); 270 + drm_warn(adev_to_drm(adev), 271 + "Fail to disable DF-Cstate.\n"); 271 272 272 273 LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 273 274 umc_reg_offset = get_umc_6_reg_offset(adev, ··· 285 284 286 285 if ((adev->asic_type == CHIP_ARCTURUS) && 287 286 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) 288 - DRM_WARN("Fail to enable DF-Cstate\n"); 287 + drm_warn(adev_to_drm(adev), "Fail to enable DF-Cstate\n"); 289 288 290 289 if (rsmu_umc_index_state) 291 290 umc_v6_1_enable_umc_index_mode(adev); ··· 367 366 368 367 if ((adev->asic_type == CHIP_ARCTURUS) && 369 368 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 370 - DRM_WARN("Fail to disable DF-Cstate.\n"); 369 + drm_warn(adev_to_drm(adev), "Fail to disable DF-Cstate.\n"); 371 370 372 371 LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 373 372 umc_reg_offset = get_umc_6_reg_offset(adev, ··· 383 382 384 383 if ((adev->asic_type == CHIP_ARCTURUS) && 385 384 amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) 386 - DRM_WARN("Fail to enable DF-Cstate\n"); 385 + drm_warn(adev_to_drm(adev), "Fail to enable DF-Cstate\n"); 387 386 388 387 if (rsmu_umc_index_state) 389 388 umc_v6_1_enable_umc_index_mode(adev);
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 191 191 192 192 done: 193 193 if (!r) 194 - DRM_INFO("UVD initialized successfully.\n"); 194 + drm_info(adev_to_drm(adev), "UVD initialized successfully.\n"); 195 195 196 196 return r; 197 197 ··· 846 846 847 847 if (RREG32_SMC(ixCURRENT_PG_STATUS) & 848 848 CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { 849 - DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); 849 + drm_info(adev_to_drm(adev), "Cannot get clockgating state when UVD is powergated.\n"); 850 850 goto out; 851 851 } 852 852
+7 -7
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 408 408 adev->uvd.inst->irq.num_types = 1; 409 409 adev->uvd.num_enc_rings = 0; 410 410 411 - DRM_INFO("UVD ENC is disabled\n"); 411 + drm_info(adev_to_drm(adev), "UVD ENC is disabled\n"); 412 412 } 413 413 414 414 ring = &adev->uvd.inst->ring; ··· 515 515 done: 516 516 if (!r) { 517 517 if (uvd_v6_0_enc_support(adev)) 518 - DRM_INFO("UVD and UVD ENC initialized successfully.\n"); 518 + drm_info(adev_to_drm(adev), "UVD and UVD ENC initialized successfully.\n"); 519 519 else 520 - DRM_INFO("UVD initialized successfully.\n"); 520 + drm_info(adev_to_drm(adev), "UVD initialized successfully.\n"); 521 521 } 522 522 523 523 return r; ··· 1513 1513 data = RREG32_SMC(ixCURRENT_PG_STATUS); 1514 1514 1515 1515 if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { 1516 - DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); 1516 + drm_info(adev_to_drm(adev), "Cannot get clockgating state when UVD is powergated.\n"); 1517 1517 goto out; 1518 1518 } 1519 1519 ··· 1633 1633 { 1634 1634 if (adev->asic_type >= CHIP_POLARIS10) { 1635 1635 adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; 1636 - DRM_INFO("UVD is enabled in VM mode\n"); 1636 + drm_info(adev_to_drm(adev), "UVD is enabled in VM mode\n"); 1637 1637 } else { 1638 1638 adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; 1639 - DRM_INFO("UVD is enabled in physical mode\n"); 1639 + drm_info(adev_to_drm(adev), "UVD is enabled in physical mode\n"); 1640 1640 } 1641 1641 } 1642 1642 ··· 1647 1647 for (i = 0; i < adev->uvd.num_enc_rings; ++i) 1648 1648 adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; 1649 1649 1650 - DRM_INFO("UVD ENC is enabled in VM mode\n"); 1650 + drm_info(adev_to_drm(adev), "UVD ENC is enabled in VM mode\n"); 1651 1651 } 1652 1652 1653 1653 static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
+4 -4
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 438 438 adev->firmware.fw_size += 439 439 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); 440 440 } 441 - DRM_INFO("PSP loading UVD firmware\n"); 441 + drm_info(adev_to_drm(adev), "PSP loading UVD firmware\n"); 442 442 } 443 443 444 444 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { ··· 582 582 } 583 583 done: 584 584 if (!r) 585 - DRM_INFO("UVD and UVD ENC initialized successfully.\n"); 585 + drm_info(adev_to_drm(adev), "UVD and UVD ENC initialized successfully.\n"); 586 586 587 587 return r; 588 588 } ··· 1606 1606 continue; 1607 1607 adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; 1608 1608 adev->uvd.inst[i].ring.me = i; 1609 - DRM_INFO("UVD(%d) is enabled in VM mode\n", i); 1609 + drm_info(adev_to_drm(adev), "UVD(%d) is enabled in VM mode\n", i); 1610 1610 } 1611 1611 } 1612 1612 ··· 1622 1622 adev->uvd.inst[j].ring_enc[i].me = j; 1623 1623 } 1624 1624 1625 - DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j); 1625 + drm_info(adev_to_drm(adev), "UVD(%d) ENC is enabled in VM mode\n", j); 1626 1626 } 1627 1627 } 1628 1628
+3 -3
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
··· 280 280 281 281 282 282 if (vce_v2_0_lmi_clean(adev)) { 283 - DRM_INFO("VCE is not idle \n"); 283 + drm_info(adev_to_drm(adev), "VCE is not idle\n"); 284 284 return 0; 285 285 } 286 286 ··· 289 289 return -EINVAL; 290 290 291 291 if (vce_v2_0_wait_for_idle(ip_block)) { 292 - DRM_INFO("VCE is busy, Can't set clock gating"); 292 + drm_info(adev_to_drm(adev), "VCE is busy, Can't set clock gating"); 293 293 return 0; 294 294 } 295 295 ··· 481 481 return r; 482 482 } 483 483 484 - DRM_INFO("VCE initialized successfully.\n"); 484 + drm_info(adev_to_drm(adev), "VCE initialized successfully.\n"); 485 485 486 486 return 0; 487 487 }
+4 -4
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 485 485 return r; 486 486 } 487 487 488 - DRM_INFO("VCE initialized successfully.\n"); 488 + drm_info(adev_to_drm(adev), "VCE initialized successfully.\n"); 489 489 490 490 return 0; 491 491 } ··· 846 846 data = RREG32_SMC(ixCURRENT_PG_STATUS); 847 847 848 848 if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { 849 - DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); 849 + drm_info(adev_to_drm(adev), "Cannot get clockgating state when VCE is powergated.\n"); 850 850 goto out; 851 851 } 852 852 ··· 978 978 adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs; 979 979 adev->vce.ring[i].me = i; 980 980 } 981 - DRM_INFO("VCE enabled in VM mode\n"); 981 + drm_info(adev_to_drm(adev), "VCE enabled in VM mode\n"); 982 982 } else { 983 983 for (i = 0; i < adev->vce.num_rings; i++) { 984 984 adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs; 985 985 adev->vce.ring[i].me = i; 986 986 } 987 - DRM_INFO("VCE enabled in physical mode\n"); 987 + drm_info(adev_to_drm(adev), "VCE enabled in physical mode\n"); 988 988 } 989 989 } 990 990
+3 -3
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
··· 460 460 adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw; 461 461 adev->firmware.fw_size += 462 462 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); 463 - DRM_INFO("PSP loading VCE firmware\n"); 463 + drm_info(adev_to_drm(adev), "PSP loading VCE firmware\n"); 464 464 } else { 465 465 r = amdgpu_vce_resume(adev); 466 466 if (r) ··· 536 536 return r; 537 537 } 538 538 539 - DRM_INFO("VCE initialized successfully.\n"); 539 + drm_info(adev_to_drm(adev), "VCE initialized successfully.\n"); 540 540 541 541 return 0; 542 542 } ··· 864 864 adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs; 865 865 adev->vce.ring[i].me = i; 866 866 } 867 - DRM_INFO("VCE enabled in VM mode\n"); 867 + drm_info(adev_to_drm(adev), "VCE enabled in VM mode\n"); 868 868 } 869 869 870 870 static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
+3 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 105 105 continue; 106 106 107 107 if (idx >= AMDGPU_DM_MAX_CRTC) { 108 - DRM_WARN("%s connected connectors exceed max crtc\n", __func__); 108 + drm_warn(adev_to_drm(adev), 109 + "%s connected connectors exceed max crtc\n", 110 + __func__); 109 111 mutex_unlock(&ddev->mode_config.mutex); 110 112 return; 111 113 }
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 95 95 return -EFAULT; 96 96 } 97 97 98 - /* check number of parameters. isspace could not differ space and \n */ 98 + /* check number of parameters. isspace could not differ space and\n */ 99 99 while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) { 100 100 /* skip space*/ 101 101 while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+5 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
··· 503 503 struct ta_dtm_shared_memory *dtm_cmd; 504 504 505 505 if (!psp->dtm_context.context.initialized) { 506 - DRM_INFO("Failed to enable ASSR, DTM TA is not initialized."); 506 + drm_info(adev_to_drm(psp->adev), 507 + "Failed to enable ASSR, DTM TA is not initialized."); 507 508 return false; 508 509 } 509 510 ··· 521 520 psp_dtm_invoke(psp, dtm_cmd->cmd_id); 522 521 523 522 if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { 524 - DRM_INFO("Failed to enable ASSR"); 523 + drm_info(adev_to_drm(psp->adev), 524 + "Failed to enable ASSR"); 525 525 return false; 526 526 } 527 527 ··· 815 813 sysfs_bin_attr_init(&hdcp_work[0].attr); 816 814 817 815 if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr)) 818 - DRM_WARN("Failed to create device file hdcp_srm"); 816 + drm_warn(adev_to_drm(adev), "Failed to create device file hdcp_srm\n"); 819 817 820 818 return hdcp_work; 821 819
+10 -10
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 607 607 608 608 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0); 609 609 if (ret) 610 - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 611 - enable ? "enable" : "disable", ret); 610 + drm_err(adev_to_drm(adev), "DPM %s uvd failed, ret = %d.\n", 611 + enable ? "enable" : "disable", ret); 612 612 } 613 613 614 614 void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst) ··· 617 617 618 618 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst); 619 619 if (ret) 620 - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 621 - enable ? "enable" : "disable", ret); 620 + drm_err(adev_to_drm(adev), "DPM %s vcn failed, ret = %d.\n", 621 + enable ? "enable" : "disable", ret); 622 622 } 623 623 624 624 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) ··· 642 642 643 643 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0); 644 644 if (ret) 645 - DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 646 - enable ? "enable" : "disable", ret); 645 + drm_err(adev_to_drm(adev), "DPM %s vce failed, ret = %d.\n", 646 + enable ? "enable" : "disable", ret); 647 647 } 648 648 649 649 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) ··· 652 652 653 653 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0); 654 654 if (ret) 655 - DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 656 - enable ? "enable" : "disable", ret); 655 + drm_err(adev_to_drm(adev), "Dpm %s jpeg failed, ret = %d.\n", 656 + enable ? "enable" : "disable", ret); 657 657 } 658 658 659 659 void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) ··· 662 662 663 663 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0); 664 664 if (ret) 665 - DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", 666 - enable ? "enable" : "disable", ret); 665 + drm_err(adev_to_drm(adev), "DPM %s vpe failed, ret = %d.\n", 666 + enable ? 
"enable" : "disable", ret); 667 667 } 668 668 669 669 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+2 -3
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 7800 7800 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 7801 7801 if (amdgpu_dpm == 1) 7802 7802 amdgpu_pm_print_power_states(adev); 7803 - DRM_INFO("amdgpu: dpm initialized\n"); 7804 - 7803 + drm_info(adev_to_drm(adev), "si dpm initialized\n"); 7805 7804 return 0; 7806 7805 7807 7806 dpm_failed: 7808 7807 si_dpm_fini(adev); 7809 - DRM_ERROR("amdgpu: dpm initialization failed\n"); 7808 + drm_err(adev_to_drm(adev), "dpm initialization failed\n"); 7810 7809 return ret; 7811 7810 } 7812 7811
+4 -3
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 198 198 &adev->pm.smu_prv_buffer, 199 199 &gpu_addr, 200 200 &cpu_ptr)) { 201 - DRM_ERROR("amdgpu: failed to create smu prv buffer\n"); 201 + drm_err(adev_to_drm(adev), "failed to create smu prv buffer\n"); 202 202 return; 203 203 } 204 204 ··· 213 213 if (r) { 214 214 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); 215 215 adev->pm.smu_prv_buffer = NULL; 216 - DRM_ERROR("amdgpu: failed to notify SMU buffer address\n"); 216 + drm_err(adev_to_drm(adev), "failed to notify SMU buffer address\n"); 217 217 } 218 218 } 219 219 ··· 1053 1053 &hw_clocks, PHM_PerformanceLevelDesignation_Activity); 1054 1054 1055 1055 if (ret) { 1056 - pr_debug("Error in phm_get_clock_info \n"); 1056 + drm_err(adev_to_drm(hwmgr->adev), 1057 + "Error in phm_get_clock_info\n"); 1057 1058 return -EINVAL; 1058 1059 } 1059 1060