Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amd: Drop amdgpu prefix from message prints

Hardcoding the prefix isn't necessary when using drm_* or dev_*
message prints.

Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Mario Limonciello (AMD) and committed by
Alex Deucher
5fd4fef3 d4b8d132

+77 -75
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
··· 66 66 67 67 throughput = div64_s64(throughput, time_ms); 68 68 69 - dev_info(adev->dev, "amdgpu: %s %u bo moves of %u kB from" 69 + dev_info(adev->dev, "%s %u bo moves of %u kB from" 70 70 " %d to %d in %lld ms, throughput: %lld Mb/s or %lld MB/s\n", 71 71 kind, n, size >> 10, sdomain, ddomain, time_ms, 72 72 throughput * 8, throughput);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4714 4714 */ 4715 4715 void amdgpu_device_fini_hw(struct amdgpu_device *adev) 4716 4716 { 4717 - dev_info(adev->dev, "amdgpu: finishing device.\n"); 4717 + dev_info(adev->dev, "finishing device.\n"); 4718 4718 flush_delayed_work(&adev->delayed_init_work); 4719 4719 4720 4720 if (adev->mman.initialized)
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 130 130 } 131 131 132 132 if (se < max_se && sh < max_sh && cu < 16) { 133 - DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu); 133 + drm_info(adev_to_drm(adev), "Disabling CU %u.%u.%u\n", se, sh, cu); 134 134 mask[se * max_sh + sh] |= 1u << cu; 135 135 } else { 136 - DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n", 137 - se, sh, cu); 136 + drm_err(adev_to_drm(adev), "disable_cu %u.%u.%u is out of range\n", 137 + se, sh, cu); 138 138 } 139 139 140 140 next = strchr(p, ','); ··· 152 152 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) 153 153 { 154 154 if (amdgpu_compute_multipipe != -1) { 155 - dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", 155 + dev_info(adev->dev, "forcing compute pipe policy %d\n", 156 156 amdgpu_compute_multipipe); 157 157 return amdgpu_compute_multipipe == 1; 158 158 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 351 351 adev->irq.irq = irq; 352 352 adev_to_drm(adev)->max_vblank_count = 0x00ffffff; 353 353 354 - dev_dbg(adev->dev, "amdgpu: irq initialized.\n"); 354 + dev_dbg(adev->dev, "irq initialized.\n"); 355 355 return 0; 356 356 357 357 free_vectors:
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 191 191 uint32_t count; 192 192 193 193 if (ring->count_dw < 0) 194 - DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); 194 + drm_err(adev_to_drm(ring->adev), "writing more dwords to the ring than expected!\n"); 195 195 196 196 /* We pad to match fetch size */ 197 197 count = ring->funcs->align_mask + 1 -
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 2111 2111 DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); 2112 2112 } 2113 2113 2114 - dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n", 2114 + dev_info(adev->dev, "%uM of VRAM memory ready\n", 2115 2115 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); 2116 2116 2117 2117 /* Compute GTT size, either based on TTM limit ··· 2137 2137 dev_err(adev->dev, "Failed initializing GTT heap.\n"); 2138 2138 return r; 2139 2139 } 2140 - dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n", 2140 + dev_info(adev->dev, "%uM of GTT memory ready.\n", 2141 2141 (unsigned int)(gtt_size / (1024 * 1024))); 2142 2142 2143 2143 if (adev->flags & AMD_IS_APU) { ··· 2260 2260 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP); 2261 2261 ttm_device_fini(&adev->mman.bdev); 2262 2262 adev->mman.initialized = false; 2263 - dev_info(adev->dev, "amdgpu: ttm finalized\n"); 2263 + dev_info(adev->dev, "ttm finalized\n"); 2264 2264 } 2265 2265 2266 2266 /**
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
··· 790 790 791 791 ret = amdgpu_ring_alloc(ring, 4); 792 792 if (ret) { 793 - dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret); 793 + dev_err(adev->dev, "dma failed to lock ring %d (%d).\n", ring->idx, ret); 794 794 goto out; 795 795 } 796 796
+5 -5
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 4041 4041 WREG32(scratch, 0xCAFEDEAD); 4042 4042 r = amdgpu_ring_alloc(ring, 3); 4043 4043 if (r) { 4044 - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 4045 - ring->idx, r); 4044 + drm_err(adev_to_drm(adev), "cp failed to lock ring %d (%d).\n", 4045 + ring->idx, r); 4046 4046 return r; 4047 4047 } 4048 4048 ··· 4090 4090 4091 4091 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); 4092 4092 if (r) { 4093 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 4093 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 4094 4094 goto err1; 4095 4095 } 4096 4096 ··· 6379 6379 ring = &adev->gfx.gfx_ring[0]; 6380 6380 r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4); 6381 6381 if (r) { 6382 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 6382 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 6383 6383 return r; 6384 6384 } 6385 6385 ··· 6429 6429 ring = &adev->gfx.gfx_ring[1]; 6430 6430 r = amdgpu_ring_alloc(ring, 2); 6431 6431 if (r) { 6432 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 6432 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 6433 6433 return r; 6434 6434 } 6435 6435
+5 -5
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 571 571 WREG32(scratch, 0xCAFEDEAD); 572 572 r = amdgpu_ring_alloc(ring, 5); 573 573 if (r) { 574 - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", 575 - ring->idx, r); 574 + drm_err(adev_to_drm(adev), "cp failed to lock ring %d (%d).\n", 575 + ring->idx, r); 576 576 return r; 577 577 } 578 578 ··· 628 628 629 629 r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib); 630 630 if (r) { 631 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 631 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 632 632 goto err1; 633 633 } 634 634 ··· 3630 3630 ring = &adev->gfx.gfx_ring[0]; 3631 3631 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev)); 3632 3632 if (r) { 3633 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3633 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 3634 3634 return r; 3635 3635 } 3636 3636 ··· 3675 3675 ring = &adev->gfx.gfx_ring[1]; 3676 3676 r = amdgpu_ring_alloc(ring, 2); 3677 3677 if (r) { 3678 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 3678 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 3679 3679 return r; 3680 3680 } 3681 3681
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 460 460 WREG32(scratch, 0xCAFEDEAD); 461 461 r = amdgpu_ring_alloc(ring, 5); 462 462 if (r) { 463 - dev_err(adev->dev, 464 - "amdgpu: cp failed to lock ring %d (%d).\n", 463 + drm_err(adev_to_drm(adev), 464 + "cp failed to lock ring %d (%d).\n", 465 465 ring->idx, r); 466 466 return r; 467 467 } ··· 518 518 519 519 r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib); 520 520 if (r) { 521 - dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r); 521 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 522 522 goto err1; 523 523 } 524 524
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 2010 2010 2011 2011 r = amdgpu_ring_alloc(ring, 7 + 4); 2012 2012 if (r) { 2013 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2013 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 2014 2014 return r; 2015 2015 } 2016 2016 amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); ··· 2031 2031 2032 2032 r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10); 2033 2033 if (r) { 2034 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2034 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 2035 2035 return r; 2036 2036 } 2037 2037
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 2465 2465 2466 2466 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); 2467 2467 if (r) { 2468 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2468 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 2469 2469 return r; 2470 2470 } 2471 2471
+4 -4
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1509 1509 r = amdgpu_ib_get(adev, NULL, total_size, 1510 1510 AMDGPU_IB_POOL_DIRECT, &ib); 1511 1511 if (r) { 1512 - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 1512 + drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r); 1513 1513 return r; 1514 1514 } 1515 1515 ··· 1604 1604 /* shedule the ib on the ring */ 1605 1605 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 1606 1606 if (r) { 1607 - DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); 1607 + drm_err(adev_to_drm(adev), "ib submit failed (%d).\n", r); 1608 1608 goto fail; 1609 1609 } 1610 1610 1611 1611 /* wait for the GPU to finish processing the IB */ 1612 1612 r = dma_fence_wait(f, false); 1613 1613 if (r) { 1614 - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 1614 + drm_err(adev_to_drm(adev), "fence wait failed (%d).\n", r); 1615 1615 goto fail; 1616 1616 } 1617 1617 ··· 4143 4143 4144 4144 r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4); 4145 4145 if (r) { 4146 - DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 4146 + drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r); 4147 4147 return r; 4148 4148 } 4149 4149
+4 -4
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 4582 4582 4583 4583 r = amdgpu_ring_alloc(ring, 7); 4584 4584 if (r) { 4585 - DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n", 4585 + drm_err(adev_to_drm(adev), "GDS workarounds failed to lock ring %s (%d).\n", 4586 4586 ring->name, r); 4587 4587 return r; 4588 4588 } ··· 4671 4671 r = amdgpu_ib_get(adev, NULL, total_size, 4672 4672 AMDGPU_IB_POOL_DIRECT, &ib); 4673 4673 if (r) { 4674 - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 4674 + drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r); 4675 4675 return r; 4676 4676 } 4677 4677 ··· 4772 4772 /* shedule the ib on the ring */ 4773 4773 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 4774 4774 if (r) { 4775 - DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); 4775 + drm_err(adev_to_drm(adev), "ib submit failed (%d).\n", r); 4776 4776 goto fail; 4777 4777 } 4778 4778 4779 4779 /* wait for the GPU to finish processing the IB */ 4780 4780 r = dma_fence_wait(f, false); 4781 4781 if (r) { 4782 - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 4782 + drm_err(adev_to_drm(adev), "fence wait failed (%d).\n", r); 4783 4783 goto fail; 4784 4784 } 4785 4785
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 850 850 851 851 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); 852 852 if (r) { 853 - dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); 853 + drm_warn(adev_to_drm(adev), "No suitable DMA available.\n"); 854 854 return r; 855 855 } 856 856
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 824 824 825 825 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); 826 826 if (r) { 827 - dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); 827 + drm_warn(adev_to_drm(adev), "No suitable DMA available.\n"); 828 828 return r; 829 829 } 830 830
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 869 869 870 870 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); 871 871 if (r) { 872 - printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); 872 + drm_warn(adev_to_drm(adev), "No suitable DMA available.\n"); 873 873 return r; 874 874 } 875 875
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1975 1975 44; 1976 1976 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); 1977 1977 if (r) { 1978 - dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); 1978 + drm_warn(adev_to_drm(adev), "No suitable DMA available.\n"); 1979 1979 return r; 1980 1980 } 1981 1981 adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 271 271 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 272 272 273 273 if (r) { 274 - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); 274 + drm_err(adev_to_drm(adev), "failed to enable JPEG power gating\n"); 275 275 return r; 276 276 } 277 277 }
+2 -2
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 298 298 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 299 299 300 300 if (r) { 301 - DRM_ERROR("amdgpu: JPEG disable power gating failed\n"); 301 + drm_err(adev_to_drm(adev), "failed to disable JPEG power gating\n"); 302 302 return r; 303 303 } 304 304 } ··· 333 333 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 334 334 335 335 if (r) { 336 - DRM_ERROR("amdgpu: JPEG enable power gating failed\n"); 336 + drm_err(adev_to_drm(adev), "failed to enable JPEG power gating\n"); 337 337 return r; 338 338 } 339 339 }
+2 -2
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 335 335 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 336 336 337 337 if (r) { 338 - DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG disable power gating failed\n"); 338 + drm_err(adev_to_drm(adev), "failed to disable JPEG power gating\n"); 339 339 return r; 340 340 } 341 341 } ··· 370 370 UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK); 371 371 372 372 if (r) { 373 - DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG enable power gating failed\n"); 373 + drm_err(adev_to_drm(adev), "failed to enable JPEG power gating\n"); 374 374 return r; 375 375 } 376 376 }
+4 -4
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 1031 1031 1032 1032 r = amdgpu_ring_alloc(ring, 20); 1033 1033 if (r) { 1034 - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 1034 + drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r); 1035 1035 amdgpu_device_wb_free(adev, index); 1036 1036 return r; 1037 1037 } ··· 1096 1096 r = amdgpu_ib_get(adev, NULL, 256, 1097 1097 AMDGPU_IB_POOL_DIRECT, &ib); 1098 1098 if (r) { 1099 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 1099 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 1100 1100 goto err0; 1101 1101 } 1102 1102 ··· 1117 1117 1118 1118 r = dma_fence_wait_timeout(f, false, timeout); 1119 1119 if (r == 0) { 1120 - DRM_ERROR("amdgpu: IB test timed out\n"); 1120 + drm_err(adev_to_drm(adev), "IB test timed out\n"); 1121 1121 r = -ETIMEDOUT; 1122 1122 goto err1; 1123 1123 } else if (r < 0) { 1124 - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 1124 + drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r); 1125 1125 goto err1; 1126 1126 } 1127 1127
+6 -6
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 931 931 932 932 r = amdgpu_ring_alloc(ring, 20); 933 933 if (r) { 934 - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 934 + drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r); 935 935 amdgpu_device_wb_free(adev, index); 936 936 return r; 937 937 } ··· 995 995 996 996 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 997 997 if (r) { 998 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 998 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 999 999 goto err0; 1000 1000 } 1001 1001 ··· 1016 1016 1017 1017 r = dma_fence_wait_timeout(f, false, timeout); 1018 1018 if (r == 0) { 1019 - DRM_ERROR("amdgpu: IB test timed out\n"); 1019 + drm_err(adev_to_drm(adev), "IB test timed out\n"); 1020 1020 r = -ETIMEDOUT; 1021 1021 goto err1; 1022 1022 } else if (r < 0) { 1023 - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 1023 + drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r); 1024 1024 goto err1; 1025 1025 } 1026 1026 ··· 1325 1325 ring->use_doorbell = true; 1326 1326 ring->me = i; 1327 1327 1328 - DRM_INFO("use_doorbell being set to: [%s]\n", 1329 - ring->use_doorbell?"true":"false"); 1328 + drm_info(adev_to_drm(adev), "use_doorbell being set to: [%s]\n", 1329 + ring->use_doorbell?"true":"false"); 1330 1330 1331 1331 ring->doorbell_index = 1332 1332 (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
+4 -4
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 938 938 939 939 r = amdgpu_ring_alloc(ring, 5); 940 940 if (r) { 941 - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 941 + drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r); 942 942 amdgpu_device_wb_free(adev, index); 943 943 return r; 944 944 } ··· 1002 1002 1003 1003 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 1004 1004 if (r) { 1005 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 1005 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 1006 1006 goto err0; 1007 1007 } 1008 1008 ··· 1023 1023 1024 1024 r = dma_fence_wait_timeout(f, false, timeout); 1025 1025 if (r == 0) { 1026 - DRM_ERROR("amdgpu: IB test timed out\n"); 1026 + drm_err(adev_to_drm(adev), "IB test timed out\n"); 1027 1027 r = -ETIMEDOUT; 1028 1028 goto err1; 1029 1029 } else if (r < 0) { 1030 - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 1030 + drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r); 1031 1031 goto err1; 1032 1032 } 1033 1033
+5 -5
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
··· 954 954 955 955 r = amdgpu_ring_alloc(ring, 5); 956 956 if (r) { 957 - DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 957 + drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r); 958 958 amdgpu_device_wb_free(adev, index); 959 959 return r; 960 960 } ··· 1018 1018 1019 1019 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 1020 1020 if (r) { 1021 - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 1021 + drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r); 1022 1022 goto err0; 1023 1023 } 1024 1024 ··· 1039 1039 1040 1040 r = dma_fence_wait_timeout(f, false, timeout); 1041 1041 if (r == 0) { 1042 - DRM_ERROR("amdgpu: IB test timed out\n"); 1042 + drm_err(adev_to_drm(adev), "IB test timed out\n"); 1043 1043 r = -ETIMEDOUT; 1044 1044 goto err1; 1045 1045 } else if (r < 0) { 1046 - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 1046 + drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r); 1047 1047 goto err1; 1048 1048 } 1049 1049 ··· 1504 1504 ring->trail_seq += 1; 1505 1505 r = amdgpu_ring_alloc(ring, 10); 1506 1506 if (r) { 1507 - DRM_ERROR("ring %d failed to be allocated \n", ring->idx); 1507 + DRM_ERROR("ring %d failed to be allocated\n", ring->idx); 1508 1508 return r; 1509 1509 } 1510 1510 sdma_v7_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+4 -4
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
··· 660 660 661 661 r = uvd_v3_1_fw_validate(adev); 662 662 if (r) { 663 - DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r); 663 + drm_err(adev_to_drm(adev), "UVD Firmware validate fail (%d).\n", r); 664 664 return r; 665 665 } 666 666 ··· 668 668 669 669 r = amdgpu_ring_test_helper(ring); 670 670 if (r) { 671 - DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r); 671 + drm_err(adev_to_drm(adev), "UVD ring test failed (%d).\n", r); 672 672 goto done; 673 673 } 674 674 675 675 r = amdgpu_ring_alloc(ring, 10); 676 676 if (r) { 677 - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 677 + drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r); 678 678 goto done; 679 679 } 680 680 ··· 701 701 702 702 done: 703 703 if (!r) 704 - DRM_INFO("UVD initialized successfully.\n"); 704 + drm_info(adev_to_drm(adev), "UVD initialized successfully.\n"); 705 705 706 706 return r; 707 707 }
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 167 167 168 168 r = amdgpu_ring_alloc(ring, 10); 169 169 if (r) { 170 - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 170 + drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r); 171 171 goto done; 172 172 } 173 173 ··· 194 194 195 195 done: 196 196 if (!r) 197 - DRM_INFO("UVD initialized successfully.\n"); 197 + drm_info(adev_to_drm(adev), "UVD initialized successfully.\n"); 198 198 199 199 return r; 200 200 }
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 164 164 165 165 r = amdgpu_ring_alloc(ring, 10); 166 166 if (r) { 167 - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 167 + drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r); 168 168 goto done; 169 169 } 170 170
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 478 478 479 479 r = amdgpu_ring_alloc(ring, 10); 480 480 if (r) { 481 - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); 481 + drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r); 482 482 goto done; 483 483 } 484 484
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 542 542 543 543 r = amdgpu_ring_alloc(ring, 10); 544 544 if (r) { 545 - DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r); 545 + drm_err(adev_to_drm(adev), "ring alloc failed (%d).\n", r); 546 546 goto done; 547 547 } 548 548
+4 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2098 2098 drm_err(adev_to_drm(adev), 2099 2099 "failed to initialize freesync_module.\n"); 2100 2100 } else 2101 - drm_dbg_driver(adev_to_drm(adev), "amdgpu: freesync_module init done %p.\n", 2101 + drm_dbg_driver(adev_to_drm(adev), "freesync_module init done %p.\n", 2102 2102 adev->dm.freesync_module); 2103 2103 2104 2104 amdgpu_dm_init_color_mod(); ··· 2120 2120 if (!adev->dm.hdcp_workqueue) 2121 2121 drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); 2122 2122 else 2123 - drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 2123 + drm_dbg_driver(adev_to_drm(adev), 2124 + "hdcp_workqueue init done %p.\n", 2125 + adev->dm.hdcp_workqueue); 2124 2126 2125 2127 dc_init_callbacks(adev->dm.dc, &init_params); 2126 2128 }