Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: Use the right function for hdp flush

There are a few prechecks made before an HDP flush — for example, a flush is not
required on bare-metal APUs. Calling the HDP callback directly bypasses those
checks. Use amdgpu_device_flush_hdp(), which takes care of the prechecks.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Lijo Lazar and committed by
Alex Deucher
1d9bff4c 5045c6c6

+23 -23
+4 -4
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 6172 6172 } 6173 6173 6174 6174 if (amdgpu_emu_mode == 1) 6175 - adev->hdp.funcs->flush_hdp(adev, NULL); 6175 + amdgpu_device_flush_hdp(adev, NULL); 6176 6176 6177 6177 tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); 6178 6178 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); ··· 6250 6250 } 6251 6251 6252 6252 if (amdgpu_emu_mode == 1) 6253 - adev->hdp.funcs->flush_hdp(adev, NULL); 6253 + amdgpu_device_flush_hdp(adev, NULL); 6254 6254 6255 6255 tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); 6256 6256 tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); ··· 6327 6327 } 6328 6328 6329 6329 if (amdgpu_emu_mode == 1) 6330 - adev->hdp.funcs->flush_hdp(adev, NULL); 6330 + amdgpu_device_flush_hdp(adev, NULL); 6331 6331 6332 6332 tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); 6333 6333 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); ··· 6702 6702 } 6703 6703 6704 6704 if (amdgpu_emu_mode == 1) 6705 - adev->hdp.funcs->flush_hdp(adev, NULL); 6705 + amdgpu_device_flush_hdp(adev, NULL); 6706 6706 6707 6707 tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); 6708 6708 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+6 -6
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 2509 2509 } 2510 2510 2511 2511 if (amdgpu_emu_mode == 1) 2512 - adev->hdp.funcs->flush_hdp(adev, NULL); 2512 + amdgpu_device_flush_hdp(adev, NULL); 2513 2513 2514 2514 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL); 2515 2515 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); ··· 2553 2553 } 2554 2554 2555 2555 if (amdgpu_emu_mode == 1) 2556 - adev->hdp.funcs->flush_hdp(adev, NULL); 2556 + amdgpu_device_flush_hdp(adev, NULL); 2557 2557 2558 2558 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL); 2559 2559 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); ··· 2598 2598 } 2599 2599 2600 2600 if (amdgpu_emu_mode == 1) 2601 - adev->hdp.funcs->flush_hdp(adev, NULL); 2601 + amdgpu_device_flush_hdp(adev, NULL); 2602 2602 2603 2603 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL); 2604 2604 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); ··· 3234 3234 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 3235 3235 3236 3236 if (amdgpu_emu_mode == 1) 3237 - adev->hdp.funcs->flush_hdp(adev, NULL); 3237 + amdgpu_device_flush_hdp(adev, NULL); 3238 3238 3239 3239 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 3240 3240 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); ··· 3452 3452 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 3453 3453 3454 3454 if (amdgpu_emu_mode == 1) 3455 - adev->hdp.funcs->flush_hdp(adev, NULL); 3455 + amdgpu_device_flush_hdp(adev, NULL); 3456 3456 3457 3457 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 3458 3458 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); ··· 4648 4648 if (r) 4649 4649 return r; 4650 4650 4651 - adev->hdp.funcs->flush_hdp(adev, NULL); 4651 + amdgpu_device_flush_hdp(adev, NULL); 4652 4652 4653 4653 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 4654 4654 false : true;
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 2389 2389 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj); 2390 2390 2391 2391 if (amdgpu_emu_mode == 1) 2392 - adev->hdp.funcs->flush_hdp(adev, NULL); 2392 + amdgpu_device_flush_hdp(adev, NULL); 2393 2393 2394 2394 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO, 2395 2395 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); ··· 2533 2533 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj); 2534 2534 2535 2535 if (amdgpu_emu_mode == 1) 2536 - adev->hdp.funcs->flush_hdp(adev, NULL); 2536 + amdgpu_device_flush_hdp(adev, NULL); 2537 2537 2538 2538 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO, 2539 2539 lower_32_bits(adev->gfx.me.me_fw_gpu_addr)); ··· 3503 3503 if (r) 3504 3504 return r; 3505 3505 3506 - adev->hdp.funcs->flush_hdp(adev, NULL); 3506 + amdgpu_device_flush_hdp(adev, NULL); 3507 3507 3508 3508 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 3509 3509 false : true;
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 268 268 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; 269 269 270 270 /* flush hdp cache */ 271 - adev->hdp.funcs->flush_hdp(adev, NULL); 271 + amdgpu_device_flush_hdp(adev, NULL); 272 272 273 273 /* This is necessary for SRIOV as well as for GFXOFF to function 274 274 * properly under bare metal ··· 965 965 adev->hdp.funcs->init_registers(adev); 966 966 967 967 /* Flush HDP after it is initialized */ 968 - adev->hdp.funcs->flush_hdp(adev, NULL); 968 + amdgpu_device_flush_hdp(adev, NULL); 969 969 970 970 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 971 971 false : true;
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 229 229 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng; 230 230 231 231 /* flush hdp cache */ 232 - adev->hdp.funcs->flush_hdp(adev, NULL); 232 + amdgpu_device_flush_hdp(adev, NULL); 233 233 234 234 /* This is necessary for SRIOV as well as for GFXOFF to function 235 235 * properly under bare metal ··· 895 895 return r; 896 896 897 897 /* Flush HDP after it is initialized */ 898 - adev->hdp.funcs->flush_hdp(adev, NULL); 898 + amdgpu_device_flush_hdp(adev, NULL); 899 899 900 900 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 901 901 false : true;
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 297 297 return; 298 298 299 299 /* flush hdp cache */ 300 - adev->hdp.funcs->flush_hdp(adev, NULL); 300 + amdgpu_device_flush_hdp(adev, NULL); 301 301 302 302 /* This is necessary for SRIOV as well as for GFXOFF to function 303 303 * properly under bare metal ··· 877 877 return r; 878 878 879 879 /* Flush HDP after it is initialized */ 880 - adev->hdp.funcs->flush_hdp(adev, NULL); 880 + amdgpu_device_flush_hdp(adev, NULL); 881 881 882 882 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 883 883 false : true;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 2425 2425 adev->hdp.funcs->init_registers(adev); 2426 2426 2427 2427 /* After HDP is initialized, flush HDP.*/ 2428 - adev->hdp.funcs->flush_hdp(adev, NULL); 2428 + amdgpu_device_flush_hdp(adev, NULL); 2429 2429 2430 2430 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 2431 2431 value = false;
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 533 533 } 534 534 535 535 memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); 536 - adev->hdp.funcs->flush_hdp(adev, NULL); 536 + amdgpu_device_flush_hdp(adev, NULL); 537 537 vfree(buf); 538 538 drm_dev_exit(idx); 539 539 } else {
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
··· 610 610 } 611 611 612 612 memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); 613 - adev->hdp.funcs->flush_hdp(adev, NULL); 613 + amdgpu_device_flush_hdp(adev, NULL); 614 614 vfree(buf); 615 615 drm_dev_exit(idx); 616 616 } else {
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
··· 498 498 } 499 499 500 500 memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); 501 - adev->hdp.funcs->flush_hdp(adev, NULL); 501 + amdgpu_device_flush_hdp(adev, NULL); 502 502 vfree(buf); 503 503 drm_dev_exit(idx); 504 504 } else {