A modern GPGPU API & WIP Linux RDNA2+ driver
rdna driver linux gpu
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

amdgpu: minimal compute shader dispatch

+201 -15
+150 -9
drivers/amdgpu/cmds.cpp
··· 248 248 } 249 249 } 250 250 251 - struct Shader {}; 252 - 253 251 struct DispatchInfo { 254 252 uint32_t x; 255 253 uint32_t y; 256 254 uint32_t z; 257 255 uint64_t indirect_va; 256 + uint64_t data_va; 257 + }; 258 + 259 + struct ShaderRegs { 260 + uint32_t pgm_lo; 261 + uint32_t pgm_hi; 262 + uint32_t pgm_rsrc1; 263 + uint32_t pgm_rsrc2; 264 + uint32_t pgm_rsrc3; 265 + 266 + uint32_t userdata_0; 267 + }; 268 + 269 + enum class HwStage { 270 + Compute 271 + }; 272 + 273 + struct ShaderInfo { 274 + uint32_t block_size[3]; 275 + HwStage hw_stage; 276 + ShaderRegs regs; 277 + 278 + uint32_t wave_size; 279 + }; 280 + 281 + struct ShaderConfig { 282 + uint32_t pgm_rsrc1; 283 + uint32_t pgm_rsrc2; 284 + uint32_t pgm_rsrc3; 285 + uint32_t compute_resource_limits; 286 + 287 + uint32_t user_sgpr_count; 288 + }; 289 + 290 + struct Shader { 291 + ShaderInfo info; 292 + ShaderConfig config; 293 + uint64_t va; 258 294 }; 259 295 296 + void init_compute_shader_config(DeviceImpl *dev, Shader &shader) { 297 + 298 + // @todo: ultra temporary. 299 + auto x = amdgpu_malloc(dev, 1024, 16, KesMemoryDefault); 300 + *((uint32_t *)x.cpu) = 0xBF810000; // s_endpgm 301 + 302 + // @todo: temporary 303 + auto wave_size = 32; 304 + auto waves_per_threadgroup = 1; 305 + auto max_waves_per_sh = 0x3FF; 306 + auto threadgroups_per_cu = 1; 307 + 308 + // Fixed for the Root Pointer ABI 309 + auto num_user_sgprs = 2; 310 + 311 + auto num_vgprs = 8; 312 + auto num_sgprs = 8; 313 + auto num_shared_vgprs = 1; 314 + auto scratch_enabled = false; 315 + auto trap_present = false; 316 + 317 + auto dx10_clamp = true; 318 + 319 + auto num_shared_vgpr_blocks = num_shared_vgprs / 8; 320 + 321 + shader.config.user_sgpr_count = num_user_sgprs; 322 + shader.info.wave_size = wave_size; 323 + shader.info.block_size[0] = 32; 324 + shader.info.block_size[1] = 1; 325 + shader.info.block_size[2] = 1; 326 + shader.va = x.gpu; 327 + shader.info.hw_stage = HwStage::Compute; 328 + 329 + // use large limits. 
330 + shader.config.compute_resource_limits = 331 + S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0) 332 + | S_00B854_WAVES_PER_SH(max_waves_per_sh) 333 + | S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1); 334 + 335 + shader.config.pgm_rsrc1 = 336 + S_00B848_VGPRS((num_vgprs - 1) / (wave_size == 32 ? 8 : 4)) 337 + | S_00B848_DX10_CLAMP(dx10_clamp); 338 + 339 + shader.config.pgm_rsrc2 = 340 + S_00B84C_USER_SGPR(shader.config.user_sgpr_count) 341 + | S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) 342 + | S_00B12C_SCRATCH_EN(scratch_enabled) 343 + | S_00B12C_TRAP_PRESENT(trap_present) 344 + | S_00B84C_TGID_X_EN(1) 345 + | S_00B84C_TGID_Y_EN(1) 346 + | S_00B84C_TGID_Z_EN(1); 347 + 348 + shader.config.pgm_rsrc3 = 349 + S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks); 350 + } 351 + 352 + void precompute_regs(ShaderInfo &info) { 353 + auto &regs = info.regs; 354 + 355 + // @todo: setup that compute_resource_limits thingy. 356 + 357 + switch(info.hw_stage) { 358 + case HwStage::Compute: 359 + regs.pgm_lo = R_00B830_COMPUTE_PGM_LO; 360 + regs.pgm_hi = R_00B834_COMPUTE_PGM_HI; 361 + regs.pgm_rsrc1 = R_00B848_COMPUTE_PGM_RSRC1; 362 + regs.pgm_rsrc2 = R_00B84C_COMPUTE_PGM_RSRC2; 363 + regs.pgm_rsrc3 = R_00B8A0_COMPUTE_PGM_RSRC3; 364 + regs.userdata_0 = R_00B900_COMPUTE_USER_DATA_0; 365 + break; 366 + } 367 + } 368 + 369 + void emit_compute_shader(Shader &shader, Pm4Encoder &enc) { 370 + enc.set_sh_reg(shader.info.regs.pgm_lo, shader.va >> 8); 371 + enc.set_sh_reg(shader.info.regs.pgm_hi, shader.va >> 40); 372 + 373 + enc.set_sh_reg(shader.info.regs.pgm_rsrc1, shader.config.pgm_rsrc1); 374 + enc.set_sh_reg(shader.info.regs.pgm_rsrc2, shader.config.pgm_rsrc2); 375 + enc.set_sh_reg(shader.info.regs.pgm_rsrc3, shader.config.pgm_rsrc3); 376 + 377 + enc.set_sh_reg(R_00B854_COMPUTE_RESOURCE_LIMITS, shader.config.compute_resource_limits); 378 + 379 + enc.set_sh_reg_seq(R_00B81C_COMPUTE_NUM_THREAD_X, 3); 380 + enc.emit(shader.info.block_size[0] & 0xFFFF); 381 + 
enc.emit(shader.info.block_size[1] & 0xFFFF); 382 + enc.emit(shader.info.block_size[2] & 0xFFFF); 383 + } 384 + 260 385 void amdgpu_emit_dispatch_packets(GpuInfo &ginfo, Pm4Encoder &enc, Shader &shader, DispatchInfo &dinfo) { 261 386 262 387 // @todo: get this from device settings ··· 271 396 dispatch_initiator &= ~S_00B800_ORDER_MODE(1); 272 397 } 273 398 274 - // @todo: get from shader info 275 - auto wave_size = 32; 276 - if (wave_size == 32) { 399 + if (shader.info.wave_size == 32) { 277 400 dispatch_initiator |= S_00B800_CS_W32_EN(1); 278 401 } 279 402 403 + emit_compute_shader(shader, enc); 404 + 405 + uint32_t regs[2]; 406 + regs[0] = dinfo.data_va; 407 + regs[1] = dinfo.data_va >> 32; 408 + 409 + // emit user data pointers. 410 + enc.set_sh_reg_seq(shader.info.regs.userdata_0, shader.config.user_sgpr_count); 411 + for (auto i = 0; i < shader.config.user_sgpr_count; ++i) { 412 + enc.emit(regs[i]); 413 + } 414 + 280 415 if (dinfo.indirect_va) { 281 416 // mesa align32 workaround not needed; only for GFX7 282 417 enc.emit(PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) | PKT3_SHADER_TYPE_S(1)); ··· 294 429 } 295 430 } 296 431 297 - void amdgpu_cmd_dispatch(KesCommandList pcl, uint32_t x, uint32_t y, uint32_t z) { 432 + void amdgpu_cmd_dispatch(KesCommandList pcl, kes_gpuptr_t data, uint32_t x, uint32_t y, uint32_t z) { 298 433 auto *cl = reinterpret_cast<CommandListImpl *>(pcl); 299 434 assert(cl, "dispatch: command list handle invalid: {}", (void *)pcl); 300 435 ··· 306 441 .x = x, 307 442 .y = y, 308 443 .z = z, 309 - .indirect_va = 0 444 + .indirect_va = 0, 445 + .data_va = data, 310 446 }; 311 447 448 + // @todo: do this earlier. 
449 + init_compute_shader_config(cl->queue->dev, tmp); 450 + precompute_regs(tmp.info); 451 + 312 452 amdgpu_emit_dispatch_packets(cl->queue->dev->info, enc, tmp, dinfo); 313 453 } 314 454 315 - void amdgpu_cmd_dispatch_indirect(KesCommandList pcl, uint64_t indirect_addr) { 455 + void amdgpu_cmd_dispatch_indirect(KesCommandList pcl, kes_gpuptr_t data, kes_gpuptr_t indirect_addr) { 316 456 auto *cl = reinterpret_cast<CommandListImpl *>(pcl); 317 457 assert(cl, "dispatch: command list handle invalid: {}", (void *)pcl); 318 458 ··· 321 461 322 462 Shader tmp{}; 323 463 DispatchInfo dinfo{ 324 - .indirect_va = indirect_addr 464 + .indirect_va = indirect_addr, 465 + .data_va = data, 325 466 }; 326 467 327 468 amdgpu_emit_dispatch_packets(cl->queue->dev->info, enc, tmp, dinfo);
+2 -2
drivers/amdgpu/cp_encoder.cpp
··· 1 1 #include "cp_encoder.h" 2 2 #include "cmdstream.h" 3 3 #include "gpuinfo.h" 4 - #include <cassert> 4 + #include "beta.h" 5 5 6 - CPEncoder::CPEncoder(GpuInfo &info, uint8_t ip_type, CommandStream &cs) : info(info), ip_type(ip_type), cs(cs) {} 6 + CPEncoder::CPEncoder(GpuInfo &info, uint8_t ip_type, CommandStream &cs) : info(info), ip_type(ip_type), cs(cs), m_pm4(info, ip_type, cs) {} 7 7 8 8 void CPEncoder::nop(uint32_t count, uint32_t *content) { 9 9 assert(count > 0, "CPEncoder::nop: count must always be >= 1");
+5
drivers/amdgpu/cp_encoder.h
··· 3 3 #include "cmdstream.h" 4 4 #include "gpuinfo.h" 5 5 #include "impl.h" 6 + #include "pm4_encoder.h" 6 7 7 8 /** 8 9 * Command Processor command encoder ··· 10 11 class CPEncoder { 11 12 public: 12 13 CPEncoder(GpuInfo &info, uint8_t ip_type, CommandStream &cs); 14 + 15 + Pm4Encoder &pm4() { return m_pm4; } 13 16 14 17 // nops are variable length; we can write data here if we want 15 18 // (scratch space for example). ··· 29 32 GpuInfo &info; 30 33 uint8_t ip_type; 31 34 CommandStream &cs; 35 + 36 + Pm4Encoder m_pm4; 32 37 };
+2 -2
drivers/amdgpu/impl.h
··· 58 58 59 59 void amdgpu_cmd_signal_after(KesCommandList, KesStage before, kes_gpuptr_t addr, uint64_t value, KesSignal); 60 60 void amdgpu_cmd_wait_before(KesCommandList, KesStage after, kes_gpuptr_t addr, uint64_t value, KesOp, KesHazardFlags, uint64_t mask); 61 - void amdgpu_cmd_dispatch(KesCommandList pcl, uint32_t x, uint32_t y, uint32_t z); 62 - void amdgpu_cmd_dispatch_indirect(KesCommandList pcl, uint64_t indirect_addr); 61 + void amdgpu_cmd_dispatch(KesCommandList pcl, kes_gpuptr_t data_ptr, uint32_t x, uint32_t y, uint32_t z); 62 + void amdgpu_cmd_dispatch_indirect(KesCommandList pcl, kes_gpuptr_t data_ptr, kes_gpuptr_t indirect_addr); 63 63 } 64 64 65 65 void device_register_allocation(DeviceImpl *impl, amdgpu_bo_handle bo);
+1
drivers/amdgpu/init.cpp
··· 36 36 KesDevice amdgpu_create(int drm_fd) { 37 37 auto dev = new DeviceImpl; 38 38 dev->fd = drm_fd; 39 + dev->global_residency_list = nullptr; 39 40 40 41 for (auto i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 41 42 dev->num_queues[i] = 0;
+2 -2
drivers/amdgpu/mem.cpp
··· 10 10 KesAllocation amdgpu_malloc(KesDevice pd, size_t size, size_t align, KesMemory memory) { 11 11 auto *dev = reinterpret_cast<DeviceImpl *>(pd); 12 12 13 - auto aligned_size = (size + VEK_HUGE_PAGE_SIZE - 1) & ~(VEK_HUGE_PAGE_SIZE - 1); 14 - auto alignment = VEK_HUGE_PAGE_SIZE; 13 + auto aligned_size = (size + align - 1) & ~(align - 1); 14 + auto alignment = align; 15 15 16 16 KesAllocation alloc = {}; 17 17 auto *impl = new AllocationImpl;
+39
test/examples/07_hello_dispatch/hello_dispatch.cpp
··· 1 + #include <unistd.h> 2 + #include <kestrel/kestrel.h> 3 + 4 + #include <stdio.h> 5 + 6 + struct DispatchArguments { 7 + uint64_t va; 8 + uint32_t size; 9 + }; 10 + 11 + int main(void) { 12 + 13 + auto size = 10 * 1024 * 1024; 14 + 15 + auto dev = kes_create(); 16 + 17 + auto x = kes_malloc(dev, size, 4, KesMemoryDefault); 18 + auto y = kes_malloc(dev, sizeof(DispatchArguments), 8, KesMemoryDefault); 19 + 20 + DispatchArguments *args = (DispatchArguments *)y.cpu; 21 + args->va = x.gpu; 22 + args->size = size; 23 + 24 + auto compute = kes_create_queue(dev, KesQueueTypeCompute); 25 + 26 + auto cl = kes_start_recording(compute); 27 + { 28 + kes_cmd_dispatch(cl, y.gpu, 128, 1, 1); 29 + } 30 + 31 + kes_submit(compute, cl); 32 + 33 + sleep(1); 34 + 35 + kes_free(dev, &x); 36 + kes_destroy(dev); 37 + 38 + return 0; 39 + }