A Modern GPGPU API & wip linux RDNA2+ Driver
rdna driver linux gpu
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

wip: making example_07 work (not yet!)

+972 -62
+12 -14
drivers/amdgpu/cmds.cpp
··· 1 + #include "compiler/compiler.h" 2 + #include "compiler/gir.h" 1 3 #include "cp_encoder.h" 2 4 #include "gpuinfo.h" 3 5 #include "kestrel/kestrel.h" ··· 275 277 HwStage hw_stage; 276 278 ShaderRegs regs; 277 279 280 + bool ordered; 278 281 uint32_t wave_size; 279 282 }; 280 283 ··· 297 300 298 301 // @todo: ultra temporary. 299 302 auto x = amdgpu_malloc(dev, 1024, 256, KesMemoryDefault); 300 - uint32_t *pgm = (uint32_t *)x.cpu; 301 - auto pgmidx = 0; 302 - // pgm[pgmidx++] = 0x24020402; // v_lshlrev_b32 v1, 2, v0 303 - // pgm[pgmidx++] = 0x4A020300; // v_add_co_u32 v1, vcc_lo, s0, v1 304 - // pgm[pgmidx++] = 0x4A040201; // v_add_co_ci_u32 v2, vcc_lo, s1, 0, vcc_lo 305 - // pgm[pgmidx++] = 0xDC500000; // global_store_dword v[1:2], v0, off 306 - // pgm[pgmidx++] = 0x00000001; 307 - pgm[pgmidx++] = 0xBF810000; // s_endpgm 308 303 309 - // (RDNA ISA Ref. 2.5) 310 - for (auto i = 0; i < 64; ++i) { 311 - pgm[pgmidx++] = 0xBF9F0000; // s_code_end 304 + { 305 + gir::IRModule mod; 306 + gir::Builder gb(mod); 307 + 308 + gir::rdna2_compile(mod, x.cpu, x.gpu); 312 309 } 313 310 314 311 log("shader code: {} {}", (void *)x.cpu, (void *)x.gpu); 315 312 316 313 // @todo: temporary 314 + auto ordered = false; 317 315 auto wave_size = 32; 318 316 auto waves_per_threadgroup = 1; 319 317 auto max_waves_per_sh = 0x3FF; ··· 333 331 auto num_shared_vgpr_blocks = num_shared_vgprs / 8; 334 332 335 333 shader.config.user_sgpr_count = num_user_sgprs; 334 + shader.info.ordered = ordered; 336 335 shader.info.wave_size = wave_size; 337 336 shader.info.block_size[0] = 32; 338 337 shader.info.block_size[1] = 1; ··· 353 352 354 353 shader.config.pgm_rsrc2 = 355 354 S_00B84C_USER_SGPR(shader.config.user_sgpr_count) 356 - | S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5) 355 + | S_00B22C_USER_SGPR_MSB_GFX10(shader.config.user_sgpr_count >> 5) 357 356 | S_00B12C_SCRATCH_EN(scratch_enabled) 358 357 | S_00B12C_TRAP_PRESENT(trap_present) 359 358 | S_00B84C_TGID_X_EN(1) ··· 406 405 auto 
predicating = false; 407 406 408 407 // @todo: support 409 - auto ordered = false; 410 - if (ordered) { 408 + if (shader.info.ordered) { 411 409 dispatch_initiator &= ~S_00B800_ORDER_MODE(1); 412 410 } 413 411
+85 -23
drivers/amdgpu/cmdstream.cpp
··· 3 3 #include "impl.h" 4 4 #include "beta.h" 5 5 6 + KesSemaphore amdgpu_create_semaphore(KesDevice pd, uint64_t initial_value) { 7 + auto *dev = reinterpret_cast<DeviceImpl *>(pd); 8 + auto *sem = new SemaphoreImpl(); 9 + sem->dev_handle = dev->amd_handle; 10 + 11 + int r = amdgpu_cs_create_syncobj2(dev->amd_handle, DRM_SYNCOBJ_CREATE_SIGNALED, &sem->syncobj_handle); 12 + 13 + // Set the initial timeline point 14 + if (initial_value > 0) { 15 + amdgpu_cs_syncobj_timeline_signal(dev->amd_handle, &sem->syncobj_handle, &initial_value, 1); 16 + } 17 + 18 + return sem; 19 + } 20 + 21 + int amdgpu_wait_semaphore(KesSemaphore ps, uint64_t value) { 22 + return 0; 23 + /* 24 + auto *sem = reinterpret_cast<SemaphoreImpl *>(ps); 25 + 26 + uint32_t wait_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT; 27 + 28 + int r = amdgpu_cs_syncobj_timeline_wait(sem->dev_handle, &sem->syncobj_handle, &value, 1, 29 + 1000000000, wait_flags, nullptr); 30 + // In a real driver, you'd check the return code here for -ECANCELED (GPU Reset) 31 + return r; 32 + */ 33 + } 34 + 35 + 6 36 void CommandStream::emit(uint32_t x) { 7 37 assert(cursor < end, "commandstream emit out of bounds: {}-{} {}", (void *)start, (void *)end, (void *)cursor); 8 38 *cursor++ = x; ··· 17 47 .preferred_heap = AMDGPU_GEM_DOMAIN_GTT, 18 48 .flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_UNCACHED // Or WC 19 49 }; 50 + 51 + amdgpu_cs_create_syncobj2(dev->amd_handle, DRM_SYNCOBJ_CREATE_SIGNALED, &m_queue_syncobj); 20 52 21 53 amdgpu_bo_alloc(m_dev->amd_handle, &req, &m_bo_handle); 22 54 ··· 50 82 return cs; 51 83 } 52 84 53 - void CommandRing::submit(CommandStream& cs) { 85 + // @todo: this can support multiple semaphores in a single chunk. 
86 + void *alloc_timeline_syncobj_chunk(drm_amdgpu_cs_chunk *chunk, uint32_t syncobj, uint64_t point, uint32_t chunk_id) { 87 + auto count = 1; 88 + auto sems = (drm_amdgpu_cs_chunk_syncobj *)malloc(sizeof(drm_amdgpu_cs_chunk_syncobj) * count); 89 + 90 + sems[0].handle = syncobj; 91 + sems[0].flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT; 92 + sems[0].point = point; 93 + 94 + chunk->chunk_id = chunk_id; 95 + chunk->length_dw = sizeof(drm_amdgpu_cs_chunk_syncobj) / 4 * count; 96 + chunk->chunk_data = (uint64_t)(uintptr_t)sems; 97 + 98 + return sems; 99 + } 100 + 101 + void CommandRing::submit(CommandStream& cs, SemaphoreImpl *sem, uint64_t value) { 102 + // @todo: we currently ignore user sem. 103 + 54 104 uint32_t start_dw = cs.end - (m_cfg.stream_size_bytes / 4) - m_cpu_map; 55 105 start_dw = (reinterpret_cast<uint8_t*>(cs.end) - reinterpret_cast<uint8_t*>(m_cpu_map) - m_cfg.stream_size_bytes) / 4; 56 106 57 107 uint32_t count_dw = cs.cursor - (cs.end - (m_cfg.stream_size_bytes / 4)); 58 108 59 - amdgpu_cs_ib_info ib = {}; 60 - //ib.handle = m_bo_handle; 61 - ib.ib_mc_address = cs.gpu_va_start; 62 - ib.size = count_dw; 109 + auto has_user_sem = sem != nullptr; 63 110 64 - auto next_point = m_timeline_counter++; 111 + auto chunks = (drm_amdgpu_cs_chunk *)malloc(sizeof(drm_amdgpu_cs_chunk) * 4); 112 + auto chunk_data = (drm_amdgpu_cs_chunk_data *)malloc(sizeof(drm_amdgpu_cs_chunk_data) * 1); 65 113 66 - amdgpu_cs_request req = {}; 67 - req.ip_type = m_ip_type; 68 - req.number_of_ibs = 1; 69 - req.ibs = &ib; 70 - if (m_dev->residency_dirty) { 71 - req.resources = m_dev->global_residency_list; 72 - m_dev->residency_dirty = false; 114 + auto num_chunks = 1; 115 + { 116 + chunks[0].chunk_id = AMDGPU_CHUNK_ID_IB; 117 + chunks[0].length_dw = sizeof(drm_amdgpu_cs_chunk_ib) / 4; 118 + chunks[0].chunk_data = (uint64_t)(uintptr_t)&chunk_data[0]; 119 + 120 + chunk_data[0].ib_data._pad = 0; 121 + chunk_data[0].ib_data.va_start = cs.gpu_va_start; 122 + 
chunk_data[0].ib_data.ib_bytes = count_dw * 4; 123 + chunk_data[0].ib_data.ip_type = m_ip_type; 124 + chunk_data[0].ib_data.ip_instance = 0; 125 + chunk_data[0].ib_data.ring = 0; 126 + chunk_data[0].ib_data.flags = 0; 73 127 } 74 128 75 - auto r = amdgpu_cs_submit(m_ctx, 0, &req, 1); 76 - if (r != 0) { 77 - warn("submit failed: (ctx: {}) {}", (void *)m_ctx, r); 129 + if (false) { 130 + chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES; 131 + chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4; 132 + chunks[num_chunks].chunk_data = (uintptr_t)m_dev->global_residency_list; 133 + num_chunks++; 78 134 } 79 - if (r == 0) { 80 - amdgpu_cs_fence fence = {}; 81 - fence.context = m_ctx; 82 - fence.ip_type = m_ip_type; 83 - // @todo: syncronization... 135 + 136 + // create a wait block for the last submission. 137 + alloc_timeline_syncobj_chunk(&chunks[num_chunks], m_queue_syncobj, m_timeline_counter, AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT); 138 + num_chunks++; 139 + 140 + m_timeline_counter++; 141 + 142 + // create a signal block for the finishing of this submission. 143 + alloc_timeline_syncobj_chunk(&chunks[num_chunks], m_queue_syncobj, m_timeline_counter, AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL); 144 + num_chunks++; 84 145 85 - m_history.push_back({start_dw, start_dw + (uint32_t)(m_cfg.stream_size_bytes/4), next_point}); 86 - m_write_cursor_dw += (m_cfg.stream_size_bytes / 4); 146 + auto r = amdgpu_cs_submit_raw2(m_dev->amd_handle, m_ctx, 0, num_chunks, chunks, nullptr); 147 + if (r != 0) { 148 + warn("submit: failed with error: {}", r); 87 149 } 88 150 } 89 151
+3 -1
drivers/amdgpu/cmdstream.h
··· 7 7 #include <amdgpu_drm.h> 8 8 9 9 struct DeviceImpl; 10 + struct SemaphoreImpl; 10 11 11 12 class CommandStream { 12 13 public: ··· 36 37 CommandRing(const CommandRing&) = delete; 37 38 38 39 CommandStream begin_recording(); 39 - void submit(CommandStream& cs); 40 + void submit(CommandStream& cs, SemaphoreImpl *, uint64_t); 40 41 41 42 private: 42 43 struct Submission { ··· 56 57 uint64_t m_gpu_va; 57 58 uint32_t* m_cpu_map; 58 59 60 + uint32_t m_queue_syncobj; 59 61 uint64_t m_timeline_counter = 0; 60 62 61 63 uint32_t m_write_cursor_dw = 0;
+128
drivers/amdgpu/compiler/compiler.cpp
··· 1 + #include "compiler.h" 2 + #include "rdna2_asm.h" 3 + #include "gir.h" 4 + 5 + #include <sstream> 6 + #include <iomanip> 7 + #include <string> 8 + #include <fstream> 9 + 10 + namespace gir { 11 + 12 + /* 13 + * 14 + * on rdna 2, 64-bit addresses are kept in a seq gpr-pair. This means that Refs to addr 15 + * are actually 2 refs; should we do an 'expansion' pass? 16 + * 17 + * DCE 18 + * memory access hoisting (load early). see ISA for details. 19 + * LCIM 20 + * 21 + * 22 + */ 23 + 24 + // @todo: this is obviously very early stage wip... 25 + 26 + struct Compiler { 27 + IRModule& mod; 28 + RDNA2Assembler as; 29 + }; 30 + 31 + void analyze_uniformity(Compiler &); 32 + void analyze_liveness(Compiler &); 33 + void allocate_registers(Compiler &); 34 + void codegen(Compiler &); 35 + 36 + void rdna2_compile(IRModule &mod, void *write_ptr, uint64_t base_addr) { 37 + Compiler compiler(mod); 38 + 39 + analyze_liveness(compiler); 40 + allocate_registers(compiler); 41 + analyze_uniformity(compiler); 42 + codegen(compiler); 43 + 44 + auto code = compiler.as.values(); 45 + auto code_size_bytes = code.size() * sizeof(uint32_t); 46 + memcpy(write_ptr, code.data(), code_size_bytes); 47 + 48 + // dump the shader code to a file. 
49 + // @todo: wip 50 + { 51 + std::stringstream ss; 52 + ss << "shader_" << std::hex << reinterpret_cast<uintptr_t>(write_ptr) << ".bin"; 53 + std::string filename = ss.str(); 54 + 55 + std::ofstream outfile(filename, std::ios::out | std::ios::binary); 56 + if (outfile.is_open()) { 57 + outfile.write(reinterpret_cast<const char*>(code.data()), code_size_bytes); 58 + outfile.close(); 59 + } 60 + log("shader written to {}", filename); 61 + exit(0); 62 + } 63 + } 64 + 65 + void analyze_liveness(Compiler &cc) { 66 + for (uint32_t i = 0; i < cc.mod.insts.size(); ++i) { 67 + for (auto arg : cc.mod.insts[i].args) { 68 + if (arg.id != 0xFFFFFFFF) cc.mod.values[arg.id].last_use = i; 69 + } 70 + } 71 + } 72 + 73 + void analyze_uniformity(Compiler &cc) { 74 + // Simple propagation: Root ptr is uniform. 75 + for (auto& inst : cc.mod.insts) { 76 + bool divergent = false; 77 + for (auto arg : inst.args) { 78 + if (arg.id != 0xFFFFFFFF && !cc.mod.values[arg.id].is_uniform) divergent = true; 79 + } 80 + if (inst.op == LOAD_GLOBAL) divergent = true; // Memory reads are divergent 81 + if (inst.dest.id != 0xFFFFFFFF) cc.mod.values[inst.dest.id].is_uniform = !divergent; 82 + } 83 + } 84 + 85 + void allocate_registers(Compiler &cc) { 86 + // linear register allocation. We need to note the DS and determine how many 87 + // contiguous sgpr/vgprs are needed for that. 
88 + } 89 + 90 + void codegen(Compiler &cc) { 91 + 92 + /* 93 + for (auto& inst : mod.insts) { 94 + switch (inst.op) { 95 + case ADD: 96 + if (mod.values[inst.dest.id].is_uniform) 97 + as.sop2(sop2_opcode::s_add_u32, mod.values[inst.dest.id].phys_reg, 98 + mod.values[inst.args[0].id].phys_reg, mod.values[inst.args[1].id].phys_reg); 99 + else 100 + as.vop2(vop2_opcode::v_add_nc_u32, mod.values[inst.dest.id].phys_reg, 101 + mod.values[inst.args[0].id].phys_reg, mod.values[inst.args[1].id].phys_reg); 102 + break; 103 + case LOAD_GLOBAL: 104 + as.global(global_opcode::global_load_dword, inst.imm, 105 + mod.values[inst.dest.id].phys_reg, mod.values[inst.args[0].id].phys_reg, 0); 106 + break; 107 + case STORE_GLOBAL: 108 + as.global(global_opcode::global_store_dword, inst.imm, 109 + 0, mod.values[inst.args[0].id].phys_reg, mod.values[inst.args[2].id].phys_reg); 110 + break; 111 + case V_MOV_S2V: 112 + as.vop2(vop2_opcode::v_mov_b32, mod.values[inst.dest.id].phys_reg, 113 + mod.values[inst.args[0].id].phys_reg, 0); 114 + break; 115 + } 116 + } 117 + */ 118 + 119 + 120 + cc.as.sopp(RDNA2Assembler::sopp_opcode::s_endpgm, 0); 121 + 122 + // (RDNA ISA Ref. 2.5) 123 + for (auto i = 0; i < 64; ++i) { 124 + cc.as.sopp(RDNA2Assembler::sopp_opcode::s_code_end, 0); 125 + } 126 + } 127 + 128 + }
+8
drivers/amdgpu/compiler/compiler.h
··· 1 + #pragma once 2 + 3 + #include "gir.h" 4 + namespace gir { 5 + 6 + void rdna2_compile(IRModule &mod, void *write_ptr, uint64_t base_addr); 7 + 8 + }
+56
drivers/amdgpu/compiler/gir.h
··· 1 + #pragma once 2 + 3 + #include "rdna2_asm.h" 4 + #include <cstdint> 5 + #include <vector> 6 + 7 + namespace gir { 8 + enum class Type { 9 + Int, Addr, 10 + }; 11 + 12 + enum Op { ADD, SUB, LOAD_GLOBAL, STORE_GLOBAL, GET_ROOT_PTR, V_MOV_S2V, LOAD_ROOT_PTR }; 13 + 14 + struct Ref { uint32_t id; }; 15 + 16 + struct Inst { 17 + Op op; 18 + Ref dest; 19 + std::vector<Ref> args; 20 + uint32_t imm = 0; 21 + }; 22 + 23 + struct ValueMeta { 24 + bool is_uniform = false; 25 + Type type; 26 + uint32_t phys_reg = 0xFFFFFFFF; 27 + uint32_t last_use = 0; 28 + }; 29 + 30 + class IRModule { 31 + public: 32 + std::vector<ValueMeta> values; 33 + std::vector<Inst> insts; 34 + 35 + inline Ref make_value(Type type) { 36 + values.push_back({false, type, 0xFFFFFFFF, 0}); 37 + return { (uint32_t)values.size() - 1 }; 38 + } 39 + }; 40 + 41 + class Builder { 42 + public: 43 + Builder(IRModule& m) : mod(m) {} 44 + 45 + Ref iadd(Ref a, Ref b); 46 + 47 + Ref load_root_ptr(); 48 + 49 + Ref load_global(Ref addr, uint32_t offset); 50 + 51 + void store_global(Ref addr, Ref data, uint32_t offset); 52 + private: 53 + IRModule& mod; 54 + }; 55 + 56 + };
+27
drivers/amdgpu/compiler/gir_builder.cpp
··· 1 + #include "gir.h" 2 + 3 + namespace gir { 4 + 5 + Ref Builder::iadd(Ref a, Ref b) { 6 + Ref dst = mod.make_value(Type::Int); 7 + mod.insts.push_back({ADD, dst, {a, b}}); 8 + return dst; 9 + } 10 + 11 + Ref Builder::load_root_ptr() { 12 + Ref dst = mod.make_value(Type::Addr); 13 + mod.insts.push_back({LOAD_ROOT_PTR, dst, {}}); 14 + return dst; 15 + } 16 + 17 + Ref Builder::load_global(Ref addr, uint32_t offset) { 18 + Ref dst = mod.make_value(Type::Int); 19 + mod.insts.push_back({LOAD_GLOBAL, dst, {addr}, offset}); 20 + return dst; 21 + } 22 + 23 + void Builder::store_global(Ref addr, Ref data, uint32_t offset) { 24 + mod.insts.push_back({STORE_GLOBAL, {0xFFFFFFFF}, {addr, data}, offset}); 25 + } 26 + 27 + }
+566
drivers/amdgpu/compiler/rdna2_asm.h
··· 1 + #pragma once 2 + 3 + #include <cstdint> 4 + #include <vector> 5 + #include "common.h" 6 + 7 + class RDNA2Assembler { 8 + public: 9 + 10 + enum class ssrc : uint8_t { 11 + sgpr0 = 0, 12 + vcc_lo = 106, 13 + vcc_hi = 107, 14 + ttmp0 = 108, 15 + m0 = 124, 16 + null_reg = 125, 17 + exec_lo = 126, 18 + exec_hi = 127, 19 + src_zero = 128, 20 + int_pos_1 = 129, 21 + int_pos_64 = 192, 22 + int_neg_1 = 193, 23 + int_neg_16 = 208, 24 + shared_base = 235, 25 + shared_limit = 236, 26 + private_base = 237, 27 + private_limit = 238, 28 + pops_exiting_wave_id = 239, 29 + float_0_5 = 240, 30 + float_neg_0_5 = 241, 31 + float_1_0 = 242, 32 + float_neg_1_0 = 243, 33 + float_2_0 = 244, 34 + float_neg_2_0 = 245, 35 + float_4_0 = 246, 36 + float_neg_4_0 = 247, 37 + float_1_over_2pi = 248, 38 + vccz = 251, 39 + execz = 252, 40 + scc = 253, 41 + literal_constant = 255 42 + }; 43 + 44 + enum class vsrc : uint16_t { 45 + sgpr0 = 0, 46 + vcc_lo = 106, 47 + vcc_hi = 107, 48 + ttmp0 = 108, 49 + m0 = 124, 50 + null_reg = 125, 51 + exec_lo = 126, 52 + exec_hi = 127, 53 + zero = 128, 54 + int_pos_1 = 129, 55 + int_pos_64 = 192, 56 + int_neg_1 = 193, 57 + int_neg_16 = 208, 58 + dpp8 = 233, 59 + dpp8fi = 234, 60 + shared_base = 235, 61 + shared_limit = 236, 62 + private_base = 237, 63 + private_limit = 238, 64 + pops_exiting_wave_id = 239, 65 + float_0_5 = 240, 66 + float_neg_0_5 = 241, 67 + float_1_0 = 242, 68 + float_neg_1_0 = 243, 69 + float_2_0 = 244, 70 + float_neg_2_0 = 245, 71 + float_4_0 = 246, 72 + float_neg_4_0 = 247, 73 + float_1_over_2pi = 248, 74 + sdwa = 249, 75 + dpp16 = 250, 76 + vccz = 251, 77 + execz = 252, 78 + scc = 253, 79 + literal_constant = 255, 80 + vgpr0 = 256 81 + }; 82 + 83 + enum class sop2_opcode : uint8_t { 84 + s_add_u32 = 0, 85 + s_sub_u32 = 1, 86 + s_add_i32 = 2, 87 + s_sub_i32 = 3, 88 + s_addc_u32 = 4, 89 + s_subb_u32 = 5, 90 + s_min_i32 = 6, 91 + s_min_u32 = 7, 92 + s_max_i32 = 8, 93 + s_max_u32 = 9, 94 + s_cselect_b32 = 10, 95 + s_cselect_b64 = 11, 96 + 
s_and_b32 = 14, 97 + s_and_b64 = 15, 98 + s_or_b32 = 16, 99 + s_or_b64 = 17, 100 + s_xor_b32 = 18, 101 + s_xor_b64 = 19, 102 + s_andn2_b32 = 20, 103 + s_andn2_b64 = 21, 104 + s_orn2_b32 = 22, 105 + s_orn2_b64 = 23, 106 + s_nand_b32 = 24, 107 + s_nand_b64 = 25, 108 + s_nor_b32 = 26, 109 + s_nor_b64 = 27, 110 + s_xnor_b32 = 28, 111 + s_xnor_b64 = 29, 112 + s_lshl_b32 = 30, 113 + s_lshl_b64 = 31, 114 + s_lshr_b32 = 32, 115 + s_lshr_b64 = 33, 116 + s_ashr_i32 = 34, 117 + s_ashr_i64 = 35, 118 + s_bfm_b32 = 36, 119 + s_bfm_b64 = 37, 120 + s_mul_i32 = 38, 121 + s_bfe_u32 = 39, 122 + s_bfe_i32 = 40, 123 + s_bfe_u64 = 41, 124 + s_bfe_i64 = 42, 125 + s_absdiff_i32 = 44, 126 + s_lshl1_add_u32 = 46, 127 + s_lshl2_add_u32 = 47, 128 + s_lshl3_add_u32 = 48, 129 + s_lshl4_add_u32 = 49, 130 + s_pack_ll_b32_b16 = 50, 131 + s_pack_lh_b32_b16 = 51, 132 + s_pack_hh_b32_b16 = 52, 133 + s_mul_hi_u32 = 53, 134 + s_mul_hi_i32 = 54 135 + }; 136 + 137 + inline void sop2(sop2_opcode op, ssrc sdst, ssrc ssrc0, ssrc ssrc1) { 138 + assert((uint8_t)sdst < 128, "sop2: invalid ssrc for sdst: %u", (uint8_t)sdst); 139 + emit(0b10 << 30 | (uint8_t)op << 23 | (uint8_t)sdst << 16 | (uint8_t)ssrc1 << 8 | (uint8_t)ssrc0); 140 + } 141 + 142 + enum class sopk_opcode : uint8_t { 143 + s_movk_i32 = 0, 144 + s_version = 1, 145 + s_cmovk_i32 = 2, 146 + s_cmpk_eq_i32 = 3, 147 + s_cmpk_lg_i32 = 4, 148 + s_cmpk_gt_i32 = 5, 149 + s_cmpk_ge_i32 = 6, 150 + s_cmpk_lt_i32 = 7, 151 + s_cmpk_le_i32 = 8, 152 + s_cmpk_eq_u32 = 9, 153 + s_cmpk_lg_u32 = 10, 154 + s_cmpk_gt_u32 = 11, 155 + s_cmpk_ge_u32 = 12, 156 + s_cmpk_lt_u32 = 13, 157 + s_cmpk_le_u32 = 14, 158 + s_addk_i32 = 15, 159 + s_mulk_i32 = 16, 160 + s_getreg_b32 = 18, 161 + s_setreg_b32 = 19, 162 + s_setreg_imm32_b32 = 21, 163 + s_call_b64 = 22, 164 + s_waitcnt_vscnt = 23, 165 + s_waitcnt_vmcnt = 24, 166 + s_waitcnt_expcnt = 25, 167 + s_waitcnt_lgkmcnt = 26, 168 + s_subvector_loop_begin = 27, 169 + s_subvector_loop_end = 28 170 + }; 171 + 172 + inline void 
sopk(sopk_opcode op, ssrc sdst, int16_t imm) { 173 + assert((uint8_t)sdst < 128, "sopk: invalid ssrc for sdst: %u", (uint8_t)sdst); 174 + emit(0b1011 << 28 | (uint8_t)op << 23 | (uint8_t)sdst << 16 | imm); 175 + } 176 + 177 + enum class sop1_opcode : uint8_t { 178 + s_mov_b32 = 3, 179 + s_mov_b64 = 4, 180 + s_cmov_b32 = 5, 181 + s_cmov_b64 = 6, 182 + s_not_b32 = 7, 183 + s_not_b64 = 8, 184 + s_wqm_b32 = 9, 185 + s_wqm_b64 = 10, 186 + s_brev_b32 = 11, 187 + s_brev_b64 = 12, 188 + s_bcnt0_i32_b32 = 13, 189 + s_bcnt0_i32_b64 = 14, 190 + s_bcnt1_i32_b32 = 15, 191 + s_bcnt1_i32_b64 = 16, 192 + s_ff0_i32_b32 = 17, 193 + s_ff0_i32_b64 = 18, 194 + s_ff1_i32_b32 = 19, 195 + s_ff1_i32_b64 = 20, 196 + s_flbit_i32_b32 = 21, 197 + s_flbit_i32_b64 = 22, 198 + s_flbit_i32 = 23, 199 + s_flbit_i32_i64 = 24, 200 + s_sext_i32_i8 = 25, 201 + s_sext_i32_i16 = 26, 202 + s_bitset0_b32 = 27, 203 + s_bitset0_b64 = 28, 204 + s_bitset1_b32 = 29, 205 + s_bitset1_b64 = 30, 206 + s_getpc_b64 = 31, 207 + s_setpc_b64 = 32, 208 + s_swappc_b64 = 33, 209 + s_rfe_b64 = 34, 210 + s_and_saveexec_b64 = 36, 211 + s_or_saveexec_b64 = 37, 212 + s_xor_saveexec_b64 = 38, 213 + s_andn2_saveexec_b64 = 39, 214 + s_orn2_saveexec_b64 = 40, 215 + s_nand_saveexec_b64 = 41, 216 + s_nor_saveexec_b64 = 42, 217 + s_xnor_saveexec_b64 = 43, 218 + s_quadmask_b32 = 44, 219 + s_quadmask_b64 = 45, 220 + s_movrels_b32 = 46, 221 + s_movrels_b64 = 47, 222 + s_movreld_b32 = 48, 223 + s_movreld_b64 = 49, 224 + s_abs_i32 = 52, 225 + s_andn1_saveexec_b64 = 55, 226 + s_orn1_saveexec_b64 = 56, 227 + s_andn1_wrexec_b64 = 57, 228 + s_andn2_wrexec_b64 = 57, 229 + s_bitreplicate_b64_b32 = 59, 230 + s_and_saveexec_b32 = 60, 231 + s_or_saveexec_b32 = 61, 232 + s_xor_saveexec_b32 = 62, 233 + s_andn2_saveexec_b32 = 63, 234 + s_orn2_saveexec_b32 = 64, 235 + s_nand_saveexec_b32 = 65, 236 + s_nor_saveexec_b32 = 66, 237 + }; 238 + 239 + inline void sop1(sop1_opcode op, ssrc sdst, ssrc ssrc0) { 240 + assert((uint8_t)sdst < 128, "sop1: invalid 
ssrc for sdst: %u", (uint8_t)sdst); 241 + emit(0b101111101 << 23 | (uint8_t)sdst << 16 | (uint8_t)op << 8 | (uint8_t)ssrc0); 242 + } 243 + 244 + enum class sopc_opcode : uint8_t { 245 + s_cmp_eq_i32 = 0, 246 + s_cmp_lg_i32 = 1, 247 + s_cmp_gt_i32 = 2, 248 + s_cmp_ge_i32 = 3, 249 + s_cmp_lt_i32 = 4, 250 + s_cmp_le_i32 = 5, 251 + s_cmp_eq_u32 = 6, 252 + s_cmp_lg_u32 = 7, 253 + s_cmp_gt_u32 = 8, 254 + s_cmp_ge_u32 = 9, 255 + s_cmp_lt_u32 = 10, 256 + s_cmp_le_u32 = 11, 257 + s_bitcmp0_b32 = 12, 258 + s_bitcmp1_b32 = 13, 259 + s_bitcmp0_b64 = 14, 260 + s_bitcmp1_b64 = 15, 261 + s_cmp_eq_u64 = 18, 262 + s_cmp_lg_u64 = 19 263 + }; 264 + 265 + inline void sopc(sopc_opcode op, ssrc ssrc0, ssrc ssrc1) { 266 + emit(0b101111110 << 23 | (uint8_t)op << 16 | (uint8_t)ssrc1 << 8 | (uint8_t)ssrc0); 267 + } 268 + 269 + enum class sopp_opcode : uint8_t { 270 + s_nop = 0, 271 + s_endpgm = 1, 272 + s_branch = 2, 273 + s_wakeup = 3, 274 + s_cbranch_scc0 = 4, 275 + s_cbranch_scc1 = 5, 276 + s_cbranch_vccz = 6, 277 + s_cbranch_vccnz = 7, 278 + s_cbranch_execz = 8, 279 + s_cbranch_execnz = 9, 280 + s_barrier = 10, 281 + s_setkill = 11, 282 + s_waitcnt = 12, 283 + s_sethalt = 13, 284 + s_sleep = 14, 285 + s_setprio = 15, 286 + s_sendmsg = 16, 287 + s_sendmsghalt = 17, 288 + s_trap = 18, 289 + s_icache_inv = 19, 290 + s_incperflevel = 20, 291 + s_decperflevel = 21, 292 + s_ttracedata = 22, 293 + s_cbranch_cdbgsys = 23, 294 + s_cbranch_cdbguser = 24, 295 + s_cbranch_cdbgsys_or_user = 25, 296 + s_cbranch_cdbgsys_and_user = 26, 297 + s_endpgm_saved = 27, 298 + s_endpgm_ordered_ps_done = 30, 299 + s_code_end = 31, 300 + s_inst_prefetch = 32, 301 + s_clause = 33, 302 + s_waitcnt_depctr = 34, 303 + s_round_mode = 36, 304 + s_denorm_mode = 37, 305 + s_ttracedata_imm = 40 306 + }; 307 + 308 + inline void sopp(sopp_opcode op, int16_t simm) { 309 + emit(0b101111111 << 23 | ((uint8_t)op & 0x7F) << 16 | simm); 310 + } 311 + 312 + enum class smem_opcode : uint8_t { 313 + s_load_dword = 0, 314 + 
s_load_dwordx2 = 1, 315 + s_load_dwordx4 = 2, 316 + s_load_dwordx8 = 3, 317 + s_load_dwordx16 = 4, 318 + s_buffer_load_dword = 8, 319 + s_buffer_load_dwordx2 = 9, 320 + s_buffer_load_dwordx4 = 10, 321 + s_buffer_load_dwordx8 = 11, 322 + s_buffer_load_dwordx16 = 12, 323 + s_gl1_inv = 31, 324 + s_dcache_inv = 32, 325 + s_memtime = 36, 326 + s_memrealtime = 37, 327 + s_atc_probe = 38, 328 + s_atc_probe_buffer = 39 329 + }; 330 + 331 + inline void smem(smem_opcode op, bool glc, bool dlc, uint8_t sdata, uint8_t sbase, uint8_t soffset, int32_t offset) { 332 + assert(offset >= -1048576 && offset <= 1048575, "smem: offset exceeds 21-bit signed range: %d", offset); 333 + 334 + // LSB of sbase is ignored. 335 + auto sbase_enc = (sbase >> 1); 336 + auto enc_offset = offset & 0x1FFFFF; 337 + emit(0b111101u << 26 | ((uint8_t)op & 0xFF) << 18 | (glc & 0b1) << 16 | (dlc & 0b1) << 14 | (sdata & 0x7F) << 6 | sbase_enc & 0x3F); 338 + emit((soffset & 0x7F) << 25 | (offset & 0x1FFFFF)); 339 + } 340 + 341 + enum class vop2_opcode : uint8_t { 342 + v_cndmask_b32 = 1, 343 + v_dot2c_f32_f16 = 2, 344 + v_add_f32 = 3, 345 + v_sub_f32 = 4, 346 + v_subrev_f32 = 5, 347 + v_fmac_legacy_f32 = 6, 348 + v_mul_legacy_f32 = 7, 349 + v_mul_f32 = 8, 350 + v_mul_i32_i24 = 9, 351 + v_mul_hi_i32_i24 = 10, 352 + v_mul_u32_u24 = 11, 353 + v_mul_hi_u32_u24 = 12, 354 + v_dot4c_i32_i8 = 13, 355 + v_min_f32 = 15, 356 + v_max_f32 = 16, 357 + v_min_i32 = 17, 358 + v_max_i32 = 18, 359 + v_min_u32 = 19, 360 + v_max_u32 = 20, 361 + v_lshrrev_b32 = 22, 362 + v_ashrrev_i32 = 24, 363 + v_lshlrev_b32 = 26, 364 + v_and_b32 = 27, 365 + v_or_b32 = 28, 366 + v_xor_b32 = 29, 367 + v_xnor_b32 = 30, 368 + v_add_nc_u32 = 37, 369 + v_sub_nc_u32 = 38, 370 + v_subrev_nc_u32 = 39, 371 + v_add_co_ci_u32 = 40, 372 + v_sub_co_ci_u32 = 41, 373 + v_subrev_co_ci_u32 = 42, 374 + v_fmac_f32 = 43, 375 + v_fmamk_f32 = 44, 376 + v_fmaak_f32 = 45, 377 + v_cvt_pkrtz_f16_f32 = 47, 378 + v_add_f16 = 50, 379 + v_sub_f16 = 51, 380 + v_subrev_f16 = 
52, 381 + v_mul_f16 = 53, 382 + v_fmac_f16 = 54, 383 + v_fmamk_f16 = 55, 384 + v_fmaak_f16 = 56, 385 + v_max_f16 = 57, 386 + v_min_f16 = 58, 387 + v_ldexp_f16 = 59, 388 + v_pk_fmac_f16 = 60 389 + }; 390 + 391 + inline void vop2(vop2_opcode op, uint8_t vdst, vsrc src0, uint8_t vsrc1) { 392 + emit(0b0 << 31 | (uint8_t)op << 25 | vdst << 17 | vsrc1 << 9 | (uint16_t)src0 & 0x1FF); 393 + } 394 + 395 + // @todo: i think flat & global are really the same.. 396 + // may want to consolidate them, but what about scratch? 397 + enum class flat_opcode : uint8_t { 398 + flat_load_ubyte = 8, 399 + flat_load_sbyte = 9, 400 + flat_load_ushort = 10, 401 + flat_load_sshort = 11, 402 + flat_load_dword = 12, 403 + flat_load_dwordx2 = 13, 404 + flat_load_dwordx4 = 14, 405 + flat_load_dwordx3 = 15, 406 + flat_store_byte = 24, 407 + flat_store_byte_d16_hi = 25, 408 + flat_store_short = 26, 409 + flat_store_short_d16_hi = 27, 410 + flat_store_dword = 28, 411 + flat_store_dwordx2 = 29, 412 + flat_store_dwordx4 = 30, 413 + flat_store_dwordx3 = 31, 414 + flat_load_ubyte_d16 = 32, 415 + flat_load_ubyte_d16_hi = 33, 416 + flat_load_sbyte_d16 = 34, 417 + flat_load_sbyte_d16_hi = 35, 418 + flat_load_short_d16 = 36, 419 + flat_load_short_d16_hi = 37, 420 + flat_atomic_swap = 48, 421 + flat_atomic_cmpswap = 49, 422 + flat_atomic_add = 50, 423 + flat_atomic_sub = 51, 424 + flat_atomic_smin = 53, 425 + flat_atomic_umin = 54, 426 + flat_atomic_smax = 55, 427 + flat_atomic_umax = 56, 428 + flat_atomic_and = 57, 429 + flat_atomic_or = 58, 430 + flat_atomic_xor = 59, 431 + flat_atomic_inc = 60, 432 + flat_atomic_dec = 61, 433 + flat_atomic_fcmpswap = 62, 434 + flat_atomic_fmin = 63, 435 + flat_atomic_fmax = 64, 436 + flat_atomic_swap_x2 = 80, 437 + flat_atomic_cmpswap_x2 = 81, 438 + flat_atomic_add_x2 = 82, 439 + flat_atomic_sub_x2 = 83, 440 + flat_atomic_smin_x2 = 85, 441 + flat_atomic_umin_x2 = 86, 442 + flat_atomic_smax_x2 = 87, 443 + flat_atomic_umax_x2 = 88, 444 + flat_atomic_and_x2 = 89, 445 + 
flat_atomic_or_x2 = 90, 446 + flat_atomic_xor_x2 = 91, 447 + flat_atomic_inc_x2 = 92, 448 + flat_atomic_dec_x2 = 93, 449 + flat_atomic_fcmpswap_x2 = 94, 450 + flat_atomic_fmin_x2 = 95, 451 + flat_atomic_fmax_x2 = 96 452 + }; 453 + 454 + enum class global_opcode : uint8_t { 455 + global_load_ubyte = 8, 456 + global_load_sbyte = 9, 457 + global_load_ushort = 10, 458 + global_load_sshort = 11, 459 + global_load_dword = 12, 460 + global_load_dwordx2 = 13, 461 + global_load_dwordx4 = 14, 462 + global_load_dwordx3 = 15, 463 + global_load_dword_addtid = 22, 464 + global_store_dword_addtid = 23, 465 + global_store_byte = 24, 466 + global_store_byte_d16_hi = 25, 467 + global_store_short = 26, 468 + global_store_short_d16_hi = 27, 469 + global_store_dword = 28, 470 + global_store_dwordx2 = 29, 471 + global_store_dwordx4 = 30, 472 + global_store_dwordx3 = 31, 473 + global_load_ubyte_d16 = 32, 474 + global_load_ubyte_d16_hi = 33, 475 + global_load_sbyte_d16 = 34, 476 + global_load_sbyte_d16_hi = 35, 477 + global_load_short_d16 = 36, 478 + global_load_short_d16_hi = 37, 479 + global_atomic_swap = 48, 480 + global_atomic_cmpswap = 49, 481 + global_atomic_add = 50, 482 + global_atomic_sub = 51, 483 + global_atomic_csub = 52, 484 + global_atomic_smin = 53, 485 + global_atomic_umin = 54, 486 + global_atomic_smax = 55, 487 + global_atomic_umax = 56, 488 + global_atomic_and = 57, 489 + global_atomic_or = 58, 490 + global_atomic_xor = 59, 491 + global_atomic_inc = 60, 492 + global_atomic_dec = 61, 493 + global_atomic_fcmpswap = 62, 494 + global_atomic_fmin = 63, 495 + global_atomic_fmax = 64, 496 + global_atomic_swap_x2 = 80, 497 + global_atomic_cmpswap_x2 = 81, 498 + global_atomic_add_x2 = 82, 499 + global_atomic_sub_x2 = 83, 500 + global_atomic_smin_x2 = 85, 501 + global_atomic_umin_x2 = 86, 502 + global_atomic_smax_x2 = 87, 503 + global_atomic_umax_x2 = 88, 504 + global_atomic_and_x2 = 89, 505 + global_atomic_or_x2 = 90, 506 + global_atomic_xor_x2 = 91, 507 + global_atomic_inc_x2 = 
92, 508 + global_atomic_dec_x2 = 93, 509 + global_atomic_fcmpswap_x2 = 94, 510 + global_atomic_fmin_x2 = 95, 511 + global_atomic_fmax_x2 = 96 512 + }; 513 + 514 + enum class scratch_opcode : uint8_t { 515 + scratch_load_ubyte = 8, 516 + scratch_load_sbyte = 9, 517 + scratch_load_ushort = 10, 518 + scratch_load_sshort = 11, 519 + scratch_load_dword = 12, 520 + scratch_load_dwordx2 = 13, 521 + scratch_load_dwordx4 = 14, 522 + scratch_load_dwordx3 = 15, 523 + scratch_store_byte = 24, 524 + scratch_store_byte_d16_hi = 25, 525 + scratch_store_short = 26, 526 + scratch_store_short_d16_hi = 27, 527 + scratch_store_dword = 28, 528 + scratch_store_dwordx2 = 29, 529 + scratch_store_dwordx4 = 30, 530 + scratch_store_dwordx3 = 31, 531 + scratch_load_ubyte_d16 = 32, 532 + scratch_load_ubyte_d16_hi = 33, 533 + scratch_load_sbyte_d16 = 34, 534 + scratch_load_sbyte_d16_hi = 35, 535 + scratch_load_short_d16 = 36, 536 + scratch_load_short_d16_hi = 37 537 + }; 538 + 539 + inline void flat(flat_opcode op, bool slc, bool glc, bool lds, bool dlc, uint16_t offset, uint8_t vdst, uint8_t saddr, uint8_t data, uint8_t addr) { 540 + flat_impl((uint8_t)op, slc, glc, 0, lds, dlc, offset, vdst, saddr, data, addr); 541 + } 542 + 543 + inline void scratch(scratch_opcode op, bool slc, bool glc, bool lds, bool dlc, uint16_t offset, uint8_t vdst, uint8_t saddr, uint8_t data, uint8_t addr) { 544 + flat_impl((uint8_t)op, slc, glc, 1, lds, dlc, offset, vdst, saddr, data, addr); 545 + } 546 + 547 + inline void global(global_opcode op, bool slc, bool glc, bool lds, bool dlc, uint16_t offset, uint8_t vdst, uint8_t saddr, uint8_t data, uint8_t addr) { 548 + flat_impl((uint8_t)op, slc, glc, 2, lds, dlc, offset, vdst, saddr, data, addr); 549 + } 550 + 551 + std::vector<uint32_t> &values() { 552 + return m_values; 553 + } 554 + private: 555 + 556 + inline void flat_impl(uint8_t op, bool slc, bool glc, uint8_t seg, bool lds, bool dlc, uint16_t offset, uint8_t vdst, uint8_t saddr, uint8_t data, uint8_t addr) { 
557 + emit(0b110111 << 26 | (op & 0x7F) << 18 | (slc & 0b1) << 17 | (glc & 0b1) << 16 | (seg & 0x2) << 14 | (lds & 0b1) << 13 | (dlc & 0b1) << 12 | offset & 0x7FF); 558 + emit((vdst & 0xFF) << 24 | (saddr & 0x7F) << 16 | (data & 0xFF) << 8 | addr & 0xFF); 559 + } 560 + 561 + void emit(uint32_t v) { 562 + m_values.push_back(v); 563 + } 564 + 565 + std::vector<uint32_t> m_values; 566 + };
+9 -1
drivers/amdgpu/impl.h
··· 39 39 CommandStream cs; 40 40 }; 41 41 42 + struct SemaphoreImpl { 43 + amdgpu_device_handle dev_handle; 44 + uint32_t syncobj_handle; 45 + }; 46 + 42 47 extern "C" { 43 48 KesDevice amdgpu_create(int drm_fd); 44 49 void amdgpu_destroy(KesDevice); ··· 50 55 51 56 KesCommandList amdgpu_start_recording(KesQueue); 52 57 53 - void amdgpu_submit(KesQueue, KesCommandList); 58 + void amdgpu_submit(KesQueue, KesCommandList, KesSemaphore, uint64_t); 54 59 55 60 void amdgpu_cmd_memset(KesCommandList, kes_gpuptr_t addr, size_t size, uint32_t value); 56 61 void amdgpu_cmd_memcpy(KesCommandList, kes_gpuptr_t dst, kes_gpuptr_t src, size_t size); ··· 60 65 void amdgpu_cmd_wait_before(KesCommandList, KesStage after, kes_gpuptr_t addr, uint64_t value, KesOp, KesHazardFlags, uint64_t mask); 61 66 void amdgpu_cmd_dispatch(KesCommandList pcl, kes_gpuptr_t data_ptr, uint32_t x, uint32_t y, uint32_t z); 62 67 void amdgpu_cmd_dispatch_indirect(KesCommandList pcl, kes_gpuptr_t data_ptr, kes_gpuptr_t indirect_addr); 68 + 69 + KesSemaphore amdgpu_create_semaphore(KesDevice, uint64_t); 70 + int amdgpu_wait_semaphore(KesSemaphore, uint64_t); 63 71 } 64 72 65 73 void device_register_allocation(DeviceImpl *impl, amdgpu_bo_handle bo);
+2
drivers/amdgpu/interface.cpp
··· 21 21 fns->fn_cmd_wait_before = amdgpu_cmd_wait_before; 22 22 fns->fn_cmd_dispatch = amdgpu_cmd_dispatch; 23 23 fns->fn_cmd_dispatch_indirect = amdgpu_cmd_dispatch_indirect; 24 + fns->fn_create_semaphore = amdgpu_create_semaphore; 25 + fns->fn_wait_semaphore = amdgpu_wait_semaphore; 24 26 }
+3 -2
drivers/amdgpu/queue.cpp
··· 63 63 } 64 64 65 65 // @todo: add support for semaphore or other synchronization. 66 - void amdgpu_submit(KesQueue pq, KesCommandList pcl) { 66 + void amdgpu_submit(KesQueue pq, KesCommandList pcl, KesSemaphore ps, uint64_t value) { 67 67 auto *queue = reinterpret_cast<QueueImpl *>(pq); 68 68 auto *cl = reinterpret_cast<CommandListImpl *>(pcl); 69 + auto *sem = reinterpret_cast<SemaphoreImpl *>(ps); 69 70 assert(cl->queue == queue, "submit: commandlist from foreign queue"); 70 71 71 - queue->cmd_ring->submit(cl->cs); //, semaphore, value); 72 + queue->cmd_ring->submit(cl->cs, sem, value); 72 73 73 74 // @todo: to free commandlist, we want to be sure that it is no longer mapped and stuff. 74 75 // then, we can freely-free it. But i think this needs some deferred-cleanup, as
+1 -1
drivers/i915/impl.h
··· 13 13 14 14 KesCommandList i915_start_recording(KesQueue); 15 15 16 - void i915_submit(KesQueue, KesCommandList); 16 + void i915_submit(KesQueue, KesCommandList, KesSemaphore, uint64_t); 17 17 18 18 void i915_cmd_memset(KesCommandList, kes_gpuptr_t addr, size_t size, uint32_t value); 19 19 void i915_cmd_write_timestamp(KesCommandList, kes_gpuptr_t addr);
+1 -1
drivers/i915/mock.cpp
··· 27 27 return nullptr; 28 28 } 29 29 30 - API_EXPORT void i915_submit(KesQueue, KesCommandList) { 30 + API_EXPORT void i915_submit(KesQueue, KesCommandList, KesSemaphore, uint64_t) { 31 31 32 32 } 33 33
+3 -1
kestrel/include/kestrel/interface.h
··· 23 23 KesQueue (*fn_create_queue)(KesDevice, enum KesQueueType); 24 24 void (*fn_destroy_queue)(KesQueue); 25 25 KesCommandList (*fn_start_recording)(KesQueue); 26 - void (*fn_submit)(KesQueue, KesCommandList); 26 + void (*fn_submit)(KesQueue, KesCommandList, KesSemaphore, uint64_t); 27 27 void (*fn_cmd_memset)(KesCommandList, kes_gpuptr_t addr, size_t size, uint32_t value); 28 28 void (*fn_cmd_memcpy)(KesCommandList, kes_gpuptr_t dst, kes_gpuptr_t src, size_t size); 29 29 void (*fn_cmd_write_timestamp)(KesCommandList, kes_gpuptr_t addr); ··· 31 31 void (*fn_cmd_wait_before)(KesCommandList, enum KesStage after, kes_gpuptr_t addr, uint64_t value, enum KesOp, enum KesHazardFlags, uint64_t mask); 32 32 void (*fn_cmd_dispatch)(KesCommandList command_list, kes_gpuptr_t data, uint32_t x, uint32_t y, uint32_t z); 33 33 void (*fn_cmd_dispatch_indirect)(KesCommandList command_list, kes_gpuptr_t data, kes_gpuptr_t command_addr); 34 + KesSemaphore (*fn_create_semaphore)(KesDevice device, uint64_t value); 35 + int (*fn_wait_semaphore)(KesSemaphore semaphore, uint64_t value); 34 36 }; 35 37 36 38 /**
+9 -1
kestrel/include/kestrel/kestrel.h
··· 31 31 typedef void *KesCommandList; 32 32 33 33 /** 34 + * Opaque handle to a Semaphore. 35 + */ 36 + typedef void *KesSemaphore; 37 + 38 + /** 34 39 * Structure describing a memory allocation. 35 40 * @sa kes_malloc 36 41 */ ··· 235 240 * @param queue The queue to submit the command list to. Must be the queue that the command list was created for. 236 241 * @param command_list The command list to submit. 237 242 */ 238 - void kes_submit(KesQueue queue, KesCommandList command_list); 243 + void kes_submit(KesQueue queue, KesCommandList command_list, KesSemaphore semaphore, uint64_t value); 239 244 240 245 /** 241 246 * Record a memory set command in the command list. ··· 312 317 */ 313 318 void kes_cmd_dispatch_indirect(KesCommandList command_list, kes_gpuptr_t data, kes_gpuptr_t command_addr); 314 319 320 + KesSemaphore kes_create_semaphore(KesDevice device, uint64_t value); 321 + 322 + int kes_wait_semaphore(KesSemaphore semaphore, uint64_t value); 315 323 316 324 #ifdef __cplusplus 317 325 }
+26 -2
kestrel/rt/api.cpp
··· 18 18 uint32_t vendor_id; 19 19 }; 20 20 21 + // @todo: more proper discovery, checking. 22 + // also, consider how kes_create should work (feature-flags, requirements etc). 21 23 std::vector<DiscoveryInfo> discover_gpus() { 22 24 std::vector<DiscoveryInfo> found; 23 25 std::string base_path = "/sys/class/drm"; ··· 57 59 struct CommandListHandle { 58 60 DeviceHandle *dev; 59 61 KesCommandList cmdlist; 62 + }; 63 + 64 + struct SemaphoreHandle { 65 + DeviceHandle *dev; 66 + KesSemaphore sem; 60 67 }; 61 68 62 69 API_EXPORT KesDevice kes_create() { ··· 156 163 return clhandle; 157 164 } 158 165 159 - API_EXPORT void kes_submit(KesQueue pq, KesCommandList pcl) { 166 + API_EXPORT void kes_submit(KesQueue pq, KesCommandList pcl, KesSemaphore semaphore, uint64_t value) { 160 167 auto *qhandle = reinterpret_cast<QueueHandle *>(pq); 161 168 auto *clhandle = reinterpret_cast<CommandListHandle *>(pcl); 162 169 auto *dev = qhandle->dev; 163 170 164 - dev->fns.fn_submit(qhandle->queue, clhandle->cmdlist); 171 + dev->fns.fn_submit(qhandle->queue, clhandle->cmdlist, semaphore, value); 165 172 166 173 delete clhandle; 167 174 } ··· 214 221 215 222 dev->fns.fn_cmd_dispatch_indirect(clhandle->cmdlist, data, command_addr); 216 223 } 224 + 225 + KesSemaphore kes_create_semaphore(KesDevice pd, uint64_t value) { 226 + auto *dev = reinterpret_cast<DeviceHandle *>(pd); 227 + auto sem = dev->fns.fn_create_semaphore(dev->drv_handle, value); 228 + 229 + auto *handle = new SemaphoreHandle{}; 230 + handle->dev = dev; 231 + handle->sem = sem; 232 + return handle; 233 + } 234 + 235 + int kes_wait_semaphore(KesSemaphore ps, uint64_t value) { 236 + auto *handle = reinterpret_cast<SemaphoreHandle *>(ps); 237 + auto *dev = handle->dev; 238 + 239 + return dev->fns.fn_wait_semaphore(handle->sem, value); 240 + }
+4 -2
test/examples/02_hello_queue/hello_queue.cpp
··· 11 11 auto x = kes_malloc(dev, size, 4, KesMemoryDefault); 12 12 auto y = kes_malloc(dev, 8, 4, KesMemoryDefault); 13 13 14 + auto sem = kes_create_semaphore(dev, 0); 15 + 14 16 printf("x: %p (%p) (%llu bytes)\n", x.cpu, x.gpu, x.size); 15 17 printf("y: %p (%p) (%llu bytes)\n", y.cpu, y.gpu, y.size); 16 18 ··· 23 25 kes_cmd_wait_before(l1, KesStageTransfer, y.gpu, 1337, KesOpEqual, KesHazardFlagsNone, ~0); 24 26 kes_cmd_memset(l1, x.gpu, size, 2); 25 27 26 - kes_submit(dma, l1); 28 + kes_submit(dma, l1, sem, 1); 27 29 28 - // @todo: hacky busy-wait 30 + // hacky busy-wait just to prove it works. 29 31 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]); 30 32 31 33 while(*((uint32_t *)x.cpu) == 0);
+5 -2
test/examples/03_hello_2queue/hello_2queue.cpp
··· 12 12 auto y = kes_malloc(dev, 8, 4, KesMemoryDefault); 13 13 auto ts = kes_malloc(dev, 8 * 5, 4, KesMemoryDefault); 14 14 15 + auto sem1 = kes_create_semaphore(dev, 0); 16 + auto sem2 = kes_create_semaphore(dev, 0); 17 + 15 18 printf("x: %p (%p) (%llu bytes)\n", x.cpu, x.gpu, x.size); 16 19 printf("y: %p (%p) (%llu bytes)\n", y.cpu, y.gpu, y.size); 17 20 ··· 35 38 kes_cmd_write_timestamp(l2, ts.gpu + 32); 36 39 } 37 40 38 - kes_submit(dma2, l2); 39 - kes_submit(dma1, l1); 41 + kes_submit(dma2, l2, sem1, 1); 42 + kes_submit(dma1, l1, sem2, 1); 40 43 41 44 // @todo: how to wait on cpu for DMA transfer? TODO? 42 45 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]);
+8 -1
test/examples/04_hello_timestamp/hello_timestamp.cpp
··· 11 11 auto x = kes_malloc(dev, size, 4, KesMemoryDefault); 12 12 auto y = kes_malloc(dev, 16, 4, KesMemoryDefault); 13 13 14 + auto sem = kes_create_semaphore(dev, 0); 15 + 14 16 printf("x: %p (%p) (%llu bytes)\n", x.cpu, x.gpu, x.size); 15 17 printf("y: %p (%p) (%llu bytes)\n", y.cpu, y.gpu, y.size); 16 18 ··· 21 23 kes_cmd_memset(l1, x.gpu, size, 2); 22 24 kes_cmd_write_timestamp(l1, y.gpu + 8); 23 25 24 - kes_submit(dma, l1); 26 + kes_submit(dma, l1, sem, 1); 27 + 28 + auto r = kes_wait_semaphore(sem, 1); 29 + if (r < 0) { 30 + printf("wait for semaphore failed: %d\n", r); 31 + } 25 32 26 33 // @todo: how to wait on cpu for DMA transfer? TODO? 27 34 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]);
+4 -1
test/examples/05_hello_transfer_copy/transfer_copy.cpp
··· 11 11 auto x = kes_malloc(dev, size, 4, KesMemoryDefault); 12 12 auto y = kes_malloc(dev, size, 4, KesMemoryDefault); 13 14 + auto sem = kes_create_semaphore(dev, 0); 15 16 printf("x: %p (%p) (%llu bytes)\n", x.cpu, x.gpu, x.size); 15 17 printf("y: %p (%p) (%llu bytes)\n", y.cpu, y.gpu, y.size); 16 18 ··· 19 21 auto l1 = kes_start_recording(dma); 20 22 kes_cmd_memset(l1, x.gpu, size, 2); 21 23 kes_cmd_memcpy(l1, y.gpu, x.gpu, size); 22 - kes_submit(dma, l1); 24 + 25 + kes_submit(dma, l1, sem, 1); 23 26 24 27 // @todo: hacky busy-wait 25 28 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]);
+5 -3
test/examples/06_hello_sync/hello_sync.cpp
··· 12 12 auto y = kes_malloc(dev, 8, 4, KesMemoryDefault); 13 13 auto ts = kes_malloc(dev, 8 * 4, 4, KesMemoryDefault); 14 14 15 + auto sem1 = kes_create_semaphore(dev, 0); 16 + auto sem2 = kes_create_semaphore(dev, 0); 17 + 15 18 printf("x: %p (%p) (%llu bytes)\n", x.cpu, x.gpu, x.size); 16 19 printf("y: %p (%p) (%llu bytes)\n", y.cpu, y.gpu, y.size); 17 20 ··· 33 36 kes_cmd_write_timestamp(l2, ts.gpu + 24); 34 37 } 35 38 36 - kes_submit(dma, l1); 37 - kes_submit(compute, l2); 39 + kes_submit(dma, l1, sem1, 1); 40 + kes_submit(compute, l2, sem2, 1); 38 41 39 - // @todo: how to wait on cpu for DMA transfer? TODO? 40 42 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]); 41 43 sleep(1); 42 44 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]);
+7 -6
test/examples/07_hello_dispatch/hello_dispatch.cpp
··· 13 13 auto x = kes_malloc(dev, 1024, 4, KesMemoryDefault); 14 14 auto y = kes_malloc(dev, sizeof(DispatchArguments), 8, KesMemoryDefault); 15 15 16 + auto sem = kes_create_semaphore(dev, 0); 17 + 16 18 printf("x: %p %p\n", (void *)x.cpu, (void *)x.gpu); 17 19 printf("y: %p %p\n", (void *)y.cpu, (void *)y.gpu); 18 20 ··· 26 28 kes_cmd_dispatch(cl, y.gpu, 32, 1, 1); 27 29 } 28 30 29 - kes_submit(compute, cl); 31 + kes_submit(compute, cl, sem, 1); 30 32 31 - sleep(1); 32 - 33 + auto r = kes_wait_semaphore(sem, 1); 34 + if (r < 0) { 35 + printf("wait for semaphore failed: %d\n", r); 36 + } 33 37 printf("x[0]: %u\n", ((uint32_t *)x.cpu)[0]); 34 38 printf("x[1]: %u\n", ((uint32_t *)x.cpu)[1]); 35 39 printf("x[2]: %u\n", ((uint32_t *)x.cpu)[2]); 36 40 printf("x[3]: %u\n", ((uint32_t *)x.cpu)[3]); 37 - 38 - kes_free(dev, &x); 39 - kes_destroy(dev); 40 41 41 42 return 0; 42 43 }