Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdkfd: Add MQD manager for GFX 12.1.0

This patch adds the following functionality for GFX 12.1.0:
1. Add a new MQD manager for GFX v12.1.0.
2. Add a new 12.1.0 specific device queue manager file.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Mukul Joshi, committed by Alex Deucher
01bbc4a4 1fca2832

6 files changed, 756 insertions(+), 1 deletion(-)
+2
drivers/gpu/drm/amd/amdkfd/Makefile
···
 		$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
 		$(AMDKFD_PATH)/kfd_mqd_manager_v11.o \
 		$(AMDKFD_PATH)/kfd_mqd_manager_v12.o \
+		$(AMDKFD_PATH)/kfd_mqd_manager_v12_1.o \
 		$(AMDKFD_PATH)/kfd_kernel_queue.o \
 		$(AMDKFD_PATH)/kfd_packet_manager.o \
 		$(AMDKFD_PATH)/kfd_packet_manager_vi.o \
···
 		$(AMDKFD_PATH)/kfd_device_queue_manager_v10.o \
 		$(AMDKFD_PATH)/kfd_device_queue_manager_v11.o \
 		$(AMDKFD_PATH)/kfd_device_queue_manager_v12.o \
+		$(AMDKFD_PATH)/kfd_device_queue_manager_v12_1.o \
 		$(AMDKFD_PATH)/kfd_interrupt.o \
 		$(AMDKFD_PATH)/kfd_events.o \
 		$(AMDKFD_PATH)/cik_event_interrupt.o \
+3 -1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
···
 		break;

 	default:
-		if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0))
+		if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 1, 0))
+			device_queue_manager_init_v12_1(&dqm->asic_ops);
+		else if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0))
 			device_queue_manager_init_v12(&dqm->asic_ops);
 		else if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
 			device_queue_manager_init_v11(&dqm->asic_ops);
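The dispatch chain above relies on IP_VERSION() comparisons ordering correctly, which is why the 12.1.0 check must sit above the 12.0.0 one. Below is a minimal standalone sketch of why plain ">=" works, assuming the usual major/minor/revision packing; SKETCH_IP_VERSION is a simplified stand-in, since the real macros in the amdgpu headers also carry variant and sub-revision fields.

#include <assert.h>

/* Packing major, minor and revision into one integer makes ordinary
 * integer comparison order versions lexicographically. */
#define SKETCH_IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

int main(void)
{
	assert(SKETCH_IP_VERSION(12, 1, 0) > SKETCH_IP_VERSION(12, 0, 0)); /* 0x0c0100 > 0x0c0000 */
	assert(SKETCH_IP_VERSION(12, 0, 0) > SKETCH_IP_VERSION(11, 0, 0)); /* 0x0c0000 > 0x0b0000 */
	return 0;
}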
+2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
···
 		struct device_queue_manager_asic_ops *asic_ops);
 void device_queue_manager_init_v12(
 		struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_v12_1(
+		struct device_queue_manager_asic_ops *asic_ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 		struct qcm_process_device *qpd);
 unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
+90
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12_1.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_device_queue_manager.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "soc24_enum.h"

static int update_qpd_v12_1(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd);
static void init_sdma_vm_v12_1(struct device_queue_manager *dqm, struct queue *q,
			       struct qcm_process_device *qpd);

void device_queue_manager_init_v12_1(
	struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->update_qpd = update_qpd_v12_1;
	asic_ops->init_sdma_vm = init_sdma_vm_v12_1;
	asic_ops->mqd_manager_init = mqd_manager_init_v12_1;
}

static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
	/* Aperture bases live in bits 63:48 of the 64-bit VA, so both
	 * fields take the top 16 bits of their respective base address.
	 */
	uint32_t shared_base = pdd->lds_base >> 48;
	uint32_t private_base = pdd->scratch_base >> 48;

	return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		(private_base << SH_MEM_BASES__PRIVATE_BASE__SHIFT);
}

static int update_qpd_v12_1(struct device_queue_manager *dqm,
			    struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
			(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);

		qpd->sh_mem_config |=
			(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
		if (!pdd->process->xnack_enabled)
			qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
		else
			qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
	}

	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);

	pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);

	return 0;
}

static void init_sdma_vm_v12_1(struct device_queue_manager *dqm, struct queue *q,
			       struct qcm_process_device *qpd)
{
	/* Not needed on SDMAv4 onwards any more */
	q->properties.sdma_vm_addr = 0;
}
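To see what compute_sh_mem_bases_64bit() produces, here is a hedged userspace walk-through with made-up aperture addresses, assuming the GFX9-style SH_MEM_BASES layout (SHARED_BASE in bits 31:16, PRIVATE_BASE in bits 15:0, matching the __SHIFT values from the sh_mask header).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical aperture bases; only bits 63:48 matter here */
	uint64_t lds_base     = 0x0001000000000000ULL;
	uint64_t scratch_base = 0x0002000000000000ULL;

	uint32_t shared_base  = lds_base >> 48;      /* 0x0001 */
	uint32_t private_base = scratch_base >> 48;  /* 0x0002 */

	/* SHARED_BASE shift is 16, PRIVATE_BASE shift is 0 on GFX9+ parts */
	uint32_t sh_mem_bases = (shared_base << 16) | private_base;

	printf("SH_MEM_BASES = 0x%08x\n", sh_mem_bases); /* 0x00010002 */
	return 0;
}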
+657
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2025 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v12_structs.h"
#include "gc/gc_12_1_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"

#define MQD_SIZE (2 * PAGE_SIZE)

static uint64_t mqd_stride_v12_1(struct mqd_manager *mm,
				 struct queue_properties *q)
{
	if (q->type == KFD_QUEUE_TYPE_COMPUTE)
		return MQD_SIZE;
	else
		return PAGE_SIZE;
}

static inline struct v12_1_compute_mqd *get_mqd(void *mqd)
{
	return (struct v12_1_compute_mqd *)mqd;
}

static inline struct v12_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v12_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo, uint32_t inst)
{
	struct v12_1_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v12_1_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
		struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	unsigned int size;

	/*
	 * Allocate two pages for the compute MQD because MES writes to areas
	 * beyond the struct MQD size; the compute MQD itself is one page.
	 * The SDMA MQD gets a single page.
	 */
	if (q->type == KFD_QUEUE_TYPE_COMPUTE)
		size = MQD_SIZE * NUM_XCC(node->xcc_mask);
	else
		size = PAGE_SIZE;

	if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

static void init_mqd(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct v12_1_compute_mqd *m;

	m = (struct v12_1_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, MQD_SIZE);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se8 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x63 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_status0.c_queue_debug_en to 1 to have the CP set up the
	 * DISPATCH_PTR. This is required for the kfd debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->kfd->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
	return r;
}

static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v12_1_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 1 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	return false;
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v12_1_compute_mqd *m;
	struct mqd_user_context_save_area_header header;

	m = get_mqd(mqd);

	/* Control stack is written backwards, while workgroup context data
	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
	 * Current positions are at m->cp_hqd_cntl_stack_offset and
	 * m->cp_hqd_wg_state_offset, respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	/* Control stack is not copied to user mode for GFXv12 because
	 * it's part of the context save area that is already
	 * accessible to user mode.
	 */
	header.control_stack_size = *ctl_stack_used_size;
	header.wave_state_size = *save_area_used_size;

	header.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header)))
		return -EFAULT;

	return 0;
}

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v12_1_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v12_sdma_mqd *m;

	m = (struct v12_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, PAGE_SIZE);

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
		struct queue_properties *q,
		struct mqd_update_info *minfo)
{
	struct v12_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_SDMA_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_SDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_SDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_SDMA_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_SDMA_QUEUE0_RB_CNTL__MCU_WPTR_POLL_ENABLE__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_SDMA_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;

	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
			struct kfd_mem_obj *xcc_mqd_mem_obj,
			uint64_t offset)
{
	xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
					mqd_mem_obj->gtt_mem : NULL;
	xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
	xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
					+ offset);
}

static void init_mqd_v12_1(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v12_1_compute_mqd *m;
	int xcc = 0;
	struct kfd_mem_obj xcc_mqd_mem_obj;
	uint64_t xcc_gart_addr = 0;
	uint64_t xcc_ctx_save_restore_area_address;
	uint64_t offset = mm->mqd_stride(mm, q);
	uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;

	memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);

		init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);

		m->cp_mqd_stride_size = offset;

		/*
		 * Update the CWSR address for each XCC if CWSR is enabled
		 * and the CWSR area is allocated in thunk
		 */
		if (mm->dev->kfd->cwsr_enabled &&
		    q->ctx_save_restore_area_address) {
			xcc_ctx_save_restore_area_address =
				q->ctx_save_restore_area_address +
				(xcc * q->ctx_save_restore_area_size);

			m->cp_hqd_ctx_save_base_addr_lo =
				lower_32_bits(xcc_ctx_save_restore_area_address);
			m->cp_hqd_ctx_save_base_addr_hi =
				upper_32_bits(xcc_ctx_save_restore_area_address);
		}

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			m->compute_tg_chunk_size = 1;
			m->compute_current_logical_xcc_id =
				(local_xcc_start + xcc) %
				NUM_XCC(mm->dev->xcc_mask);
		} else {
			/* PM4 Queue */
			m->compute_current_logical_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}

		if (xcc == 0) {
			/* Set the MQD pointer and gart address to XCC0 MQD */
			*mqd = m;
			*gart_addr = xcc_gart_addr;
		}
	}
}

static void update_mqd_v12_1(struct mqd_manager *mm, void *mqd,
		      struct queue_properties *q, struct mqd_update_info *minfo)
{
	struct v12_1_compute_mqd *m;
	int xcc = 0;
	uint64_t size = mm->mqd_stride(mm, q);

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		m = get_mqd(mqd + size * xcc);
		update_mqd(mm, m, q, minfo);

		update_cu_mask(mm, m, minfo, xcc);

		if (q->format == KFD_QUEUE_FORMAT_AQL) {
			m->compute_tg_chunk_size = 1;
		} else {
			/* PM4 Queue */
			m->compute_current_logical_xcc_id = 0;
			m->compute_tg_chunk_size = 0;
			m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
		}
	}
}

static int destroy_mqd_v12_1(struct mqd_manager *mm, void *mqd,
		   enum kfd_preempt_type type, unsigned int timeout,
		   uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	void *xcc_mqd;
	struct v12_1_compute_mqd *m;
	uint64_t mqd_offset;

	m = get_mqd(mqd);
	mqd_offset = m->cp_mqd_stride_size;

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_offset * inst;
		err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
						    type, timeout, pipe_id,
						    queue_id, xcc_id);
		if (err) {
			pr_debug("Destroy MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int load_mqd_v12_1(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
	uint32_t xcc_mask = mm->dev->xcc_mask;
	int xcc_id, err, inst = 0;
	void *xcc_mqd;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, p);

	for_each_inst(xcc_id, xcc_mask) {
		xcc_mqd = mqd + mqd_stride_size * inst;
		err = mm->dev->kfd2kgd->hqd_load(
			mm->dev->adev, xcc_mqd, pipe_id, queue_id,
			(uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
			xcc_id);
		if (err) {
			pr_debug("Load MQD failed for xcc: %d\n", inst);
			break;
		}
		++inst;
	}

	return err;
}

static int get_wave_state_v12_1(struct mqd_manager *mm, void *mqd,
				struct queue_properties *q,
				void __user *ctl_stack,
				u32 *ctl_stack_used_size,
				u32 *save_area_used_size)
{
	int xcc, err = 0;
	void *xcc_mqd;
	void __user *xcc_ctl_stack;
	uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
	u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;

	for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
		xcc_mqd = mqd + mqd_stride_size * xcc;
		xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
					q->ctx_save_restore_area_size * xcc);

		err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
				     &tmp_ctl_stack_used_size,
				     &tmp_save_area_used_size);
		if (err)
			break;

		/*
		 * Set ctl_stack_used_size and save_area_used_size to the
		 * values of XCC 0 when passing the info to user-space.
		 * For multi XCC, user-space would have to look at the header
		 * info of each control stack area to determine the control
		 * stack size and save area used.
		 */
		if (xcc == 0) {
			*ctl_stack_used_size = tmp_ctl_stack_used_size;
			*save_area_used_size = tmp_save_area_used_size;
		}
	}

	return err;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v12_1_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v12_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v12_1(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_v12_1;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd_v12_1;
		mqd->update_mqd = update_mqd_v12_1;
		mqd->destroy_mqd = destroy_mqd_v12_1;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_1_compute_mqd);
		mqd->get_wave_state = get_wave_state_v12_1;
		mqd->mqd_stride = mqd_stride_v12_1;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_1_compute_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->check_preemption_failed = check_preemption_failed;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v12_1_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->mqd_size = sizeof(struct v12_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}
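One detail worth calling out from update_mqd() and update_mqd_sdma() above: the ring-size fields store a log2 encoding of the queue size, recovered with ffs() on the size in dwords. A small userspace sketch of the arithmetic (the sizes are illustrative, not taken from the diff):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int queue_size = 4096;		/* bytes, must be a power of two */
	unsigned int dwords = queue_size / 4;	/* 1024 dwords */

	/* CP PQ control uses "order - 1": ffs(1024) - 1 - 1 = 9 */
	printf("cp rb size field:   %d\n", ffs(dwords) - 1 - 1);

	/* SDMA RB_CNTL uses the plain order: ffs(1024) - 1 = 10 */
	printf("sdma rb size field: %d\n", ffs(dwords) - 1);
	return 0;
}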
+2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
···
 		struct kfd_node *dev);
 struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
 		struct kfd_node *dev);
+struct mqd_manager *mqd_manager_init_v12_1(enum KFD_MQD_TYPE type,
+		struct kfd_node *dev);
 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
 void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
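For context on how the prototype above gets used: the device queue manager creates one MQD manager per queue type through the per-ASIC mqd_manager_init hook, which device_queue_manager_init_v12_1() points at mqd_manager_init_v12_1(). A paraphrased sketch of that consumer, not part of this diff; the real loop in kfd_device_queue_manager.c also logs failures and unwinds:

static int init_mqd_managers_sketch(struct device_queue_manager *dqm)
{
	int i;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		/* resolves to mqd_manager_init_v12_1() on GFX 12.1.0 */
		dqm->mqd_mgrs[i] = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!dqm->mqd_mgrs[i])
			return -ENOMEM;
	}
	return 0;
}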