1/*
2 * Copyright 2022 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_xcp.h"
31#include "amdgpu_ucode.h"
32#include "amdgpu_trace.h"
33#include "amdgpu_reset.h"
34
35#include "sdma/sdma_4_4_2_offset.h"
36#include "sdma/sdma_4_4_2_sh_mask.h"
37
38#include "soc15_common.h"
39#include "soc15.h"
40#include "vega10_sdma_pkt_open.h"
41
42#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
43#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
44
45#include "amdgpu_ras.h"
46
47MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
48MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin");
49MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
50
51static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
52 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG),
53 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG),
54 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG),
55 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG),
56 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM),
57 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI),
58 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH),
59 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS),
60 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS),
61 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0),
62 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1),
63 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0),
64 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1),
65 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL),
66 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR),
67 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI),
68 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR),
69 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI),
70 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET),
71 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO),
72 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI),
73 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL),
74 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR),
75 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN),
76 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG),
77 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL),
78 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR),
79 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI),
80 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR),
81 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI),
82 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET),
83 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO),
84 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI),
85 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG),
86 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL),
87 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR),
88 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI),
89 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR),
90 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI),
91 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET),
92 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO),
93 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI),
94 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG),
95 SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL)
96};
97
98#define mmSMNAID_AID0_MCA_SMU 0x03b30400
99
100#define WREG32_SDMA(instance, offset, value) \
101 WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
102#define RREG32_SDMA(instance, offset) \
103 RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
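/*
 * Illustrative expansion of the helpers above: with instance == 1,
 *   WREG32_SDMA(1, regSDMA_GFX_RB_CNTL, val)
 * becomes
 *   WREG32(sdma_v4_4_2_get_reg_offset(adev, 1, regSDMA_GFX_RB_CNTL), val);
 * i.e. the per-instance register base is resolved first, so callers only
 * deal with logical SDMA instance numbers.
 */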
104
105static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
106static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
107static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
108static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
109static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
110static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
111static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
112static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
113 u32 instance_id);
114
115static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
116 u32 instance, u32 offset)
117{
118 u32 dev_inst = GET_INST(SDMA0, instance);
119
120 return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset);
121}
122
123static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
124{
125 switch (seq_num) {
126 case 0:
127 return SOC15_IH_CLIENTID_SDMA0;
128 case 1:
129 return SOC15_IH_CLIENTID_SDMA1;
130 case 2:
131 return SOC15_IH_CLIENTID_SDMA2;
132 case 3:
133 return SOC15_IH_CLIENTID_SDMA3;
134 default:
135 return -EINVAL;
136 }
137}
138
139static int sdma_v4_4_2_irq_id_to_seq(struct amdgpu_device *adev, unsigned client_id)
140{
141 switch (client_id) {
142 case SOC15_IH_CLIENTID_SDMA0:
143 return 0;
144 case SOC15_IH_CLIENTID_SDMA1:
145 return 1;
146 case SOC15_IH_CLIENTID_SDMA2:
147 if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
148 return 0;
149 else
150 return 2;
151 case SOC15_IH_CLIENTID_SDMA3:
152 if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
153 return 1;
154 else
155 return 3;
156 default:
157 return -EINVAL;
158 }
159}
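/*
 * For illustration: under SR-IOV with adev->gfx.xcc_mask == 0x1, an IV entry
 * carrying client_id == SOC15_IH_CLIENTID_SDMA2 makes
 * sdma_v4_4_2_irq_id_to_seq() return 0, i.e. it is folded back onto the
 * first SDMA instance rather than instance 2.
 */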
160
161static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev,
162 uint32_t inst_mask)
163{
164 u32 val;
165 int i;
166
167 for (i = 0; i < adev->sdma.num_instances; i++) {
168 val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG);
169 val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, NUM_BANKS, 4);
170 val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG,
171 PIPE_INTERLEAVE_SIZE, 0);
172 WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG, val);
173
174 val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ);
175 val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, NUM_BANKS,
176 4);
177 val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ,
178 PIPE_INTERLEAVE_SIZE, 0);
179 WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ, val);
180 }
181}
182
183/**
184 * sdma_v4_4_2_init_microcode - load ucode images from disk
185 *
186 * @adev: amdgpu_device pointer
187 *
188 * Use the firmware interface to load the ucode images into
189 * the driver (not loaded into hw).
190 * Returns 0 on success, error on failure.
191 */
192static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
193{
194 int ret, i;
195
196 for (i = 0; i < adev->sdma.num_instances; i++) {
197 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
198 amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
199 amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
200 ret = amdgpu_sdma_init_microcode(adev, 0, true);
201 break;
202 } else {
203 ret = amdgpu_sdma_init_microcode(adev, i, false);
204 if (ret)
205 return ret;
206 }
207 }
208
209 return ret;
210}
211
212/**
213 * sdma_v4_4_2_ring_get_rptr - get the current read pointer
214 *
215 * @ring: amdgpu ring pointer
216 *
217 * Get the current rptr from the hardware.
218 */
219static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
220{
221 u64 rptr;
222
223 /* XXX check if swapping is necessary on BE */
224 rptr = READ_ONCE(*((u64 *)&ring->adev->wb.wb[ring->rptr_offs]));
225
226 DRM_DEBUG("rptr before shift == 0x%016llx\n", rptr);
227 return rptr >> 2;
228}
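/*
 * Note on the shifts: judging from the << 2 in sdma_v4_4_2_ring_set_wptr()
 * and the >> 2 above, the writeback slot holds a byte offset while the
 * amdgpu ring rptr/wptr are tracked in dwords; e.g. a byte offset of 0x40
 * corresponds to dword index 0x10.
 */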
229
230/**
231 * sdma_v4_4_2_ring_get_wptr - get the current write pointer
232 *
233 * @ring: amdgpu ring pointer
234 *
235 * Get the current wptr from the hardware.
236 */
237static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
238{
239 struct amdgpu_device *adev = ring->adev;
240 u64 wptr;
241
242 if (ring->use_doorbell) {
243 /* XXX check if swapping is necessary on BE */
244 wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
245 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
246 } else {
247 wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
248 wptr = wptr << 32;
249 wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
250 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
251 ring->me, wptr);
252 }
253
254 return wptr >> 2;
255}
256
257/**
258 * sdma_v4_4_2_ring_set_wptr - commit the write pointer
259 *
260 * @ring: amdgpu ring pointer
261 *
262 * Write the wptr back to the hardware.
263 */
264static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
265{
266 struct amdgpu_device *adev = ring->adev;
267
268 DRM_DEBUG("Setting write pointer\n");
269 if (ring->use_doorbell) {
270 u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
271
272 DRM_DEBUG("Using doorbell -- "
273 "wptr_offs == 0x%08x "
274 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
275 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
276 ring->wptr_offs,
277 lower_32_bits(ring->wptr << 2),
278 upper_32_bits(ring->wptr << 2));
279 /* XXX check if swapping is necessary on BE */
280 WRITE_ONCE(*wb, (ring->wptr << 2));
281 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
282 ring->doorbell_index, ring->wptr << 2);
283 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
284 } else {
285 DRM_DEBUG("Not using doorbell -- "
286 "regSDMA%i_GFX_RB_WPTR == 0x%08x "
287 "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
288 ring->me,
289 lower_32_bits(ring->wptr << 2),
290 ring->me,
291 upper_32_bits(ring->wptr << 2));
292 WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
293 lower_32_bits(ring->wptr << 2));
294 WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
295 upper_32_bits(ring->wptr << 2));
296 }
297}
298
299/**
300 * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
301 *
302 * @ring: amdgpu ring pointer
303 *
304 * Get the current wptr from the hardware.
305 */
306static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
307{
308 struct amdgpu_device *adev = ring->adev;
309 u64 wptr;
310
311 if (ring->use_doorbell) {
312 /* XXX check if swapping is necessary on BE */
313 wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
314 } else {
315 wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
316 wptr = wptr << 32;
317 wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
318 }
319
320 return wptr >> 2;
321}
322
323/**
324 * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
325 *
326 * @ring: amdgpu ring pointer
327 *
328 * Write the wptr back to the hardware.
329 */
330static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
331{
332 struct amdgpu_device *adev = ring->adev;
333
334 if (ring->use_doorbell) {
335 u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
336
337 /* XXX check if swapping is necessary on BE */
338 WRITE_ONCE(*wb, (ring->wptr << 2));
339 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
340 } else {
341 uint64_t wptr = ring->wptr << 2;
342
343 WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
344 lower_32_bits(wptr));
345 WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
346 upper_32_bits(wptr));
347 }
348}
349
350static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
351{
352 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
353 int i;
354
355 for (i = 0; i < count; i++)
356 if (sdma && sdma->burst_nop && (i == 0))
357 amdgpu_ring_write(ring, ring->funcs->nop |
358 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
359 else
360 amdgpu_ring_write(ring, ring->funcs->nop);
361}
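/*
 * Illustrative: with burst_nop set and count == 4, the loop above emits one
 * NOP header carrying COUNT(3) followed by three plain NOP dwords, so all
 * four padding dwords are consumed as a single packet.
 */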
362
363/**
364 * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
365 *
366 * @ring: amdgpu ring pointer
367 * @job: job to retrieve vmid from
368 * @ib: IB object to schedule
369 * @flags: unused
370 *
371 * Schedule an IB in the DMA ring.
372 */
373static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
374 struct amdgpu_job *job,
375 struct amdgpu_ib *ib,
376 uint32_t flags)
377{
378 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
379
380 /* IB packet must end on an 8 DW boundary */
381 sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
382
383 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
384 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
385 /* base must be 32 byte aligned */
386 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
387 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
388 amdgpu_ring_write(ring, ib->length_dw);
389 amdgpu_ring_write(ring, 0);
390 amdgpu_ring_write(ring, 0);
391
392}
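/*
 * Padding arithmetic for the nop insertion above, for illustration: the
 * INDIRECT packet emitted here is 6 dwords, so forcing wptr to 2 (mod 8)
 * makes the packet end on an 8 DW boundary.  E.g. if wptr % 8 == 5 then
 * (2 - 5) & 7 == 5 NOPs are inserted, after which 5 + 5 + 6 == 16 is a
 * multiple of 8.
 */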
393
394static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
395 int mem_space, int hdp,
396 uint32_t addr0, uint32_t addr1,
397 uint32_t ref, uint32_t mask,
398 uint32_t inv)
399{
400 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
401 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
402 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
403 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
404 if (mem_space) {
405 /* memory */
406 amdgpu_ring_write(ring, addr0);
407 amdgpu_ring_write(ring, addr1);
408 } else {
409 /* registers */
410 amdgpu_ring_write(ring, addr0 << 2);
411 amdgpu_ring_write(ring, addr1 << 2);
412 }
413 amdgpu_ring_write(ring, ref); /* reference */
414 amdgpu_ring_write(ring, mask); /* mask */
415 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
416 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
417}
418
419/**
420 * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
421 *
422 * @ring: amdgpu ring pointer
423 *
424 * Emit an hdp flush packet on the requested DMA ring.
425 */
426static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
427{
428 struct amdgpu_device *adev = ring->adev;
429 u32 ref_and_mask = 0;
430 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
431
432 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
433 << (ring->me % adev->sdma.num_inst_per_aid);
434
435 sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
436 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
437 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
438 ref_and_mask, ref_and_mask, 10);
439}
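/*
 * For illustration, assuming four SDMA instances per AID: ring->me == 5
 * gives a shift of 5 % 4 == 1, so ref_and_mask selects the bit one above
 * ref_and_mask_sdma0, i.e. the done/req bit of the second SDMA engine in
 * that AID.
 */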
440
441/**
442 * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
443 *
444 * @ring: amdgpu ring pointer
445 * @addr: address
446 * @seq: sequence number
447 * @flags: fence related flags
448 *
449 * Add a DMA fence packet to the ring to write
450 * the fence seq number and DMA trap packet to generate
451 * an interrupt if needed.
452 */
453static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
454 unsigned flags)
455{
456 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
457 /* write the fence */
458 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
459 /* zero in first two bits */
460 BUG_ON(addr & 0x3);
461 amdgpu_ring_write(ring, lower_32_bits(addr));
462 amdgpu_ring_write(ring, upper_32_bits(addr));
463 amdgpu_ring_write(ring, lower_32_bits(seq));
464
465 /* optionally write high bits as well */
466 if (write64bit) {
467 addr += 4;
468 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
469 /* zero in first two bits */
470 BUG_ON(addr & 0x3);
471 amdgpu_ring_write(ring, lower_32_bits(addr));
472 amdgpu_ring_write(ring, upper_32_bits(addr));
473 amdgpu_ring_write(ring, upper_32_bits(seq));
474 }
475
476 /* generate an interrupt */
477 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
478 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
479}
480
481
482/**
483 * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines
484 *
485 * @adev: amdgpu_device pointer
486 * @inst_mask: mask of dma engine instances to be disabled
487 *
488 * Stop the gfx async dma ring buffers.
489 */
490static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
491 uint32_t inst_mask)
492{
493 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
494 u32 doorbell_offset, doorbell;
495 u32 rb_cntl, ib_cntl, sdma_cntl;
496 int i;
497
498 for_each_inst(i, inst_mask) {
499 sdma[i] = &adev->sdma.instance[i].ring;
500
501 rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
502 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
503 WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
504 ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
505 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
506 WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
507 sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
508 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
509 WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
510
511 if (sdma[i]->use_doorbell) {
512 doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
513 doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
514
515 doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0);
516 doorbell_offset = REG_SET_FIELD(doorbell_offset,
517 SDMA_GFX_DOORBELL_OFFSET,
518 OFFSET, 0);
519 WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
520 WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
521 }
522 }
523}
524
525/**
526 * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines
527 *
528 * @adev: amdgpu_device pointer
529 * @inst_mask: mask of dma engine instances to be disabled
530 *
531 * Stop the compute async dma queues.
532 */
533static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
534 uint32_t inst_mask)
535{
536 /* XXX todo */
537}
538
539/**
540 * sdma_v4_4_2_inst_page_stop - stop the page async dma engines
541 *
542 * @adev: amdgpu_device pointer
543 * @inst_mask: mask of dma engine instances to be disabled
544 *
545 * Stop the page async dma ring buffers.
546 */
547static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
548 uint32_t inst_mask)
549{
550 u32 rb_cntl, ib_cntl;
551 int i;
552
553 for_each_inst(i, inst_mask) {
554 rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
555 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
556 RB_ENABLE, 0);
557 WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
558 ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
559 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
560 IB_ENABLE, 0);
561 WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
562 }
563}
564
565/**
566 * sdma_v4_4_2_inst_ctx_switch_enable - enable/disable the async dma engines context switch
567 *
568 * @adev: amdgpu_device pointer
569 * @enable: enable/disable the DMA MEs context switch.
570 * @inst_mask: mask of dma engine instances to be enabled
571 *
572 * Halt or unhalt the async dma engines context switch.
573 */
574static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev,
575 bool enable, uint32_t inst_mask)
576{
577 u32 f32_cntl, phase_quantum = 0;
578 int i;
579
580 if (amdgpu_sdma_phase_quantum) {
581 unsigned value = amdgpu_sdma_phase_quantum;
582 unsigned unit = 0;
583
584 while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
585 SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
586 value = (value + 1) >> 1;
587 unit++;
588 }
589 if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
590 SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
591 value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
592 SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
593 unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
594 SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
595 WARN_ONCE(1,
596 "clamping sdma_phase_quantum to %uK clock cycles\n",
597 value << unit);
598 }
599 phase_quantum =
600 value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
601 unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
602 }
603
604 for_each_inst(i, inst_mask) {
605 f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
606 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
607 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
608 if (enable && amdgpu_sdma_phase_quantum) {
609 WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
610 WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
611 WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
612 }
613 WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
614
615 /* Extend page fault timeout to avoid interrupt storm */
616 WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
617 }
618}
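/*
 * Worked example of the quantum encoding above, assuming an 8-bit VALUE
 * field: amdgpu_sdma_phase_quantum == 1000 is halved (rounding up) twice,
 * giving value == 250 and unit == 2, so the encoded quantum represents
 * 250 << 2 == 1000 in whatever granularity the UNIT field selects.
 */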
619
620/**
621 * sdma_v4_4_2_inst_enable - enable/disable the async dma engines
622 *
623 * @adev: amdgpu_device pointer
624 * @enable: enable/disable the DMA MEs.
625 * @inst_mask: mask of dma engine instances to be enabled
626 *
627 * Halt or unhalt the async dma engines.
628 */
629static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
630 uint32_t inst_mask)
631{
632 u32 f32_cntl;
633 int i;
634
635 if (!enable) {
636 sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
637 sdma_v4_4_2_inst_rlc_stop(adev, inst_mask);
638 if (adev->sdma.has_page_queue)
639 sdma_v4_4_2_inst_page_stop(adev, inst_mask);
640
641 /* SDMA FW needs to respond to FREEZE requests during reset.
642 * Keep it running during reset */
643 if (!amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
644 return;
645 }
646
647 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
648 return;
649
650 for_each_inst(i, inst_mask) {
651 f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
652 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
653 WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
654 }
655}
656
657/*
658 * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
659 */
660static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
661{
662 /* Set ring buffer size (RB_SIZE is log2 of the size in dwords) */
663 uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
664
665 barrier(); /* work around https://llvm.org/pr42576 */
666 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
667#ifdef __BIG_ENDIAN
668 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
669 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
670 RPTR_WRITEBACK_SWAP_ENABLE, 1);
671#endif
672 return rb_cntl;
673}
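/*
 * For illustration: a 256 KiB ring gives ring->ring_size / 4 == 65536
 * dwords, so rb_bufsz == order_base_2(65536) == 16, which is the value
 * programmed into the RB_SIZE field above.
 */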
674
675/**
676 * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
677 *
678 * @adev: amdgpu_device pointer
679 * @i: instance to resume
680 * @restore: used to restore wptr on restart
681 *
682 * Set up the gfx DMA ring buffers and enable them.
683 * Returns 0 for success, error for failure.
684 */
685static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
686{
687 struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
688 u32 rb_cntl, ib_cntl, wptr_poll_cntl;
689 u32 wb_offset;
690 u32 doorbell;
691 u32 doorbell_offset;
692 u64 wptr_gpu_addr;
693 u64 rwptr;
694
695 wb_offset = (ring->rptr_offs * 4);
696
697 rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
698 rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
699 WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
700
701 /* set the wb address whether it's enabled or not */
702 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
703 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
704 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
705 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
706
707 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
708 RPTR_WRITEBACK_ENABLE, 1);
709
710 WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
711 WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
712
713 if (!restore)
714 ring->wptr = 0;
715
716 /* before programming wptr to a smaller value, set minor_ptr_update first */
717 WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
718
719 /* For the guilty queue, set RPTR to the current wptr to skip bad commands;
720 * if it is not a guilty queue, restore the cached rptr and continue execution.
721 */
722 if (adev->sdma.instance[i].gfx_guilty)
723 rwptr = ring->wptr;
724 else
725 rwptr = ring->cached_rptr;
726
727 /* Initialize the ring buffer's read and write pointers */
728 if (restore) {
729 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(rwptr << 2));
730 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(rwptr << 2));
731 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(rwptr << 2));
732 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(rwptr << 2));
733 } else {
734 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
735 WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
736 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
737 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
738 }
739
740 doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
741 doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
742
743 doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
744 ring->use_doorbell);
745 doorbell_offset = REG_SET_FIELD(doorbell_offset,
746 SDMA_GFX_DOORBELL_OFFSET,
747 OFFSET, ring->doorbell_index);
748 WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
749 WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
750
751 sdma_v4_4_2_ring_set_wptr(ring);
752
753 /* set minor_ptr_update to 0 after wptr programmed */
754 WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
755
756 /* setup the wptr shadow polling */
757 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
758 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
759 lower_32_bits(wptr_gpu_addr));
760 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
761 upper_32_bits(wptr_gpu_addr));
762 wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
763 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
764 SDMA_GFX_RB_WPTR_POLL_CNTL,
765 F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
766 WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
767
768 /* enable DMA RB */
769 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
770 WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
771
772 ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
773 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
774#ifdef __BIG_ENDIAN
775 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
776#endif
777 /* enable DMA IBs */
778 WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
779}
780
781/**
782 * sdma_v4_4_2_page_resume - setup and start the async dma engines
783 *
784 * @adev: amdgpu_device pointer
785 * @i: instance to resume
786 * @restore: whether to restore the cached ring state
787 *
788 * Set up the page DMA ring buffers and enable them.
789 * Returns 0 for success, error for failure.
790 */
791static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
792{
793 struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
794 u32 rb_cntl, ib_cntl, wptr_poll_cntl;
795 u32 wb_offset;
796 u32 doorbell;
797 u32 doorbell_offset;
798 u64 wptr_gpu_addr;
799 u64 rwptr;
800
801 wb_offset = (ring->rptr_offs * 4);
802
803 rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
804 rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
805 WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
806
807 /* For the guilty queue, set RPTR to the current wptr to skip bad commands;
808 * if it is not a guilty queue, restore the cached rptr and continue execution.
809 */
810 if (adev->sdma.instance[i].page_guilty)
811 rwptr = ring->wptr;
812 else
813 rwptr = ring->cached_rptr;
814
815 /* Initialize the ring buffer's read and write pointers */
816 if (restore) {
817 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(rwptr << 2));
818 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(rwptr << 2));
819 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(rwptr << 2));
820 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(rwptr << 2));
821 } else {
822 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
823 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
824 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
825 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
826 }
827
828 /* set the wb address whether it's enabled or not */
829 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
830 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
831 WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
832 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
833
834 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
835 RPTR_WRITEBACK_ENABLE, 1);
836
837 WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
838 WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
839
840 if (!restore)
841 ring->wptr = 0;
842
843 /* before programming wptr to a smaller value, set minor_ptr_update first */
844 WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
845
846 doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
847 doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
848
849 doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
850 ring->use_doorbell);
851 doorbell_offset = REG_SET_FIELD(doorbell_offset,
852 SDMA_PAGE_DOORBELL_OFFSET,
853 OFFSET, ring->doorbell_index);
854 WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
855 WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
856
857 /* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */
858 sdma_v4_4_2_page_ring_set_wptr(ring);
859
860 /* set minor_ptr_update to 0 after wptr programmed */
861 WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
862
863 /* setup the wptr shadow polling */
864 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
865 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
866 lower_32_bits(wptr_gpu_addr));
867 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
868 upper_32_bits(wptr_gpu_addr));
869 wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
870 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
871 SDMA_PAGE_RB_WPTR_POLL_CNTL,
872 F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
873 WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
874
875 /* enable DMA RB */
876 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
877 WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
878
879 ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
880 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
881#ifdef __BIG_ENDIAN
882 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
883#endif
884 /* enable DMA IBs */
885 WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
886}
887
888static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
889{
890
891}
892
893/**
894 * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines
895 *
896 * @adev: amdgpu_device pointer
897 * @inst_mask: mask of dma engine instances to be enabled
898 *
899 * Set up the compute DMA queues and enable them.
900 * Returns 0 for success, error for failure.
901 */
902static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev,
903 uint32_t inst_mask)
904{
905 sdma_v4_4_2_init_pg(adev);
906
907 return 0;
908}
909
910/**
911 * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
912 *
913 * @adev: amdgpu_device pointer
914 * @inst_mask: mask of dma engine instances to be enabled
915 *
916 * Loads the sDMA0/1 ucode.
917 * Returns 0 for success, -EINVAL if the ucode is not available.
918 */
919static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
920 uint32_t inst_mask)
921{
922 const struct sdma_firmware_header_v1_0 *hdr;
923 const __le32 *fw_data;
924 u32 fw_size;
925 int i, j;
926
927 /* halt the MEs */
928 sdma_v4_4_2_inst_enable(adev, false, inst_mask);
929
930 for_each_inst(i, inst_mask) {
931 if (!adev->sdma.instance[i].fw)
932 return -EINVAL;
933
934 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
935 amdgpu_ucode_print_sdma_hdr(&hdr->header);
936 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
937
938 fw_data = (const __le32 *)
939 (adev->sdma.instance[i].fw->data +
940 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
941
942 WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
943
944 for (j = 0; j < fw_size; j++)
945 WREG32_SDMA(i, regSDMA_UCODE_DATA,
946 le32_to_cpup(fw_data++));
947
948 WREG32_SDMA(i, regSDMA_UCODE_ADDR,
949 adev->sdma.instance[i].fw_version);
950 }
951
952 return 0;
953}
954
955/**
956 * sdma_v4_4_2_inst_start - setup and start the async dma engines
957 *
958 * @adev: amdgpu_device pointer
959 * @inst_mask: mask of dma engine instances to be enabled
960 * @restore: whether to restore the cached ring state
961 *
962 * Set up the DMA engines and enable them.
963 * Returns 0 for success, error for failure.
964 */
965static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
966 uint32_t inst_mask, bool restore)
967{
968 struct amdgpu_ring *ring;
969 uint32_t tmp_mask;
970 int i, r = 0;
971
972 if (amdgpu_sriov_vf(adev)) {
973 sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
974 sdma_v4_4_2_inst_enable(adev, false, inst_mask);
975 } else {
976 /* bypass sdma microcode loading on Gopher */
977 if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
978 adev->sdma.instance[0].fw) {
979 r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
980 if (r)
981 return r;
982 }
983
984 /* unhalt the MEs */
985 sdma_v4_4_2_inst_enable(adev, true, inst_mask);
986 /* enable sdma ring preemption */
987 sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
988 }
989
990 /* start the gfx rings and rlc compute queues */
991 tmp_mask = inst_mask;
992 for_each_inst(i, tmp_mask) {
993 uint32_t temp;
994
995 WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
996 sdma_v4_4_2_gfx_resume(adev, i, restore);
997 if (adev->sdma.has_page_queue)
998 sdma_v4_4_2_page_resume(adev, i, restore);
999
1000 /* set utc l1 enable flag always to 1 */
1001 temp = RREG32_SDMA(i, regSDMA_CNTL);
1002 temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
1003 WREG32_SDMA(i, regSDMA_CNTL, temp);
1004
1005 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
1006 /* enable context empty interrupt during initialization */
1007 temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
1008 WREG32_SDMA(i, regSDMA_CNTL, temp);
1009 }
1010 if (!amdgpu_sriov_vf(adev)) {
1011 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1012 /* unhalt engine */
1013 temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
1014 temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
1015 WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
1016 }
1017 }
1018 }
1019
1020 if (amdgpu_sriov_vf(adev)) {
1021 sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
1022 sdma_v4_4_2_inst_enable(adev, true, inst_mask);
1023 } else {
1024 r = sdma_v4_4_2_inst_rlc_resume(adev, inst_mask);
1025 if (r)
1026 return r;
1027 }
1028
1029 tmp_mask = inst_mask;
1030 for_each_inst(i, tmp_mask) {
1031 ring = &adev->sdma.instance[i].ring;
1032
1033 r = amdgpu_ring_test_helper(ring);
1034 if (r)
1035 return r;
1036
1037 if (adev->sdma.has_page_queue) {
1038 struct amdgpu_ring *page = &adev->sdma.instance[i].page;
1039
1040 r = amdgpu_ring_test_helper(page);
1041 if (r)
1042 return r;
1043 }
1044 }
1045
1046 return r;
1047}
1048
1049/**
1050 * sdma_v4_4_2_ring_test_ring - simple async dma engine test
1051 *
1052 * @ring: amdgpu_ring structure holding ring information
1053 *
1054 * Test the DMA engine by using it to write a value
1055 * to memory.
1056 * Returns 0 for success, error for failure.
1057 */
1058static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
1059{
1060 struct amdgpu_device *adev = ring->adev;
1061 unsigned i;
1062 unsigned index;
1063 int r;
1064 u32 tmp;
1065 u64 gpu_addr;
1066
1067 r = amdgpu_device_wb_get(adev, &index);
1068 if (r)
1069 return r;
1070
1071 gpu_addr = adev->wb.gpu_addr + (index * 4);
1072 tmp = 0xCAFEDEAD;
1073 adev->wb.wb[index] = cpu_to_le32(tmp);
1074
1075 r = amdgpu_ring_alloc(ring, 5);
1076 if (r)
1077 goto error_free_wb;
1078
1079 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1080 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1081 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1082 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1083 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1084 amdgpu_ring_write(ring, 0xDEADBEEF);
1085 amdgpu_ring_commit(ring);
1086
1087 for (i = 0; i < adev->usec_timeout; i++) {
1088 tmp = le32_to_cpu(adev->wb.wb[index]);
1089 if (tmp == 0xDEADBEEF)
1090 break;
1091 udelay(1);
1092 }
1093
1094 if (i >= adev->usec_timeout)
1095 r = -ETIMEDOUT;
1096
1097error_free_wb:
1098 amdgpu_device_wb_free(adev, index);
1099 return r;
1100}
1101
1102/**
1103 * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
1104 *
1105 * @ring: amdgpu_ring structure holding ring information
1106 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1107 *
1108 * Test a simple IB in the DMA ring.
1109 * Returns 0 on success, error on failure.
1110 */
1111static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1112{
1113 struct amdgpu_device *adev = ring->adev;
1114 struct amdgpu_ib ib;
1115 struct dma_fence *f = NULL;
1116 unsigned index;
1117 long r;
1118 u32 tmp = 0;
1119 u64 gpu_addr;
1120
1121 r = amdgpu_device_wb_get(adev, &index);
1122 if (r)
1123 return r;
1124
1125 gpu_addr = adev->wb.gpu_addr + (index * 4);
1126 tmp = 0xCAFEDEAD;
1127 adev->wb.wb[index] = cpu_to_le32(tmp);
1128 memset(&ib, 0, sizeof(ib));
1129 r = amdgpu_ib_get(adev, NULL, 256,
1130 AMDGPU_IB_POOL_DIRECT, &ib);
1131 if (r)
1132 goto err0;
1133
1134 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1135 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1136 ib.ptr[1] = lower_32_bits(gpu_addr);
1137 ib.ptr[2] = upper_32_bits(gpu_addr);
1138 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1139 ib.ptr[4] = 0xDEADBEEF;
1140 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1141 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1142 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1143 ib.length_dw = 8;
1144
1145 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1146 if (r)
1147 goto err1;
1148
1149 r = dma_fence_wait_timeout(f, false, timeout);
1150 if (r == 0) {
1151 r = -ETIMEDOUT;
1152 goto err1;
1153 } else if (r < 0) {
1154 goto err1;
1155 }
1156 tmp = le32_to_cpu(adev->wb.wb[index]);
1157 if (tmp == 0xDEADBEEF)
1158 r = 0;
1159 else
1160 r = -EINVAL;
1161
1162err1:
1163 amdgpu_ib_free(&ib, NULL);
1164 dma_fence_put(f);
1165err0:
1166 amdgpu_device_wb_free(adev, index);
1167 return r;
1168}
1169
1170
1171/**
1172 * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
1173 *
1174 * @ib: indirect buffer to fill with commands
1175 * @pe: addr of the page entry
1176 * @src: src addr to copy from
1177 * @count: number of page entries to update
1178 *
1179 * Update PTEs by copying them from the GART using sDMA.
1180 */
1181static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
1182 uint64_t pe, uint64_t src,
1183 unsigned count)
1184{
1185 unsigned bytes = count * 8;
1186
1187 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1188 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1189 ib->ptr[ib->length_dw++] = bytes - 1;
1190 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1191 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1192 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1193 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1194 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1195
1196}
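/*
 * Note, for illustration: the seven dwords emitted above (header, byte
 * count, endian word, src lo/hi, pe lo/hi) are what the
 * .copy_pte_num_dw = 7 entry in sdma_v4_4_2_vm_pte_funcs accounts for.
 */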
1197
1198/**
1199 * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
1200 *
1201 * @ib: indirect buffer to fill with commands
1202 * @pe: addr of the page entry
1203 * @value: initial value to write into the page entries
1204 * @count: number of page entries to update
1205 * @incr: increase next addr by incr bytes
1206 *
1207 * Update PTEs by writing them manually using sDMA.
1208 */
1209static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1210 uint64_t value, unsigned count,
1211 uint32_t incr)
1212{
1213 unsigned ndw = count * 2;
1214
1215 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1216 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1217 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1218 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1219 ib->ptr[ib->length_dw++] = ndw - 1;
1220 for (; ndw > 0; ndw -= 2) {
1221 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1222 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1223 value += incr;
1224 }
1225}
1226
1227/**
1228 * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
1229 *
1230 * @ib: indirect buffer to fill with commands
1231 * @pe: addr of the page entry
1232 * @addr: dst addr to write into pe
1233 * @count: number of page entries to update
1234 * @incr: increase next addr by incr bytes
1235 * @flags: access flags
1236 *
1237 * Update the page tables using sDMA.
1238 */
1239static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1240 uint64_t pe,
1241 uint64_t addr, unsigned count,
1242 uint32_t incr, uint64_t flags)
1243{
1244 /* for physically contiguous pages (vram) */
1245 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1246 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1247 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1248 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1249 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1250 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1251 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1252 ib->ptr[ib->length_dw++] = incr; /* increment size */
1253 ib->ptr[ib->length_dw++] = 0;
1254 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1255}
1256
1257/**
1258 * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
1259 *
1260 * @ring: amdgpu_ring structure holding ring information
1261 * @ib: indirect buffer to fill with padding
1262 */
1263static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1264{
1265 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1266 u32 pad_count;
1267 int i;
1268
1269 pad_count = (-ib->length_dw) & 7;
1270 for (i = 0; i < pad_count; i++)
1271 if (sdma && sdma->burst_nop && (i == 0))
1272 ib->ptr[ib->length_dw++] =
1273 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1274 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1275 else
1276 ib->ptr[ib->length_dw++] =
1277 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1278}
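/*
 * Illustrative: (-ib->length_dw) & 7 pads the IB up to the next multiple
 * of 8 dwords; e.g. length_dw == 13 gives pad_count == 3, so the IB ends
 * at 16 dwords.
 */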
1279
1280
1281/**
1282 * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
1283 *
1284 * @ring: amdgpu_ring pointer
1285 *
1286 * Make sure all previous operations are completed.
1287 */
1288static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1289{
1290 uint32_t seq = ring->fence_drv.sync_seq;
1291 uint64_t addr = ring->fence_drv.gpu_addr;
1292
1293 /* wait for idle */
1294 sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
1295 addr & 0xfffffffc,
1296 upper_32_bits(addr) & 0xffffffff,
1297 seq, 0xffffffff, 4);
1298}
1299
1300
1301/**
1302 * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
1303 *
1304 * @ring: amdgpu_ring pointer
1305 * @vmid: vmid number to use
1306 * @pd_addr: address
1307 *
1308 * Update the page table base and flush the VM TLB
1309 * using sDMA.
1310 */
1311static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1312 unsigned vmid, uint64_t pd_addr)
1313{
1314 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1315}
1316
1317static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
1318 uint32_t reg, uint32_t val)
1319{
1320 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1321 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1322 amdgpu_ring_write(ring, reg);
1323 amdgpu_ring_write(ring, val);
1324}
1325
1326static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1327 uint32_t val, uint32_t mask)
1328{
1329 sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1330}
1331
1332static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
1333{
1334 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1335 case IP_VERSION(4, 4, 2):
1336 case IP_VERSION(4, 4, 5):
1337 return false;
1338 default:
1339 return false;
1340 }
1341}
1342
1343static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
1344 .stop_kernel_queue = &sdma_v4_4_2_stop_queue,
1345 .start_kernel_queue = &sdma_v4_4_2_restore_queue,
1346 .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine,
1347};
1348
1349static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
1350 .copy_pte_num_dw = 7,
1351 .copy_pte = sdma_v4_4_2_vm_copy_pte,
1352
1353 .write_pte = sdma_v4_4_2_vm_write_pte,
1354 .set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
1355};
1356
1357static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
1358{
1359 struct amdgpu_device *adev = ip_block->adev;
1360 int r;
1361
1362 r = sdma_v4_4_2_init_microcode(adev);
1363 if (r)
1364 return r;
1365
1366 /* TODO: Page queue breaks driver reload under SRIOV */
1367 if (sdma_v4_4_2_fw_support_paging_queue(adev))
1368 adev->sdma.has_page_queue = true;
1369
1370 sdma_v4_4_2_set_ring_funcs(adev);
1371 sdma_v4_4_2_set_buffer_funcs(adev);
1372 amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v4_4_2_vm_pte_funcs);
1373 sdma_v4_4_2_set_irq_funcs(adev);
1374 sdma_v4_4_2_set_ras_funcs(adev);
1375 return 0;
1376}
1377
1378#if 0
1379static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1380 void *err_data,
1381 struct amdgpu_iv_entry *entry);
1382#endif
1383
1384static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
1385{
1386 struct amdgpu_device *adev = ip_block->adev;
1387#if 0
1388 struct ras_ih_if ih_info = {
1389 .cb = sdma_v4_4_2_process_ras_data_cb,
1390 };
1391#endif
1392 if (!amdgpu_persistent_edc_harvesting_supported(adev))
1393 amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
1394
1395 /* The initialization is done in the late_init stage to ensure that the SMU
1396 * initialization and capability setup are completed before we check the SDMA
1397 * reset capability
1398 */
1399 sdma_v4_4_2_update_reset_mask(adev);
1400
1401 return 0;
1402}
1403
1404static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
1405{
1406 struct amdgpu_ring *ring;
1407 int r, i;
1408 struct amdgpu_device *adev = ip_block->adev;
1409 u32 aid_id;
1410 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
1411 uint32_t *ptr;
1412
1413 /* SDMA trap event */
1414 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1415 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1416 SDMA0_4_0__SRCID__SDMA_TRAP,
1417 &adev->sdma.trap_irq);
1418 if (r)
1419 return r;
1420 }
1421
1422 /* SDMA SRAM ECC event */
1423 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1424 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1425 SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1426 &adev->sdma.ecc_irq);
1427 if (r)
1428 return r;
1429 }
1430
1431 /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
1432 for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1433 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1434 SDMA0_4_0__SRCID__SDMA_VM_HOLE,
1435 &adev->sdma.vm_hole_irq);
1436 if (r)
1437 return r;
1438
1439 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1440 SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
1441 &adev->sdma.doorbell_invalid_irq);
1442 if (r)
1443 return r;
1444
1445 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1446 SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
1447 &adev->sdma.pool_timeout_irq);
1448 if (r)
1449 return r;
1450
1451 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1452 SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
1453 &adev->sdma.srbm_write_irq);
1454 if (r)
1455 return r;
1456
1457 r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1458 SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
1459 &adev->sdma.ctxt_empty_irq);
1460 if (r)
1461 return r;
1462 }
1463
1464 for (i = 0; i < adev->sdma.num_instances; i++) {
1465 mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
1466 /* Initialize guilty flags for GFX and PAGE queues */
1467 adev->sdma.instance[i].gfx_guilty = false;
1468 adev->sdma.instance[i].page_guilty = false;
1469 adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
1470
1471 ring = &adev->sdma.instance[i].ring;
1472 ring->ring_obj = NULL;
1473 ring->use_doorbell = true;
1474 aid_id = adev->sdma.instance[i].aid_id;
1475
1476 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1477 ring->use_doorbell?"true":"false");
1478
1479 /* doorbell size is 2 dwords, get DWORD offset */
1480 ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1481 ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1482
1483 sprintf(ring->name, "sdma%d.%d", aid_id,
1484 i % adev->sdma.num_inst_per_aid);
1485 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1486 AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1487 AMDGPU_RING_PRIO_DEFAULT, NULL);
1488 if (r)
1489 return r;
1490
1491 if (adev->sdma.has_page_queue) {
1492 ring = &adev->sdma.instance[i].page;
1493 ring->ring_obj = NULL;
1494 ring->use_doorbell = true;
1495
1496 /* doorbell index of page queue is assigned right after
1497 * gfx queue on the same instance
1498 */
1499 ring->doorbell_index =
1500 (adev->doorbell_index.sdma_engine[i] + 1) << 1;
1501 ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1502
1503 sprintf(ring->name, "page%d.%d", aid_id,
1504 i % adev->sdma.num_inst_per_aid);
1505 r = amdgpu_ring_init(adev, ring, 1024,
1506 &adev->sdma.trap_irq,
1507 AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1508 AMDGPU_RING_PRIO_DEFAULT, NULL);
1509 if (r)
1510 return r;
1511 }
1512 }
1513
1514 adev->sdma.supported_reset =
1515 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
1516
1517 if (amdgpu_sdma_ras_sw_init(adev)) {
1518 dev_err(adev->dev, "fail to initialize sdma ras block\n");
1519 return -EINVAL;
1520 }
1521
1522 /* Allocate memory for SDMA IP Dump buffer */
1523 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
1524 if (ptr)
1525 adev->sdma.ip_dump = ptr;
1526 else
1527 DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
1528
1529 r = amdgpu_sdma_sysfs_reset_mask_init(adev);
1530 if (r)
1531 return r;
1532
1533 return r;
1534}
1535
1536static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
1537{
1538 struct amdgpu_device *adev = ip_block->adev;
1539 int i;
1540
1541 for (i = 0; i < adev->sdma.num_instances; i++) {
1542 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1543 if (adev->sdma.has_page_queue)
1544 amdgpu_ring_fini(&adev->sdma.instance[i].page);
1545 }
1546
1547 amdgpu_sdma_sysfs_reset_mask_fini(adev);
1548 if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
1549 amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
1550 amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
1551 amdgpu_sdma_destroy_inst_ctx(adev, true);
1552 else
1553 amdgpu_sdma_destroy_inst_ctx(adev, false);
1554
1555 kfree(adev->sdma.ip_dump);
1556
1557 return 0;
1558}
1559
1560static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
1561{
1562 int r;
1563 struct amdgpu_device *adev = ip_block->adev;
1564 uint32_t inst_mask;
1565
1566 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1567 if (!amdgpu_sriov_vf(adev))
1568 sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
1569
1570 r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
1571
1572 return r;
1573}
1574
1575static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
1576{
1577 struct amdgpu_device *adev = ip_block->adev;
1578 uint32_t inst_mask;
1579 int i;
1580
1581 if (amdgpu_sriov_vf(adev))
1582 return 0;
1583
1584 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1585 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
1586 for (i = 0; i < adev->sdma.num_instances; i++) {
1587 amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1588 AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1589 }
1590 }
1591
1592 sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
1593 sdma_v4_4_2_inst_enable(adev, false, inst_mask);
1594
1595 return 0;
1596}
1597
1598static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1599 enum amd_clockgating_state state);
1600
1601static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
1602{
1603 struct amdgpu_device *adev = ip_block->adev;
1604
1605 if (amdgpu_in_reset(adev))
1606 sdma_v4_4_2_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
1607
1608 return sdma_v4_4_2_hw_fini(ip_block);
1609}
1610
1611static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
1612{
1613 return sdma_v4_4_2_hw_init(ip_block);
1614}
1615
1616static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block)
1617{
1618 struct amdgpu_device *adev = ip_block->adev;
1619 u32 i;
1620
1621 for (i = 0; i < adev->sdma.num_instances; i++) {
1622 u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
1623
1624 if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
1625 return false;
1626 }
1627
1628 return true;
1629}
1630
1631static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
1632{
1633 unsigned i, j;
1634 u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
1635 struct amdgpu_device *adev = ip_block->adev;
1636
1637 for (i = 0; i < adev->usec_timeout; i++) {
1638 for (j = 0; j < adev->sdma.num_instances; j++) {
1639 sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
1640 if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
1641 break;
1642 }
1643 if (j == adev->sdma.num_instances)
1644 return 0;
1645 udelay(1);
1646 }
1647 return -ETIMEDOUT;
1648}
1649
1650static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
1651{
1652 /* todo */
1653
1654 return 0;
1655}
1656
1657static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
1658{
1659 uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
1660 uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
1661
1662 /* Check if the SELECTED bit is set */
1663 return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
1664}
1665
1666static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring,
1667 unsigned int vmid,
1668 struct amdgpu_fence *timedout_fence)
1669{
1670 struct amdgpu_device *adev = ring->adev;
1671 u32 id = ring->me;
1672 int r;
1673
1674 amdgpu_amdkfd_suspend(adev, true);
1675 r = amdgpu_sdma_reset_engine(adev, id, false);
1676 amdgpu_amdkfd_resume(adev, true);
1677 return r;
1678}
1679
1680static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
1681{
1682 struct amdgpu_device *adev = ring->adev;
1683 u32 instance_id = ring->me;
1684 u32 inst_mask;
1685 uint64_t rptr;
1686
1687 if (amdgpu_sriov_vf(adev))
1688 return -EINVAL;
1689
1690 /* Check if this queue is the guilty one */
1691 adev->sdma.instance[instance_id].gfx_guilty =
1692 sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
1693 if (adev->sdma.has_page_queue)
1694 adev->sdma.instance[instance_id].page_guilty =
1695 sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
1696
1697 /* Cache the rptr before reset; after the reset,
1698 * all of the registers will be reset to 0.
1699 */
1700 rptr = amdgpu_ring_get_rptr(ring);
1701 ring->cached_rptr = rptr;
1702 /* Cache the rptr for the page queue if it exists */
1703 if (adev->sdma.has_page_queue) {
1704 struct amdgpu_ring *page_ring = &adev->sdma.instance[instance_id].page;
1705 rptr = amdgpu_ring_get_rptr(page_ring);
1706 page_ring->cached_rptr = rptr;
1707 }
1708
1709 /* stop queue */
1710 inst_mask = 1 << ring->me;
1711 sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
1712 if (adev->sdma.has_page_queue)
1713 sdma_v4_4_2_inst_page_stop(adev, inst_mask);
1714
1715 return 0;
1716}
1717
1718static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
1719{
1720 struct amdgpu_device *adev = ring->adev;
1721 u32 inst_mask;
1722 int i, r;
1723
1724 inst_mask = 1 << ring->me;
1725 udelay(50);
1726
1727 for (i = 0; i < adev->usec_timeout; i++) {
1728 if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
1729 break;
1730 udelay(1);
1731 }
1732
1733 if (i == adev->usec_timeout) {
1734 dev_err(adev->dev, "timed out waiting for SDMA%d unhalt after reset\n",
1735 ring->me);
1736 return -ETIMEDOUT;
1737 }
1738
1739 r = sdma_v4_4_2_inst_start(adev, inst_mask, true);
1740
1741 return r;
1742}
1743
1744static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev,
1745 u32 instance_id)
1746{
1747 /* For SDMA 4.x, use the existing DPM interface for backward compatibility;
1748 * we need to convert the logical instance ID to the physical instance ID before reset.
1749 */
1750 return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
1751}
1752
1753static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
1754 struct amdgpu_irq_src *source,
1755 unsigned type,
1756 enum amdgpu_interrupt_state state)
1757{
1758 u32 sdma_cntl;
1759
1760 sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1761 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
1762 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1763 WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1764
1765 return 0;
1766}
1767
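/*
 * Trap interrupt handler: map the IV entry back to an SDMA instance
 * (the client id selects the instance within an AID, the node id selects
 * the AID) and run fence processing on the matching gfx or page ring.
 */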
1768static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
1769 struct amdgpu_irq_src *source,
1770 struct amdgpu_iv_entry *entry)
1771{
1772 uint32_t instance, i;
1773
1774 DRM_DEBUG("IH: SDMA trap\n");
1775 instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1776
1777 /* The client id gives the SDMA instance within an AID. To identify the exact
1778 * SDMA instance, the interrupt entry provides the node id, which corresponds
1779 * to the AID instance. Match the node id with the AID id of the SDMA instance. */
1780 for (i = instance; i < adev->sdma.num_instances;
1781 i += adev->sdma.num_inst_per_aid) {
1782 if (adev->sdma.instance[i].aid_id ==
1783 node_id_to_phys_map[entry->node_id])
1784 break;
1785 }
1786
1787 if (i >= adev->sdma.num_instances) {
1788 dev_WARN_ONCE(
1789 adev->dev, 1,
1790 "Couldn't find the right sdma instance in trap handler");
1791 return 0;
1792 }
1793
1794 switch (entry->ring_id) {
1795 case 0:
1796 amdgpu_fence_process(&adev->sdma.instance[i].ring);
1797 break;
1798 case 1:
1799 amdgpu_fence_process(&adev->sdma.instance[i].page);
1800 break;
1801 default:
1802 break;
1803 }
1804 return 0;
1805}
1806
1807#if 0
1808static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1809 void *err_data,
1810 struct amdgpu_iv_entry *entry)
1811{
1812 int instance;
1813
1814 /* When "Full RAS" is enabled, the per-IP interrupt sources should
1815 * be disabled and the driver should only look for the aggregated
1816 * interrupt via sync flood
1817 */
1818 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
1819 goto out;
1820
1821 instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1822 if (instance < 0)
1823 goto out;
1824
1825 amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
1826
1827out:
1828 return AMDGPU_RAS_SUCCESS;
1829}
1830#endif
1831
1832static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
1833 struct amdgpu_irq_src *source,
1834 struct amdgpu_iv_entry *entry)
1835{
1836 int instance;
1837
1838 DRM_ERROR("Illegal instruction in SDMA command stream\n");
1839
1840 instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1841 if (instance < 0)
1842 return 0;
1843
1844 switch (entry->ring_id) {
1845 case 0:
1846 drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
1847 break;
1848 }
1849 return 0;
1850}
1851
1852static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
1853 struct amdgpu_irq_src *source,
1854 unsigned type,
1855 enum amdgpu_interrupt_state state)
1856{
1857 u32 sdma_cntl;
1858
1859 sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1860 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
1861 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1862 WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1863
1864 return 0;
1865}
1866
1867static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
1868 struct amdgpu_iv_entry *entry)
1869{
1870 int instance;
1871 struct amdgpu_task_info *task_info;
1872 u64 addr;
1873
1874 instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1875 if (instance < 0 || instance >= adev->sdma.num_instances) {
1876 dev_err(adev->dev, "sdma instance invalid %d\n", instance);
1877 return -EINVAL;
1878 }
1879
1880 addr = (u64)entry->src_data[0] << 12;
1881 addr |= ((u64)entry->src_data[1] & 0xf) << 44;
1882
1883 dev_dbg_ratelimited(adev->dev,
1884 "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u pasid:%u\n",
1885 instance, addr, entry->src_id, entry->ring_id, entry->vmid,
1886 entry->pasid);
1887
1888 task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
1889 if (task_info) {
1890 dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
1891 task_info->process_name, task_info->tgid,
1892 task_info->task.comm, task_info->task.pid);
1893 amdgpu_vm_put_task_info(task_info);
1894 }
1895
1896 return 0;
1897}
1898
1899static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
1900 struct amdgpu_irq_src *source,
1901 struct amdgpu_iv_entry *entry)
1902{
1903 dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
1904 sdma_v4_4_2_print_iv_entry(adev, entry);
1905 return 0;
1906}
1907
1908static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
1909 struct amdgpu_irq_src *source,
1910 struct amdgpu_iv_entry *entry)
1911{
1913 dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
1914 sdma_v4_4_2_print_iv_entry(adev, entry);
1915 return 0;
1916}
1917
1918static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
1919 struct amdgpu_irq_src *source,
1920 struct amdgpu_iv_entry *entry)
1921{
1922 dev_dbg_ratelimited(adev->dev,
1923 "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
1924 sdma_v4_4_2_print_iv_entry(adev, entry);
1925 return 0;
1926}
1927
1928static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
1929 struct amdgpu_irq_src *source,
1930 struct amdgpu_iv_entry *entry)
1931{
1932 dev_dbg_ratelimited(adev->dev,
1933 "SDMA gets a Register Write SRBM_WRITE command in a non-privileged command buffer\n");
1934 sdma_v4_4_2_print_iv_entry(adev, entry);
1935 return 0;
1936}
1937
1938static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
1939 struct amdgpu_irq_src *source,
1940 struct amdgpu_iv_entry *entry)
1941{
1942 /* There is nothing useful to be done here, only kept for debug */
1943 dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt\n");
1944 sdma_v4_4_2_print_iv_entry(adev, entry);
1945 return 0;
1946}
1947
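/*
 * Enable or disable SDMA memory light sleep (LS) for every instance in
 * inst_mask by toggling the MEM_POWER_OVERRIDE bit in SDMA_POWER_CNTL;
 * the register is only written when the value actually changes.
 */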
1948static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
1949 struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1950{
1951 uint32_t data, def;
1952 int i;
1953
1954 /* leave as default if it is not driver controlled */
1955 if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS))
1956 return;
1957
1958 if (enable) {
1959 for_each_inst(i, inst_mask) {
1960 /* 1 - not override: enable sdma mem light sleep */
1961 def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1962 data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1963 if (def != data)
1964 WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1965 }
1966 } else {
1967 for_each_inst(i, inst_mask) {
1968 /* 0 - override: disable sdma mem light sleep */
1969 def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1970 data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1971 if (def != data)
1972 WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1973 }
1974 }
1975}
1976
1977static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
1978 struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1979{
1980 uint32_t data, def;
1981 int i;
1982
1983 /* leave as default if it is not driver controlled */
1984 if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG))
1985 return;
1986
1987 if (enable) {
1988 for_each_inst(i, inst_mask) {
1989 def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
1990 data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1991 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1992 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1993 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1994 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1995 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1996 if (def != data)
1997 WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
1998 }
1999 } else {
2000 for_each_inst(i, inst_mask) {
2001 def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
2002 data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2003 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2004 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2005 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2006 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2007 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2008 if (def != data)
2009 WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
2010 }
2011 }
2012}
2013
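/*
 * Clockgating control for the IP block: apply medium grain clock gating
 * and light sleep to every SDMA instance depending on whether the
 * requested state is AMD_CG_STATE_GATE. Skipped entirely under SR-IOV.
 */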
2014static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2015 enum amd_clockgating_state state)
2016{
2017 struct amdgpu_device *adev = ip_block->adev;
2018 uint32_t inst_mask;
2019
2020 if (amdgpu_sriov_vf(adev))
2021 return 0;
2022
2023 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2024
2025 sdma_v4_4_2_inst_update_medium_grain_clock_gating(
2026 adev, state == AMD_CG_STATE_GATE, inst_mask);
2027 sdma_v4_4_2_inst_update_medium_grain_light_sleep(
2028 adev, state == AMD_CG_STATE_GATE, inst_mask);
2029 return 0;
2030}
2031
2032static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
2033 enum amd_powergating_state state)
2034{
2035 return 0;
2036}
2037
2038static void sdma_v4_4_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2039{
2040 struct amdgpu_device *adev = ip_block->adev;
2041 int data;
2042
2043 if (amdgpu_sriov_vf(adev))
2044 *flags = 0;
2045
2046 /* AMD_CG_SUPPORT_SDMA_MGCG */
2047 data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL));
2048 if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK))
2049 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
2050
2051 /* AMD_CG_SUPPORT_SDMA_LS */
2052 data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_POWER_CNTL));
2053 if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
2054 *flags |= AMD_CG_SUPPORT_SDMA_LS;
2055}
2056
2057static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
2058{
2059 struct amdgpu_device *adev = ip_block->adev;
2060 int i, j;
2061 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
2062 uint32_t instance_offset;
2063
2064 if (!adev->sdma.ip_dump)
2065 return;
2066
2067 drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
2068 for (i = 0; i < adev->sdma.num_instances; i++) {
2069 instance_offset = i * reg_count;
2070 drm_printf(p, "\nInstance:%d\n", i);
2071
2072 for (j = 0; j < reg_count; j++)
2073 drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name,
2074 adev->sdma.ip_dump[instance_offset + j]);
2075 }
2076}
2077
2078static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
2079{
2080 struct amdgpu_device *adev = ip_block->adev;
2081 int i, j;
2082 uint32_t instance_offset;
2083 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
2084
2085 if (!adev->sdma.ip_dump)
2086 return;
2087
2088 for (i = 0; i < adev->sdma.num_instances; i++) {
2089 instance_offset = i * reg_count;
2090 for (j = 0; j < reg_count; j++)
2091 adev->sdma.ip_dump[instance_offset + j] =
2092 RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
2093 sdma_reg_list_4_4_2[j].reg_offset));
2094 }
2095}
2096
2097const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
2098 .name = "sdma_v4_4_2",
2099 .early_init = sdma_v4_4_2_early_init,
2100 .late_init = sdma_v4_4_2_late_init,
2101 .sw_init = sdma_v4_4_2_sw_init,
2102 .sw_fini = sdma_v4_4_2_sw_fini,
2103 .hw_init = sdma_v4_4_2_hw_init,
2104 .hw_fini = sdma_v4_4_2_hw_fini,
2105 .suspend = sdma_v4_4_2_suspend,
2106 .resume = sdma_v4_4_2_resume,
2107 .is_idle = sdma_v4_4_2_is_idle,
2108 .wait_for_idle = sdma_v4_4_2_wait_for_idle,
2109 .soft_reset = sdma_v4_4_2_soft_reset,
2110 .set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
2111 .set_powergating_state = sdma_v4_4_2_set_powergating_state,
2112 .get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
2113 .dump_ip_state = sdma_v4_4_2_dump_ip_state,
2114 .print_ip_state = sdma_v4_4_2_print_ip_state,
2115};
2116
2117static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
2118 .type = AMDGPU_RING_TYPE_SDMA,
2119 .align_mask = 0xff,
2120 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2121 .support_64bit_ptrs = true,
2122 .get_rptr = sdma_v4_4_2_ring_get_rptr,
2123 .get_wptr = sdma_v4_4_2_ring_get_wptr,
2124 .set_wptr = sdma_v4_4_2_ring_set_wptr,
2125 .emit_frame_size =
2126 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
2127 3 + /* hdp invalidate */
2128 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
2129 /* sdma_v4_4_2_ring_emit_vm_flush */
2130 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2131 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2132 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
2133 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
2134 .emit_ib = sdma_v4_4_2_ring_emit_ib,
2135 .emit_fence = sdma_v4_4_2_ring_emit_fence,
2136 .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
2137 .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
2138 .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
2139 .test_ring = sdma_v4_4_2_ring_test_ring,
2140 .test_ib = sdma_v4_4_2_ring_test_ib,
2141 .insert_nop = sdma_v4_4_2_ring_insert_nop,
2142 .pad_ib = sdma_v4_4_2_ring_pad_ib,
2143 .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
2144 .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
2145 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2146 .reset = sdma_v4_4_2_reset_queue,
2147};
2148
2149static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
2150 .type = AMDGPU_RING_TYPE_SDMA,
2151 .align_mask = 0xff,
2152 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2153 .support_64bit_ptrs = true,
2154 .get_rptr = sdma_v4_4_2_ring_get_rptr,
2155 .get_wptr = sdma_v4_4_2_page_ring_get_wptr,
2156 .set_wptr = sdma_v4_4_2_page_ring_set_wptr,
2157 .emit_frame_size =
2158 6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
2159 3 + /* hdp invalidate */
2160 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
2161 /* sdma_v4_4_2_ring_emit_vm_flush */
2162 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2163 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2164 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
2165 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
2166 .emit_ib = sdma_v4_4_2_ring_emit_ib,
2167 .emit_fence = sdma_v4_4_2_ring_emit_fence,
2168 .emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
2169 .emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
2170 .emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
2171 .test_ring = sdma_v4_4_2_ring_test_ring,
2172 .test_ib = sdma_v4_4_2_ring_test_ib,
2173 .insert_nop = sdma_v4_4_2_ring_insert_nop,
2174 .pad_ib = sdma_v4_4_2_ring_pad_ib,
2175 .emit_wreg = sdma_v4_4_2_ring_emit_wreg,
2176 .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
2177 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2178 .reset = sdma_v4_4_2_reset_queue,
2179};
2180
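/*
 * Hook up the ring function tables for the gfx (and optional page) rings
 * of every instance and derive each instance's AID id from its physical
 * instance number.
 */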
2181static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
2182{
2183 int i, dev_inst;
2184
2185 for (i = 0; i < adev->sdma.num_instances; i++) {
2186 adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
2187 adev->sdma.instance[i].ring.me = i;
2188 if (adev->sdma.has_page_queue) {
2189 adev->sdma.instance[i].page.funcs =
2190 &sdma_v4_4_2_page_ring_funcs;
2191 adev->sdma.instance[i].page.me = i;
2192 }
2193
2194 dev_inst = GET_INST(SDMA0, i);
2195 /* AID to which SDMA belongs depends on physical instance */
2196 adev->sdma.instance[i].aid_id =
2197 dev_inst / adev->sdma.num_inst_per_aid;
2198 }
2199}
2200
2201static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
2202 .set = sdma_v4_4_2_set_trap_irq_state,
2203 .process = sdma_v4_4_2_process_trap_irq,
2204};
2205
2206static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
2207 .process = sdma_v4_4_2_process_illegal_inst_irq,
2208};
2209
2210static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
2211 .set = sdma_v4_4_2_set_ecc_irq_state,
2212 .process = amdgpu_sdma_process_ecc_irq,
2213};
2214
2215static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
2216 .process = sdma_v4_4_2_process_vm_hole_irq,
2217};
2218
2219static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
2220 .process = sdma_v4_4_2_process_doorbell_invalid_irq,
2221};
2222
2223static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
2224 .process = sdma_v4_4_2_process_pool_timeout_irq,
2225};
2226
2227static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
2228 .process = sdma_v4_4_2_process_srbm_write_irq,
2229};
2230
2231static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
2232 .process = sdma_v4_4_2_process_ctxt_empty_irq,
2233};
2234
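/*
 * Register the interrupt source callbacks and size each source's type
 * count to the number of SDMA instances.
 */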
2235static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
2236{
2237 adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
2238 adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
2239 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
2240 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
2241 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
2242 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
2243 adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
2244
2245 adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
2246 adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
2247 adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
2248 adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
2249 adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
2250 adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
2251 adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
2252 adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
2253}
2254
2255/**
2256 * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
2257 *
2258 * @ib: indirect buffer to copy to
2259 * @src_offset: src GPU address
2260 * @dst_offset: dst GPU address
2261 * @byte_count: number of bytes to xfer
2262 * @copy_flags: copy flags for the buffers
2263 *
2264 * Copy GPU buffers using the DMA engine.
2265 * Used by the amdgpu ttm implementation to move pages if
2266 * registered as the asic copy callback.
2267 */
2268static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
2269 uint64_t src_offset,
2270 uint64_t dst_offset,
2271 uint32_t byte_count,
2272 uint32_t copy_flags)
2273{
2274 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2275 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2276 SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
2277 ib->ptr[ib->length_dw++] = byte_count - 1;
2278 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2279 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2280 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2281 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2282 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2283}
2284
2285/**
2286 * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
2287 *
2288 * @ib: indirect buffer to copy to
2289 * @src_data: value to write to buffer
2290 * @dst_offset: dst GPU address
2291 * @byte_count: number of bytes to xfer
2292 *
2293 * Fill GPU buffers using the DMA engine.
2294 */
2295static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
2296 uint32_t src_data,
2297 uint64_t dst_offset,
2298 uint32_t byte_count)
2299{
2300 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2301 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2302 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2303 ib->ptr[ib->length_dw++] = src_data;
2304 ib->ptr[ib->length_dw++] = byte_count - 1;
2305}
2306
2307static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
2308 .copy_max_bytes = 1 << 30,
2309 .copy_num_dw = 7,
2310 .emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
2311
2312 .fill_max_bytes = 1 << 30,
2313 .fill_num_dw = 5,
2314 .emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
2315};
2316
2317static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
2318{
2319 adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
2320 if (adev->sdma.has_page_queue)
2321 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
2322 else
2323 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2324}
2325
2326/**
2327 * sdma_v4_4_2_update_reset_mask - update reset mask for SDMA
2328 * @adev: Pointer to the AMDGPU device structure
2329 *
2330 * This function updates the reset mask for SDMA and sets the supported
2331 * reset types based on the IP version and firmware versions.
2332 *
2333 */
2334static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
2335{
2336 /* per queue reset not supported for SRIOV */
2337 if (amdgpu_sriov_vf(adev))
2338 return;
2339
2340 /*
2341 * The user queue relies on the MEC fw and the pmfw when the SDMA queue does a reset,
2342 * so both of them need to be checked here to skip old MEC and PMFW versions.
2343 */
2344 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2345 case IP_VERSION(9, 4, 3):
2346 case IP_VERSION(9, 4, 4):
2347 if ((adev->gfx.mec_fw_version >= 0xb0) &&
2348 amdgpu_dpm_reset_sdma_is_supported(adev) &&
2349 !adev->debug_disable_gpu_ring_reset)
2350 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2351 break;
2352 case IP_VERSION(9, 5, 0):
2353 if ((adev->gfx.mec_fw_version >= 0xf) &&
2354 amdgpu_dpm_reset_sdma_is_supported(adev) &&
2355 !adev->debug_disable_gpu_ring_reset)
2356 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2357 break;
2358 default:
2359 break;
2360 }
2362}
2363
2364const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
2365 .type = AMD_IP_BLOCK_TYPE_SDMA,
2366 .major = 4,
2367 .minor = 4,
2368 .rev = 2,
2369 .funcs = &sdma_v4_4_2_ip_funcs,
2370};
2371
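/*
 * XCP (partition) suspend/resume helpers: resume programs the golden
 * registers (bare metal only) and starts the instances in inst_mask;
 * suspend drops the ECC interrupt references when RAS is enabled and
 * disables context switching and the engines.
 */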
2372static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
2373{
2374 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2375 int r;
2376
2377 if (!amdgpu_sriov_vf(adev))
2378 sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
2379
2380 r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
2381
2382 return r;
2383}
2384
2385static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
2386{
2387 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2388 uint32_t tmp_mask = inst_mask;
2389 int i;
2390
2391 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2392 for_each_inst(i, tmp_mask) {
2393 amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
2394 AMDGPU_SDMA_IRQ_INSTANCE0 + i);
2395 }
2396 }
2397
2398 sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
2399 sdma_v4_4_2_inst_enable(adev, false, inst_mask);
2400
2401 return 0;
2402}
2403
2404struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = {
2405 .suspend = &sdma_v4_4_2_xcp_suspend,
2406 .resume = &sdma_v4_4_2_xcp_resume
2407};
2408
2409static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] = {
2410 {AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO, regSDMA_UE_ERR_STATUS_HI),
2411 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
2412};
2413
2414static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] = {
2415 {AMDGPU_SDMA_MBANK_DATA_BUF0, "SDMA_MBANK_DATA_BUF0"},
2416 {AMDGPU_SDMA_MBANK_DATA_BUF1, "SDMA_MBANK_DATA_BUF1"},
2417 {AMDGPU_SDMA_MBANK_DATA_BUF2, "SDMA_MBANK_DATA_BUF2"},
2418 {AMDGPU_SDMA_MBANK_DATA_BUF3, "SDMA_MBANK_DATA_BUF3"},
2419 {AMDGPU_SDMA_MBANK_DATA_BUF4, "SDMA_MBANK_DATA_BUF4"},
2420 {AMDGPU_SDMA_MBANK_DATA_BUF5, "SDMA_MBANK_DATA_BUF5"},
2421 {AMDGPU_SDMA_MBANK_DATA_BUF6, "SDMA_MBANK_DATA_BUF6"},
2422 {AMDGPU_SDMA_MBANK_DATA_BUF7, "SDMA_MBANK_DATA_BUF7"},
2423 {AMDGPU_SDMA_MBANK_DATA_BUF8, "SDMA_MBANK_DATA_BUF8"},
2424 {AMDGPU_SDMA_MBANK_DATA_BUF9, "SDMA_MBANK_DATA_BUF9"},
2425 {AMDGPU_SDMA_MBANK_DATA_BUF10, "SDMA_MBANK_DATA_BUF10"},
2426 {AMDGPU_SDMA_MBANK_DATA_BUF11, "SDMA_MBANK_DATA_BUF11"},
2427 {AMDGPU_SDMA_MBANK_DATA_BUF12, "SDMA_MBANK_DATA_BUF12"},
2428 {AMDGPU_SDMA_MBANK_DATA_BUF13, "SDMA_MBANK_DATA_BUF13"},
2429 {AMDGPU_SDMA_MBANK_DATA_BUF14, "SDMA_MBANK_DATA_BUF14"},
2430 {AMDGPU_SDMA_MBANK_DATA_BUF15, "SDMA_MBANK_DATA_BUF15"},
2431 {AMDGPU_SDMA_UCODE_BUF, "SDMA_UCODE_BUF"},
2432 {AMDGPU_SDMA_RB_CMD_BUF, "SDMA_RB_CMD_BUF"},
2433 {AMDGPU_SDMA_IB_CMD_BUF, "SDMA_IB_CMD_BUF"},
2434 {AMDGPU_SDMA_UTCL1_RD_FIFO, "SDMA_UTCL1_RD_FIFO"},
2435 {AMDGPU_SDMA_UTCL1_RDBST_FIFO, "SDMA_UTCL1_RDBST_FIFO"},
2436 {AMDGPU_SDMA_UTCL1_WR_FIFO, "SDMA_UTCL1_WR_FIFO"},
2437 {AMDGPU_SDMA_DATA_LUT_FIFO, "SDMA_DATA_LUT_FIFO"},
2438 {AMDGPU_SDMA_SPLIT_DAT_BUF, "SDMA_SPLIT_DAT_BUF"},
2439};
2440
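/*
 * Query the uncorrectable error count of one SDMA instance from the RAS
 * status registers and attribute it to the socket/die the instance
 * belongs to. Correctable error counts are not supported on this IP.
 */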
2441static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
2442 uint32_t sdma_inst,
2443 void *ras_err_status)
2444{
2445 struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
2446 uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2447 unsigned long ue_count = 0;
2448 struct amdgpu_smuio_mcm_config_info mcm_info = {
2449 .socket_id = adev->smuio.funcs->get_socket_id(adev),
2450 .die_id = adev->sdma.instance[sdma_inst].aid_id,
2451 };
2452
2453 /* sdma v4_4_2 doesn't support querying CE counts */
2454 amdgpu_ras_inst_query_ras_error_count(adev,
2455 sdma_v4_2_2_ue_reg_list,
2456 ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2457 sdma_v4_4_2_ras_memory_list,
2458 ARRAY_SIZE(sdma_v4_4_2_ras_memory_list),
2459 sdma_dev_inst,
2460 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
2461 &ue_count);
2462
2463 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
2464}
2465
2466static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
2467 void *ras_err_status)
2468{
2469 uint32_t inst_mask;
2470 int i = 0;
2471
2472 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2473 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2474 for_each_inst(i, inst_mask)
2475 sdma_v4_4_2_inst_query_ras_error_count(adev, i, ras_err_status);
2476 } else {
2477 dev_warn(adev->dev, "SDMA RAS is not supported\n");
2478 }
2479}
2480
2481static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev,
2482 uint32_t sdma_inst)
2483{
2484 uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2485
2486 amdgpu_ras_inst_reset_ras_error_count(adev,
2487 sdma_v4_2_2_ue_reg_list,
2488 ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2489 sdma_dev_inst);
2490}
2491
2492static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev)
2493{
2494 uint32_t inst_mask;
2495 int i = 0;
2496
2497 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2498 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2499 for_each_inst(i, inst_mask)
2500 sdma_v4_4_2_inst_reset_ras_error_count(adev, i);
2501 } else {
2502 dev_warn(adev->dev, "SDMA RAS is not supported\n");
2503 }
2504}
2505
2506static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
2507 .query_ras_error_count = sdma_v4_4_2_query_ras_error_count,
2508 .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
2509};
2510
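/*
 * ACA bank parser: decode the bank info and log the error against the UE
 * or CE cache. UEs are counted as a single error; CEs use the error count
 * taken from the MISC0 register.
 */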
2511static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
2512 enum aca_smu_type type, void *data)
2513{
2514 struct aca_bank_info info;
2515 u64 misc0;
2516 int ret;
2517
2518 ret = aca_bank_info_decode(bank, &info);
2519 if (ret)
2520 return ret;
2521
2522 misc0 = bank->regs[ACA_REG_IDX_MISC0];
2523 switch (type) {
2524 case ACA_SMU_TYPE_UE:
2525 bank->aca_err_type = ACA_ERROR_TYPE_UE;
2526 ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
2527 1ULL);
2528 break;
2529 case ACA_SMU_TYPE_CE:
2530 bank->aca_err_type = ACA_ERROR_TYPE_CE;
2531 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
2532 ACA_REG__MISC0__ERRCNT(misc0));
2533 break;
2534 default:
2535 return -EINVAL;
2536 }
2537
2538 return ret;
2539}
2540
2541/* CODE_SDMA0 - CODE_SDMA4, see the SMU driver interface header file */
2542static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
2543
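/*
 * A bank is treated as an SDMA bank only if the low instance id in its
 * IPID register matches the AID MCA SMU instance and its error code passes
 * the check against the SDMA error codes listed above.
 */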
2544static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
2545 enum aca_smu_type type, void *data)
2546{
2547 u32 instlo;
2548
2549 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
2550 instlo &= GENMASK(31, 1);
2551
2552 if (instlo != mmSMNAID_AID0_MCA_SMU)
2553 return false;
2554
2555 if (aca_bank_check_error_codes(handle->adev, bank,
2556 sdma_v4_4_2_err_codes,
2557 ARRAY_SIZE(sdma_v4_4_2_err_codes)))
2558 return false;
2559
2560 return true;
2561}
2562
2563static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
2564 .aca_bank_parser = sdma_v4_4_2_aca_bank_parser,
2565 .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
2566};
2567
2568static const struct aca_info sdma_v4_4_2_aca_info = {
2569 .hwip = ACA_HWIP_TYPE_SMU,
2570 .mask = ACA_ERROR_UE_MASK,
2571 .bank_ops = &sdma_v4_4_2_aca_bank_ops,
2572};
2573
2574static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
2575{
2576 int r;
2577
2578 r = amdgpu_sdma_ras_late_init(adev, ras_block);
2579 if (r)
2580 return r;
2581
2582 return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA,
2583 &sdma_v4_4_2_aca_info, NULL);
2584}
2585
2586static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
2587 .ras_block = {
2588 .hw_ops = &sdma_v4_4_2_ras_hw_ops,
2589 .ras_late_init = sdma_v4_4_2_ras_late_init,
2590 },
2591};
2592
2593static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev)
2594{
2595 adev->sdma.ras = &sdma_v4_4_2_ras;
2596}