/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "iceland_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

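/*
 * Each entry below is a {register, AND mask, OR value} triple:
 * amdgpu_device_program_register_sequence() clears the masked bits and
 * ORs in the new value (or writes the value outright when the mask is
 * 0xffffffff).
 */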
static const u32 golden_settings_iceland_a11[] = {
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};

static const u32 iceland_mgcg_cgcg_init[] = {
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}

/**
 * sdma_v2_4_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_sdma.bin", chip_name);
		else
			err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
						   AMDGPU_UCODE_REQUIRED,
						   "amdgpu/%s_sdma1.bin", chip_name);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}

out:
	if (err) {
		pr_err("sdma_v2_4: Failed to load firmware \"%s_sdma%s.bin\"\n",
		       chip_name, i == 0 ? "" : "1");
		for (i = 0; i < adev->sdma.num_instances; i++)
			amdgpu_ucode_release(&adev->sdma.instance[i].fw);
	}
	return err;
}

/**
 * sdma_v2_4_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
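	/*
	 * The hardware keeps the ring pointers as byte offsets while the
	 * ring code tracks them in dwords, hence the >> 2 conversion here
	 * (and the matching << 2 in set_wptr below).
	 */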
	/* XXX check if swapping is necessary on BE */
	return *ring->rptr_cpu_addr >> 2;
}

/**
 * sdma_v2_4_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

	return wptr;
}

/**
 * sdma_v2_4_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}

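/*
 * When the firmware supports it (feature_version >= 20), a burst NOP
 * carries a COUNT field telling the engine to treat the following
 * COUNT dwords as padding too, so a whole run of padding costs a
 * single packet header.
 */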
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/*
	 * The IB packet must end on an 8-dword boundary: pad so that wptr
	 * lands on (8n + 2), since the INDIRECT_BUFFER packet below is
	 * 6 dwords long.
	 */
	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring->me == 0)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

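	/*
	 * POLL_REGMEM with the hdp_flush bit set writes the reference value
	 * to GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE until the
	 * masked value matches it, i.e. until the HDP flush has completed.
	 */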
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address of the fence
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence sequence
 * number, followed by a DMA trap packet to generate an interrupt
 * if needed (VI).
 */
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v2_4_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
}

/**
 * sdma_v2_4_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v2_4_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v2_4_gfx_stop(adev);
		sdma_v2_4_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v2_4_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size; RB_SIZE is the log2 of the size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}

	sdma_v2_4_enable(adev, true);
	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * sdma_v2_4_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v2_4_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
	int r;

	/* halt the engine before programming */
	sdma_v2_4_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v2_4_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v2_4_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v2_4_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

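	/*
	 * Grab a writeback slot, seed it with 0xCAFEDEAD, then have the
	 * engine overwrite it with 0xDEADBEEF via a WRITE_LINEAR packet
	 * and poll from the CPU side until the new value shows up.
	 */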
	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

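	/*
	 * Same idea as the ring test, but the write is submitted through
	 * an indirect buffer and completion is detected via the fence
	 * rather than by busy-waiting on the ring.
	 */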
	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
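	/* each PTE is 8 bytes wide */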
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v2_4_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

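	/*
	 * Pad the IB up to the next multiple of 8 dwords, using a single
	 * burst NOP when the firmware supports it.
	 */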
	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/*
	 * wait for idle: poll the fence writeback location in memory until
	 * the most recently emitted sequence number has been signaled
	 */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v2_4_vm_copy_pte,

	.write_pte = sdma_v2_4_vm_write_pte,
	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};

static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	r = sdma_v2_4_init_microcode(adev);
	if (r)
		return r;

	sdma_v2_4_set_ring_funcs(adev);
	sdma_v2_4_set_buffer_funcs(adev);
	amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v2_4_vm_pte_funcs);
	sdma_v2_4_set_irq_funcs(adev);

	return 0;
}

static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = ip_block->adev;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA SRBM write */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v2_4_free_microcode(adev);
	return 0;
}

static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	sdma_v2_4_init_golden_registers(adev);

	return sdma_v2_4_start(adev);
}

static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
	sdma_v2_4_enable(ip_block->adev, false);

	return 0;
}

static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
	return sdma_v2_4_hw_fini(ip_block);
}

static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
{
	return sdma_v2_4_hw_init(ip_block);
}

static bool sdma_v2_4_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = ip_block->adev;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = ip_block->adev;
	u32 status = RREG32(mmSRBM_STATUS2);
	u32 tmp;

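	/*
	 * For each engine that reports busy, clear its F32 HALT bit and
	 * request an SRBM soft reset of that SDMA block.
	 */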
	if (status & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (status & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

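	/* ring_id encodes the SDMA instance in bits 1:0 and the queue in bits 3:2 */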
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;

	if (instance_id <= 1 && queue_id == 0)
		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}

static int sdma_v2_4_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	/* XXX handled via the smc on VI */
	return 0;
}

static int sdma_v2_4_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.name = "sdma_v2_4",
	.early_init = sdma_v2_4_early_init,
	.sw_init = sdma_v2_4_sw_init,
	.sw_fini = sdma_v2_4_sw_fini,
	.hw_init = sdma_v2_4_hw_init,
	.hw_fini = sdma_v2_4_hw_fini,
	.suspend = sdma_v2_4_suspend,
	.resume = sdma_v2_4_resume,
	.is_idle = sdma_v2_4_is_idle,
	.wait_for_idle = sdma_v2_4_wait_for_idle,
	.soft_reset = sdma_v2_4_soft_reset,
	.set_clockgating_state = sdma_v2_4_set_clockgating_state,
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};

static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.secure_submission_supported = true,
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
	.set_wptr = sdma_v2_4_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v2_4_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v2_4_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
	.emit_ib = sdma_v2_4_ring_emit_ib,
	.emit_fence = sdma_v2_4_ring_emit_fence,
	.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.insert_nop = sdma_v2_4_ring_insert_nop,
	.pad_ib = sdma_v2_4_ring_pad_ib,
	.emit_wreg = sdma_v2_4_ring_emit_wreg,
};

static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
	.set = sdma_v2_4_set_trap_irq_state,
	.process = sdma_v2_4_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
	.process = sdma_v2_4_process_illegal_inst_irq,
};

static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}

/**
 * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: unused
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

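/*
 * The per-packet transfer limit of 0x1fffff bytes (2 MiB - 1) is the
 * driver-chosen maximum for the COPY_LINEAR and CONST_FILL packets'
 * single-dword byte-count field; larger requests are split by the
 * callers in the buffer-funcs layer.
 */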
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 7,
	.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};

static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 4,
	.rev = 0,
	.funcs = &sdma_v2_4_ip_funcs,
};