1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37
38#include "soc15_common.h"
39#include "soc15.h"
40#include "navi10_sdma_pkt_open.h"
41#include "nbio_v2_3.h"
42#include "sdma_common.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
55MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
56
57#define SDMA1_REG_OFFSET 0x600
58#define SDMA0_HYP_DEC_REG_START 0x5880
59#define SDMA0_HYP_DEC_REG_END 0x5893
60#define SDMA1_HYP_DEC_REG_OFFSET 0x20
61
62static const struct amdgpu_hwip_reg_entry sdma_reg_list_5_0[] = {
63 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS_REG),
64 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS1_REG),
65 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS2_REG),
66 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_STATUS3_REG),
67 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UCODE_CHECKSUM),
68 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH_HI),
69 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RB_RPTR_FETCH),
70 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_STATUS),
71 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_STATUS),
72 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK0),
73 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_RD_XNACK1),
74 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK0),
75 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_UTCL1_WR_XNACK1),
76 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_CNTL),
77 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR),
78 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_RPTR_HI),
79 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR),
80 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_RB_WPTR_HI),
81 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_OFFSET),
82 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_LO),
83 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_BASE_HI),
84 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_CNTL),
85 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_RPTR),
86 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_IB_SUB_REMAIN),
87 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_GFX_DUMMY_REG),
88 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_CNTL),
89 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR),
90 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_RPTR_HI),
91 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR),
92 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_RB_WPTR_HI),
93 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_OFFSET),
94 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_LO),
95 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_IB_BASE_HI),
96 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_PAGE_DUMMY_REG),
97 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_CNTL),
98 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR),
99 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_RPTR_HI),
100 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR),
101 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_RB_WPTR_HI),
102 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_OFFSET),
103 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_LO),
104 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_IB_BASE_HI),
105 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_RLC0_DUMMY_REG),
106 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_INT_STATUS),
107 SOC15_REG_ENTRY_STR(GC, 0, mmSDMA0_VM_CNTL),
108 SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2)
109};
110
111static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
112static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
113static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
114static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring);
115static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring);
116
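/*
 * Golden register settings: each entry pairs a register with an AND mask and
 * an OR value.  soc15_program_register_sequence() applies them as a
 * read-modify-write, so (roughly) only the bits covered by the mask are
 * overridden with the new value; everything else is left untouched.
 */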
117static const struct soc15_reg_golden golden_settings_sdma_5[] = {
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
120 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
121 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
122 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
123 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
124 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
125 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
126 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
127 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
128 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
129 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
130 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
142};
143
144static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
145 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
146 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
147 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
148 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
149 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
150 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
151 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
152 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
153 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
154 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
155 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
156 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
157 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
161 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
162 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
163 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
164 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
165};
166
167static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
168 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
169 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
170};
171
172static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
173 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
174 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
175};
176
177static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
178 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
179 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
180 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
181 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
182 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
183 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
184};
185
186static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
187 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
188 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
189 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
190 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
191 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
192 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
193 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
194 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
195 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
196 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
197 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
198 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
199 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
200 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
201 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
202 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
203 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
204 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
205 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
206 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
207 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
208 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
209 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
210 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
211 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
212 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
213 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
214 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
215};
216
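/*
 * Register addressing helper: registers in the HYP_DEC range
 * (SDMA0_HYP_DEC_REG_START..SDMA0_HYP_DEC_REG_END) sit in a separate
 * aperture with a small per-instance stride (SDMA1_HYP_DEC_REG_OFFSET),
 * while all other SDMA registers use the regular GC aperture with a
 * per-instance stride of SDMA1_REG_OFFSET.
 */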
217static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
218{
219 u32 base;
220
221 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
222 internal_offset <= SDMA0_HYP_DEC_REG_END) {
223 base = adev->reg_offset[GC_HWIP][0][1];
224 if (instance == 1)
225 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
226 } else {
227 base = adev->reg_offset[GC_HWIP][0][0];
228 if (instance == 1)
229 internal_offset += SDMA1_REG_OFFSET;
230 }
231
232 return base + internal_offset;
233}
234
235static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
236{
237 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
238 case IP_VERSION(5, 0, 0):
239 soc15_program_register_sequence(adev,
240 golden_settings_sdma_5,
241 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
242 soc15_program_register_sequence(adev,
243 golden_settings_sdma_nv10,
244 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
245 break;
246 case IP_VERSION(5, 0, 2):
247 soc15_program_register_sequence(adev,
248 golden_settings_sdma_5,
249 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
250 soc15_program_register_sequence(adev,
251 golden_settings_sdma_nv14,
252 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
253 break;
254 case IP_VERSION(5, 0, 5):
255 if (amdgpu_sriov_vf(adev))
256 soc15_program_register_sequence(adev,
257 golden_settings_sdma_5_sriov,
258 (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
259 else
260 soc15_program_register_sequence(adev,
261 golden_settings_sdma_5,
262 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
263 soc15_program_register_sequence(adev,
264 golden_settings_sdma_nv12,
265 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
266 break;
267 case IP_VERSION(5, 0, 1):
268 soc15_program_register_sequence(adev,
269 golden_settings_sdma_cyan_skillfish,
270 (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
271 break;
272 default:
273 break;
274 }
275}
276
277/**
278 * sdma_v5_0_init_microcode - load ucode images from disk
279 *
280 * @adev: amdgpu_device pointer
281 *
282 * Use the firmware interface to load the ucode images into
283 * the driver (not loaded into hw).
284 * Returns 0 on success, error on failure.
285 */
286
// emulation only, won't work on a real chip
// a real Navi10 chip needs to use PSP to load the firmware
289static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
290{
291 int ret, i;
292
293 for (i = 0; i < adev->sdma.num_instances; i++) {
294 ret = amdgpu_sdma_init_microcode(adev, i, false);
295 if (ret)
296 return ret;
297 }
298
	return 0;
300}
301
302static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring,
303 uint64_t addr)
304{
305 unsigned ret;
306
307 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
308 amdgpu_ring_write(ring, lower_32_bits(addr));
309 amdgpu_ring_write(ring, upper_32_bits(addr));
310 amdgpu_ring_write(ring, 1);
	/* this is the offset we need to patch later */
312 ret = ring->wptr & ring->buf_mask;
313 /* insert dummy here and patch it later */
314 amdgpu_ring_write(ring, 0);
315
316 return ret;
317}
318
319/**
320 * sdma_v5_0_ring_get_rptr - get the current read pointer
321 *
322 * @ring: amdgpu ring pointer
323 *
324 * Get the current rptr from the hardware (NAVI10+).
325 */
326static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
327{
328 u64 *rptr;
329
330 /* XXX check if swapping is necessary on BE */
331 rptr = (u64 *)ring->rptr_cpu_addr;
332
333 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
334 return ((*rptr) >> 2);
335}
336
337/**
338 * sdma_v5_0_ring_get_wptr - get the current write pointer
339 *
340 * @ring: amdgpu ring pointer
341 *
342 * Get the current wptr from the hardware (NAVI10+).
343 */
344static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
345{
346 struct amdgpu_device *adev = ring->adev;
347 u64 wptr;
348
349 if (ring->use_doorbell) {
350 /* XXX check if swapping is necessary on BE */
351 wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
352 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
353 } else {
354 wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
355 wptr = wptr << 32;
356 wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
357 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
358 }
359
360 return wptr >> 2;
361}
362
363/**
364 * sdma_v5_0_ring_set_wptr - commit the write pointer
365 *
366 * @ring: amdgpu ring pointer
367 *
368 * Write the wptr back to the hardware (NAVI10+).
369 */
370static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
371{
372 struct amdgpu_device *adev = ring->adev;
373
374 DRM_DEBUG("Setting write pointer\n");
375 if (ring->use_doorbell) {
376 DRM_DEBUG("Using doorbell -- "
377 "wptr_offs == 0x%08x "
378 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
379 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
380 ring->wptr_offs,
381 lower_32_bits(ring->wptr << 2),
382 upper_32_bits(ring->wptr << 2));
383 /* XXX check if swapping is necessary on BE */
384 atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
385 ring->wptr << 2);
386 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
387 ring->doorbell_index, ring->wptr << 2);
388 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
389 } else {
390 DRM_DEBUG("Not using doorbell -- "
391 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
392 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
393 ring->me,
394 lower_32_bits(ring->wptr << 2),
395 ring->me,
396 upper_32_bits(ring->wptr << 2));
397 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
398 ring->me, mmSDMA0_GFX_RB_WPTR),
399 lower_32_bits(ring->wptr << 2));
400 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
401 ring->me, mmSDMA0_GFX_RB_WPTR_HI),
402 upper_32_bits(ring->wptr << 2));
403 }
404}
405
406static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
407{
408 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
409 int i;
410
411 for (i = 0; i < count; i++)
412 if (sdma && sdma->burst_nop && (i == 0))
413 amdgpu_ring_write(ring, ring->funcs->nop |
414 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
415 else
416 amdgpu_ring_write(ring, ring->funcs->nop);
417}
418
419/**
420 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
421 *
422 * @ring: amdgpu ring pointer
423 * @job: job to retrieve vmid from
424 * @ib: IB object to schedule
425 * @flags: unused
426 *
427 * Schedule an IB in the DMA ring (NAVI10).
428 */
429static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
430 struct amdgpu_job *job,
431 struct amdgpu_ib *ib,
432 uint32_t flags)
433{
434 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
435 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
436
	/* An IB packet must end on an 8-DW boundary -- the next dword
	 * must be on an 8-dword boundary. Our IB packet below is 6
	 * dwords long, so add x NOPs such that, in modular arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is
	 * (wptr + 6 + x) % 8 == 0.
	 * The expression below is a solution for x.
	 */
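	/*
	 * Worked example: if wptr % 8 == 5, then x = (2 - 5) & 7 = 5 NOPs,
	 * and 5 + 6 + 5 = 16, a multiple of 8.
	 */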
445 sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
446
447 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
448 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
449 /* base must be 32 byte aligned */
450 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
451 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
452 amdgpu_ring_write(ring, ib->length_dw);
453 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
454 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
455}
456
457/**
458 * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
459 *
460 * @ring: amdgpu ring pointer
461 *
462 * flush the IB by graphics cache rinse.
463 */
464static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
465{
466 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
467 SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
468 SDMA_GCR_GLI_INV(1);
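	/*
	 * Note: the GCR control value is split across the request payload
	 * below -- bits 15:0 go into payload dword 2 and bits 18:16 into
	 * payload dword 3.
	 */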
469
	/* flush the entire L0/L1/L2 cache; this can be optimized based on performance requirements */
471 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
472 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
473 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
474 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
475 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
476 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
477 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
478 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
479}
480
481/**
482 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
483 *
484 * @ring: amdgpu ring pointer
485 *
486 * Emit an hdp flush packet on the requested DMA ring.
487 */
488static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
489{
490 struct amdgpu_device *adev = ring->adev;
491 u32 ref_and_mask = 0;
492 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
493
494 if (ring->me == 0)
495 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
496 else
497 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
498
499 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
500 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
501 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
502 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
503 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
504 amdgpu_ring_write(ring, ref_and_mask); /* reference */
505 amdgpu_ring_write(ring, ref_and_mask); /* mask */
506 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
507 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
508}
509
510/**
511 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
512 *
513 * @ring: amdgpu ring pointer
514 * @addr: address
515 * @seq: sequence number
516 * @flags: fence related flags
517 *
518 * Add a DMA fence packet to the ring to write
519 * the fence seq number and DMA trap packet to generate
520 * an interrupt if needed (NAVI10).
521 */
522static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
523 unsigned flags)
524{
525 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
526 /* write the fence */
527 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
528 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
529 /* zero in first two bits */
530 BUG_ON(addr & 0x3);
531 amdgpu_ring_write(ring, lower_32_bits(addr));
532 amdgpu_ring_write(ring, upper_32_bits(addr));
533 amdgpu_ring_write(ring, lower_32_bits(seq));
534
535 /* optionally write high bits as well */
536 if (write64bit) {
537 addr += 4;
538 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
539 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
540 /* zero in first two bits */
541 BUG_ON(addr & 0x3);
542 amdgpu_ring_write(ring, lower_32_bits(addr));
543 amdgpu_ring_write(ring, upper_32_bits(addr));
544 amdgpu_ring_write(ring, upper_32_bits(seq));
545 }
546
547 if (flags & AMDGPU_FENCE_FLAG_INT) {
548 /* generate an interrupt */
549 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
550 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
551 }
552}
553
554
555/**
556 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
557 *
558 * @adev: amdgpu_device pointer
559 * @inst_mask: mask of dma engine instances to be disabled
560 * Stop the gfx async dma ring buffers (NAVI10).
561 */
562static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
563{
564 u32 rb_cntl, ib_cntl;
565 int i;
566
567 for_each_inst(i, inst_mask) {
568 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
569 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
570 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
571 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
572 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
573 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
574 }
575}
576
577/**
578 * sdma_v5_0_rlc_stop - stop the compute async dma engines
579 *
580 * @adev: amdgpu_device pointer
581 *
582 * Stop the compute async dma queues (NAVI10).
583 */
584static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
585{
586 /* XXX todo */
587}
588
589/**
 * sdma_v5_0_ctx_switch_enable - enable/disable the async dma engines context switch
591 *
592 * @adev: amdgpu_device pointer
593 * @enable: enable/disable the DMA MEs context switch.
594 *
595 * Halt or unhalt the async dma engines context switch (NAVI10).
596 */
597static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
598{
599 u32 f32_cntl = 0, phase_quantum = 0;
600 int i;
601
602 if (amdgpu_sdma_phase_quantum) {
603 unsigned value = amdgpu_sdma_phase_quantum;
604 unsigned unit = 0;
605
606 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
607 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
608 value = (value + 1) >> 1;
609 unit++;
610 }
611 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
612 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
613 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
614 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
615 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
616 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
617 WARN_ONCE(1,
618 "clamping sdma_phase_quantum to %uK clock cycles\n",
619 value << unit);
620 }
621 phase_quantum =
622 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
623 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
624 }
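	/*
	 * Illustrative encoding (assuming an 8-bit VALUE field): a quantum of
	 * 1000 is halved twice to value = 250, unit = 2, i.e. 250 << 2 = 1000.
	 */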
625
626 for (i = 0; i < adev->sdma.num_instances; i++) {
627 if (!amdgpu_sriov_vf(adev)) {
628 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
629 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
630 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
631 }
632
633 if (enable && amdgpu_sdma_phase_quantum) {
634 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
635 phase_quantum);
636 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
637 phase_quantum);
638 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
639 phase_quantum);
640 }
641 if (!amdgpu_sriov_vf(adev))
642 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
643 }
644
645}
646
647/**
 * sdma_v5_0_enable - halt or unhalt the async dma engines
649 *
650 * @adev: amdgpu_device pointer
651 * @enable: enable/disable the DMA MEs.
652 *
653 * Halt or unhalt the async dma engines (NAVI10).
654 */
655static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
656{
657 u32 f32_cntl;
658 int i;
659 uint32_t inst_mask;
660
661 inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
662 if (!enable) {
		sdma_v5_0_gfx_stop(adev, inst_mask);
664 sdma_v5_0_rlc_stop(adev);
665 }
666
667 if (amdgpu_sriov_vf(adev))
668 return;
669
670 for (i = 0; i < adev->sdma.num_instances; i++) {
671 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
672 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
673 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
674 }
675}
676
677/**
678 * sdma_v5_0_gfx_resume_instance - start/restart a certain sdma engine
679 *
680 * @adev: amdgpu_device pointer
681 * @i: instance
 * @restore: restore wptr and rptr when restarting the queue
683 *
684 * Set up the gfx DMA ring buffers and enable them. On restart, we will restore wptr and rptr.
685 * Return 0 for success.
686 */
687static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
688{
689 struct amdgpu_ring *ring;
690 u32 rb_cntl, ib_cntl;
691 u32 rb_bufsz;
692 u32 doorbell;
693 u32 doorbell_offset;
694 u32 temp;
695 u32 wptr_poll_cntl;
696 u64 wptr_gpu_addr;
697
698 ring = &adev->sdma.instance[i].ring;
699
700 if (!amdgpu_sriov_vf(adev))
701 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
702
703 /* Set ring buffer size in dwords */
704 rb_bufsz = order_base_2(ring->ring_size / 4);
705 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
706 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
707#ifdef __BIG_ENDIAN
708 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
709 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
710 RPTR_WRITEBACK_SWAP_ENABLE, 1);
711#endif
712 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
713
714 /* Initialize the ring buffer's read and write pointers */
715 if (restore) {
716 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
717 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
718 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
719 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
720 } else {
721 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
722 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
723 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
724 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
725 }
726 /* setup the wptr shadow polling */
727 wptr_gpu_addr = ring->wptr_gpu_addr;
728 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
729 lower_32_bits(wptr_gpu_addr));
730 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
731 upper_32_bits(wptr_gpu_addr));
732 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
733 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
734 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
735 SDMA0_GFX_RB_WPTR_POLL_CNTL,
736 F32_POLL_ENABLE, 1);
737 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
738 wptr_poll_cntl);
739
740 /* set the wb address whether it's enabled or not */
741 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
742 upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
743 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
744 lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
745
746 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
747
748 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
749 ring->gpu_addr >> 8);
750 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
751 ring->gpu_addr >> 40);
752
753 if (!restore)
754 ring->wptr = 0;
755
	/* before programming wptr to a smaller value, minor_ptr_update must be set first */
757 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
758
	if (!amdgpu_sriov_vf(adev)) { /* only bare metal uses register writes for wptr */
760 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
761 lower_32_bits(ring->wptr << 2));
762 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
763 upper_32_bits(ring->wptr << 2));
764 }
765
766 doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
767 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
768 mmSDMA0_GFX_DOORBELL_OFFSET));
769
770 if (ring->use_doorbell) {
771 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
772 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
773 OFFSET, ring->doorbell_index);
774 } else {
775 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
776 }
777 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
778 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
779 doorbell_offset);
780
781 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
782 ring->doorbell_index, 20);
783
784 if (amdgpu_sriov_vf(adev))
785 sdma_v5_0_ring_set_wptr(ring);
786
	/* set minor_ptr_update to 0 after wptr is programmed */
788 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
789
790 if (!amdgpu_sriov_vf(adev)) {
791 /* set utc l1 enable flag always to 1 */
792 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
793 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
794
795 /* enable MCBP */
796 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
797 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
798
799 /* Set up RESP_MODE to non-copy addresses */
800 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
801 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
802 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
803 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
804
805 /* program default cache read and write policy */
806 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
807 /* clean read policy and write policy bits */
808 temp &= 0xFF0FFF;
809 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
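		/* bits 13:12 carry the L2 read policy and bits 15:14 the write
		 * policy; the 0xFF0FFF mask above clears exactly those bits first.
		 */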
810 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
811 }
812
813 if (!amdgpu_sriov_vf(adev)) {
814 /* unhalt engine */
815 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
816 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
817 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
818 }
819
820 /* enable DMA RB */
821 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
822 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
823
824 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
825 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
826#ifdef __BIG_ENDIAN
827 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
828#endif
829 /* enable DMA IBs */
830 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
831
	if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
833 sdma_v5_0_ctx_switch_enable(adev, true);
834 sdma_v5_0_enable(adev, true);
835 }
836
837 return amdgpu_ring_test_helper(ring);
838}
839
840/**
841 * sdma_v5_0_gfx_resume - setup and start the async dma engines
842 *
843 * @adev: amdgpu_device pointer
844 *
845 * Set up the gfx DMA ring buffers and enable them (NAVI10).
846 * Returns 0 for success, error for failure.
847 */
848static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
849{
850 int i, r;
851
852 for (i = 0; i < adev->sdma.num_instances; i++) {
853 r = sdma_v5_0_gfx_resume_instance(adev, i, false);
854 if (r)
855 return r;
856 }
857
858 return 0;
859}
860
861/**
862 * sdma_v5_0_rlc_resume - setup and start the async dma engines
863 *
864 * @adev: amdgpu_device pointer
865 *
866 * Set up the compute DMA queues and enable them (NAVI10).
867 * Returns 0 for success, error for failure.
868 */
869static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
870{
871 return 0;
872}
873
874/**
875 * sdma_v5_0_load_microcode - load the sDMA ME ucode
876 *
877 * @adev: amdgpu_device pointer
878 *
879 * Loads the sDMA0/1 ucode.
880 * Returns 0 for success, -EINVAL if the ucode is not available.
881 */
882static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
883{
884 const struct sdma_firmware_header_v1_0 *hdr;
885 const __le32 *fw_data;
886 u32 fw_size;
887 int i, j;
888
889 /* halt the MEs */
890 sdma_v5_0_enable(adev, false);
891
892 for (i = 0; i < adev->sdma.num_instances; i++) {
893 if (!adev->sdma.instance[i].fw)
894 return -EINVAL;
895
896 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
897 amdgpu_ucode_print_sdma_hdr(&hdr->header);
898 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
899
900 fw_data = (const __le32 *)
901 (adev->sdma.instance[i].fw->data +
902 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
903
904 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
905
906 for (j = 0; j < fw_size; j++) {
907 if (amdgpu_emu_mode == 1 && j % 500 == 0)
908 msleep(1);
909 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
910 }
911
912 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
913 }
914
915 return 0;
916}
917
918/**
919 * sdma_v5_0_start - setup and start the async dma engines
920 *
921 * @adev: amdgpu_device pointer
922 *
923 * Set up the DMA engines and enable them (NAVI10).
924 * Returns 0 for success, error for failure.
925 */
926static int sdma_v5_0_start(struct amdgpu_device *adev)
927{
928 int r = 0;
929
930 if (amdgpu_sriov_vf(adev)) {
931 sdma_v5_0_ctx_switch_enable(adev, false);
932 sdma_v5_0_enable(adev, false);
933
934 /* set RB registers */
935 r = sdma_v5_0_gfx_resume(adev);
936 return r;
937 }
938
939 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
940 r = sdma_v5_0_load_microcode(adev);
941 if (r)
942 return r;
943 }
944
945 /* unhalt the MEs */
946 sdma_v5_0_enable(adev, true);
947 /* enable sdma ring preemption */
948 sdma_v5_0_ctx_switch_enable(adev, true);
949
950 /* start the gfx rings and rlc compute queues */
951 r = sdma_v5_0_gfx_resume(adev);
952 if (r)
953 return r;
954 r = sdma_v5_0_rlc_resume(adev);
955
956 return r;
957}
958
959static int sdma_v5_0_mqd_init(struct amdgpu_device *adev, void *mqd,
960 struct amdgpu_mqd_prop *prop)
961{
962 struct v10_sdma_mqd *m = mqd;
963 uint64_t wb_gpu_addr;
964
965 m->sdmax_rlcx_rb_cntl =
966 order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
967 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
968 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
969 1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
970
971 m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
972 m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
973
974 m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
975 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
976
977 wb_gpu_addr = prop->wptr_gpu_addr;
978 m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
979 m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
980
981 wb_gpu_addr = prop->rptr_gpu_addr;
982 m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
983 m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
984
985 m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
986 mmSDMA0_GFX_IB_CNTL));
987
988 m->sdmax_rlcx_doorbell_offset =
989 prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
990
991 m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
992
993 return 0;
994}
995
996static void sdma_v5_0_set_mqd_funcs(struct amdgpu_device *adev)
997{
998 adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
999 adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_0_mqd_init;
1000}
1001
1002/**
1003 * sdma_v5_0_ring_test_ring - simple async dma engine test
1004 *
1005 * @ring: amdgpu_ring structure holding ring information
1006 *
 * Test the DMA engine by using it to write a
 * value to memory (NAVI10).
1009 * Returns 0 for success, error for failure.
1010 */
1011static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
1012{
1013 struct amdgpu_device *adev = ring->adev;
1014 unsigned i;
1015 unsigned index;
1016 int r;
1017 u32 tmp;
1018 u64 gpu_addr;
1019
1020 tmp = 0xCAFEDEAD;
1021
1022 r = amdgpu_device_wb_get(adev, &index);
1023 if (r) {
1024 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
1025 return r;
1026 }
1027
1028 gpu_addr = adev->wb.gpu_addr + (index * 4);
1029 adev->wb.wb[index] = cpu_to_le32(tmp);
1030
1031 r = amdgpu_ring_alloc(ring, 20);
1032 if (r) {
1033 drm_err(adev_to_drm(adev), "dma failed to lock ring %d (%d).\n", ring->idx, r);
1034 amdgpu_device_wb_free(adev, index);
1035 return r;
1036 }
1037
1038 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1039 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1040 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1041 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1042 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1043 amdgpu_ring_write(ring, 0xDEADBEEF);
1044 amdgpu_ring_commit(ring);
1045
1046 for (i = 0; i < adev->usec_timeout; i++) {
1047 tmp = le32_to_cpu(adev->wb.wb[index]);
1048 if (tmp == 0xDEADBEEF)
1049 break;
1050 if (amdgpu_emu_mode == 1)
1051 msleep(1);
1052 else
1053 udelay(1);
1054 }
1055
1056 if (i >= adev->usec_timeout)
1057 r = -ETIMEDOUT;
1058
1059 amdgpu_device_wb_free(adev, index);
1060
1061 return r;
1062}
1063
1064/**
1065 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
1066 *
1067 * @ring: amdgpu_ring structure holding ring information
1068 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1069 *
1070 * Test a simple IB in the DMA ring (NAVI10).
1071 * Returns 0 on success, error on failure.
1072 */
1073static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1074{
1075 struct amdgpu_device *adev = ring->adev;
1076 struct amdgpu_ib ib;
1077 struct dma_fence *f = NULL;
1078 unsigned index;
1079 long r;
1080 u32 tmp = 0;
1081 u64 gpu_addr;
1082
1083 tmp = 0xCAFEDEAD;
1084 memset(&ib, 0, sizeof(ib));
1085
1086 r = amdgpu_device_wb_get(adev, &index);
1087 if (r) {
1088 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1089 return r;
1090 }
1091
1092 gpu_addr = adev->wb.gpu_addr + (index * 4);
1093 adev->wb.wb[index] = cpu_to_le32(tmp);
1094
1095 r = amdgpu_ib_get(adev, NULL, 256,
1096 AMDGPU_IB_POOL_DIRECT, &ib);
1097 if (r) {
1098 drm_err(adev_to_drm(adev), "failed to get ib (%ld).\n", r);
1099 goto err0;
1100 }
1101
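	/*
	 * Build a single WRITE_LINEAR packet (header, destination address
	 * lo/hi, count field, one dword of payload) and pad the IB out to
	 * 8 dwords with NOP headers.
	 */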
1102 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1103 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1104 ib.ptr[1] = lower_32_bits(gpu_addr);
1105 ib.ptr[2] = upper_32_bits(gpu_addr);
1106 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1107 ib.ptr[4] = 0xDEADBEEF;
1108 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1109 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1110 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1111 ib.length_dw = 8;
1112
1113 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1114 if (r)
1115 goto err1;
1116
1117 r = dma_fence_wait_timeout(f, false, timeout);
1118 if (r == 0) {
1119 drm_err(adev_to_drm(adev), "IB test timed out\n");
1120 r = -ETIMEDOUT;
1121 goto err1;
1122 } else if (r < 0) {
1123 drm_err(adev_to_drm(adev), "fence wait failed (%ld).\n", r);
1124 goto err1;
1125 }
1126
1127 tmp = le32_to_cpu(adev->wb.wb[index]);
1128
1129 if (tmp == 0xDEADBEEF)
1130 r = 0;
1131 else
1132 r = -EINVAL;
1133
1134err1:
1135 amdgpu_ib_free(&ib, NULL);
1136 dma_fence_put(f);
1137err0:
1138 amdgpu_device_wb_free(adev, index);
1139 return r;
1140}
1141
1142
1143/**
1144 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1145 *
1146 * @ib: indirect buffer to fill with commands
1147 * @pe: addr of the page entry
1148 * @src: src addr to copy from
1149 * @count: number of page entries to update
1150 *
1151 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1152 */
1153static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1154 uint64_t pe, uint64_t src,
1155 unsigned count)
1156{
1157 unsigned bytes = count * 8;
1158
1159 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1160 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1161 ib->ptr[ib->length_dw++] = bytes - 1;
1162 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1163 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1164 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1165 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1166 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1167
1168}
1169
1170/**
1171 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1172 *
1173 * @ib: indirect buffer to fill with commands
1174 * @pe: addr of the page entry
1175 * @value: dst addr to write into pe
1176 * @count: number of page entries to update
1177 * @incr: increase next addr by incr bytes
1178 *
1179 * Update PTEs by writing them manually using sDMA (NAVI10).
1180 */
1181static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1182 uint64_t value, unsigned count,
1183 uint32_t incr)
1184{
1185 unsigned ndw = count * 2;
1186
1187 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1188 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1189 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1190 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1191 ib->ptr[ib->length_dw++] = ndw - 1;
1192 for (; ndw > 0; ndw -= 2) {
1193 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1194 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1195 value += incr;
1196 }
1197}
1198
1199/**
1200 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1201 *
1202 * @ib: indirect buffer to fill with commands
1203 * @pe: addr of the page entry
1204 * @addr: dst addr to write into pe
1205 * @count: number of page entries to update
1206 * @incr: increase next addr by incr bytes
1207 * @flags: access flags
1208 *
1209 * Update the page tables using sDMA (NAVI10).
1210 */
1211static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1212 uint64_t pe,
1213 uint64_t addr, unsigned count,
1214 uint32_t incr, uint64_t flags)
1215{
1216 /* for physically contiguous pages (vram) */
1217 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1218 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1219 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1220 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1221 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1222 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1223 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1224 ib->ptr[ib->length_dw++] = incr; /* increment size */
1225 ib->ptr[ib->length_dw++] = 0;
1226 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1227}
1228
1229/**
1230 * sdma_v5_0_ring_pad_ib - pad the IB
1231 * @ring: amdgpu_ring structure holding ring information
1232 * @ib: indirect buffer to fill with padding
1233 *
1234 * Pad the IB with NOPs to a boundary multiple of 8.
1235 */
1236static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1237{
1238 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1239 u32 pad_count;
1240 int i;
1241
1242 pad_count = (-ib->length_dw) & 0x7;
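	/* e.g. length_dw == 13 gives pad_count == 3, rounding the IB up to 16 dwords */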
1243 for (i = 0; i < pad_count; i++)
1244 if (sdma && sdma->burst_nop && (i == 0))
1245 ib->ptr[ib->length_dw++] =
1246 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1247 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1248 else
1249 ib->ptr[ib->length_dw++] =
1250 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1251}
1252
1253
1254/**
1255 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1256 *
1257 * @ring: amdgpu_ring pointer
1258 *
 * Make sure all previous operations are completed (NAVI10).
1260 */
1261static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1262{
1263 uint32_t seq = ring->fence_drv.sync_seq;
1264 uint64_t addr = ring->fence_drv.gpu_addr;
1265
1266 /* wait for idle */
1267 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1268 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1269 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1270 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1271 amdgpu_ring_write(ring, addr & 0xfffffffc);
1272 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1273 amdgpu_ring_write(ring, seq); /* reference */
1274 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1275 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1276 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1277}
1278
1279
1280/**
1281 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1282 *
1283 * @ring: amdgpu_ring pointer
1284 * @vmid: vmid number to use
1285 * @pd_addr: address
1286 *
1287 * Update the page table base and flush the VM TLB
1288 * using sDMA (NAVI10).
1289 */
1290static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1291 unsigned vmid, uint64_t pd_addr)
1292{
1293 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1294}
1295
1296static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1297 uint32_t reg, uint32_t val)
1298{
1299 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1300 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1301 amdgpu_ring_write(ring, reg);
1302 amdgpu_ring_write(ring, val);
1303}
1304
1305static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1306 uint32_t val, uint32_t mask)
1307{
1308 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1309 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1310 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1311 amdgpu_ring_write(ring, reg << 2);
1312 amdgpu_ring_write(ring, 0);
1313 amdgpu_ring_write(ring, val); /* reference */
1314 amdgpu_ring_write(ring, mask); /* mask */
1315 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1316 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1317}
1318
1319static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1320 uint32_t reg0, uint32_t reg1,
1321 uint32_t ref, uint32_t mask)
1322{
1323 amdgpu_ring_emit_wreg(ring, reg0, ref);
1324 /* wait for a cycle to reset vm_inv_eng*_ack */
1325 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1326 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1327}
1328
1329static int sdma_v5_0_soft_reset_engine(struct amdgpu_device *adev, u32 instance_id)
1330{
1331 u32 grbm_soft_reset;
1332 u32 tmp;
1333
1334 grbm_soft_reset = REG_SET_FIELD(0,
1335 GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
1336 1);
1337 grbm_soft_reset <<= instance_id;
1338
1339 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
1340 tmp |= grbm_soft_reset;
1341 DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
1342 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
1343 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
1344
1345 udelay(50);
1346
1347 tmp &= ~grbm_soft_reset;
1348 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
1349 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
1350 return 0;
1351}
1352
1353static const struct amdgpu_sdma_funcs sdma_v5_0_sdma_funcs = {
1354 .stop_kernel_queue = &sdma_v5_0_stop_queue,
1355 .start_kernel_queue = &sdma_v5_0_restore_queue,
1356 .soft_reset_kernel_queue = &sdma_v5_0_soft_reset_engine,
1357};
1358
1359static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
1360 .copy_pte_num_dw = 7,
1361 .copy_pte = sdma_v5_0_vm_copy_pte,
1362 .write_pte = sdma_v5_0_vm_write_pte,
1363 .set_pte_pde = sdma_v5_0_vm_set_pte_pde,
1364};
1365
1366static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
1367{
1368 struct amdgpu_device *adev = ip_block->adev;
1369 int r;
1370
1371 r = sdma_v5_0_init_microcode(adev);
1372 if (r)
1373 return r;
1374
1375 sdma_v5_0_set_ring_funcs(adev);
1376 sdma_v5_0_set_buffer_funcs(adev);
1377 amdgpu_sdma_set_vm_pte_scheds(adev, &sdma_v5_0_vm_pte_funcs);
1378 sdma_v5_0_set_irq_funcs(adev);
1379 sdma_v5_0_set_mqd_funcs(adev);
1380
1381 return 0;
1382}
1383
1384
1385static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
1386{
1387 struct amdgpu_ring *ring;
1388 int r, i;
1389 struct amdgpu_device *adev = ip_block->adev;
1390 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1391 uint32_t *ptr;
1392
1393 /* SDMA trap event */
1394 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1395 SDMA0_5_0__SRCID__SDMA_TRAP,
1396 &adev->sdma.trap_irq);
1397 if (r)
1398 return r;
1399
1400 /* SDMA trap event */
1401 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1402 SDMA1_5_0__SRCID__SDMA_TRAP,
1403 &adev->sdma.trap_irq);
1404 if (r)
1405 return r;
1406
1407 for (i = 0; i < adev->sdma.num_instances; i++) {
1408 mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
1409 adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
1410 ring = &adev->sdma.instance[i].ring;
1411 ring->ring_obj = NULL;
1412 ring->use_doorbell = true;
1413
1414 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1415 ring->use_doorbell?"true":"false");
1416
1417 ring->doorbell_index = (i == 0) ?
1418 (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1419 : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1420
1421 ring->vm_hub = AMDGPU_GFXHUB(0);
1422 sprintf(ring->name, "sdma%d", i);
1423 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1424 (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
1425 AMDGPU_SDMA_IRQ_INSTANCE1,
1426 AMDGPU_RING_PRIO_DEFAULT, NULL);
1427 if (r)
1428 return r;
1429 }
1430
1431 adev->sdma.supported_reset =
1432 amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
1433 if (!amdgpu_sriov_vf(adev) &&
1434 !adev->debug_disable_gpu_ring_reset)
1435 adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1436
1437 /* Allocate memory for SDMA IP Dump buffer */
1438 ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
1439 if (ptr)
1440 adev->sdma.ip_dump = ptr;
1441 else
		DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
1443
1444 r = amdgpu_sdma_sysfs_reset_mask_init(adev);
1445 if (r)
1446 return r;
1447
1448 return r;
1449}
1450
1451static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
1452{
1453 struct amdgpu_device *adev = ip_block->adev;
1454 int i;
1455
1456 for (i = 0; i < adev->sdma.num_instances; i++)
1457 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1458
1459 amdgpu_sdma_sysfs_reset_mask_fini(adev);
1460 amdgpu_sdma_destroy_inst_ctx(adev, false);
1461
1462 kfree(adev->sdma.ip_dump);
1463
1464 return 0;
1465}
1466
1467static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
1468{
1469 int r;
1470 struct amdgpu_device *adev = ip_block->adev;
1471
1472 sdma_v5_0_init_golden_registers(adev);
1473
1474 r = sdma_v5_0_start(adev);
1475
1476 return r;
1477}
1478
1479static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
1480{
1481 struct amdgpu_device *adev = ip_block->adev;
1482
1483 if (amdgpu_sriov_vf(adev))
1484 return 0;
1485
1486 sdma_v5_0_ctx_switch_enable(adev, false);
1487 sdma_v5_0_enable(adev, false);
1488
1489 return 0;
1490}
1491
1492static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
1493{
1494 return sdma_v5_0_hw_fini(ip_block);
1495}
1496
1497static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
1498{
1499 return sdma_v5_0_hw_init(ip_block);
1500}
1501
1502static bool sdma_v5_0_is_idle(struct amdgpu_ip_block *ip_block)
1503{
1504 struct amdgpu_device *adev = ip_block->adev;
1505 u32 i;
1506
1507 for (i = 0; i < adev->sdma.num_instances; i++) {
1508 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1509
1510 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1511 return false;
1512 }
1513
1514 return true;
1515}
1516
1517static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
1518{
1519 unsigned i;
1520 u32 sdma0, sdma1;
1521 struct amdgpu_device *adev = ip_block->adev;
1522
1523 for (i = 0; i < adev->usec_timeout; i++) {
1524 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1525 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1526
1527 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1528 return 0;
1529 udelay(1);
1530 }
1531 return -ETIMEDOUT;
1532}
1533
1534static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
1535{
1536 /* todo */
1537
1538 return 0;
1539}
1540
1541static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring,
1542 unsigned int vmid,
1543 struct amdgpu_fence *timedout_fence)
1544{
1545 struct amdgpu_device *adev = ring->adev;
1546 int r;
1547
1548 if (ring->me >= adev->sdma.num_instances) {
1549 dev_err(adev->dev, "sdma instance not found\n");
1550 return -EINVAL;
1551 }
1552
1553 amdgpu_ring_reset_helper_begin(ring, timedout_fence);
1554
1555 amdgpu_amdkfd_suspend(adev, true);
1556 r = amdgpu_sdma_reset_engine(adev, ring->me, true);
1557 amdgpu_amdkfd_resume(adev, true);
1558 if (r)
1559 return r;
1560
1561 return amdgpu_ring_reset_helper_end(ring, timedout_fence);
1562}
1563
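/**
 * sdma_v5_0_stop_queue - quiesce one SDMA instance before an engine reset
 *
 * @ring: the SDMA ring whose instance should be stopped
 *
 * Stop the gfx queue, request the FREEZE state and wait for the FROZEN
 * ack (falling back to a STATUS1 idle check), then halt F32 and disable
 * UTC_L1, all under RLC safe mode. Not supported under SR-IOV.
 */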
1564static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring)
1565{
1566 u32 f32_cntl, freeze, cntl, stat1_reg;
1567 struct amdgpu_device *adev = ring->adev;
1568 int i, j, r = 0;
1569
1570 if (amdgpu_sriov_vf(adev))
1571 return -EINVAL;
1572
1573 i = ring->me;
1574 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
1575
1576 /* stop queue */
1577 sdma_v5_0_gfx_stop(adev, 1 << i);
1578
1579 /* engine stop: set SDMAx_F32_CNTL.HALT to 1 and the SDMAx_FREEZE freeze bit to 1 */
1580 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1581 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
1582 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
1583
1584 for (j = 0; j < adev->usec_timeout; j++) {
1585 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
1586 if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
1587 break;
1588 udelay(1);
1589 }
1590
1591 /* if the FROZEN ack was not received, check that all SDMA copy engines are idle */
1592 if (j == adev->usec_timeout) {
1593 stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
1594 if ((stat1_reg & 0x3FF) != 0x3FF) {
1595 DRM_ERROR("cannot soft reset as sdma not idle\n");
1596 r = -ETIMEDOUT;
1597 goto err0;
1598 }
1599 }
1600
1601 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
1602 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
1603 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
1604
1605 cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
1606 cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
1607 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
1608err0:
1609 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
1610 return r;
1611}
1612
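/**
 * sdma_v5_0_restore_queue - bring an SDMA instance back after a reset
 *
 * @ring: the SDMA ring whose instance should be restored
 *
 * Clear the FREEZE bit and re-run the gfx ring resume sequence for this
 * instance, under RLC safe mode.
 */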
1613static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring)
1614{
1615 struct amdgpu_device *adev = ring->adev;
1616 u32 inst_id = ring->me;
1617 u32 freeze;
1618 int r;
1619
1620 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
1621 /* unfreeze */
1622 freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
1623 freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
1624 WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
1625
1626 r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
1627 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
1628
1629 return r;
1630}
1631
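/**
 * sdma_v5_0_ring_preempt_ib - preempt the currently executing IB
 *
 * @ring: the SDMA ring to preempt
 *
 * Disable the conditional-execute region, emit a trailing fence, assert
 * SDMAx_GFX_PREEMPT and poll until the trailing fence signals or
 * usec_timeout expires, then deassert preemption again.
 */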
1632static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1633{
1634 int i, r = 0;
1635 struct amdgpu_device *adev = ring->adev;
1636 u32 index = 0;
1637 u64 sdma_gfx_preempt;
1638
1639 amdgpu_sdma_get_index_from_ring(ring, &index);
1640 if (index == 0)
1641 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1642 else
1643 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1644
1645 /* assert preemption condition */
1646 amdgpu_ring_set_preempt_cond_exec(ring, false);
1647
1648 /* emit the trailing fence */
1649 ring->trail_seq += 1;
1650 amdgpu_ring_alloc(ring, 10);
1651 sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1652 ring->trail_seq, 0);
1653 amdgpu_ring_commit(ring);
1654
1655 /* assert IB preemption */
1656 WREG32(sdma_gfx_preempt, 1);
1657
1658 /* poll the trailing fence */
1659 for (i = 0; i < adev->usec_timeout; i++) {
1660 if (ring->trail_seq ==
1661 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1662 break;
1663 udelay(1);
1664 }
1665
1666 if (i >= adev->usec_timeout) {
1667 r = -EINVAL;
1668 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1669 }
1670
1671 /* deassert IB preemption */
1672 WREG32(sdma_gfx_preempt, 0);
1673
1674 /* deassert the preemption condition */
1675 amdgpu_ring_set_preempt_cond_exec(ring, true);
1676 return r;
1677}
1678
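/**
 * sdma_v5_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: irq source (unused)
 * @type: which SDMA instance the request targets
 * @state: enable or disable
 *
 * Program TRAP_ENABLE in SDMAx_CNTL for the selected instance. Skipped
 * under SR-IOV. Always returns 0.
 */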
1679static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1680 struct amdgpu_irq_src *source,
1681 unsigned type,
1682 enum amdgpu_interrupt_state state)
1683{
1684 u32 sdma_cntl;
1685
1686 if (!amdgpu_sriov_vf(adev)) {
1687 u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1688 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1689 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1690
1691 sdma_cntl = RREG32(reg_offset);
1692 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1693 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1694 WREG32(reg_offset, sdma_cntl);
1695 }
1696
1697 return 0;
1698}
1699
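/**
 * sdma_v5_0_process_trap_irq - handle SDMA trap interrupts
 *
 * @adev: amdgpu device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Fence completion interrupts from ring 0 of either SDMA instance are
 * forwarded to amdgpu_fence_process(); entries carrying the MES queue
 * flag trigger a one-time warning, and the unused compute/page rings
 * are ignored.
 */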
1700static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1701 struct amdgpu_irq_src *source,
1702 struct amdgpu_iv_entry *entry)
1703{
1704 DRM_DEBUG("IH: SDMA trap\n");
1705
1706 if (drm_WARN_ON_ONCE(&adev->ddev,
1707 adev->enable_mes &&
1708 (entry->src_data[0] & AMDGPU_FENCE_MES_QUEUE_FLAG)))
1709 return 0;
1710
1711 switch (entry->client_id) {
1712 case SOC15_IH_CLIENTID_SDMA0:
1713 switch (entry->ring_id) {
1714 case 0:
1715 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1716 break;
1717 case 1:
1718 /* XXX compute */
1719 break;
1720 case 2:
1721 /* XXX compute */
1722 break;
1723 case 3:
1724 /* XXX page queue */
1725 break;
1726 }
1727 break;
1728 case SOC15_IH_CLIENTID_SDMA1:
1729 switch (entry->ring_id) {
1730 case 0:
1731 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1732 break;
1733 case 1:
1734 /* XXX compute */
1735 break;
1736 case 2:
1737 /* XXX compute */
1738 break;
1739 case 3:
1740 /* XXX page queue */
1741 break;
1742 }
1743 break;
1744 }
1745 return 0;
1746}
1747
1748static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1749 struct amdgpu_irq_src *source,
1750 struct amdgpu_iv_entry *entry)
1751{
1752 return 0;
1753}
1754
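/**
 * sdma_v5_0_update_medium_grain_clock_gating - toggle SDMA MGCG
 *
 * @adev: amdgpu device pointer
 * @enable: true to allow clock gating, false to force the clocks on
 *
 * Clearing the SOFT_OVERRIDE bits in SDMAx_CLK_CTRL lets the hardware
 * gate the SDMA clocks; setting them overrides gating and keeps the
 * clocks running.
 */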
1755static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1756 bool enable)
1757{
1758 uint32_t data, def;
1759 int i;
1760
1761 for (i = 0; i < adev->sdma.num_instances; i++) {
1762 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1763 /* Enable sdma clock gating */
1764 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1765 data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1766 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1767 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1768 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1769 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1770 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1771 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1772 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1773 if (def != data)
1774 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1775 } else {
1776 /* Disable sdma clock gating */
1777 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1778 data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
1779 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
1780 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1781 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1782 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1783 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1784 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1785 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1786 if (def != data)
1787 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1788 }
1789 }
1790}
1791
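/**
 * sdma_v5_0_update_medium_grain_light_sleep - toggle SDMA memory light sleep
 *
 * @adev: amdgpu device pointer
 * @enable: true to allow memory light sleep, false to disable it
 *
 * Sets or clears MEM_POWER_OVERRIDE in SDMAx_POWER_CNTL for every
 * instance, gated on AMD_CG_SUPPORT_SDMA_LS.
 */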
1792static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1793 bool enable)
1794{
1795 uint32_t data, def;
1796 int i;
1797
1798 for (i = 0; i < adev->sdma.num_instances; i++) {
1799 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1800 /* Enable sdma mem light sleep */
1801 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1802 data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1803 if (def != data)
1804 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1805
1806 } else {
1807 /* Disable sdma mem light sleep */
1808 def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1809 data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1810 if (def != data)
1811 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1812
1813 }
1814 }
1815}
1816
1817static int sdma_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1818 enum amd_clockgating_state state)
1819{
1820 struct amdgpu_device *adev = ip_block->adev;
1821
1822 if (amdgpu_sriov_vf(adev))
1823 return 0;
1824
1825 switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1826 case IP_VERSION(5, 0, 0):
1827 case IP_VERSION(5, 0, 2):
1828 case IP_VERSION(5, 0, 5):
1829 sdma_v5_0_update_medium_grain_clock_gating(adev,
1830 state == AMD_CG_STATE_GATE);
1831 sdma_v5_0_update_medium_grain_light_sleep(adev,
1832 state == AMD_CG_STATE_GATE);
1833 break;
1834 default:
1835 break;
1836 }
1837
1838 return 0;
1839}
1840
1841static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
1842 enum amd_powergating_state state)
1843{
1844 return 0;
1845}
1846
1847static void sdma_v5_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
1848{
1849 struct amdgpu_device *adev = ip_block->adev;
1850 int data;
1851
1852 if (amdgpu_sriov_vf(adev))
1853 *flags = 0;
1854
1855 /* AMD_CG_SUPPORT_SDMA_MGCG */
1856 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1857 if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
1858 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1859
1860 /* AMD_CG_SUPPORT_SDMA_LS */
1861 data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1862 if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1863 *flags |= AMD_CG_SUPPORT_SDMA_LS;
1864}
1865
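/**
 * sdma_v5_0_print_ip_state - print the captured SDMA register dump
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 * @p: drm printer used for the output
 *
 * Print the register values previously captured by
 * sdma_v5_0_dump_ip_state(), one block per SDMA instance.
 */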
1866static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1867{
1868 struct amdgpu_device *adev = ip_block->adev;
1869 int i, j;
1870 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1871 uint32_t instance_offset;
1872
1873 if (!adev->sdma.ip_dump)
1874 return;
1875
1876 drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
1877 for (i = 0; i < adev->sdma.num_instances; i++) {
1878 instance_offset = i * reg_count;
1879 drm_printf(p, "\nInstance:%d\n", i);
1880
1881 for (j = 0; j < reg_count; j++)
1882 drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_5_0[j].reg_name,
1883 adev->sdma.ip_dump[instance_offset + j]);
1884 }
1885}
1886
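/**
 * sdma_v5_0_dump_ip_state - snapshot the SDMA registers for debugging
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 *
 * Read every register in sdma_reg_list_5_0 for each SDMA instance into
 * the ip_dump buffer, with GFXOFF temporarily disabled so the register
 * reads are valid.
 */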
1887static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
1888{
1889 struct amdgpu_device *adev = ip_block->adev;
1890 int i, j;
1891 uint32_t instance_offset;
1892 uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
1893
1894 if (!adev->sdma.ip_dump)
1895 return;
1896
1897 amdgpu_gfx_off_ctrl(adev, false);
1898 for (i = 0; i < adev->sdma.num_instances; i++) {
1899 instance_offset = i * reg_count;
1900 for (j = 0; j < reg_count; j++)
1901 adev->sdma.ip_dump[instance_offset + j] =
1902 RREG32(sdma_v5_0_get_reg_offset(adev, i,
1903 sdma_reg_list_5_0[j].reg_offset));
1904 }
1905 amdgpu_gfx_off_ctrl(adev, true);
1906}
1907
1908static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
1909 .name = "sdma_v5_0",
1910 .early_init = sdma_v5_0_early_init,
1911 .sw_init = sdma_v5_0_sw_init,
1912 .sw_fini = sdma_v5_0_sw_fini,
1913 .hw_init = sdma_v5_0_hw_init,
1914 .hw_fini = sdma_v5_0_hw_fini,
1915 .suspend = sdma_v5_0_suspend,
1916 .resume = sdma_v5_0_resume,
1917 .is_idle = sdma_v5_0_is_idle,
1918 .wait_for_idle = sdma_v5_0_wait_for_idle,
1919 .soft_reset = sdma_v5_0_soft_reset,
1920 .set_clockgating_state = sdma_v5_0_set_clockgating_state,
1921 .set_powergating_state = sdma_v5_0_set_powergating_state,
1922 .get_clockgating_state = sdma_v5_0_get_clockgating_state,
1923 .dump_ip_state = sdma_v5_0_dump_ip_state,
1924 .print_ip_state = sdma_v5_0_print_ip_state,
1925};
1926
1927static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1928 .type = AMDGPU_RING_TYPE_SDMA,
1929 .align_mask = 0xf,
1930 .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1931 .support_64bit_ptrs = true,
1932 .secure_submission_supported = true,
1933 .get_rptr = sdma_v5_0_ring_get_rptr,
1934 .get_wptr = sdma_v5_0_ring_get_wptr,
1935 .set_wptr = sdma_v5_0_ring_set_wptr,
1936 .emit_frame_size =
1937 5 + /* sdma_v5_0_ring_init_cond_exec */
1938 6 + /* sdma_v5_0_ring_emit_hdp_flush */
1939 3 + /* hdp_invalidate */
1940 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1941 /* sdma_v5_0_ring_emit_vm_flush */
1942 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1943 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1944 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1945 .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
1946 .emit_ib = sdma_v5_0_ring_emit_ib,
1947 .emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
1948 .emit_fence = sdma_v5_0_ring_emit_fence,
1949 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
1950 .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
1951 .emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
1952 .test_ring = sdma_v5_0_ring_test_ring,
1953 .test_ib = sdma_v5_0_ring_test_ib,
1954 .insert_nop = sdma_v5_0_ring_insert_nop,
1955 .pad_ib = sdma_v5_0_ring_pad_ib,
1956 .emit_wreg = sdma_v5_0_ring_emit_wreg,
1957 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1958 .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
1959 .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1960 .preempt_ib = sdma_v5_0_ring_preempt_ib,
1961 .reset = sdma_v5_0_reset_queue,
1962};
1963
1964static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
1965{
1966 int i;
1967
1968 for (i = 0; i < adev->sdma.num_instances; i++) {
1969 adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
1970 adev->sdma.instance[i].ring.me = i;
1971 }
1972}
1973
1974static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
1975 .set = sdma_v5_0_set_trap_irq_state,
1976 .process = sdma_v5_0_process_trap_irq,
1977};
1978
1979static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
1980 .process = sdma_v5_0_process_illegal_inst_irq,
1981};
1982
1983static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
1984{
1985 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1986 adev->sdma.num_instances;
1987 adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
1988 adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
1989}
1990
1991/**
1992 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
1993 *
1994 * @ib: indirect buffer to copy to
1995 * @src_offset: src GPU address
1996 * @dst_offset: dst GPU address
1997 * @byte_count: number of bytes to xfer
1998 * @copy_flags: copy flags for the buffers
1999 *
2000 * Copy GPU buffers using the DMA engine (NAVI10).
2001 * Used by the amdgpu ttm implementation to move pages if
2002 * registered as the asic copy callback.
2003 */
2004static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
2005 uint64_t src_offset,
2006 uint64_t dst_offset,
2007 uint32_t byte_count,
2008 uint32_t copy_flags)
2009{
2010 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2011 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2012 SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
2013 ib->ptr[ib->length_dw++] = byte_count - 1;
2014 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2015 ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2016 ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2017 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2018 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2019}
2020
2021/**
2022 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
2023 *
2024 * @ib: indirect buffer to fill
2025 * @src_data: value to write to buffer
2026 * @dst_offset: dst GPU address
2027 * @byte_count: number of bytes to xfer
2028 *
2029 * Fill GPU buffers using the DMA engine (NAVI10).
2030 */
2031static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
2032 uint32_t src_data,
2033 uint64_t dst_offset,
2034 uint32_t byte_count)
2035{
2036 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2037 ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2038 ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2039 ib->ptr[ib->length_dw++] = src_data;
2040 ib->ptr[ib->length_dw++] = byte_count - 1;
2041}
2042
2043static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
2044 .copy_max_bytes = 0x400000,
2045 .copy_num_dw = 7,
2046 .emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
2047
2048 .fill_max_bytes = 0x400000,
2049 .fill_num_dw = 5,
2050 .emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
2051};
2052
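/*
 * Register the SDMA copy/fill callbacks with the buffer manager only if
 * no other engine has claimed them yet; instance 0 serves buffer moves.
 */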
2053static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
2054{
2055 if (adev->mman.buffer_funcs == NULL) {
2056 adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
2057 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2058 }
2059}
2060
2061const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
2062 .type = AMD_IP_BLOCK_TYPE_SDMA,
2063 .major = 5,
2064 .minor = 0,
2065 .rev = 0,
2066 .funcs = &sdma_v5_0_ip_funcs,
2067};