Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <linux/kthread.h>
27#include <linux/pci.h>
28#include <linux/uaccess.h>
29#include <linux/pm_runtime.h>
30
31#include "amdgpu.h"
32#include "amdgpu_pm.h"
33#include "amdgpu_dm_debugfs.h"
34#include "amdgpu_ras.h"
35#include "amdgpu_rap.h"
36#include "amdgpu_securedisplay.h"
37#include "amdgpu_fw_attestation.h"
38#include "amdgpu_umr.h"
39
40#include "amdgpu_reset.h"
41#include "amdgpu_psp_ta.h"
42#include "amdgpu_userq.h"
43
44#if defined(CONFIG_DEBUG_FS)
45
/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos: Offset to seek to
 *
 * This debugfs entry has special meaning on the offset being sought.
 * Various bits have different meanings:
 *
 * Bit 62: Indicates a GRBM bank switch is needed
 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
 *	   zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 *
 * Bit 23: Indicates that the PM power gating lock should be held
 *	   This is necessary to read registers that might be
 *	   unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 *
 * Return: number of bytes transferred on success, negative errno on failure.
 */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	/*
	 * Accesses must be whole, dword-aligned dwords, and the GRBM (bit 62)
	 * and SRBM (bit 61) bank-switch requests are mutually exclusive.
	 */
	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		/* GRBM bank switch: decode SE/SH/INSTANCE selectors */
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		/* 0x3FF in a selector field means "broadcast/select all" */
		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		/* SRBM switch: decode ME/PIPE/QUEUE/VMID selectors */
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	/* strip the control bits; what remains is the register byte offset */
	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		/* reject selectors beyond the hardware config (unless broadcast) */
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank, 0);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	/* transfer one dword per iteration until the request is satisfied */
	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	/* restore broadcast selection and drop locks in reverse order */
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
187
/*
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 *
 * Thin wrapper: forwards to amdgpu_debugfs_process_reg_op() with
 * read == true.  See that function for the offset encoding.
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
196
/*
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 *
 * Thin wrapper: forwards to amdgpu_debugfs_process_reg_op() with
 * read == false.  The const is cast away because the shared helper
 * takes a single buffer parameter for both directions.
 */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
205
206static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
207{
208 struct amdgpu_debugfs_regs2_data *rd;
209
210 rd = kzalloc_obj(*rd);
211 if (!rd)
212 return -ENOMEM;
213 rd->adev = file_inode(file)->i_private;
214 file->private_data = rd;
215 mutex_init(&rd->lock);
216
217 return 0;
218}
219
220static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
221{
222 struct amdgpu_debugfs_regs2_data *rd = file->private_data;
223
224 mutex_destroy(&rd->lock);
225 kfree(file->private_data);
226 return 0;
227}
228
/*
 * amdgpu_debugfs_regs2_op - shared read/write worker for the regs2 interface
 *
 * @f: open file handle (private_data is the per-open regs2 state)
 * @buf: user buffer to copy to (read) or from (write)
 * @offset: register BYTE offset to start at
 * @size: number of bytes to transfer (must be a multiple of 4)
 * @write_en: nonzero for a write, zero for a read
 *
 * Unlike the legacy "regs" file, the bank/pipe selection state is set
 * beforehand via ioctl (stored in rd->id) rather than encoded in the
 * file offset.
 *
 * Returns the number of bytes transferred, or a negative errno.
 */
static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	struct amdgpu_device *adev = rd->adev;
	ssize_t result = 0;
	int r;
	uint32_t value;

	/* only whole, dword-aligned dwords */
	if (size & 0x3 || offset & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* rd->lock protects rd->id against a concurrent SET_STATE ioctl */
	mutex_lock(&rd->lock);

	if (rd->id.use_grbm) {
		/* reject selectors beyond the hardware config (unless broadcast) */
		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			mutex_unlock(&rd->lock);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
					rd->id.grbm.sh,
					rd->id.grbm.instance, rd->id.xcc_id);
	}

	if (rd->id.use_srbm) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
					    rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id);
	}

	if (rd->id.pg_lock)
		mutex_lock(&adev->pm.mutex);

	/* transfer one dword per iteration */
	while (size) {
		if (!write_en) {
			value = RREG32(offset >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
		}
		if (r) {
			result = r;
			goto end;
		}
		offset += 4;
		size -= 4;
		result += 4;
		buf += 4;
	}
end:
	/* restore broadcast selection and release locks in reverse order */
	if (rd->id.use_grbm) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (rd->id.use_srbm) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (rd->id.pg_lock)
		mutex_unlock(&adev->pm.mutex);

	mutex_unlock(&rd->lock);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
316
/*
 * amdgpu_debugfs_regs2_ioctl - set the bank/pipe selection state for regs2
 *
 * Two ioctls are supported: the v2 variant copies the iocdata straight
 * into rd->id, the v1 variant copies into a temporary and translates it
 * field by field (xcc_id did not exist in v1 and is forced to 0).
 *
 * NOTE(review): the v1 path does not propagate v1_data.srbm.vmid into
 * rd->id.srbm.vmid — confirm this is intentional.
 */
static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
{
	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
	struct amdgpu_debugfs_regs2_iocdata v1_data;
	int r;

	/* serialize against concurrent reads/writes using rd->id */
	mutex_lock(&rd->lock);

	switch (cmd) {
	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2:
		/* v2 payload has the same layout as rd->id */
		r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data,
				   sizeof(rd->id));
		if (r)
			r = -EINVAL;
		goto done;
	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
		r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data,
				   sizeof(v1_data));
		if (r) {
			r = -EINVAL;
			goto done;
		}
		goto v1_copy;
	default:
		r = -EINVAL;
		goto done;
	}

v1_copy:
	/* translate the legacy v1 layout into the v2-shaped rd->id */
	rd->id.use_srbm = v1_data.use_srbm;
	rd->id.use_grbm = v1_data.use_grbm;
	rd->id.pg_lock = v1_data.pg_lock;
	rd->id.grbm.se = v1_data.grbm.se;
	rd->id.grbm.sh = v1_data.grbm.sh;
	rd->id.grbm.instance = v1_data.grbm.instance;
	rd->id.srbm.me = v1_data.srbm.me;
	rd->id.srbm.pipe = v1_data.srbm.pipe;
	rd->id.srbm.queue = v1_data.srbm.queue;
	rd->id.xcc_id = 0;
done:
	mutex_unlock(&rd->lock);
	return r;
}
360
/* regs2 read callback: delegate to the shared worker in read mode */
static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
}
365
/* regs2 write callback: delegate to the shared worker in write mode */
static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
{
	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
}
370
371static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file)
372{
373 struct amdgpu_debugfs_gprwave_data *rd;
374
375 rd = kzalloc_obj(*rd);
376 if (!rd)
377 return -ENOMEM;
378 rd->adev = file_inode(file)->i_private;
379 file->private_data = rd;
380 mutex_init(&rd->lock);
381
382 return 0;
383}
384
385static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file)
386{
387 struct amdgpu_debugfs_gprwave_data *rd = file->private_data;
388
389 mutex_destroy(&rd->lock);
390 kfree(file->private_data);
391 return 0;
392}
393
/*
 * amdgpu_debugfs_gprwave_read - dump wave-status or GPR data for a wave
 *
 * @f: open file handle (private_data holds the selection state set by ioctl)
 * @buf: user buffer to copy the dwords into
 * @size: number of bytes requested (multiple of 4, at most 4096 — the
 *        size of the bounce buffer below)
 * @pos: byte offset into the dumped data
 *
 * Depending on rd->id.gpr_or_wave this returns either wave status
 * registers or the wave's VGPR/SGPR contents, read via the GFX IP
 * callbacks while the requested SE/SH/CU is selected.
 *
 * Returns the number of bytes copied, or a negative errno.
 */
static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
	struct amdgpu_device *adev = rd->adev;
	ssize_t result = 0;
	int r;
	uint32_t *data, x;

	if (size > 4096 || size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* 1024 dwords = 4096 bytes, matching the size limit above */
	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		amdgpu_virt_disable_access_debugfs(adev);
		return -ENOMEM;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id);

	if (!rd->id.gpr_or_wave) {
		/* wave status dump; x is set to the number of dwords produced */
		x = 0;
		if (adev->gfx.funcs->read_wave_data)
			adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x);
	} else {
		/* GPR dump: read exactly the requested number of dwords */
		x = size >> 2;
		if (rd->id.gpr.vpgr_or_sgpr) {
			if (adev->gfx.funcs->read_wave_vgprs)
				adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data);
		} else {
			if (adev->gfx.funcs->read_wave_sgprs)
				adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data);
		}
	}

	/* restore broadcast selection before dropping the GRBM lock */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* x == 0 means the IP callback produced nothing (or is absent) */
	if (!x) {
		result = -EINVAL;
		goto done;
	}

	/* copy out, clamped to the x*4 bytes actually available */
	while (size && (*pos < x * 4)) {
		uint32_t value;

		value = data[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto done;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

done:
	amdgpu_virt_disable_access_debugfs(adev);
	kfree(data);
	return result;
}
474
475static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data)
476{
477 struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
478 int r = 0;
479
480 mutex_lock(&rd->lock);
481
482 switch (cmd) {
483 case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE:
484 if (copy_from_user(&rd->id,
485 (struct amdgpu_debugfs_gprwave_iocdata *)data,
486 sizeof(rd->id)))
487 r = -EFAULT;
488 goto done;
489 default:
490 r = -EINVAL;
491 goto done;
492 }
493
494done:
495 mutex_unlock(&rd->lock);
496 return r;
497}
498
499
500
501
502/**
503 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
504 *
505 * @f: open file handle
506 * @buf: User buffer to store read data in
507 * @size: Number of bytes to read
508 * @pos: Offset to seek to
509 *
510 * The lower bits are the BYTE offset of the register to read. This
511 * allows reading multiple registers in a single call and having
512 * the returned size reflect that.
513 */
514static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
515 size_t size, loff_t *pos)
516{
517 struct amdgpu_device *adev = file_inode(f)->i_private;
518 ssize_t result = 0;
519 int r;
520
521 if (size & 0x3 || *pos & 0x3)
522 return -EINVAL;
523
524 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
525 if (r < 0) {
526 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
527 return r;
528 }
529
530 r = amdgpu_virt_enable_access_debugfs(adev);
531 if (r < 0) {
532 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
533 return r;
534 }
535
536 while (size) {
537 uint32_t value;
538
539 if (upper_32_bits(*pos))
540 value = RREG32_PCIE_EXT(*pos);
541 else
542 value = RREG32_PCIE(*pos);
543
544 r = put_user(value, (uint32_t *)buf);
545 if (r)
546 goto out;
547
548 result += 4;
549 buf += 4;
550 *pos += 4;
551 size -= 4;
552 }
553
554 r = result;
555out:
556 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
557 amdgpu_virt_disable_access_debugfs(adev);
558 return r;
559}
560
561/**
562 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
563 *
564 * @f: open file handle
565 * @buf: User buffer to write data from
566 * @size: Number of bytes to write
567 * @pos: Offset to seek to
568 *
569 * The lower bits are the BYTE offset of the register to write. This
570 * allows writing multiple registers in a single call and having
571 * the returned size reflect that.
572 */
573static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
574 size_t size, loff_t *pos)
575{
576 struct amdgpu_device *adev = file_inode(f)->i_private;
577 ssize_t result = 0;
578 int r;
579
580 if (size & 0x3 || *pos & 0x3)
581 return -EINVAL;
582
583 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
584 if (r < 0) {
585 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
586 return r;
587 }
588
589 r = amdgpu_virt_enable_access_debugfs(adev);
590 if (r < 0) {
591 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
592 return r;
593 }
594
595 while (size) {
596 uint32_t value;
597
598 r = get_user(value, (uint32_t *)buf);
599 if (r)
600 goto out;
601
602 if (upper_32_bits(*pos))
603 WREG32_PCIE_EXT(*pos, value);
604 else
605 WREG32_PCIE(*pos, value);
606
607 result += 4;
608 buf += 4;
609 *pos += 4;
610 size -= 4;
611 }
612
613 r = result;
614out:
615 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
616 amdgpu_virt_disable_access_debugfs(adev);
617 return r;
618}
619
620/**
621 * amdgpu_debugfs_regs_pcie64_read - Read from a 64-bit PCIE register
622 *
623 * @f: open file handle
624 * @buf: User buffer to store read data in
625 * @size: Number of bytes to read
626 * @pos: Offset to seek to
627 */
628static ssize_t amdgpu_debugfs_regs_pcie64_read(struct file *f, char __user *buf,
629 size_t size, loff_t *pos)
630{
631 struct amdgpu_device *adev = file_inode(f)->i_private;
632 ssize_t result = 0;
633 int r;
634
635 if (size & 0x7 || *pos & 0x7)
636 return -EINVAL;
637
638 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
639 if (r < 0) {
640 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
641 return r;
642 }
643
644 r = amdgpu_virt_enable_access_debugfs(adev);
645 if (r < 0) {
646 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
647 return r;
648 }
649
650 while (size) {
651 uint64_t value;
652
653 value = RREG64_PCIE_EXT(*pos);
654
655 r = put_user(value, (uint64_t *)buf);
656 if (r)
657 goto out;
658
659 result += 8;
660 buf += 8;
661 *pos += 8;
662 size -= 8;
663 }
664
665 r = result;
666out:
667 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
668 amdgpu_virt_disable_access_debugfs(adev);
669 return r;
670}
671
672/**
673 * amdgpu_debugfs_regs_pcie64_write - Write to a 64-bit PCIE register
674 *
675 * @f: open file handle
676 * @buf: User buffer to write data from
677 * @size: Number of bytes to write
678 * @pos: Offset to seek to
679 */
680static ssize_t amdgpu_debugfs_regs_pcie64_write(struct file *f, const char __user *buf,
681 size_t size, loff_t *pos)
682{
683 struct amdgpu_device *adev = file_inode(f)->i_private;
684 ssize_t result = 0;
685 int r;
686
687 if (size & 0x7 || *pos & 0x7)
688 return -EINVAL;
689
690 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
691 if (r < 0) {
692 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
693 return r;
694 }
695
696 r = amdgpu_virt_enable_access_debugfs(adev);
697 if (r < 0) {
698 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
699 return r;
700 }
701
702 while (size) {
703 uint64_t value;
704
705 r = get_user(value, (uint64_t *)buf);
706 if (r)
707 goto out;
708
709 WREG64_PCIE_EXT(*pos, value);
710
711 result += 8;
712 buf += 8;
713 *pos += 8;
714 size -= 8;
715 }
716
717 r = result;
718out:
719 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
720 amdgpu_virt_disable_access_debugfs(adev);
721 return r;
722}
723
724/**
725 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
726 *
727 * @f: open file handle
728 * @buf: User buffer to store read data in
729 * @size: Number of bytes to read
730 * @pos: Offset to seek to
731 *
732 * The lower bits are the BYTE offset of the register to read. This
733 * allows reading multiple registers in a single call and having
734 * the returned size reflect that.
735 */
736static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
737 size_t size, loff_t *pos)
738{
739 struct amdgpu_device *adev = file_inode(f)->i_private;
740 ssize_t result = 0;
741 int r;
742
743 if (size & 0x3 || *pos & 0x3)
744 return -EINVAL;
745
746 if (!adev->reg.didt.rreg)
747 return -EOPNOTSUPP;
748
749 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
750 if (r < 0) {
751 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
752 return r;
753 }
754
755 r = amdgpu_virt_enable_access_debugfs(adev);
756 if (r < 0) {
757 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
758 return r;
759 }
760
761 while (size) {
762 uint32_t value;
763
764 value = RREG32_DIDT(*pos >> 2);
765 r = put_user(value, (uint32_t *)buf);
766 if (r)
767 goto out;
768
769 result += 4;
770 buf += 4;
771 *pos += 4;
772 size -= 4;
773 }
774
775 r = result;
776out:
777 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
778 amdgpu_virt_disable_access_debugfs(adev);
779 return r;
780}
781
782/**
783 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
784 *
785 * @f: open file handle
786 * @buf: User buffer to write data from
787 * @size: Number of bytes to write
788 * @pos: Offset to seek to
789 *
790 * The lower bits are the BYTE offset of the register to write. This
791 * allows writing multiple registers in a single call and having
792 * the returned size reflect that.
793 */
794static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
795 size_t size, loff_t *pos)
796{
797 struct amdgpu_device *adev = file_inode(f)->i_private;
798 ssize_t result = 0;
799 int r;
800
801 if (size & 0x3 || *pos & 0x3)
802 return -EINVAL;
803
804 if (!adev->reg.didt.wreg)
805 return -EOPNOTSUPP;
806
807 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
808 if (r < 0) {
809 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
810 return r;
811 }
812
813 r = amdgpu_virt_enable_access_debugfs(adev);
814 if (r < 0) {
815 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
816 return r;
817 }
818
819 while (size) {
820 uint32_t value;
821
822 r = get_user(value, (uint32_t *)buf);
823 if (r)
824 goto out;
825
826 WREG32_DIDT(*pos >> 2, value);
827
828 result += 4;
829 buf += 4;
830 *pos += 4;
831 size -= 4;
832 }
833
834 r = result;
835out:
836 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
837 amdgpu_virt_disable_access_debugfs(adev);
838 return r;
839}
840
841/**
842 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
843 *
844 * @f: open file handle
845 * @buf: User buffer to store read data in
846 * @size: Number of bytes to read
847 * @pos: Offset to seek to
848 *
849 * The lower bits are the BYTE offset of the register to read. This
850 * allows reading multiple registers in a single call and having
851 * the returned size reflect that.
852 */
853static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
854 size_t size, loff_t *pos)
855{
856 struct amdgpu_device *adev = file_inode(f)->i_private;
857 ssize_t result = 0;
858 int r;
859
860 if (!adev->reg.smc.rreg)
861 return -EOPNOTSUPP;
862
863 if (size & 0x3 || *pos & 0x3)
864 return -EINVAL;
865
866 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
867 if (r < 0) {
868 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
869 return r;
870 }
871
872 r = amdgpu_virt_enable_access_debugfs(adev);
873 if (r < 0) {
874 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
875 return r;
876 }
877
878 while (size) {
879 uint32_t value;
880
881 value = RREG32_SMC(*pos);
882 r = put_user(value, (uint32_t *)buf);
883 if (r)
884 goto out;
885
886 result += 4;
887 buf += 4;
888 *pos += 4;
889 size -= 4;
890 }
891
892 r = result;
893out:
894 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
895 amdgpu_virt_disable_access_debugfs(adev);
896 return r;
897}
898
899/**
900 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
901 *
902 * @f: open file handle
903 * @buf: User buffer to write data from
904 * @size: Number of bytes to write
905 * @pos: Offset to seek to
906 *
907 * The lower bits are the BYTE offset of the register to write. This
908 * allows writing multiple registers in a single call and having
909 * the returned size reflect that.
910 */
911static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
912 size_t size, loff_t *pos)
913{
914 struct amdgpu_device *adev = file_inode(f)->i_private;
915 ssize_t result = 0;
916 int r;
917
918 if (!adev->reg.smc.wreg)
919 return -EOPNOTSUPP;
920
921 if (size & 0x3 || *pos & 0x3)
922 return -EINVAL;
923
924 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
925 if (r < 0) {
926 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
927 return r;
928 }
929
930 r = amdgpu_virt_enable_access_debugfs(adev);
931 if (r < 0) {
932 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
933 return r;
934 }
935
936 while (size) {
937 uint32_t value;
938
939 r = get_user(value, (uint32_t *)buf);
940 if (r)
941 goto out;
942
943 WREG32_SMC(*pos, value);
944
945 result += 4;
946 buf += 4;
947 *pos += 4;
948 size -= 4;
949 }
950
951 r = result;
952out:
953 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
954 amdgpu_virt_disable_access_debugfs(adev);
955 return r;
956}
957
/**
 * amdgpu_debugfs_gca_config_read - Read from gfx config data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * This file is used to access configuration data in a somewhat
 * stable fashion.  The format is a series of DWORDs with the first
 * indicating which revision it is.  New content is appended to the
 * end so that older software can still read the data.
 */

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	/* only whole, dword-aligned dwords */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* 256 dwords leaves ample headroom; no_regs counts what is filled */
	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 5;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = lower_32_bits(adev->cg_flags);

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* rev==4 APU flag */
	config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;

	/* rev==5 PG/CG flag upper 32bit */
	/* first dword presumably reserves space for upper pg_flags — confirm */
	config[no_regs++] = 0;
	config[no_regs++] = upper_32_bits(adev->cg_flags);

	/* copy out, clamped to the table actually filled */
	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
1054
/**
 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset is treated as the BYTE address of one of the sensors
 * enumerated in amd/include/kgd_pp_interface.h under the
 * 'amd_pp_sensors' enumeration.  For instance to read the UVD VCLK
 * you would use the offset 3 * 4 = 12.
 */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	/* only whole, dword-aligned dwords */
	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	/* sensors are only available when DPM is up */
	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* valuesize is updated to the number of bytes the sensor produced */
	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	/* refuse requests larger than what the sensor returned */
	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		/* copy sensor values out one dword at a time */
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	/* on put_user failure the last r is returned instead of the count */
	return !r ? outsize : r;
}
1126
/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought changes which wave that the status data
 * will be returned for.  The bits are used as follows:
 *
 * Bits 0..6:	Byte offset into data
 * Bits 7..14:	SE selector
 * Bits 15..22:	SH/SA selector
 * Bits 23..30: CU/{WGP+SIMD} selector
 * Bits 31..36: WAVE ID selector
 * Bits 37..44: SIMD ID selector
 *
 * The returned data begins with one DWORD of version information
 * Followed by WAVE STATUS registers relevant to the GFX IP version
 * being used.  See gfx_v8_0_read_wave_data() for an example output.
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	/* only whole, dword-aligned dwords */
	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);

	/* x receives the number of dwords the IP callback produced */
	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x);

	/* restore broadcast selection before dropping the GRBM lock */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	/* x == 0 means no data (callback absent or produced nothing) */
	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	/* copy out, clamped to the x*4 bytes actually available */
	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
1216
/**
 * amdgpu_debugfs_gpr_read - Read wave gprs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos: Offset to seek to
 *
 * The offset being sought changes which wave that the status data
 * will be returned for. The bits are used as follows:
 *
 * Bits 0..11:  Byte offset into data
 * Bits 12..19: SE selector
 * Bits 20..27: SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector
 * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
 *
 * The return data comes from the SGPR or VGPR register bank for
 * the selected operational unit.
 */
1239static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
1240 size_t size, loff_t *pos)
1241{
1242 struct amdgpu_device *adev = f->f_inode->i_private;
1243 int r;
1244 ssize_t result = 0;
1245 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
1246
1247 if (size > 4096 || size & 3 || *pos & 3)
1248 return -EINVAL;
1249
1250 /* decode offset */
1251 offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
1252 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
1253 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
1254 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
1255 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
1256 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
1257 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
1258 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
1259
1260 data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
1261 if (!data)
1262 return -ENOMEM;
1263
1264 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1265 if (r < 0)
1266 goto err;
1267
1268 r = amdgpu_virt_enable_access_debugfs(adev);
1269 if (r < 0)
1270 goto err;
1271
1272 /* switch to the specific se/sh/cu */
1273 mutex_lock(&adev->grbm_idx_mutex);
1274 amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
1275
1276 if (bank == 0) {
1277 if (adev->gfx.funcs->read_wave_vgprs)
1278 adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data);
1279 } else {
1280 if (adev->gfx.funcs->read_wave_sgprs)
1281 adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data);
1282 }
1283
1284 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
1285 mutex_unlock(&adev->grbm_idx_mutex);
1286
1287 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1288
1289 while (size) {
1290 uint32_t value;
1291
1292 value = data[result >> 2];
1293 r = put_user(value, (uint32_t *)buf);
1294 if (r) {
1295 amdgpu_virt_disable_access_debugfs(adev);
1296 goto err;
1297 }
1298
1299 result += 4;
1300 buf += 4;
1301 size -= 4;
1302 }
1303
1304 kfree(data);
1305 amdgpu_virt_disable_access_debugfs(adev);
1306 return result;
1307
1308err:
1309 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1310 kfree(data);
1311 return r;
1312}
1313
1314/**
1315 * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
1316 *
1317 * @f: open file handle
1318 * @buf: User buffer to store read data in
1319 * @size: Number of bytes to read
1320 * @pos: Offset to seek to
1321 *
1322 * Read the last residency value logged. It doesn't auto update, one needs to
1323 * stop logging before getting the current value.
1324 */
1325static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
1326 size_t size, loff_t *pos)
1327{
1328 struct amdgpu_device *adev = file_inode(f)->i_private;
1329 ssize_t result = 0;
1330 int r;
1331
1332 if (size & 0x3 || *pos & 0x3)
1333 return -EINVAL;
1334
1335 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1336 if (r < 0) {
1337 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1338 return r;
1339 }
1340
1341 while (size) {
1342 uint32_t value;
1343
1344 r = amdgpu_get_gfx_off_residency(adev, &value);
1345 if (r)
1346 goto out;
1347
1348 r = put_user(value, (uint32_t *)buf);
1349 if (r)
1350 goto out;
1351
1352 result += 4;
1353 buf += 4;
1354 *pos += 4;
1355 size -= 4;
1356 }
1357
1358 r = result;
1359out:
1360 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1361
1362 return r;
1363}
1364
1365/**
1366 * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
1367 *
1368 * @f: open file handle
1369 * @buf: User buffer to write data from
1370 * @size: Number of bytes to write
1371 * @pos: Offset to seek to
1372 *
1373 * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop
1374 */
1375static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
1376 size_t size, loff_t *pos)
1377{
1378 struct amdgpu_device *adev = file_inode(f)->i_private;
1379 ssize_t result = 0;
1380 int r;
1381
1382 if (size & 0x3 || *pos & 0x3)
1383 return -EINVAL;
1384
1385 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1386 if (r < 0) {
1387 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1388 return r;
1389 }
1390
1391 while (size) {
1392 u32 value;
1393
1394 r = get_user(value, (uint32_t *)buf);
1395 if (r)
1396 goto out;
1397
1398 amdgpu_set_gfx_off_residency(adev, value ? true : false);
1399
1400 result += 4;
1401 buf += 4;
1402 *pos += 4;
1403 size -= 4;
1404 }
1405
1406 r = result;
1407out:
1408 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1409
1410 return r;
1411}
1412
1413
1414/**
1415 * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
1416 *
1417 * @f: open file handle
1418 * @buf: User buffer to store read data in
1419 * @size: Number of bytes to read
1420 * @pos: Offset to seek to
1421 */
1422static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
1423 size_t size, loff_t *pos)
1424{
1425 struct amdgpu_device *adev = file_inode(f)->i_private;
1426 ssize_t result = 0;
1427 int r;
1428
1429 if (size & 0x3 || *pos & 0x3)
1430 return -EINVAL;
1431
1432 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1433 if (r < 0) {
1434 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1435 return r;
1436 }
1437
1438 while (size) {
1439 u64 value = 0;
1440
1441 r = amdgpu_get_gfx_off_entrycount(adev, &value);
1442 if (r)
1443 goto out;
1444
1445 r = put_user(value, (u64 *)buf);
1446 if (r)
1447 goto out;
1448
1449 result += 4;
1450 buf += 4;
1451 *pos += 4;
1452 size -= 4;
1453 }
1454
1455 r = result;
1456out:
1457 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1458
1459 return r;
1460}
1461
1462/**
1463 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
1464 *
1465 * @f: open file handle
1466 * @buf: User buffer to write data from
1467 * @size: Number of bytes to write
1468 * @pos: Offset to seek to
1469 *
1470 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
1471 */
1472static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
1473 size_t size, loff_t *pos)
1474{
1475 struct amdgpu_device *adev = file_inode(f)->i_private;
1476 ssize_t result = 0;
1477 int r;
1478
1479 if (size & 0x3 || *pos & 0x3)
1480 return -EINVAL;
1481
1482 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1483 if (r < 0) {
1484 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1485 return r;
1486 }
1487
1488 while (size) {
1489 uint32_t value;
1490
1491 r = get_user(value, (uint32_t *)buf);
1492 if (r)
1493 goto out;
1494
1495 amdgpu_gfx_off_ctrl(adev, value ? true : false);
1496
1497 result += 4;
1498 buf += 4;
1499 *pos += 4;
1500 size -= 4;
1501 }
1502
1503 r = result;
1504out:
1505 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1506
1507 return r;
1508}
1509
1510
1511/**
1512 * amdgpu_debugfs_gfxoff_read - read gfxoff status
1513 *
1514 * @f: open file handle
1515 * @buf: User buffer to store read data in
1516 * @size: Number of bytes to read
1517 * @pos: Offset to seek to
1518 */
1519static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
1520 size_t size, loff_t *pos)
1521{
1522 struct amdgpu_device *adev = file_inode(f)->i_private;
1523 ssize_t result = 0;
1524 int r;
1525
1526 if (size & 0x3 || *pos & 0x3)
1527 return -EINVAL;
1528
1529 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1530 if (r < 0) {
1531 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1532 return r;
1533 }
1534
1535 while (size) {
1536 u32 value = adev->gfx.gfx_off_state;
1537
1538 r = put_user(value, (u32 *)buf);
1539 if (r)
1540 goto out;
1541
1542 result += 4;
1543 buf += 4;
1544 *pos += 4;
1545 size -= 4;
1546 }
1547
1548 r = result;
1549out:
1550 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1551
1552 return r;
1553}
1554
1555static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
1556 size_t size, loff_t *pos)
1557{
1558 struct amdgpu_device *adev = file_inode(f)->i_private;
1559 ssize_t result = 0;
1560 int r;
1561
1562 if (size & 0x3 || *pos & 0x3)
1563 return -EINVAL;
1564
1565 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1566 if (r < 0) {
1567 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1568 return r;
1569 }
1570
1571 while (size) {
1572 u32 value;
1573
1574 r = amdgpu_get_gfx_off_status(adev, &value);
1575 if (r)
1576 goto out;
1577
1578 r = put_user(value, (u32 *)buf);
1579 if (r)
1580 goto out;
1581
1582 result += 4;
1583 buf += 4;
1584 *pos += 4;
1585 size -= 4;
1586 }
1587
1588 r = result;
1589out:
1590 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1591
1592 return r;
1593}
1594
/* file_operations tables for the register/wave/gfxoff debugfs interfaces */
static const struct file_operations amdgpu_debugfs_regs2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
	.read = amdgpu_debugfs_regs2_read,
	.write = amdgpu_debugfs_regs2_write,
	.open = amdgpu_debugfs_regs2_open,
	.release = amdgpu_debugfs_regs2_release,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gprwave_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl,
	.read = amdgpu_debugfs_gprwave_read,
	.open = amdgpu_debugfs_gprwave_open,
	.release = amdgpu_debugfs_gprwave_release,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie64_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie64_read,
	.write = amdgpu_debugfs_regs_pcie64_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_status_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_count_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_residency_read,
	.write = amdgpu_debugfs_gfxoff_residency_write,
	.llseek = default_llseek
};

/* fops table and the matching file names; the two arrays must stay
 * index-aligned — amdgpu_debugfs_regs_init() pairs them by index
 */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs2_fops,
	&amdgpu_debugfs_gprwave_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_pcie64_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
	&amdgpu_debugfs_gfxoff_status_fops,
	&amdgpu_debugfs_gfxoff_count_fops,
	&amdgpu_debugfs_gfxoff_residency_fops,
};

static const char * const debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs2",
	"amdgpu_gprwave",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_pcie64",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
	"amdgpu_gfxoff_status",
	"amdgpu_gfxoff_count",
	"amdgpu_gfxoff_residency",
};
1729
1730/**
1731 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
1732 * register access.
1733 *
1734 * @adev: The device to attach the debugfs entries to
1735 */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	/* create one file per entry; names and fops are index-aligned */
	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | 0400, root,
					  adev, debugfs_regs[i]);
		/* the first entry (amdgpu_regs) advertises the MMIO
		 * aperture size as its file size
		 */
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
	}

	return 0;
}
1752
1753static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
1754{
1755 struct amdgpu_device *adev = m->private;
1756 struct drm_device *dev = adev_to_drm(adev);
1757 int r = 0, i;
1758
1759 r = pm_runtime_get_sync(dev->dev);
1760 if (r < 0) {
1761 pm_runtime_put_autosuspend(dev->dev);
1762 return r;
1763 }
1764
1765 /* Avoid accidently unparking the sched thread during GPU reset */
1766 r = down_write_killable(&adev->reset_domain->sem);
1767 if (r)
1768 return r;
1769
1770 /* hold on the scheduler */
1771 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1772 struct amdgpu_ring *ring = adev->rings[i];
1773
1774 if (!amdgpu_ring_sched_ready(ring))
1775 continue;
1776 drm_sched_wqueue_stop(&ring->sched);
1777 }
1778
1779 seq_puts(m, "run ib test:\n");
1780 r = amdgpu_ib_ring_tests(adev);
1781 if (r)
1782 seq_printf(m, "ib ring tests failed (%d).\n", r);
1783 else
1784 seq_puts(m, "ib ring tests passed.\n");
1785
1786 /* go on the scheduler */
1787 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1788 struct amdgpu_ring *ring = adev->rings[i];
1789
1790 if (!amdgpu_ring_sched_ready(ring))
1791 continue;
1792 drm_sched_wqueue_start(&ring->sched);
1793 }
1794
1795 up_write(&adev->reset_domain->sem);
1796
1797 pm_runtime_put_autosuspend(dev->dev);
1798
1799 return 0;
1800}
1801
1802static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
1803{
1804 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1805 struct drm_device *dev = adev_to_drm(adev);
1806 int r;
1807
1808 r = pm_runtime_get_sync(dev->dev);
1809 if (r < 0) {
1810 pm_runtime_put_autosuspend(dev->dev);
1811 return r;
1812 }
1813
1814 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
1815
1816 pm_runtime_put_autosuspend(dev->dev);
1817
1818 return 0;
1819}
1820
1821
1822static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
1823{
1824 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1825 struct drm_device *dev = adev_to_drm(adev);
1826 int r;
1827
1828 r = pm_runtime_get_sync(dev->dev);
1829 if (r < 0) {
1830 pm_runtime_put_autosuspend(dev->dev);
1831 return r;
1832 }
1833
1834 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
1835
1836 pm_runtime_put_autosuspend(dev->dev);
1837
1838 return 0;
1839}
1840
1841static int amdgpu_debugfs_benchmark(void *data, u64 val)
1842{
1843 struct amdgpu_device *adev = (struct amdgpu_device *)data;
1844 struct drm_device *dev = adev_to_drm(adev);
1845 int r;
1846
1847 r = pm_runtime_get_sync(dev->dev);
1848 if (r < 0) {
1849 pm_runtime_put_autosuspend(dev->dev);
1850 return r;
1851 }
1852
1853 r = amdgpu_benchmark(adev, val);
1854
1855 pm_runtime_put_autosuspend(dev->dev);
1856
1857 return r;
1858}
1859
/* Dump per-process VM/BO info for every open DRM file of this device. */
static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_file *file;
	int r;

	/* filelist_mutex protects dev->filelist against open/close races */
	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct amdgpu_fpriv *fpriv = file->driver_priv;
		struct amdgpu_vm *vm = &fpriv->vm;
		struct amdgpu_task_info *ti;

		/* task info may be gone already; skip the header then */
		ti = amdgpu_vm_get_task_info_vm(vm);
		if (ti) {
			seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
			amdgpu_vm_put_task_info(ti);
		}

		/* reserve the root PD so the BO list is stable while dumping */
		r = amdgpu_bo_reserve(vm->root.bo, true);
		if (r)
			break;
		amdgpu_debugfs_vm_bo_info(vm, m);
		amdgpu_bo_unreserve(vm->root.bo);
	}

	mutex_unlock(&dev->filelist_mutex);

	return r;
}
1893
/* Generate the fops wrappers for the show/attribute style debugfs files */
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
			 NULL, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
			 "%lld\n");
1902
/* Detach all fences between the last signaled sequence number and the
 * last emitted one from the fence driver, stashing them in @fences
 * (indexed by their slot in the ring's fence array) so they can be
 * signaled later by amdgpu_ib_preempt_signal_fences().
 */
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	/* map sequence numbers to slots in the power-of-two fence array */
	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* take the fence out of the driver's array */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}
1932
/* Signal and release every non-NULL fence previously stashed by
 * amdgpu_ib_preempt_fences_swap().
 */
static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int idx;

	for (idx = 0; idx < length; idx++) {
		struct dma_fence *f = fences[idx];

		if (f) {
			dma_fence_signal(f);
			dma_fence_put(f);
		}
	}
}
1947
/* Resubmit every job still pending on the scheduler by invoking its
 * run_job callback again; used after a preemption to replay unfinished
 * work. The extra fence reference from run_job is dropped immediately.
 */
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	/* job_list_lock keeps pending_list stable while we walk it */
	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->pending_list, list) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}
1960
/* After a GFX preemption, walk the scheduler's pending list: free jobs
 * that already finished, and flag the job that was interrupted mid-IB
 * with AMDGPU_IB_PREEMPTED so its resubmission can be handled specially.
 */
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	/* only the GFX ring records a preempted sequence number */
	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	/* the firmware writes the seq of the preempted job at cpu_addr + 2 */
	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	/* NOTE: fence is only valid when preempted == true; the short-circuit
	 * in the comparison below never reads it otherwise
	 */
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->list);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && (&job->hw_fence->base) == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
2000
/* debugfs handler: preempt the IB currently running on ring index @val.
 * Parks the scheduler, triggers the preemption, and if a job was in
 * flight, swaps out its fences, force-completes the ring, resubmits the
 * unfinished jobs and finally signals the old fences.
 */
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!amdgpu_ring_sched_ready(ring) ||
	    !ring->funcs->preempt_ib)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	/* one slot per entry in the ring's fence array */
	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidently unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_domain->sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	drm_sched_wqueue_stop(&ring->sched);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		drm_warn(adev_to_drm(adev), "failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	/* a gap between last signaled and last emitted means a job was
	 * actually interrupted by the preemption
	 */
	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		drm_info(adev_to_drm(adev), "ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	drm_sched_wqueue_start(&ring->sched);

	up_read(&adev->reset_domain->sem);

pro_end:
	kfree(fences);

	return r;
}
2075
2076static int amdgpu_debugfs_sclk_set(void *data, u64 val)
2077{
2078 int ret = 0;
2079 uint32_t max_freq, min_freq;
2080 struct amdgpu_device *adev = (struct amdgpu_device *)data;
2081
2082 if (amdgpu_sriov_multi_vf_mode(adev))
2083 return -EINVAL;
2084
2085 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2086 if (ret < 0) {
2087 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2088 return ret;
2089 }
2090
2091 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
2092 if (ret == -EOPNOTSUPP) {
2093 ret = 0;
2094 goto out;
2095 }
2096 if (ret || val > max_freq || val < min_freq) {
2097 ret = -EINVAL;
2098 goto out;
2099 }
2100
2101 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
2102 if (ret)
2103 ret = -EINVAL;
2104
2105out:
2106 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2107
2108 return ret;
2109}
2110
/* Write-only attribute files used by amdgpu_debugfs_init() below */
DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
			 amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
			 amdgpu_debugfs_sclk_set, "%llu\n");
2116
2117int amdgpu_debugfs_init(struct amdgpu_device *adev)
2118{
2119 struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
2120 struct dentry *ent;
2121 int r, i;
2122
2123 if (!debugfs_initialized())
2124 return 0;
2125
2126 debugfs_create_x32("amdgpu_smu_debug", 0600, root,
2127 &adev->pm.smu_debug_mask);
2128
2129 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
2130 &fops_ib_preempt);
2131 if (IS_ERR(ent)) {
2132 drm_err(adev_to_drm(adev),
2133 "unable to create amdgpu_preempt_ib debugsfs file\n");
2134 return PTR_ERR(ent);
2135 }
2136
2137 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
2138 &fops_sclk_set);
2139 if (IS_ERR(ent)) {
2140 drm_err(adev_to_drm(adev),
2141 "unable to create amdgpu_set_sclk debugsfs file\n");
2142 return PTR_ERR(ent);
2143 }
2144
2145 /* Register debugfs entries for amdgpu_ttm */
2146 amdgpu_ttm_debugfs_init(adev);
2147 amdgpu_debugfs_pm_init(adev);
2148 amdgpu_debugfs_sa_init(adev);
2149 amdgpu_debugfs_fence_init(adev);
2150 amdgpu_debugfs_gem_init(adev);
2151
2152 r = amdgpu_debugfs_regs_init(adev);
2153 if (r)
2154 drm_err(adev_to_drm(adev), "registering register debugfs failed (%d).\n", r);
2155
2156 amdgpu_debugfs_firmware_init(adev);
2157 amdgpu_ta_if_debugfs_init(adev);
2158
2159 amdgpu_debugfs_mes_event_log_init(adev);
2160
2161#if defined(CONFIG_DRM_AMD_DC)
2162 if (adev->dc_enabled)
2163 dtn_debugfs_init(adev);
2164#endif
2165
2166 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2167 struct amdgpu_ring *ring = adev->rings[i];
2168
2169 if (!ring)
2170 continue;
2171
2172 amdgpu_debugfs_ring_init(adev, ring);
2173 }
2174
2175 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2176 if (!amdgpu_vcnfw_log)
2177 break;
2178
2179 if (adev->vcn.harvest_config & (1 << i))
2180 continue;
2181
2182 amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
2183 }
2184
2185 if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
2186 amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
2187
2188 amdgpu_debugfs_vcn_sched_mask_init(adev);
2189 amdgpu_debugfs_jpeg_sched_mask_init(adev);
2190 amdgpu_debugfs_gfx_sched_mask_init(adev);
2191 amdgpu_debugfs_compute_sched_mask_init(adev);
2192 amdgpu_debugfs_sdma_sched_mask_init(adev);
2193
2194 amdgpu_ras_debugfs_create_all(adev);
2195 amdgpu_rap_debugfs_init(adev);
2196 amdgpu_securedisplay_debugfs_init(adev);
2197 amdgpu_fw_attestation_debugfs_init(adev);
2198 amdgpu_psp_debugfs_init(adev);
2199
2200 debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
2201 &amdgpu_evict_vram_fops);
2202 debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
2203 &amdgpu_evict_gtt_fops);
2204 debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
2205 &amdgpu_debugfs_test_ib_fops);
2206 debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
2207 &amdgpu_debugfs_vm_info_fops);
2208 debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
2209 &amdgpu_benchmark_fops);
2210
2211 adev->debugfs_vbios_blob.data = adev->bios;
2212 adev->debugfs_vbios_blob.size = adev->bios_size;
2213 debugfs_create_blob("amdgpu_vbios", 0444, root,
2214 &adev->debugfs_vbios_blob);
2215
2216 if (adev->discovery.debugfs_blob.size)
2217 debugfs_create_blob("amdgpu_discovery", 0444, root,
2218 &adev->discovery.debugfs_blob);
2219
2220 return 0;
2221}
2222
/* Dump page-table parameters (PD address and VM manager geometry) for
 * the VM of the DRM file this debugfs node belongs to.
 */
static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
{
	struct drm_file *file;
	struct amdgpu_fpriv *fpriv;
	struct amdgpu_bo *root_bo;
	struct amdgpu_device *adev;
	int r;

	file = m->private;
	if (!file)
		return -EINVAL;

	adev = drm_to_adev(file->minor->dev);
	fpriv = file->driver_priv;
	if (!fpriv || !fpriv->vm.root.bo)
		return -ENODEV;

	/* hold a reference so the root PD can't be freed while reserved */
	root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
	r = amdgpu_bo_reserve(root_bo, true);
	if (r) {
		amdgpu_bo_unref(&root_bo);
		return -EINVAL;
	}

	seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
	seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
	seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
	seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
	seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);

	amdgpu_bo_unreserve(root_bo);
	amdgpu_bo_unref(&root_bo);

	return 0;
}
2258
/* seq_file plumbing for the vm_pagetable_info debugfs node */
static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, amdgpu_pt_info_read, inode->i_private);
}

static const struct file_operations amdgpu_pt_info_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_pt_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2271
/* Dump basic MQD (memory queue descriptor) info for a user-mode queue. */
static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
{
	struct amdgpu_usermode_queue *queue = m->private;
	struct amdgpu_bo *bo;
	int r;

	if (!queue || !queue->mqd.obj)
		return -EINVAL;

	/* hold a reference so the MQD BO can't be freed while reserved */
	bo = amdgpu_bo_ref(queue->mqd.obj);
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		amdgpu_bo_unref(&bo);
		return -EINVAL;
	}

	seq_printf(m, "queue_type: %d\n", queue->queue_type);
	seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));

	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return 0;
}
2296
/* seq_file plumbing for the per-queue mqd_info debugfs node */
static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, amdgpu_mqd_info_read, inode->i_private);
}

static const struct file_operations amdgpu_mqd_info_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_mqd_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2309
2310void amdgpu_debugfs_userq_init(struct drm_file *file, struct amdgpu_usermode_queue *queue, int qid)
2311{
2312 char queue_name[32];
2313
2314 scnprintf(queue_name, sizeof(queue_name), "queue_%d", qid);
2315 queue->debugfs_queue = debugfs_create_dir(queue_name, file->debugfs_client);
2316 debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
2317}
2318
/* Create the per-client vm_pagetable_info debugfs file. */
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
	debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
			    &amdgpu_pt_info_fops);
}
2324
#else
/* Stubs used when CONFIG_DEBUG_FS is disabled: all debugfs registration
 * becomes a no-op.
 */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
}
void amdgpu_debugfs_userq_init(struct drm_file *file,
			       struct amdgpu_usermode_queue *queue,
			       int qid)
{
}
#endif