/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        return __f;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @af: amdgpu fence input
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 */
void amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
                       unsigned int flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;

        fence = &af->base;
        af->ring = ring;

        seq = ++ring->fence_drv.sync_seq;
        dma_fence_init(fence, &amdgpu_fence_ops,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx, seq);

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);

        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        /*
                         * dma_fence_wait(old, false) is not interruptible.
                         * It will not return an error in this case.
                         * So we can safely ignore the return value.
                         */
                        dma_fence_wait(old, false);
                        dma_fence_put(old);
                }
        }

        to_amdgpu_fence(fence)->start_timestamp = ktime_get();

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(fence));
}

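/*
 * Usage sketch (illustrative only, not taken from the driver): a caller that
 * owns a standalone amdgpu_fence allocation reserves ring space, emits the
 * fence and then waits on the underlying dma_fence.  "num_dw" is a
 * hypothetical placeholder and error handling is elided:
 *
 *      struct amdgpu_fence *af = kzalloc(sizeof(*af), GFP_KERNEL);
 *
 *      if (af && !amdgpu_ring_alloc(ring, num_dw)) {
 *              amdgpu_fence_emit(ring, af, 0);
 *              amdgpu_ring_commit(ring);
 *              dma_fence_wait(&af->base, false);
 *              dma_fence_put(&af->base);
 *      }
 */
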
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, a negative error code on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout)
{
        uint32_t seq;
        signed long r;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        r = amdgpu_fence_wait_polling(ring,
                                      seq - ring->fence_drv.num_fences_mask,
                                      timeout);
        if (r < 1)
                return -ETIMEDOUT;

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}

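/*
 * Usage sketch (illustrative only): the polling variants exist for contexts
 * that cannot sleep, such as register access through the KIQ under SR-IOV.
 * A simplified, hypothetical caller could look like this; real callers also
 * reserve and commit ring space around the fence emission, and "timeout_us"
 * is a placeholder:
 *
 *      uint32_t seq;
 *
 *      if (!amdgpu_fence_emit_polling(ring, &seq, timeout_us))
 *              amdgpu_fence_wait_polling(ring, seq, timeout_us);
 */
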
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;

        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (timer_delete(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                dma_fence_signal(fence);
                dma_fence_put(fence);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);

        return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = timer_container_of(ring, t,
                                                      fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                dev_warn(ring->adev->dev,
                         "Fence fallback timer expired on ring %s\n",
                         ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the given sequence number has signaled on the requested
 * ring (all asics).
 * Returns the remaining time if the sequence number signaled before the
 * timeout expired, 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
                udelay(2);
                timeout -= 2;
        }
        return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
 * @ring: ring the fence is associated with
 *
 * Find the earliest fence that has not yet signaled and return the time
 * delta between when it was emitted and now.
 */
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct dma_fence *fence;
        uint32_t last_seq, sync_seq;

        last_seq = atomic_read(&ring->fence_drv.last_seq);
        sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
        if (last_seq == sync_seq)
                return 0;

        ++last_seq;
        last_seq &= drv->num_fences_mask;
        fence = drv->fences[last_seq];
        if (!fence)
                return 0;

        return ktime_us_delta(ktime_get(),
                              to_amdgpu_fence(fence)->start_timestamp);
}

/**
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 *
 * This function is called when the fence and its related IB are about to be
 * resubmitted to the GPU in the MCBP scenario.  In that case there is no
 * race with amdgpu_fence_process() modifying the same fence.
 */
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct dma_fence *fence;

        seq &= drv->num_fences_mask;
        fence = drv->fences[seq];
        if (!fence)
                return;

        to_amdgpu_fence(fence)->start_timestamp = timestamp;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned int irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
                ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
                      ring->name, ring->fence_drv.gpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_sw_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (!adev)
                return -EINVAL;

        if (!is_power_of_2(ring->num_hw_submission))
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

        ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);

        if (!ring->fence_drv.fences)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
        return 0;
}

/**
 * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
 * fence driver interrupts need to be restored.
 *
 * @ring: ring to be checked
 *
 * Interrupts for rings that belong to GFX IP don't need to be restored
 * when the target power state is s0ix.
 *
 * Return true if the interrupts need to be restored, false otherwise.
 */
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool is_gfx_power_domain = false;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_SDMA:
                /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
                if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
                    IP_VERSION(5, 0, 0))
                        is_gfx_power_domain = true;
                break;
        case AMDGPU_RING_TYPE_GFX:
        case AMDGPU_RING_TYPE_COMPUTE:
        case AMDGPU_RING_TYPE_KIQ:
        case AMDGPU_RING_TYPE_MES:
                is_gfx_power_domain = true;
                break;
        default:
                break;
        }

        return !(adev->in_s0ix && is_gfx_power_domain);
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(adev_to_drm(adev)))
                        r = amdgpu_fence_wait_empty(ring);
                else
                        r = -ENODEV;
                /* no need to trigger GPU reset as we are unloading */
                if (r)
                        amdgpu_fence_driver_force_completion(ring);

                if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
                    ring->fence_drv.irq_src &&
                    amdgpu_fence_need_ring_interrupt_restore(ring))
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);

                timer_delete_sync(&ring->fence_drv.fallback_timer);
        }
}

/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
                        continue;

                if (stop)
                        disable_irq(adev->irq.irq);
                else
                        enable_irq(adev->irq.irq);
        }
}

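/**
 * amdgpu_fence_driver_sw_fini - free fence driver resources for all rings
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the scheduler and release any fences still held for every
 * initialized ring, then free the per-ring fence arrays (all asics).
 */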
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
        unsigned int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /*
                 * Notice we check for sched.ops since there's some
                 * override on the meaning of sched.ready by amdgpu.
                 * The natural check would be sched.ready, which is
                 * set as drm_sched_init() finishes...
                 */
                if (ring->sched.ops)
                        drm_sched_fini(&ring->sched);

                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                if (ring->fence_drv.irq_src &&
                    amdgpu_fence_need_ring_interrupt_restore(ring))
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_set_error - set error code on fences
 * @ring: the ring which contains the fences
 * @error: the error code to set
 *
 * Set an error code to all the fences pending on the ring.
 */
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        unsigned long flags;

        spin_lock_irqsave(&drv->lock, flags);
        for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(drv->fences[i],
                                                  lockdep_is_held(&drv->lock));
                if (fence && !dma_fence_is_signaled_locked(fence))
                        dma_fence_set_error(fence, error);
        }
        spin_unlock_irqrestore(&drv->lock, flags);
}

/**
 * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
 *
 * @ring: ring whose fences should be force-completed
 *
 * Set all pending fences to -ECANCELED, write the latest sequence number to
 * the fence memory and process the ring so that everything signals.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_driver_set_error(ring, -ECANCELED);
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Kernel queue reset handling
 *
 * The driver can reset individual queues for most engines, but those queues
 * may contain work from multiple contexts.  Resetting the queue will lose
 * all of that state.  In order to minimize the collateral damage, the driver
 * saves the ring contents which are not associated with the guilty context
 * prior to resetting the queue.  After resetting the queue, the queue
 * contents from the other contexts are re-emitted to the ring so that they
 * can be processed by the engine.  To handle this, we save the queue's write
 * pointer (wptr) in the fences associated with each context.  If we get a
 * queue timeout, we can then use the wptrs from the fences to determine
 * which data needs to be saved out of the queue's ring buffer.
 */

/**
 * amdgpu_ring_set_fence_errors_and_reemit - set dma_fence errors and re-emit
 *
 * @ring: the ring to operate on
 * @guilty_fence: the fence of the guilty context
 *
 * Mark the unprocessed fences with the appropriate error code and re-emit
 * the backed-up ring contents, skipping the IBs of the guilty context.
 */
void amdgpu_ring_set_fence_errors_and_reemit(struct amdgpu_ring *ring,
                                             struct amdgpu_fence *guilty_fence)
{
        struct dma_fence *unprocessed;
        struct dma_fence __rcu **ptr;
        struct amdgpu_fence *fence;
        unsigned long flags;
        u32 seq, last_seq;
        unsigned int i;
        bool is_guilty_fence;
        bool is_guilty_context;

        last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
        seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;

        ring->reemit = true;
        amdgpu_ring_alloc(ring, ring->ring_backup_entries_to_copy);
        spin_lock_irqsave(&ring->fence_drv.lock, flags);
        do {
                last_seq++;
                last_seq &= ring->fence_drv.num_fences_mask;

                ptr = &ring->fence_drv.fences[last_seq];
                rcu_read_lock();
                unprocessed = rcu_dereference(*ptr);

                if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
                        fence = container_of(unprocessed, struct amdgpu_fence, base);
                        is_guilty_fence = fence == guilty_fence;
                        is_guilty_context = fence->context == guilty_fence->context;

                        /* mark all fences from the guilty context with an error */
                        if (is_guilty_fence)
                                dma_fence_set_error(&fence->base, -ETIME);
                        else if (is_guilty_context)
                                dma_fence_set_error(&fence->base, -ECANCELED);

                        /* reemit the packet stream and update wptrs */
                        fence->ib_wptr = ring->wptr;
                        for (i = 0; i < fence->ib_dw_size; i++) {
                                /* Skip the IB(s) for the guilty context. */
                                if (is_guilty_context &&
                                    i >= fence->skip_ib_dw_start_offset &&
                                    i < fence->skip_ib_dw_end_offset)
                                        amdgpu_ring_write(ring, ring->funcs->nop);
                                else
                                        amdgpu_ring_write(ring,
                                                          ring->ring_backup[fence->backup_idx + i]);
                        }
                }
                rcu_read_unlock();
        } while (last_seq != seq);
        spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
        amdgpu_ring_commit(ring);
        ring->reemit = false;
}

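/**
 * amdgpu_ring_backup_unprocessed_command - back up one fence's ring contents
 *
 * @ring: the ring to operate on
 * @af: the unprocessed fence whose ring contents should be saved
 *
 * Copy the portion of the ring buffer that belongs to @af into the ring's
 * backup buffer and remember where it was stored so it can be re-emitted
 * later.
 */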
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
                                                   struct amdgpu_fence *af)
{
        unsigned int first_idx = af->ib_wptr & ring->buf_mask;
        unsigned int dw_size = af->ib_dw_size;
        unsigned int i;

        af->backup_idx = ring->ring_backup_entries_to_copy;
        /* Backup the contents of the ring buffer. */
        for (i = first_idx; dw_size > 0; ++i, i &= ring->buf_mask, --dw_size)
                ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
}

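/**
 * amdgpu_ring_backup_unprocessed_commands - back up all unprocessed ring contents
 *
 * @ring: the ring to operate on
 * @guilty_fence: the fence of the guilty context
 *
 * Walk the fences between the last signaled and the last emitted sequence
 * number and back up the ring contents of every fence that has not yet
 * signaled, so they can be re-emitted after a queue reset.
 */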
void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
                                             struct amdgpu_fence *guilty_fence)
{
        struct dma_fence *unprocessed;
        struct dma_fence __rcu **ptr;
        struct amdgpu_fence *fence;
        u32 seq, last_seq;

        last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
        seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
        ring->ring_backup_entries_to_copy = 0;

        do {
                last_seq++;
                last_seq &= ring->fence_drv.num_fences_mask;

                ptr = &ring->fence_drv.fences[last_seq];
                rcu_read_lock();
                unprocessed = rcu_dereference(*ptr);

                if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
                        fence = container_of(unprocessed, struct amdgpu_fence, base);

                        amdgpu_ring_backup_unprocessed_command(ring, fence);
                }
                rcu_read_unlock();
        } while (last_seq != seq);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        return (const char *)to_amdgpu_fence(f)->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held when signaling is
 * requested on @f.  It arms the fallback timer, if it is not already
 * pending, so the fence is processed even if no interrupt arrives.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        /* free the amdgpu_fence allocation */
        kfree(to_amdgpu_fence(f));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = m->private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both 0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int gpu_recover_get(void *data, u64 *val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        struct drm_device *dev = adev_to_drm(adev);
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
                flush_work(&adev->reset_work);

        *val = atomic_read(&adev->reset_domain->reset_res);

        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
                         "%lld\n");

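/*
 * Work item behind the amdgpu_gpu_recover debugfs file: request a full,
 * user-initiated GPU reset (skipping the coredump) through the common
 * recovery path.
 */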
static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);

        struct amdgpu_reset_context reset_context;

        memset(&reset_context, 0, sizeof(reset_context));

        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        reset_context.src = AMDGPU_RESET_SRC_USER;
        set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
        set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);

        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

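/**
 * amdgpu_debugfs_fence_init - register the fence debugfs files
 *
 * @adev: amdgpu device pointer
 *
 * Create the amdgpu_fence_info debugfs file and, when not running as an
 * SR-IOV VF, the amdgpu_gpu_recover file for manually triggering a GPU
 * reset.
 */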
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
                            &amdgpu_debugfs_fence_info_fops);

        if (!amdgpu_sriov_vf(adev)) {
                INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
                debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
                                    &amdgpu_debugfs_gpu_recover_fops);
        }
#endif
}