// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
//#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2Mhz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

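/*
 * Power up the GPU: enable the power rails, core clocks and the AXI
 * interface clock, resume devfreq, and flag that the hardware must be
 * re-initialized before the next submit (see msm_gpu_hw_init()).
 */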
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

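/*
 * Power down the GPU in the reverse order: suspend devfreq, then disable
 * the AXI clock, the core clocks and finally the power rails.
 */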
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
		struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

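/*
 * (Re)initialize the GPU hardware if a preceding power-up flagged that it
 * is needed.  The IRQ is masked around the hw_init() callback.  Caller
 * must hold gpu->lock.
 */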
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
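/*
 * devcoredump read callback: format the most recently captured crash state
 * through a drm_coredump_printer, honoring the requested offset and count.
 * Returns the number of bytes written into the buffer.
 */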
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %ptSp\n", &state->time);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

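/*
 * Record a single BO in the crash state: always capture its iova, size,
 * flags and name; if 'full' is set, also snapshot a window (offset/size)
 * of its contents so it can be included in the devcoredump.
 */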
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova,
		bool full, size_t offset, size_t size)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* Don't record write only objects */
	state_bo->size = size;
	state_bo->flags = msm_obj->flags;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));

	memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));

	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		ptr = msm_gem_get_vaddr_active(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr + offset, size);
		msm_gem_put_vaddr_locked(obj);
	}
out:
	state->nr_bos++;
}

static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
{
	extern bool rd_full;

	if (msm_context_is_vmbind(submit->queue->ctx)) {
		struct drm_exec exec;
		struct drm_gpuva *vma;
		unsigned cnt = 0;

		drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
		drm_exec_until_all_locked(&exec) {
			cnt = 0;

			drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
			drm_exec_retry_on_contention(&exec);

			drm_gpuvm_for_each_va (vma, submit->vm) {
				if (!vma->gem.obj)
					continue;

				cnt++;
				drm_exec_lock_obj(&exec, vma->gem.obj);
				drm_exec_retry_on_contention(&exec);
			}
		}

		drm_gpuvm_for_each_va (vma, submit->vm)
			cnt++;

		state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		if (state->bos)
			drm_gpuvm_for_each_va(vma, submit->vm) {
				bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);

				/* Skip MAP_NULL/PRR VMAs: */
				if (!vma->gem.obj)
					continue;

				msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
						dump, vma->gem.offset, vma->va.range);
			}

		drm_exec_fini(&exec);
	} else {
		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (int i = 0; state->bos && i < submit->nr_bos; i++) {
			struct drm_gem_object *obj = submit->bos[i].obj;
			bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

			msm_gem_lock(obj);
			msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
					dump, 0, obj->size);
			msm_gem_unlock(obj);
		}
	}
}

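/*
 * Snapshot the VM's MMU operation log ring into the crash state, copying
 * entries oldest-first.  Handles the case where the log has not wrapped
 * around yet.
 */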
static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm)
{
	uint32_t vm_log_len = (1 << vm->log_shift);
	uint32_t vm_log_mask = vm_log_len - 1;
	int first;

	/* Bail if no log, or empty log: */
	if (!vm->log || !vm->log[0].op)
		return;

	mutex_lock(&vm->mmu_lock);

	/*
	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
	 * first, entry (other than the special case handled below where the
	 * log hasn't wrapped around yet)
	 */
	first = vm->log_idx;

	if (!vm->log[first].op) {
		/*
		 * If the next log entry has not been written yet, then only
		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
		 * yet)
		 */
		state->nr_vm_logs = MAX(0, first - 1);
		first = 0;
	} else {
		state->nr_vm_logs = vm_log_len;
	}

	state->vm_logs = kmalloc_objs(vm->log[0], state->nr_vm_logs);
	if (!state->vm_logs) {
		state->nr_vm_logs = 0;
	}

	for (int i = 0; i < state->nr_vm_logs; i++) {
		int idx = (i + first) & vm_log_mask;

		state->vm_logs[i] = vm->log[idx];
	}

	mutex_unlock(&vm->mmu_lock);
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	if (fault_info)
		state->fault_info = *fault_info;

	if (submit && state->fault_info.ttbr0) {
		struct msm_gpu_fault_info *info = &state->fault_info;
		struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

		msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
				&info->asid);
		msm_iommu_pagetable_walk(mmu, info->iova, info->ptes);
	}

	if (submit) {
		crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
		crashstate_get_bos(state, submit);
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
		char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

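/*
 * Look up the still-queued submit whose sequence number matches the given
 * fence value.  Returns NULL if no such submit remains on the ring.
 */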
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_context *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}

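/*
 * GPU recovery worker: identify the hung submit, capture crash state, roll
 * the completed fences past the offending submit, reset the GPU via the
 * recover() callback, and then replay any remaining queued submits.
 */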
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	unsigned int noreclaim_flag;
	struct task_struct *task;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);

	/*
	 * If the submit retired while we were waiting for the worker to run,
	 * or waiting to acquire the gpu lock, then nothing more to do.
	 */
	if (!submit)
		goto out_unlock;

	/* Increment the fault counts */
	submit->queue->faults++;

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		gpu->global_faults++;
	else {
		struct msm_gem_vm *vm = to_msm_vm(submit->vm);

		vm->faults++;

		/*
		 * If userspace has opted-in to VM_BIND (and therefore userspace
		 * management of the VM), faults mark the VM as unusable. This
		 * matches vulkan expectations (vulkan is the main target for
		 * VM_BIND).
		 */
		if (!vm->managed)
			msm_gem_vm_unusable(submit->vm);
	}

	noreclaim_flag = memalloc_noreclaim_save();

	get_comm_cmdline(submit, &comm, &cmd);

	if (comm && cmd) {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
			gpu->name, comm, cmd);

		msm_rd_dump_submit(priv->hangrd, submit,
			"offending task: %s (%s)", comm, cmd);
	} else {
		DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);

		msm_rd_dump_submit(priv->hangrd, submit, NULL);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);

	memalloc_noreclaim_restore(noreclaim_flag);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	/* retire completed submits, plus the one that hung: */
	retire_submits(gpu);

	gpu->funcs->recover(gpu);

	/*
	 * Replay all remaining submits starting with highest priority
	 * ring
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		unsigned long flags;

		spin_lock_irqsave(&ring->submit_lock, flags);
		list_for_each_entry(submit, &ring->submits, node) {
			/*
			 * If the submit uses an unusable vm make sure
			 * we don't actually run it
			 */
			if (to_msm_vm(submit->vm)->unusable)
				submit->nr_cmds = 0;
			gpu->funcs->submit(gpu, submit);
		}
		spin_unlock_irqrestore(&ring->submit_lock, flags);
	}

	pm_runtime_put(&gpu->pdev->dev);

out_unlock:
	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

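/*
 * Capture crash state in response to an iova fault.  Only the first fault
 * for a given submit is dumped, since a single bad submit can generate a
 * flood of faults.
 */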
void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	unsigned int noreclaim_flag;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	noreclaim_flag = memalloc_noreclaim_save();

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	memalloc_noreclaim_restore(noreclaim_flag);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

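/*
 * Hangcheck timer callback: if the completed fence has not advanced since
 * the last tick, work is still outstanding, and the optional progress()
 * callback reports no forward progress, kick off the recovery worker.  The
 * timer is re-armed as long as work remains pending.
 */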
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = timer_container_of(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

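/*
 * Retire a single completed submit: convert the 19.2 MHz always-on counter
 * delta to nanoseconds, account elapsed time and CP cycles to the
 * submitting context, drop the submit from the ring's list, and update
 * devfreq/runtime-PM state on the active->idle transition.
 */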
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}

	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done. If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

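/*
 * Bulk-get all of the GPU's clocks and cache the "core" and "rbbmtimer"
 * handles, which enable_clk()/disable_clk() drive at specific rates.
 */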
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/*
 * Return a new private VM for the given task, or a reference to the global
 * VM if the target doesn't support per-process address spaces.
 */
struct drm_gpuvm *
msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
		bool kernel_managed)
{
	struct drm_gpuvm *vm = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_vm) {
		vm = gpu->funcs->create_private_vm(gpu, kernel_managed);
		if (!IS_ERR(vm))
			to_msm_vm(vm)->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(vm))
		vm = drm_gpuvm_get(gpu->vm);

	return vm;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_run_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->vm = gpu->funcs->create_vm(gpu, pdev);
	if (IS_ERR(gpu->vm)) {
		ret = PTR_ERR(gpu->vm);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm);

	if (!IS_ERR_OR_NULL(gpu->vm)) {
		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;

		mmu->funcs->detach(mmu);
		drm_gpuvm_put(gpu->vm);
	}

	if (gpu->worker) {
		kthread_destroy_worker(gpu->worker);
	}

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}
1160}