// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware. Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order. The GPU
 * scheduler will schedule the clients with a FIFO scheduling algorithm.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin
 * and render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */
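
/*
 * The bin-to-render dependency itself is established at submit time, in the
 * submission code outside this file.  A minimal sketch, assuming local
 * variables "bin" and "render" for the two jobs of one CL submission:
 *
 *	ret = drm_sched_job_add_dependency(&render->base.base,
 *					   dma_fence_get(bin->base.done_fence));
 *
 * drm_sched_job_add_dependency() consumes the fence reference, and the
 * scheduler will then not run the render job until the bin job's done
 * fence has signalled.
 */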

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

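/* Downcast helpers: every V3D job type embeds a struct v3d_job, which in
 * turn embeds the struct drm_sched_job handed to the scheduler callbacks,
 * so container_of() recovers the driver job from the sched_job pointer.
 */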
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_bin_job, base.base);
}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_render_job, base.base);
}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_tfu_job, base.base);
}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_csd_job, base.base);
}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_cpu_job, base.base);
}

void v3d_stats_release(struct kref *refcount)
{
	struct v3d_stats *stats = container_of(refcount, typeof(*stats), refcount);

	kfree(stats);
}

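/**
 * v3d_stats_alloc() - Allocate a refcounted stats object
 *
 * Return: a zeroed &struct v3d_stats with its kref and seqcount initialised,
 * or NULL on allocation failure.
 */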
struct v3d_stats *v3d_stats_alloc(void)
{
	struct v3d_stats *stats;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	kref_init(&stats->refcount);
	seqcount_init(&stats->lock);

	return stats;
}

static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_job_cleanup(job);
}

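/**
 * v3d_timestamp_query_info_free() - Release a CPU job's timestamp queries
 * @query_info: timestamp query info embedded in the CPU job
 * @count: number of queries whose syncobj references were taken
 *
 * Drops the syncobj reference held by each query and frees the query array.
 */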
void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
			      unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++)
			drm_syncobj_put(query_info->queries[i].syncobj);

		kvfree(query_info->queries);
	}
}

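/**
 * v3d_performance_query_info_free() - Release a CPU job's performance queries
 * @query_info: performance query info embedded in the CPU job
 * @count: number of queries whose resources were allocated
 *
 * Drops each query's syncobj reference, frees its kperfmon ID array, and
 * then frees the query array itself.
 */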
void
v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
				unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++) {
			drm_syncobj_put(query_info->queries[i].syncobj);
			kvfree(query_info->queries[i].kperfmon_ids);
		}

		kvfree(query_info->queries);
	}
}

static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);

	v3d_timestamp_query_info_free(&job->timestamp_query,
				      job->timestamp_query.count);

	v3d_performance_query_info_free(&job->performance_query,
					job->performance_query.count);

	v3d_job_cleanup(&job->base);
}

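/* Make the perfmon wanted by this job (or the global perfmon, if one is
 * set) the active perfmon, stopping whichever perfmon was counting before.
 */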
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
	struct v3d_perfmon *perfmon = v3d->global_perfmon;

	if (!perfmon)
		perfmon = job->perfmon;

	if (perfmon == v3d->active_perfmon)
		return;

	if (perfmon != v3d->active_perfmon)
		v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

	if (perfmon && v3d->active_perfmon != perfmon)
		v3d_perfmon_start(v3d, perfmon);
}

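/* Job timing stats are published under a seqcount so that readers (e.g. the
 * fdinfo code) can sample them without taking a lock.  A reader sketch,
 * assuming the usual seqcount retry loop:
 *
 *	unsigned int seq;
 *	u64 enabled_ns;
 *
 *	do {
 *		seq = read_seqcount_begin(&stats->lock);
 *		enabled_ns = stats->enabled_ns;
 *	} while (read_seqcount_retry(&stats->lock, seq));
 */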
static void
v3d_stats_start(struct v3d_stats *stats, u64 now)
{
	raw_write_seqcount_begin(&stats->lock);
	stats->start_ns = now;
	raw_write_seqcount_end(&stats->lock);
}

static void
v3d_job_start_stats(struct v3d_job *job)
{
	u64 now = local_clock();

	preempt_disable();
	v3d_stats_start(job->client_stats, now);
	v3d_stats_start(job->global_stats, now);
	preempt_enable();
}

static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{
	raw_write_seqcount_begin(&stats->lock);
	stats->enabled_ns += now - stats->start_ns;
	stats->jobs_completed++;
	stats->start_ns = 0;
	raw_write_seqcount_end(&stats->lock);
}

void
v3d_job_update_stats(struct v3d_job *job)
{
	u64 now = local_clock();

	preempt_disable();
	v3d_stats_update(job->client_stats, now);
	v3d_stats_update(job->global_stats, now);
	preempt_enable();
}

static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		spin_lock_irqsave(&queue->queue_lock, irqflags);
		queue->active_job = NULL;
		spin_unlock_irqrestore(&queue->queue_lock, irqflags);
		return NULL;
	}

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&queue->queue_lock, irqflags);
	queue->active_job = &job->base;
	/* Clear out the overflow allocation, so we don't
	 * reuse the overflow attached to a previous job.
	 */
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	spin_unlock_irqrestore(&queue->queue_lock, irqflags);

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_BIN);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base);
	v3d_switch_perfmon(v3d, &job->base);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	if (job->qma) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
	}
	if (job->qts) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
			       V3D_CLE_CT0QTS_ENABLE |
			       job->qts);
	}
	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

	return fence;
}

static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_RENDER].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_RENDER].active_job = &job->base;

	/* Can we avoid this flush? We need to be careful of
	 * scheduling, though -- imagine job0 rendering to texture and
	 * job1 reading, and them being executed as bin0, bin1,
	 * render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_RENDER);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base);
	v3d_switch_perfmon(v3d, &job->base);

	/* XXX: Set the QCFG */

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

	return fence;
}

static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_tfu_job *job = to_tfu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_TFU].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_TFU].active_job = &job->base;

	fence = v3d_fence_create(v3d, V3D_TFU);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base);

	V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
	V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
	if (v3d->ver >= V3D_GEN_71)
		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
	if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
	}
	/* ICFG kicks off the job. */
	V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);

	return fence;
}

static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	int i, csd_cfg0_reg;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_CSD].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_CSD].active_job = &job->base;

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base);
	v3d_switch_perfmon(v3d, &job->base);

	csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
	for (i = 1; i <= 6; i++)
		V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);

	/* Although V3D 7.1 has an eighth configuration register, we are not
	 * using it. Therefore, make sure it remains unused.
	 *
	 * XXX: Set the CFG7 register
	 */
	if (v3d->ver >= V3D_GEN_71)
		V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);

	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);

	return fence;
}

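/* CPU job for indirect compute dispatch: read the workgroup counts that the
 * GPU wrote into a BO, then patch the queued CSD job's configuration (and,
 * where requested, the shader uniforms mirroring the workgroup counts)
 * before that CSD job runs.
 */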
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{
	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
	struct v3d_dev *v3d = job->base.v3d;
	u32 num_batches, *wg_counts;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(indirect);

	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);

	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
		return;

	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;

	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);

	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
	if (v3d->ver < V3D_GEN_71 || (v3d->ver == V3D_GEN_71 && v3d->rev < 6))
		args->cfg[4] = num_batches - 1;
	else
		args->cfg[4] = num_batches;

	WARN_ON(args->cfg[4] == ~0);

	for (int i = 0; i < 3; i++) {
		/* 0xffffffff indicates that the uniform rewrite is not needed */
		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
		}
	}

	v3d_put_bo_vaddr(indirect);
	v3d_put_bo_vaddr(bo);
}

static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;

		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
					  job->base.done_fence);
	}

	v3d_put_bo_vaddr(bo);
}

static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
		*((u64 *)value_addr) = 0;

		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
	}

	v3d_put_bo_vaddr(bo);
}

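/* Query results are written back as either 32-bit or 64-bit elements,
 * depending on the width userspace asked for in the copy request.
 */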
static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
	dst[idx] = value;
}

static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
{
	dst[idx] = value;
}

static void
write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
{
	if (do_64bit)
		write_to_buffer_64(dst, idx, value);
	else
		write_to_buffer_32(dst, idx, value);
}

static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct dma_fence *fence;
	u8 *query_addr;
	bool available, write_result;
	u8 *data;
	int i;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(timestamp);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (i = 0; i < timestamp_query->count; i++) {
		fence = drm_syncobj_fence_get(queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result) {
			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
		}

		if (copy->availability_bit)
			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(timestamp);
	v3d_put_bo_vaddr(bo);
}

static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;

	for (int i = 0; i < performance_query->count; i++) {
		for (int j = 0; j < performance_query->nperfmons; j++) {
			perfmon = v3d_perfmon_find(v3d_priv,
						   performance_query->queries[i].kperfmon_ids[j]);
			if (!perfmon) {
				drm_dbg(&v3d->drm, "Failed to find perfmon.");
				continue;
			}

			v3d_perfmon_stop(v3d, perfmon, false);

			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));

			v3d_perfmon_put(perfmon);
		}

		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
	}
}

static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
				   unsigned int query)
{
	struct v3d_performance_query_info *performance_query =
						&job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file_priv;
	struct v3d_performance_query *perf_query =
			&performance_query->queries[query];
	struct v3d_dev *v3d = job->base.v3d;
	unsigned int i, j, offset;

	for (i = 0, offset = 0;
	     i < performance_query->nperfmons;
	     i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
		struct v3d_perfmon *perfmon;

		perfmon = v3d_perfmon_find(v3d_priv,
					   perf_query->kperfmon_ids[i]);
		if (!perfmon) {
			drm_dbg(&v3d->drm, "Failed to find perfmon.");
			continue;
		}

		v3d_perfmon_stop(v3d, perfmon, true);

		if (job->copy.do_64bit) {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_64(data, offset + j,
						   perfmon->values[j]);
		} else {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_32(data, offset + j,
						   perfmon->values[j]);
		}

		v3d_perfmon_put(perfmon);
	}
}

static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct dma_fence *fence;
	bool available, write_result;
	u8 *data;

	v3d_get_bo_vaddr(bo);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (int i = 0; i < performance_query->count; i++) {
		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result)
			v3d_write_performance_query_result(job, data, i);

		if (copy->availability_bit)
			write_to_buffer(data, performance_query->ncounters,
					copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(bo);
}

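/* Dispatch table for CPU jobs, indexed by the job's v3d_cpu_job_type;
 * v3d_cpu_job_run() looks up the handler here.
 */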
static const v3d_cpu_job_fn cpu_job_function[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
};

static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;

	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
		drm_dbg(&v3d->drm, "Unknown CPU job: %d\n", job->job_type);
		return NULL;
	}

	v3d_job_start_stats(&job->base);
	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);

	cpu_job_function[job->job_type](job);

	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
	v3d_job_update_stats(&job->base);

	/* Synchronous operation, so no fence to wait on. */
	return NULL;
}

static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;

	v3d_job_start_stats(job);

	v3d_clean_caches(v3d);

	v3d_job_update_stats(job);

	/* Synchronous operation, so no fence to wait on. */
	return NULL;
}

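/* Common timeout handling: stop every scheduler, reset the GPU, bump the
 * reset counters, and then restart the schedulers so pending jobs get
 * resubmitted.
 */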
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
			  enum v3d_queue q)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	enum v3d_queue i;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_stop(&v3d->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	atomic_inc(&v3d->reset_counter);
	atomic_inc(&job->client_stats->reset_counter);

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_resubmit_jobs(&v3d->queue[i].sched);

	/* Unblock schedulers and restart their jobs. */
	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_start(&v3d->queue[i].sched, 0);

	mutex_unlock(&v3d->reset_lock);

	return DRM_GPU_SCHED_STAT_RESET;
}

static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

	/* If the current address or return address have changed, then the GPU
	 * has probably made progress and we should delay the reset. This
	 * could fail if the GPU got in an infinite loop in the CL, but that
	 * is pretty unlikely outside of an i-g-t testcase.
	 */
	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
		*timedout_ctca = ctca;
		*timedout_ctra = ctra;

		return DRM_GPU_SCHED_STAT_NO_HANG;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
}

static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_BIN,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_RENDER,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
}

static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));

	/* If we've made progress, skip reset, add the job to the pending
	 * list, and let the timer get rearmed.
	 */
	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;

		return DRM_GPU_SCHED_STAT_NO_HANG;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
}

static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
	.run_job = v3d_bin_job_run,
	.timedout_job = v3d_bin_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_render_sched_ops = {
	.run_job = v3d_render_job_run,
	.timedout_job = v3d_render_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
	.run_job = v3d_tfu_job_run,
	.timedout_job = v3d_tfu_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
	.run_job = v3d_csd_job_run,
	.timedout_job = v3d_csd_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
	.run_job = v3d_cache_clean_job_run,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
	.run_job = v3d_cpu_job_run,
	.free_job = v3d_cpu_job_free
};

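/* Create the scheduler for one hardware queue. credit_limit = 1 matches the
 * policy described in the DOC comment above: only one job per queue is
 * handed to the hardware at a time.
 */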
static int
v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
		     enum v3d_queue queue, const char *name)
{
	struct drm_sched_init_args args = {
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 1,
		.timeout = msecs_to_jiffies(500),
		.dev = v3d->drm.dev,
	};

	args.ops = ops;
	args.name = name;

	return drm_sched_init(&v3d->queue[queue].sched, &args);
}

int
v3d_sched_init(struct v3d_dev *v3d)
{
	int ret;

	ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
	if (ret)
		return ret;

	ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
				   "v3d_render");
	if (ret)
		goto fail;

	ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
	if (ret)
		goto fail;

	if (v3d_has_csd(v3d)) {
		ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
					   "v3d_csd");
		if (ret)
			goto fail;

		ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
					   V3D_CACHE_CLEAN, "v3d_cache_clean");
		if (ret)
			goto fail;
	}

	ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
	if (ret)
		goto fail;

	return 0;

fail:
	v3d_sched_fini(v3d);
	return ret;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		if (v3d->queue[q].sched.ready)
			drm_sched_fini(&v3d->queue[q].sched);
	}
}