1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include "xe_exec_queue.h"
7
8#include <linux/nospec.h>
9
10#include <drm/drm_device.h>
11#include <drm/drm_drv.h>
12#include <drm/drm_file.h>
13#include <drm/drm_syncobj.h>
14#include <uapi/drm/xe_drm.h>
15
16#include "xe_bo.h"
17#include "xe_dep_scheduler.h"
18#include "xe_device.h"
19#include "xe_gt.h"
20#include "xe_gt_sriov_pf.h"
21#include "xe_gt_sriov_vf.h"
22#include "xe_hw_engine_class_sysfs.h"
23#include "xe_hw_engine_group.h"
24#include "xe_irq.h"
25#include "xe_lrc.h"
26#include "xe_macros.h"
27#include "xe_migrate.h"
28#include "xe_pm.h"
29#include "xe_trace.h"
30#include "xe_vm.h"
31#include "xe_pxp.h"
32
/**
 * DOC: Execution Queue
 *
 * An execution queue is an interface to a HW context of execution. The user
 * creates an execution queue, submits GPU jobs through it and destroys it
 * once it is no longer needed.
 *
 * Execution queues can also be created by XeKMD itself for driver-internal
 * operations such as object migration.
 *
 * An execution queue is associated with a specified HW engine or a group of
 * engines (belonging to the same tile and engine class), and any GPU job
 * submitted on the queue will run on one of these engines.
 *
 * An execution queue is tied to an address space (VM). It holds a reference
 * to the associated VM and to the underlying Logical Ring Context(s) (LRCs)
 * until the queue is destroyed.
 *
 * The execution queue sits on top of the submission backend. It opaquely
 * handles whichever of the GuC and Execlist backends the platform uses, as
 * well as the ring operations the different engine classes support.
 */
55
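/*
 * Illustrative userspace sketch (not part of the driver) of the typical
 * lifecycle described above, using the uAPI from uapi/drm/xe_drm.h. The
 * vm_id and engine selection below are placeholders; real code takes them
 * from a prior VM create and the engine query.
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_COPY,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *	struct drm_xe_exec_queue_destroy destroy = {};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	... submit jobs on create.exec_queue_id with DRM_IOCTL_XE_EXEC ...
 *	destroy.exec_queue_id = create.exec_queue_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 */
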
/**
 * DOC: Multi Queue Group
 *
 * Multi queue group is another mode of execution supported by the compute
 * and blitter copy command streamers (CCS and BCS, respectively). It is an
 * enhancement of the existing hardware architecture and leverages the same
 * submission model. It enables efficient, parallel execution of multiple
 * queues within a single shared context. The multi queue group
 * functionality is only supported with the GuC submission backend.
 * All the queues of a group must use the same address space (VM).
 *
 * The DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP exec queue property
 * supports creating a multi queue group and adding queues to a queue group.
 *
 * A DRM_XE_EXEC_QUEUE_CREATE ioctl call with the above property and its
 * value field set to DRM_XE_MULTI_GROUP_CREATE creates a new multi queue
 * group, with the queue being created becoming the primary queue (aka q0)
 * of the group. To add secondary queues to the group, they need to be
 * created with the above property with the ID of the primary queue as the
 * value. The properties of the primary queue (like priority and time slice)
 * apply to the whole group, so these properties can't be set on the
 * secondary queues of a group.
 *
 * The hardware does not support removing a queue from a multi queue group.
 * However, queues can be dynamically added to the group. A group can have
 * up to 64 queues. To support this, XeKMD holds references to the LRCs of
 * the queues even after the queues are destroyed by the user, until the
 * whole group is destroyed. The secondary queues hold a reference to the
 * primary queue, thus preventing the group from being destroyed when the
 * user destroys the primary queue. Once the primary queue is destroyed,
 * secondary queues can't be added to the queue group and new job
 * submissions on existing secondary queues are not allowed.
 *
 * The queues of a multi queue group can set their priority within the group
 * through the DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY property.
 * This multi queue priority can also be set dynamically through the
 * DRM_XE_EXEC_QUEUE_SET_PROPERTY ioctl. This is the only other property
 * supported by the secondary queues of a multi queue group, other than
 * DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP.
 *
 * When the GuC reports an error on any of the queues of a multi queue
 * group, the queue cleanup mechanism is invoked for all the queues of the
 * group as hardware cannot make progress on the multi queue context.
 *
 * Refer to :ref:`multi-queue-group-guc-interface` for the multi queue group
 * GuC interface.
 */
102
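/*
 * Illustrative userspace sketch (not part of the driver) of the flow
 * described above. Property and value names are taken from this file's
 * property table and may differ in the final uAPI; vm_id and instance are
 * as in the sketch above.
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP,
 *		.value = DRM_XE_MULTI_GROUP_CREATE,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 *	The call above creates the primary queue (q0). Adding a secondary
 *	queue reuses the same extension with the primary's id as the value:
 *
 *	ext.value = create.exec_queue_id;
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 */
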
103enum xe_exec_queue_sched_prop {
104 XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
105 XE_EXEC_QUEUE_TIMESLICE = 1,
106 XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
107 XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
108};
109
110static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
111 u64 extensions);
112
113static void xe_exec_queue_group_cleanup(struct xe_exec_queue *q)
114{
115 struct xe_exec_queue_group *group = q->multi_queue.group;
116 struct xe_lrc *lrc;
117 unsigned long idx;
118
119 if (xe_exec_queue_is_multi_queue_secondary(q)) {
120 /*
121 * Put pairs with get from xe_exec_queue_lookup() call
122 * in xe_exec_queue_group_validate().
123 */
124 xe_exec_queue_put(xe_exec_queue_multi_queue_primary(q));
125 return;
126 }
127
128 if (!group)
129 return;
130
131 /* Primary queue cleanup */
132 xa_for_each(&group->xa, idx, lrc)
133 xe_lrc_put(lrc);
134
135 xa_destroy(&group->xa);
136 mutex_destroy(&group->list_lock);
137 xe_bo_unpin_map_no_vm(group->cgp_bo);
138 kfree(group);
139}
140
141static void __xe_exec_queue_free(struct xe_exec_queue *q)
142{
143 int i;
144
145 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
146 if (q->tlb_inval[i].dep_scheduler)
147 xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
148
149 if (xe_exec_queue_uses_pxp(q))
150 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
151
152 if (xe_exec_queue_is_multi_queue(q))
153 xe_exec_queue_group_cleanup(q);
154
155 if (q->vm) {
156 xe_vm_remove_exec_queue(q->vm, q);
157 xe_vm_put(q->vm);
158 }
159
160 if (q->xef)
161 xe_file_put(q->xef);
162
163 kvfree(q->replay_state);
164 kfree(q);
165}
166
167static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
168{
169 struct xe_tile *tile = gt_to_tile(q->gt);
170 int i;
171
172 for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
173 struct xe_dep_scheduler *dep_scheduler;
174 struct xe_gt *gt;
175 struct workqueue_struct *wq;
176
177 if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
178 gt = tile->primary_gt;
179 else
180 gt = tile->media_gt;
181
182 if (!gt)
183 continue;
184
185 wq = gt->tlb_inval.job_wq;
186
187#define MAX_TLB_INVAL_JOBS 16 /* Picking a reasonable value */
188 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
189 MAX_TLB_INVAL_JOBS);
190 if (IS_ERR(dep_scheduler))
191 return PTR_ERR(dep_scheduler);
192
193 q->tlb_inval[i].dep_scheduler = dep_scheduler;
194 }
195#undef MAX_TLB_INVAL_JOBS
196
197 return 0;
198}
199
200static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
201 struct xe_vm *vm,
202 u32 logical_mask,
203 u16 width, struct xe_hw_engine *hwe,
204 u32 flags, u64 extensions)
205{
206 struct xe_exec_queue *q;
207 struct xe_gt *gt = hwe->gt;
208 int err;
209
210 /* only kernel queues can be permanent */
211 XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
212
213 q = kzalloc_flex(*q, lrc, width);
214 if (!q)
215 return ERR_PTR(-ENOMEM);
216
217 kref_init(&q->refcount);
218 q->flags = flags;
219 q->hwe = hwe;
220 q->gt = gt;
221 q->class = hwe->class;
222 q->width = width;
223 q->msix_vec = XE_IRQ_DEFAULT_MSIX;
224 q->logical_mask = logical_mask;
225 q->fence_irq = >->fence_irq[hwe->class];
226 q->ring_ops = gt->ring_ops[hwe->class];
227 q->ops = gt->exec_queue_ops;
228 INIT_LIST_HEAD(&q->lr.link);
229 INIT_LIST_HEAD(&q->vm_exec_queue_link);
230 INIT_LIST_HEAD(&q->multi_gt_link);
231 INIT_LIST_HEAD(&q->hw_engine_group_link);
232 INIT_LIST_HEAD(&q->pxp.link);
233 spin_lock_init(&q->multi_queue.lock);
234 spin_lock_init(&q->lrc_lookup_lock);
235 q->multi_queue.priority = XE_MULTI_QUEUE_PRIORITY_NORMAL;
236
237 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
238 q->sched_props.preempt_timeout_us =
239 hwe->eclass->sched_props.preempt_timeout_us;
240 q->sched_props.job_timeout_ms =
241 hwe->eclass->sched_props.job_timeout_ms;
242 if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
243 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
244 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
245 else
246 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
247
248 if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
249 err = alloc_dep_schedulers(xe, q);
250 if (err) {
251 __xe_exec_queue_free(q);
252 return ERR_PTR(err);
253 }
254 }
255
256 if (vm)
257 q->vm = xe_vm_get(vm);
258
259 if (extensions) {
260 /*
261 * may set q->usm, must come before xe_lrc_create(),
262 * may overwrite q->sched_props, must come before q->ops->init()
263 */
264 err = exec_queue_user_extensions(xe, q, extensions);
265 if (err) {
266 __xe_exec_queue_free(q);
267 return ERR_PTR(err);
268 }
269 }
270
271 return q;
272}
273
274static void xe_exec_queue_set_lrc(struct xe_exec_queue *q, struct xe_lrc *lrc, u16 idx)
275{
276 xe_assert(gt_to_xe(q->gt), idx < q->width);
277
278 scoped_guard(spinlock, &q->lrc_lookup_lock)
279 q->lrc[idx] = lrc;
280}
281
/**
 * xe_exec_queue_get_lrc() - Get an LRC from the exec queue.
 * @q: The exec queue instance.
 * @idx: Index within the multi-LRC array.
 *
 * Retrieves the LRC at the given index for the exec queue under the lookup
 * lock and takes a reference.
 *
 * Return: Pointer to the LRC on success, NULL on lookup failure.
 */
293struct xe_lrc *xe_exec_queue_get_lrc(struct xe_exec_queue *q, u16 idx)
294{
295 struct xe_lrc *lrc;
296
297 xe_assert(gt_to_xe(q->gt), idx < q->width);
298
299 scoped_guard(spinlock, &q->lrc_lookup_lock) {
300 lrc = q->lrc[idx];
301 if (lrc)
302 xe_lrc_get(lrc);
303 }
304
305 return lrc;
306}
307
/**
 * xe_exec_queue_lrc() - Get the primary LRC from the exec queue.
 * @q: The exec queue instance.
 *
 * Retrieves the primary LRC for the exec queue. Note that this function
 * returns only the first LRC instance, even when multiple parallel LRCs
 * are configured. It does not take a reference, so no put is needed after
 * use.
 *
 * Return: Pointer to the primary LRC.
 */
319struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
320{
321 return q->lrc[0];
322}
323
324static void __xe_exec_queue_fini(struct xe_exec_queue *q)
325{
326 int i;
327
328 q->ops->fini(q);
329
330 for (i = 0; i < q->width; ++i)
331 xe_lrc_put(q->lrc[i]);
332}
333
334static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
335{
336 int i, err;
337 u32 flags = 0;
338
339 /*
340 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
341 * other workload can use the EUs at the same time). On MTL this is done
342 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
343 * is a dedicated bit for it.
344 */
345 if (xe_exec_queue_uses_pxp(q) &&
346 (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
347 if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
348 flags |= XE_LRC_CREATE_PXP;
349 else
350 flags |= XE_LRC_CREATE_RUNALONE;
351 }
352
353 if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
354 flags |= XE_LRC_CREATE_USER_CTX;
355
356 if (q->flags & EXEC_QUEUE_FLAG_DISABLE_STATE_CACHE_PERF_FIX)
357 flags |= XE_LRC_DISABLE_STATE_CACHE_PERF_FIX;
358
359 err = q->ops->init(q);
360 if (err)
361 return err;
362
	/*
	 * This must occur after q->ops->init to avoid race conditions during
	 * VF post-migration recovery, as the fixups for the LRC GGTT
	 * addresses depend on the queue being present in the backend
	 * tracking structure.
	 *
	 * In addition to the above, we must wait on in-flight GGTT changes to
	 * avoid writing out stale values here. Such a wait provides a solid
	 * solution (without a race) only if the function can detect migration
	 * instantly from the moment the vCPU resumes execution.
	 */
373 for (i = 0; i < q->width; ++i) {
374 struct xe_lrc *__lrc = NULL;
375 int marker;
376
377 do {
378 struct xe_lrc *lrc;
379
380 marker = xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
381
382 lrc = xe_lrc_create(q->hwe, q->vm, q->replay_state,
383 xe_lrc_ring_size(), q->msix_vec, flags);
384 if (IS_ERR(lrc)) {
385 err = PTR_ERR(lrc);
386 goto err_lrc;
387 }
388
389 xe_exec_queue_set_lrc(q, lrc, i);
390
391 if (__lrc)
392 xe_lrc_put(__lrc);
393 __lrc = lrc;
394
395 } while (marker != xe_vf_migration_fixups_complete_count(q->gt));
396 }
397
398 return 0;
399
400err_lrc:
401 __xe_exec_queue_fini(q);
402 return err;
403}
404
405/**
406 * xe_exec_queue_create() - Create an exec queue
407 * @xe: Xe device
408 * @vm: VM for the exec queue
409 * @logical_mask: Logical mask of HW engines
410 * @width: Width of the exec queue (number of LRCs)
411 * @hwe: Hardware engine
412 * @flags: Exec queue creation flags
413 * @extensions: Extensions for exec queue creation
414 *
415 * Create an exec queue (allocate and initialize) with the specified parameters
416 *
417 * Return: Pointer to the created exec queue on success, ERR_PTR on failure
418 */
419struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
420 u32 logical_mask, u16 width,
421 struct xe_hw_engine *hwe, u32 flags,
422 u64 extensions)
423{
424 struct xe_exec_queue *q;
425 int err;
426
427 /* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
428 xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));
429
430 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
431 extensions);
432 if (IS_ERR(q))
433 return q;
434
435 err = __xe_exec_queue_init(q, flags);
436 if (err)
437 goto err_post_alloc;
438
439 /*
440 * We can only add the queue to the PXP list after the init is complete,
441 * because the PXP termination can call exec_queue_kill and that will
442 * go bad if the queue is only half-initialized. This means that we
443 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
444 * and we need to do it here instead.
445 */
446 if (xe_exec_queue_uses_pxp(q)) {
447 err = xe_pxp_exec_queue_add(xe->pxp, q);
448 if (err)
449 goto err_post_init;
450 }
451
452 return q;
453
454err_post_init:
455 __xe_exec_queue_fini(q);
456err_post_alloc:
457 __xe_exec_queue_free(q);
458 return ERR_PTR(err);
459}
460ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
461
462/**
463 * xe_exec_queue_create_class() - Create an exec queue for a specific engine class
464 * @xe: Xe device
465 * @gt: GT for the exec queue
466 * @vm: VM for the exec queue
467 * @class: Engine class
468 * @flags: Exec queue creation flags
469 * @extensions: Extensions for exec queue creation
470 *
471 * Create an exec queue for the specified engine class.
472 *
473 * Return: Pointer to the created exec queue on success, ERR_PTR on failure
474 */
475struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
476 struct xe_vm *vm,
477 enum xe_engine_class class,
478 u32 flags, u64 extensions)
479{
480 struct xe_hw_engine *hwe, *hwe0 = NULL;
481 enum xe_hw_engine_id id;
482 u32 logical_mask = 0;
483
484 for_each_hw_engine(hwe, gt, id) {
485 if (xe_hw_engine_is_reserved(hwe))
486 continue;
487
488 if (hwe->class == class) {
489 logical_mask |= BIT(hwe->logical_instance);
490 if (!hwe0)
491 hwe0 = hwe;
492 }
493 }
494
495 if (!logical_mask)
496 return ERR_PTR(-ENODEV);
497
498 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
499}
500
501/**
502 * xe_exec_queue_create_bind() - Create bind exec queue.
503 * @xe: Xe device.
504 * @tile: tile which bind exec queue belongs to.
505 * @flags: exec queue creation flags
506 * @user_vm: The user VM which this exec queue belongs to
507 * @extensions: exec queue creation extensions
508 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlocks (user binds must not get stuck behind faults, as
 * kernel binds which resolve faults depend on user binds). On non-faulting
 * devices any copy engine can be used.
515 *
516 * Returns exec queue on success, ERR_PTR on failure
517 */
518struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
519 struct xe_tile *tile,
520 struct xe_vm *user_vm,
521 u32 flags, u64 extensions)
522{
523 struct xe_gt *gt = tile->primary_gt;
524 struct xe_exec_queue *q;
525 struct xe_vm *migrate_vm;
526
527 migrate_vm = xe_migrate_get_vm(tile->migrate);
528 if (xe->info.has_usm) {
529 struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
530 XE_ENGINE_CLASS_COPY,
531 gt->usm.reserved_bcs_instance,
532 false);
533
534 if (!hwe) {
535 xe_vm_put(migrate_vm);
536 return ERR_PTR(-EINVAL);
537 }
538
539 q = xe_exec_queue_create(xe, migrate_vm,
540 BIT(hwe->logical_instance), 1, hwe,
541 flags, extensions);
542 } else {
543 q = xe_exec_queue_create_class(xe, gt, migrate_vm,
544 XE_ENGINE_CLASS_COPY, flags,
545 extensions);
546 }
547 xe_vm_put(migrate_vm);
548
549 if (!IS_ERR(q)) {
550 int err = drm_syncobj_create(&q->ufence_syncobj,
551 DRM_SYNCOBJ_CREATE_SIGNALED,
552 NULL);
553 if (err) {
554 xe_exec_queue_put(q);
555 return ERR_PTR(err);
556 }
557
558 if (user_vm)
559 q->user_vm = xe_vm_get(user_vm);
560 }
561
562 return q;
563}
564ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
565
566/**
567 * xe_exec_queue_destroy() - Destroy an exec queue
568 * @ref: Reference count of the exec queue
569 *
570 * Called when the last reference to the exec queue is dropped.
571 * Cleans up all resources associated with the exec queue.
572 * This function should not be called directly; use xe_exec_queue_put() instead.
573 */
574void xe_exec_queue_destroy(struct kref *ref)
575{
576 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
577 struct xe_exec_queue *eq, *next;
578 int i;
579
580 xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
581
582 if (q->ufence_syncobj)
583 drm_syncobj_put(q->ufence_syncobj);
584
585 if (xe_exec_queue_uses_pxp(q))
586 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
587
588 xe_exec_queue_last_fence_put_unlocked(q);
589 for_each_tlb_inval(i)
590 xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
591
592 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
593 list_for_each_entry_safe(eq, next, &q->multi_gt_list,
594 multi_gt_link)
595 xe_exec_queue_put(eq);
596 }
597
598 if (q->user_vm) {
599 xe_vm_put(q->user_vm);
600 q->user_vm = NULL;
601 }
602
603 q->ops->destroy(q);
604}
605
606/**
607 * xe_exec_queue_fini() - Finalize an exec queue
608 * @q: The exec queue
609 *
610 * Finalizes the exec queue by updating run ticks, releasing LRC references,
611 * and freeing the queue structure. This is called after the queue has been
612 * destroyed and all references have been dropped.
613 */
614void xe_exec_queue_fini(struct xe_exec_queue *q)
615{
616 /*
617 * Before releasing our ref to lrc and xef, accumulate our run ticks
618 * and wakeup any waiters.
619 */
620 xe_exec_queue_update_run_ticks(q);
621 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
622 wake_up_var(&q->xef->exec_queue.pending_removal);
623
624 __xe_exec_queue_fini(q);
625 __xe_exec_queue_free(q);
626}
627
628/**
629 * xe_exec_queue_assign_name() - Assign a name to an exec queue
630 * @q: The exec queue
631 * @instance: Instance number for the engine
632 *
633 * Assigns a human-readable name to the exec queue based on its engine class
634 * and instance number (e.g., "rcs0", "vcs1", "bcs2").
635 */
636void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
637{
638 switch (q->class) {
639 case XE_ENGINE_CLASS_RENDER:
640 snprintf(q->name, sizeof(q->name), "rcs%d", instance);
641 break;
642 case XE_ENGINE_CLASS_VIDEO_DECODE:
643 snprintf(q->name, sizeof(q->name), "vcs%d", instance);
644 break;
645 case XE_ENGINE_CLASS_VIDEO_ENHANCE:
646 snprintf(q->name, sizeof(q->name), "vecs%d", instance);
647 break;
648 case XE_ENGINE_CLASS_COPY:
649 snprintf(q->name, sizeof(q->name), "bcs%d", instance);
650 break;
651 case XE_ENGINE_CLASS_COMPUTE:
652 snprintf(q->name, sizeof(q->name), "ccs%d", instance);
653 break;
654 case XE_ENGINE_CLASS_OTHER:
655 snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
656 break;
657 default:
658 XE_WARN_ON(q->class);
659 }
660}
661
662/**
663 * xe_exec_queue_lookup() - Look up an exec queue by ID
664 * @xef: Xe file private data
665 * @id: Exec queue ID
666 *
667 * Looks up an exec queue by its ID and increments its reference count.
668 *
669 * Return: Pointer to the exec queue if found, NULL otherwise
670 */
671struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
672{
673 struct xe_exec_queue *q;
674
675 mutex_lock(&xef->exec_queue.lock);
676 q = xa_load(&xef->exec_queue.xa, id);
677 if (q)
678 xe_exec_queue_get(q);
679 mutex_unlock(&xef->exec_queue.lock);
680
681 return q;
682}
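
/*
 * Illustrative usage sketch: the reference taken by xe_exec_queue_lookup()
 * must be dropped with xe_exec_queue_put() once the caller is done, e.g.:
 *
 *	q = xe_exec_queue_lookup(xef, id);
 *	if (!q)
 *		return -ENOENT;
 *	... operate on q ...
 *	xe_exec_queue_put(q);
 */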
683
/**
 * xe_exec_queue_device_get_max_priority() - Get the maximum exec queue priority
 * @xe: Xe device
 *
 * Returns the maximum priority level that can be assigned to an exec queue.
 *
 * Return: Maximum priority level (HIGH if CAP_SYS_NICE, NORMAL otherwise)
 */
692enum xe_exec_queue_priority
693xe_exec_queue_device_get_max_priority(struct xe_device *xe)
694{
695 return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
696 XE_EXEC_QUEUE_PRIORITY_NORMAL;
697}
698
699static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
700 u64 value)
701{
702 if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
703 return -EINVAL;
704
705 if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
706 return -EPERM;
707
708 q->sched_props.priority = value;
709 return 0;
710}
711
712static bool xe_exec_queue_enforce_schedule_limit(void)
713{
714#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
715 return true;
716#else
717 return !capable(CAP_SYS_NICE);
718#endif
719}
720
721static void
722xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
723 enum xe_exec_queue_sched_prop prop,
724 u32 *min, u32 *max)
725{
726 switch (prop) {
727 case XE_EXEC_QUEUE_JOB_TIMEOUT:
728 *min = eclass->sched_props.job_timeout_min;
729 *max = eclass->sched_props.job_timeout_max;
730 break;
731 case XE_EXEC_QUEUE_TIMESLICE:
732 *min = eclass->sched_props.timeslice_min;
733 *max = eclass->sched_props.timeslice_max;
734 break;
735 case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
736 *min = eclass->sched_props.preempt_timeout_min;
737 *max = eclass->sched_props.preempt_timeout_max;
738 break;
739 default:
740 break;
741 }
742#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
743 if (capable(CAP_SYS_NICE)) {
744 switch (prop) {
745 case XE_EXEC_QUEUE_JOB_TIMEOUT:
746 *min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
747 *max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
748 break;
749 case XE_EXEC_QUEUE_TIMESLICE:
750 *min = XE_HW_ENGINE_TIMESLICE_MIN;
751 *max = XE_HW_ENGINE_TIMESLICE_MAX;
752 break;
753 case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
754 *min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
755 *max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
756 break;
757 default:
758 break;
759 }
760 }
761#endif
762}
763
764static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
765 u64 value)
766{
767 u32 min = 0, max = 0;
768
769 xe_exec_queue_get_prop_minmax(q->hwe->eclass,
770 XE_EXEC_QUEUE_TIMESLICE, &min, &max);
771
772 if (xe_exec_queue_enforce_schedule_limit() &&
773 !xe_hw_engine_timeout_in_range(value, min, max))
774 return -EINVAL;
775
776 q->sched_props.timeslice_us = value;
777 return 0;
778}
779
780static int
781exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
782{
783 if (value == DRM_XE_PXP_TYPE_NONE)
784 return 0;
785
786 /* we only support HWDRM sessions right now */
787 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
788 return -EINVAL;
789
790 if (!xe_pxp_is_enabled(xe->pxp))
791 return -ENODEV;
792
793 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
794}
795
796static int exec_queue_set_hang_replay_state(struct xe_device *xe,
797 struct xe_exec_queue *q,
798 u64 value)
799{
800 size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
801 u64 __user *address = u64_to_user_ptr(value);
802 void *ptr;
803
804 ptr = vmemdup_user(address, size);
805 if (XE_IOCTL_DBG(xe, IS_ERR(ptr)))
806 return PTR_ERR(ptr);
807
808 q->replay_state = ptr;
809
810 return 0;
811}
812
813static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *q)
814{
815 struct xe_tile *tile = gt_to_tile(q->gt);
816 struct xe_exec_queue_group *group;
817 struct xe_bo *bo;
818
819 group = kzalloc_obj(*group);
820 if (!group)
821 return -ENOMEM;
822
823 bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
824 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
825 XE_BO_FLAG_PINNED_LATE_RESTORE |
826 XE_BO_FLAG_FORCE_USER_VRAM |
827 XE_BO_FLAG_GGTT_INVALIDATE |
828 XE_BO_FLAG_GGTT, false);
829 if (IS_ERR(bo)) {
830 drm_err(&xe->drm, "CGP bo allocation for queue group failed: %ld\n",
831 PTR_ERR(bo));
832 kfree(group);
833 return PTR_ERR(bo);
834 }
835
836 xe_map_memset(xe, &bo->vmap, 0, 0, SZ_4K);
837
838 group->primary = q;
839 group->cgp_bo = bo;
840 INIT_LIST_HEAD(&group->list);
841 xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
842 mutex_init(&group->list_lock);
843 q->multi_queue.group = group;
844
845 /* group->list_lock is used in submission backend */
846 if (IS_ENABLED(CONFIG_LOCKDEP)) {
847 fs_reclaim_acquire(GFP_KERNEL);
848 might_lock(&group->list_lock);
849 fs_reclaim_release(GFP_KERNEL);
850 }
851
852 return 0;
853}
854
855static inline bool xe_exec_queue_supports_multi_queue(struct xe_exec_queue *q)
856{
857 return q->gt->info.multi_queue_engine_class_mask & BIT(q->class);
858}
859
860static int xe_exec_queue_group_validate(struct xe_device *xe, struct xe_exec_queue *q,
861 u32 primary_id)
862{
863 struct xe_exec_queue_group *group;
864 struct xe_exec_queue *primary;
865 int ret;
866
867 /*
868 * Get from below xe_exec_queue_lookup() pairs with put
869 * in xe_exec_queue_group_cleanup().
870 */
871 primary = xe_exec_queue_lookup(q->vm->xef, primary_id);
872 if (XE_IOCTL_DBG(xe, !primary))
873 return -ENOENT;
874
875 if (XE_IOCTL_DBG(xe, !xe_exec_queue_is_multi_queue_primary(primary)) ||
876 XE_IOCTL_DBG(xe, q->vm != primary->vm) ||
877 XE_IOCTL_DBG(xe, q->logical_mask != primary->logical_mask)) {
878 ret = -EINVAL;
879 goto put_primary;
880 }
881
882 group = primary->multi_queue.group;
883 q->multi_queue.valid = true;
884 q->multi_queue.group = group;
885
886 return 0;
887put_primary:
888 xe_exec_queue_put(primary);
889 return ret;
890}
891
892#define XE_MAX_GROUP_SIZE 64
893static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q)
894{
895 struct xe_exec_queue_group *group = q->multi_queue.group;
896 u32 pos;
897 int err;
898
899 xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
900
901 /* Primary queue holds a reference to LRCs of all secondary queues */
902 err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]),
903 XA_LIMIT(1, XE_MAX_GROUP_SIZE - 1), GFP_KERNEL);
904 if (XE_IOCTL_DBG(xe, err)) {
905 xe_lrc_put(q->lrc[0]);
906
907 /* It is invalid if queue group limit is exceeded */
908 if (err == -EBUSY)
909 err = -EINVAL;
910
911 return err;
912 }
913
914 q->multi_queue.pos = pos;
915
916 return 0;
917}
918
919static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queue *q)
920{
921 struct xe_exec_queue_group *group = q->multi_queue.group;
922 struct xe_lrc *lrc;
923
924 xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
925
926 lrc = xa_erase(&group->xa, q->multi_queue.pos);
927 xe_assert(xe, lrc);
928 xe_lrc_put(lrc);
929}
930
931static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
932 u64 value)
933{
934 if (XE_IOCTL_DBG(xe, !xe_exec_queue_supports_multi_queue(q)))
935 return -ENODEV;
936
937 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe)))
938 return -EOPNOTSUPP;
939
940 if (XE_IOCTL_DBG(xe, !q->vm->xef))
941 return -EINVAL;
942
943 if (XE_IOCTL_DBG(xe, xe_exec_queue_is_parallel(q)))
944 return -EINVAL;
945
946 if (XE_IOCTL_DBG(xe, xe_exec_queue_is_multi_queue(q)))
947 return -EINVAL;
948
949 if (value & DRM_XE_MULTI_GROUP_CREATE) {
950 if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
951 return -EINVAL;
952
953 q->multi_queue.valid = true;
954 q->multi_queue.is_primary = true;
955 q->multi_queue.pos = 0;
956 return 0;
957 }
958
959 /* While adding secondary queues, the upper 32 bits must be 0 */
960 if (XE_IOCTL_DBG(xe, value & (~0ull << 32)))
961 return -EINVAL;
962
963 return xe_exec_queue_group_validate(xe, q, value);
964}
965
966static int exec_queue_set_multi_queue_priority(struct xe_device *xe, struct xe_exec_queue *q,
967 u64 value)
968{
969 if (XE_IOCTL_DBG(xe, value > XE_MULTI_QUEUE_PRIORITY_HIGH))
970 return -EINVAL;
971
972 /* For queue creation time (!q->xef) setting, just store the priority value */
973 if (!q->xef) {
974 q->multi_queue.priority = value;
975 return 0;
976 }
977
978 if (!xe_exec_queue_is_multi_queue(q))
979 return -EINVAL;
980
981 return q->ops->set_multi_queue_priority(q, value);
982}
983
984static int exec_queue_set_state_cache_perf_fix(struct xe_device *xe, struct xe_exec_queue *q,
985 u64 value)
986{
987 if (XE_IOCTL_DBG(xe, q->class != XE_ENGINE_CLASS_RENDER))
988 return -EOPNOTSUPP;
989
990 q->flags |= value != 0 ? EXEC_QUEUE_FLAG_DISABLE_STATE_CACHE_PERF_FIX : 0;
991
992 return 0;
993}
994
995typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
996 struct xe_exec_queue *q,
997 u64 value);
998
999static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
1000 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
1001 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
1002 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
1003 [DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE] = exec_queue_set_hang_replay_state,
1004 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP] = exec_queue_set_multi_group,
1005 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY] =
1006 exec_queue_set_multi_queue_priority,
1007 [DRM_XE_EXEC_QUEUE_SET_DISABLE_STATE_CACHE_PERF_FIX] =
1008 exec_queue_set_state_cache_perf_fix,
1009};
1010
1011/**
1012 * xe_exec_queue_set_property_ioctl() - Set a property on an exec queue
1013 * @dev: DRM device
1014 * @data: IOCTL data
1015 * @file: DRM file
1016 *
1017 * Allows setting properties on an existing exec queue. Currently only
1018 * supports setting multi-queue priority.
1019 *
1020 * Return: 0 on success, negative error code on failure
1021 */
1022int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
1023 struct drm_file *file)
1024{
1025 struct xe_device *xe = to_xe_device(dev);
1026 struct xe_file *xef = to_xe_file(file);
1027 struct drm_xe_exec_queue_set_property *args = data;
1028 struct xe_exec_queue *q;
1029 int ret;
1030 u32 idx;
1031
1032 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1033 return -EINVAL;
1034
1035 if (XE_IOCTL_DBG(xe, args->property !=
1036 DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY))
1037 return -EINVAL;
1038
1039 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
1040 if (XE_IOCTL_DBG(xe, !q))
1041 return -ENOENT;
1042
1043 idx = array_index_nospec(args->property,
1044 ARRAY_SIZE(exec_queue_set_property_funcs));
1045 ret = exec_queue_set_property_funcs[idx](xe, q, args->value);
1046 if (XE_IOCTL_DBG(xe, ret))
1047 goto err_post_lookup;
1048
1049 xe_exec_queue_put(q);
1050 return 0;
1051
1052 err_post_lookup:
1053 xe_exec_queue_put(q);
1054 return ret;
1055}
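
/*
 * Illustrative userspace sketch: dynamically changing a queue's priority
 * within its multi queue group. The ioctl macro name is assumed to follow
 * the usual DRM_IOCTL_XE_* convention for this uAPI.
 *
 *	struct drm_xe_exec_queue_set_property args = {
 *		.exec_queue_id = queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY,
 *		.value = priority,
 *	};
 *
 *	(priority must not exceed the highest multi queue priority level)
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &args);
 */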
1056
1057static int exec_queue_user_ext_check(struct xe_exec_queue *q, u64 properties)
1058{
1059 u64 secondary_queue_valid_props = BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP) |
1060 BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY);
1061
1062 /*
1063 * Only MULTI_QUEUE_PRIORITY property is valid for secondary queues of a
1064 * multi-queue group.
1065 */
1066 if (xe_exec_queue_is_multi_queue_secondary(q) &&
1067 properties & ~secondary_queue_valid_props)
1068 return -EINVAL;
1069
1070 return 0;
1071}
1072
1073static int exec_queue_user_ext_check_final(struct xe_exec_queue *q, u64 properties)
1074{
1075 /* MULTI_QUEUE_PRIORITY only applies to multi-queue group queues */
1076 if ((properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY)) &&
1077 !(properties & BIT_ULL(DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP)))
1078 return -EINVAL;
1079
1080 return 0;
1081}
1082
1083static int exec_queue_user_ext_set_property(struct xe_device *xe,
1084 struct xe_exec_queue *q,
1085 u64 extension, u64 *properties)
1086{
1087 u64 __user *address = u64_to_user_ptr(extension);
1088 struct drm_xe_ext_set_property ext;
1089 int err;
1090 u32 idx;
1091
1092 err = copy_from_user(&ext, address, sizeof(ext));
1093 if (XE_IOCTL_DBG(xe, err))
1094 return -EFAULT;
1095
1096 if (XE_IOCTL_DBG(xe, ext.property >=
1097 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
1098 XE_IOCTL_DBG(xe, ext.pad) ||
1099 XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
1100 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
1101 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE &&
1102 ext.property != DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE &&
1103 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP &&
1104 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY &&
1105 ext.property != DRM_XE_EXEC_QUEUE_SET_DISABLE_STATE_CACHE_PERF_FIX))
1106 return -EINVAL;
1107
1108 idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
1109 if (!exec_queue_set_property_funcs[idx])
1110 return -EINVAL;
1111
1112 *properties |= BIT_ULL(idx);
1113 err = exec_queue_user_ext_check(q, *properties);
1114 if (XE_IOCTL_DBG(xe, err))
1115 return err;
1116
1117 return exec_queue_set_property_funcs[idx](xe, q, ext.value);
1118}
1119
1120typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
1121 struct xe_exec_queue *q,
1122 u64 extension, u64 *properties);
1123
1124static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
1125 [DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
1126};
1127
1128#define MAX_USER_EXTENSIONS 16
1129static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
1130 u64 extensions, int ext_number, u64 *properties)
1131{
1132 u64 __user *address = u64_to_user_ptr(extensions);
1133 struct drm_xe_user_extension ext;
1134 int err;
1135 u32 idx;
1136
1137 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
1138 return -E2BIG;
1139
1140 err = copy_from_user(&ext, address, sizeof(ext));
1141 if (XE_IOCTL_DBG(xe, err))
1142 return -EFAULT;
1143
1144 if (XE_IOCTL_DBG(xe, ext.pad) ||
1145 XE_IOCTL_DBG(xe, ext.name >=
1146 ARRAY_SIZE(exec_queue_user_extension_funcs)))
1147 return -EINVAL;
1148
1149 idx = array_index_nospec(ext.name,
1150 ARRAY_SIZE(exec_queue_user_extension_funcs));
1151 err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties);
1152 if (XE_IOCTL_DBG(xe, err))
1153 return err;
1154
1155 if (ext.next_extension)
1156 return __exec_queue_user_extensions(xe, q, ext.next_extension,
1157 ++ext_number, properties);
1158
1159 return 0;
1160}
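
/*
 * Illustrative userspace sketch: extensions are chained through
 * base.next_extension and walked recursively above, at most
 * MAX_USER_EXTENSIONS deep. E.g. setting two properties at creation time:
 *
 *	struct drm_xe_ext_set_property prio = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 1,
 *	};
 *	struct drm_xe_ext_set_property timeslice = {
 *		.base.next_extension = (uintptr_t)&prio,
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *
 *	create.extensions = (uintptr_t)&timeslice;
 */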
1161
1162static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
1163 u64 extensions)
1164{
1165 u64 properties = 0;
1166 int err;
1167
1168 err = __exec_queue_user_extensions(xe, q, extensions, 0, &properties);
1169 if (XE_IOCTL_DBG(xe, err))
1170 return err;
1171
1172 err = exec_queue_user_ext_check_final(q, properties);
1173 if (XE_IOCTL_DBG(xe, err))
1174 return err;
1175
1176 if (xe_exec_queue_is_multi_queue_primary(q)) {
1177 err = xe_exec_queue_group_init(xe, q);
1178 if (XE_IOCTL_DBG(xe, err))
1179 return err;
1180 }
1181
1182 return 0;
1183}
1184
1185static u32 calc_validate_logical_mask(struct xe_device *xe,
1186 struct drm_xe_engine_class_instance *eci,
1187 u16 width, u16 num_placements)
1188{
1189 int len = width * num_placements;
1190 int i, j, n;
1191 u16 class;
1192 u16 gt_id;
1193 u32 return_mask = 0, prev_mask;
1194
1195 if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
1196 len > 1))
1197 return 0;
1198
1199 for (i = 0; i < width; ++i) {
1200 u32 current_mask = 0;
1201
1202 for (j = 0; j < num_placements; ++j) {
1203 struct xe_hw_engine *hwe;
1204
1205 n = j * width + i;
1206
1207 hwe = xe_hw_engine_lookup(xe, eci[n]);
1208 if (XE_IOCTL_DBG(xe, !hwe))
1209 return 0;
1210
1211 if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
1212 return 0;
1213
1214 if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
1215 XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
1216 return 0;
1217
1218 class = eci[n].engine_class;
1219 gt_id = eci[n].gt_id;
1220
1221 if (width == 1 || !i)
1222 return_mask |= BIT(eci[n].engine_instance);
1223 current_mask |= BIT(eci[n].engine_instance);
1224 }
1225
1226 /* Parallel submissions must be logically contiguous */
1227 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
1228 return 0;
1229
1230 prev_mask = current_mask;
1231 }
1232
1233 return return_mask;
1234}
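
/*
 * Illustrative layout note: the instance array passed from userspace is
 * placement-major, i.e. entry n = j * width + i describes width slot i of
 * placement j. With width = 2 and num_placements = 2 the four entries are
 * ordered { p0.i0, p0.i1, p1.i0, p1.i1 }, and the instances used by
 * consecutive width slots must be logically contiguous
 * (current_mask == prev_mask << 1 above).
 */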
1235
1236static bool has_sched_groups(struct xe_gt *gt)
1237{
1238 if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_sriov_pf_sched_groups_enabled(gt))
1239 return true;
1240
1241 if (IS_SRIOV_VF(gt_to_xe(gt)) && xe_gt_sriov_vf_sched_groups_enabled(gt))
1242 return true;
1243
1244 return false;
1245}
1246
1247/**
1248 * xe_exec_queue_create_ioctl() - Create an exec queue via IOCTL
1249 * @dev: DRM device
1250 * @data: IOCTL data
1251 * @file: DRM file
1252 *
1253 * Creates a new exec queue based on user-provided parameters. Supports
1254 * creating VM bind queues, regular exec queues, multi-lrc exec queues
1255 * and multi-queue groups.
1256 *
1257 * Return: 0 on success with exec_queue_id filled in, negative error code on failure
1258 */
1259int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
1260 struct drm_file *file)
1261{
1262 struct xe_device *xe = to_xe_device(dev);
1263 struct xe_file *xef = to_xe_file(file);
1264 struct drm_xe_exec_queue_create *args = data;
1265 struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
1266 struct drm_xe_engine_class_instance __user *user_eci =
1267 u64_to_user_ptr(args->instances);
1268 struct xe_hw_engine *hwe;
1269 struct xe_vm *vm;
1270 struct xe_tile *tile;
1271 struct xe_exec_queue *q = NULL;
1272 u32 logical_mask;
1273 u32 flags = 0;
1274 u32 id;
1275 u32 len;
1276 int err;
1277
1278 if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
1279 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1280 return -EINVAL;
1281
1282 len = args->width * args->num_placements;
1283 if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
1284 return -EINVAL;
1285
1286 err = copy_from_user(eci, user_eci,
1287 sizeof(struct drm_xe_engine_class_instance) * len);
1288 if (XE_IOCTL_DBG(xe, err))
1289 return -EFAULT;
1290
1291 if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id)))
1292 return -EINVAL;
1293
1294 if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
1295 flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
1296
1297 if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
1298 if (XE_IOCTL_DBG(xe, args->width != 1) ||
1299 XE_IOCTL_DBG(xe, args->num_placements != 1) ||
1300 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
1301 return -EINVAL;
1302
1303 vm = xe_vm_lookup(xef, args->vm_id);
1304 if (XE_IOCTL_DBG(xe, !vm))
1305 return -ENOENT;
1306
1307 err = down_read_interruptible(&vm->lock);
1308 if (err) {
1309 xe_vm_put(vm);
1310 return err;
1311 }
1312
1313 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
1314 up_read(&vm->lock);
1315 xe_vm_put(vm);
1316 return -ENOENT;
1317 }
1318
1319 for_each_tile(tile, xe, id) {
1320 struct xe_exec_queue *new;
1321
1322 flags |= EXEC_QUEUE_FLAG_VM;
1323 if (id)
1324 flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
1325
1326 new = xe_exec_queue_create_bind(xe, tile, vm, flags,
1327 args->extensions);
1328 if (IS_ERR(new)) {
1329 up_read(&vm->lock);
1330 xe_vm_put(vm);
1331 err = PTR_ERR(new);
1332 if (q)
1333 goto put_exec_queue;
1334 return err;
1335 }
1336 if (id == 0)
1337 q = new;
1338 else
1339 list_add_tail(&new->multi_gt_list,
1340 &q->multi_gt_link);
1341 }
1342 up_read(&vm->lock);
1343 xe_vm_put(vm);
1344 } else {
1345 logical_mask = calc_validate_logical_mask(xe, eci,
1346 args->width,
1347 args->num_placements);
1348 if (XE_IOCTL_DBG(xe, !logical_mask))
1349 return -EINVAL;
1350
1351 hwe = xe_hw_engine_lookup(xe, eci[0]);
1352 if (XE_IOCTL_DBG(xe, !hwe))
1353 return -EINVAL;
1354
1355 /* multi-lrc is only supported on select engine classes */
1356 if (XE_IOCTL_DBG(xe, args->width > 1 &&
1357 !(xe->info.multi_lrc_mask & BIT(hwe->class))))
1358 return -EOPNOTSUPP;
1359
1360 vm = xe_vm_lookup(xef, args->vm_id);
1361 if (XE_IOCTL_DBG(xe, !vm))
1362 return -ENOENT;
1363
1364 err = down_read_interruptible(&vm->lock);
1365 if (err) {
1366 xe_vm_put(vm);
1367 return err;
1368 }
1369
1370 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
1371 up_read(&vm->lock);
1372 xe_vm_put(vm);
1373 return -ENOENT;
1374 }
1375
1376 /* SRIOV sched groups are not compatible with multi-lrc */
1377 if (XE_IOCTL_DBG(xe, args->width > 1 && has_sched_groups(hwe->gt))) {
1378 up_read(&vm->lock);
1379 xe_vm_put(vm);
1380 return -EINVAL;
1381 }
1382
1383 q = xe_exec_queue_create(xe, vm, logical_mask,
1384 args->width, hwe, flags,
1385 args->extensions);
1386 up_read(&vm->lock);
1387 xe_vm_put(vm);
1388 if (IS_ERR(q))
1389 return PTR_ERR(q);
1390
1391 if (xe_exec_queue_is_multi_queue_secondary(q)) {
1392 err = xe_exec_queue_group_add(xe, q);
1393 if (XE_IOCTL_DBG(xe, err))
1394 goto put_exec_queue;
1395 }
1396
1397 if (xe_vm_in_preempt_fence_mode(vm)) {
1398 q->lr.context = dma_fence_context_alloc(1);
1399
1400 err = xe_vm_add_compute_exec_queue(vm, q);
1401 if (XE_IOCTL_DBG(xe, err))
1402 goto delete_queue_group;
1403 }
1404
1405 if (q->vm && q->hwe->hw_engine_group) {
1406 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
1407 if (err)
1408 goto kill_exec_queue;
1409 }
1410 }
1411
1412 q->xef = xe_file_get(xef);
1413 if (eci[0].engine_class != DRM_XE_ENGINE_CLASS_VM_BIND)
1414 xe_vm_add_exec_queue(vm, q);
1415
1416 /* user id alloc must always be last in ioctl to prevent UAF */
1417 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
1418 if (err)
1419 goto del_hw_engine_group;
1420
1421 args->exec_queue_id = id;
1422
1423 return 0;
1424
1425del_hw_engine_group:
1426 if (q->vm && q->hwe && q->hwe->hw_engine_group)
1427 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
1428kill_exec_queue:
1429 xe_exec_queue_kill(q);
1430delete_queue_group:
1431 if (xe_exec_queue_is_multi_queue_secondary(q))
1432 xe_exec_queue_group_delete(xe, q);
1433put_exec_queue:
1434 xe_exec_queue_put(q);
1435 return err;
1436}
1437
1438/**
1439 * xe_exec_queue_get_property_ioctl() - Get a property from an exec queue
1440 * @dev: DRM device
1441 * @data: IOCTL data
1442 * @file: DRM file
1443 *
1444 * Retrieves property values from an existing exec queue. Currently supports
1445 * getting the ban/reset status.
1446 *
1447 * Return: 0 on success with value filled in, negative error code on failure
1448 */
1449int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
1450 struct drm_file *file)
1451{
1452 struct xe_device *xe = to_xe_device(dev);
1453 struct xe_file *xef = to_xe_file(file);
1454 struct drm_xe_exec_queue_get_property *args = data;
1455 struct xe_exec_queue *q;
1456 int ret;
1457
1458 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1459 return -EINVAL;
1460
1461 q = xe_exec_queue_lookup(xef, args->exec_queue_id);
1462 if (XE_IOCTL_DBG(xe, !q))
1463 return -ENOENT;
1464
1465 switch (args->property) {
1466 case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
1467 args->value = q->ops->reset_status(q);
1468 ret = 0;
1469 break;
1470 default:
1471 ret = -EINVAL;
1472 }
1473
1474 xe_exec_queue_put(q);
1475
1476 return ret;
1477}
1478
1479/**
1480 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
1481 * @q: The exec_queue
1482 *
1483 * Return: True if the exec_queue is long-running, false otherwise.
1484 */
1485bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
1486{
1487 return q->vm && xe_vm_in_lr_mode(q->vm) &&
1488 !(q->flags & EXEC_QUEUE_FLAG_VM);
1489}
1490
1491/**
1492 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
1493 * @q: The exec_queue
1494 *
1495 * FIXME: Need to determine what to use as the short-lived
1496 * timeline lock for the exec_queues, so that the return value
1497 * of this function becomes more than just an advisory
1498 * snapshot in time. The timeline lock must protect the
1499 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
1502 *
1503 * Return: True if the exec_queue is idle, false otherwise.
1504 */
1505bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
1506{
1507 if (xe_exec_queue_is_parallel(q)) {
1508 int i;
1509
1510 for (i = 0; i < q->width; ++i) {
1511 if (xe_lrc_seqno(q->lrc[i]) !=
1512 q->lrc[i]->fence_ctx.next_seqno - 1)
1513 return false;
1514 }
1515
1516 return true;
1517 }
1518
1519 return xe_lrc_seqno(q->lrc[0]) ==
1520 q->lrc[0]->fence_ctx.next_seqno - 1;
1521}
1522
1523/**
1524 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
1525 * from hw
1526 * @q: The exec queue
1527 *
1528 * Update the timestamp saved by HW for this exec queue and save run ticks
1529 * calculated by using the delta from last update.
1530 */
1531void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
1532{
1533 struct xe_device *xe = gt_to_xe(q->gt);
1534 struct xe_lrc *lrc;
1535 u64 old_ts, new_ts;
1536 int idx;
1537
	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
1542 if (!q->xef)
1543 return;
1544
1545 /* Synchronize with unbind while holding the xe file open */
1546 if (!drm_dev_enter(&xe->drm, &idx))
1547 return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true
	 * and they don't exit 100% aligned. On the other hand, looping
	 * through the LRCs and reading them at different times could also
	 * introduce errors.
	 */
1556 lrc = q->lrc[0];
1557 new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
1558 q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
1559
1560 drm_dev_exit(idx);
1561}
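
/*
 * Illustrative accounting note: with a parallel queue of width 4 and a
 * timestamp delta of 1000 ticks sampled on lrc[0], 4000 ticks are charged
 * to the owning client for this engine class.
 */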
1562
1563/**
1564 * xe_exec_queue_kill - permanently stop all execution from an exec queue
1565 * @q: The exec queue
1566 *
1567 * This function permanently stops all activity on an exec queue. If the queue
1568 * is actively executing on the HW, it will be kicked off the engine; any
1569 * pending jobs are discarded and all future submissions are rejected.
1570 * This function is safe to call multiple times.
1571 */
1572void xe_exec_queue_kill(struct xe_exec_queue *q)
1573{
1574 struct xe_exec_queue *eq = q, *next;
1575
1576 list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
1577 multi_gt_link) {
1578 q->ops->kill(eq);
1579 xe_vm_remove_compute_exec_queue(q->vm, eq);
1580 }
1581
1582 q->ops->kill(q);
1583 xe_vm_remove_compute_exec_queue(q->vm, q);
1584}
1585
1586/**
1587 * xe_exec_queue_destroy_ioctl() - Destroy an exec queue via IOCTL
1588 * @dev: DRM device
1589 * @data: IOCTL data
1590 * @file: DRM file
1591 *
1592 * Destroys an existing exec queue and releases its reference.
1593 *
1594 * Return: 0 on success, negative error code on failure
1595 */
1596int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
1597 struct drm_file *file)
1598{
1599 struct xe_device *xe = to_xe_device(dev);
1600 struct xe_file *xef = to_xe_file(file);
1601 struct drm_xe_exec_queue_destroy *args = data;
1602 struct xe_exec_queue *q;
1603
1604 if (XE_IOCTL_DBG(xe, args->pad) ||
1605 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
1606 return -EINVAL;
1607
1608 mutex_lock(&xef->exec_queue.lock);
1609 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
1610 if (q)
1611 atomic_inc(&xef->exec_queue.pending_removal);
1612 mutex_unlock(&xef->exec_queue.lock);
1613
1614 if (XE_IOCTL_DBG(xe, !q))
1615 return -ENOENT;
1616
1617 if (q->vm && q->hwe->hw_engine_group)
1618 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
1619
1620 xe_exec_queue_kill(q);
1621
1622 trace_xe_exec_queue_close(q);
1623 xe_exec_queue_put(q);
1624
1625 return 0;
1626}
1627
1628static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
1629 struct xe_vm *vm)
1630{
1631 if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
1632 xe_migrate_job_lock_assert(q);
1633 } else if (q->flags & EXEC_QUEUE_FLAG_VM) {
1634 lockdep_assert_held(&vm->lock);
1635 } else {
1636 xe_vm_assert_held(vm);
1637 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
1638 }
1639}
1640
1641/**
1642 * xe_exec_queue_last_fence_put() - Drop ref to last fence
1643 * @q: The exec queue
1644 * @vm: The VM the engine does a bind or exec for
1645 */
1646void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
1647{
1648 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1649
1650 xe_exec_queue_last_fence_put_unlocked(q);
1651}
1652
1653/**
1654 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
1655 * @q: The exec queue
1656 *
1657 * Only safe to be called from xe_exec_queue_destroy().
1658 */
1659void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
1660{
1661 if (q->last_fence) {
1662 dma_fence_put(q->last_fence);
1663 q->last_fence = NULL;
1664 }
1665}
1666
1667/**
1668 * xe_exec_queue_last_fence_get() - Get last fence
1669 * @q: The exec queue
1670 * @vm: The VM the engine does a bind or exec for
1671 *
1672 * Get last fence, takes a ref
1673 *
1674 * Returns: last fence if not signaled, dma fence stub if signaled
1675 */
1676struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
1677 struct xe_vm *vm)
1678{
1679 struct dma_fence *fence;
1680
1681 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1682
1683 if (q->last_fence &&
1684 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1685 xe_exec_queue_last_fence_put(q, vm);
1686
1687 fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
1688 dma_fence_get(fence);
1689 return fence;
1690}
1691
1692/**
1693 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
1694 * @q: The exec queue
1695 * @vm: The VM the engine does a bind or exec for
1696 *
1697 * Get last fence, takes a ref. Only safe to be called in the context of
1698 * resuming the hw engine group's long-running exec queue, when the group
1699 * semaphore is held.
1700 *
1701 * Returns: last fence if not signaled, dma fence stub if signaled
1702 */
1703struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
1704 struct xe_vm *vm)
1705{
1706 struct dma_fence *fence;
1707
1708 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
1709
1710 if (q->last_fence &&
1711 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1712 xe_exec_queue_last_fence_put_unlocked(q);
1713
1714 fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
1715 dma_fence_get(fence);
1716 return fence;
1717}
1718
1719/**
1720 * xe_exec_queue_last_fence_set() - Set last fence
1721 * @q: The exec queue
1722 * @vm: The VM the engine does a bind or exec for
1723 * @fence: The fence
1724 *
1725 * Set the last fence for the engine. Increases reference count for fence, when
1726 * closing engine xe_exec_queue_last_fence_put should be called.
1727 */
1728void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
1729 struct dma_fence *fence)
1730{
1731 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1732 xe_assert(vm->xe, !dma_fence_is_container(fence));
1733
1734 xe_exec_queue_last_fence_put(q, vm);
1735 q->last_fence = dma_fence_get(fence);
1736}
1737
1738/**
1739 * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
1740 * @q: The exec queue
1741 * @vm: The VM the engine does a bind for
1742 * @type: Either primary or media GT
1743 */
1744void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
1745 struct xe_vm *vm,
1746 unsigned int type)
1747{
1748 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1749 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1750 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1751
1752 xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
1753}
1754
1755/**
1756 * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
1757 * invalidation fence unlocked
1758 * @q: The exec queue
1759 * @type: Either primary or media GT
1760 *
1761 * Only safe to be called from xe_exec_queue_destroy().
1762 */
1763void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
1764 unsigned int type)
1765{
1766 xe_assert(gt_to_xe(q->gt), type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1767 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1768
1769 dma_fence_put(q->tlb_inval[type].last_fence);
1770 q->tlb_inval[type].last_fence = NULL;
1771}
1772
1773/**
1774 * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
1775 * @q: The exec queue
1776 * @vm: The VM the engine does a bind for
1777 * @type: Either primary or media GT
1778 *
1779 * Get last fence, takes a ref
1780 *
1781 * Returns: last fence if not signaled, dma fence stub if signaled
1782 */
1783struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
1784 struct xe_vm *vm,
1785 unsigned int type)
1786{
1787 struct dma_fence *fence;
1788
1789 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1790 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1791 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1792 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1793 EXEC_QUEUE_FLAG_MIGRATE));
1794
1795 if (q->tlb_inval[type].last_fence &&
1796 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1797 &q->tlb_inval[type].last_fence->flags))
1798 xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1799
1800 fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
1801 dma_fence_get(fence);
1802 return fence;
1803}
1804
1805/**
1806 * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
1807 * @q: The exec queue
1808 * @vm: The VM the engine does a bind for
1809 * @fence: The fence
1810 * @type: Either primary or media GT
1811 *
1812 * Set the last fence for the tlb invalidation type on the queue. Increases
1813 * reference count for fence, when closing queue
1814 * xe_exec_queue_tlb_inval_last_fence_put should be called.
1815 */
1816void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
1817 struct xe_vm *vm,
1818 struct dma_fence *fence,
1819 unsigned int type)
1820{
1821 xe_exec_queue_last_fence_lockdep_assert(q, vm);
1822 xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1823 type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
1824 xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1825 EXEC_QUEUE_FLAG_MIGRATE));
1826 xe_assert(vm->xe, !dma_fence_is_container(fence));
1827
1828 xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1829 q->tlb_inval[type].last_fence = dma_fence_get(fence);
1830}
1831
1832/**
1833 * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
1834 * within all LRCs of a queue.
1835 * @q: the &xe_exec_queue struct instance containing target LRCs
1836 * @scratch: scratch buffer to be used as temporary storage
1837 *
1838 * Returns: zero on success, negative error code on failure
1839 */
1840int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
1841{
1842 int i;
1843 int err = 0;
1844
1845 for (i = 0; i < q->width; ++i) {
1846 struct xe_lrc *lrc;
1847
1848 lrc = xe_exec_queue_get_lrc(q, i);
1849 if (!lrc)
1850 continue;
1851
1852 xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
1853 xe_lrc_update_hwctx_regs_with_address(lrc);
1854 err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
1855 xe_lrc_put(lrc);
1856 if (err)
1857 break;
1858 }
1859
1860 return err;
1861}