Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: MIT */
2/* Copyright (C) 2023 Collabora ltd. */
3#ifndef _PANTHOR_DRM_H_
4#define _PANTHOR_DRM_H_
5
6#include "drm.h"
7
8#if defined(__cplusplus)
9extern "C" {
10#endif
11
12/**
13 * DOC: Introduction
14 *
15 * This documentation describes the Panthor IOCTLs.
16 *
17 * Just a few generic rules about the data passed to the Panthor IOCTLs:
18 *
19 * - Structures must be aligned on 64-bit/8-byte. If the object is not
20 * naturally aligned, a padding field must be added.
21 * - Fields must be explicitly aligned to their natural type alignment with
22 * pad[0..N] fields.
23 * - All padding fields will be checked by the driver to make sure they are
24 * zeroed.
25 * - Flags can be added, but not removed/replaced.
26 * - New fields can be added to the main structures (the structures
27 * directly passed to the ioctl). Those fields can be added at the end of
28 * the structure, or replace existing padding fields. Any new field being
29 * added must preserve the behavior that existed before those fields were
30 * added when a value of zero is passed.
31 * - New fields can be added to indirect objects (objects pointed by the
32 * main structure), iff those objects are passed a size to reflect the
33 * size known by the userspace driver (see drm_panthor_obj_array::stride
34 * or drm_panthor_dev_query::size).
35 * - If the kernel driver is too old to know some fields, those will be
36 * ignored if zero, and otherwise rejected (and so will be zero on output).
37 * - If userspace is too old to know some fields, those will be zeroed
38 * (input) before the structure is parsed by the kernel driver.
39 * - Each new flag/field addition must come with a driver version update so
40 * the userspace driver doesn't have to trial and error to know which
41 * flags are supported.
42 * - Structures should not contain unions, as this would defeat the
43 * extensibility of such structures.
44 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
45 * at the end of the drm_panthor_ioctl_id enum.
46 */
47
/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
 * different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The Userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)
74
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,

	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
	DRM_PANTHOR_BO_SET_LABEL,

	/**
	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
	 *
	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
	 * type seen by the process that manipulates the FD, such that a 32-bit process can
	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
	 * code. In that case, the kernel thinks it's a 64-bit process and assumes
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
	 * pgoff_t size.
	 */
	DRM_PANTHOR_SET_USER_MMIO_OFFSET,

	/** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
	DRM_PANTHOR_BO_SYNC,

	/**
	 * @DRM_PANTHOR_BO_QUERY_INFO: Query information about a BO.
	 *
	 * This is useful for imported BOs.
	 */
	DRM_PANTHOR_BO_QUERY_INFO,
};
158
159/**
160 * DOC: IOCTL arguments
161 */
162
/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to changes in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};
184
/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
195
/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};
218
/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
236
/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};
257
/**
 * enum drm_panthor_gpu_coherency: Type of GPU coherency
 */
enum drm_panthor_gpu_coherency {
	/**
	 * @DRM_PANTHOR_GPU_COHERENCY_ACE_LITE: ACE Lite coherency.
	 */
	DRM_PANTHOR_GPU_COHERENCY_ACE_LITE = 0,

	/**
	 * @DRM_PANTHOR_GPU_COHERENCY_ACE: ACE coherency.
	 */
	DRM_PANTHOR_GPU_COHERENCY_ACE = 1,

	/**
	 * @DRM_PANTHOR_GPU_COHERENCY_NONE: No coherency.
	 */
	DRM_PANTHOR_GPU_COHERENCY_NONE = 31,
};
277
/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id : GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/**
	 * @coherency_features: Coherency features.
	 *
	 * Combination of drm_panthor_gpu_coherency flags.
	 *
	 * Note that this only describes the coherency protocols supported by
	 * the GPU; the actual coherency in place depends on the SoC
	 * integration and is reflected by
	 * drm_panthor_gpu_info::selected_coherency.
	 */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
	__u32 as_present;

	/**
	 * @selected_coherency: Coherency selected for this device.
	 *
	 * One of drm_panthor_gpu_coherency.
	 */
	__u32 selected_coherency;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;

	/** @gpu_features: Bitmask describing supported GPU-wide features */
	__u64 gpu_features;
};
377
/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_PANTHOR_IOCTL_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};
411
/**
 * enum drm_panthor_timestamp_info_flags - drm_panthor_timestamp_info.flags
 */
enum drm_panthor_timestamp_info_flags {
	/** @DRM_PANTHOR_TIMESTAMP_GPU: Query GPU time. */
	DRM_PANTHOR_TIMESTAMP_GPU = 1 << 0,

	/** @DRM_PANTHOR_TIMESTAMP_CPU_NONE: Don't query CPU time. */
	DRM_PANTHOR_TIMESTAMP_CPU_NONE = 0 << 1,

	/** @DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC: Query CPU time using CLOCK_MONOTONIC. */
	DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC = 1 << 1,

	/** @DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW: Query CPU time using CLOCK_MONOTONIC_RAW. */
	DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW = 2 << 1,

	/** @DRM_PANTHOR_TIMESTAMP_CPU_TYPE_MASK: Space reserved for CPU clock type. */
	DRM_PANTHOR_TIMESTAMP_CPU_TYPE_MASK = 7 << 1,

	/** @DRM_PANTHOR_TIMESTAMP_GPU_OFFSET: Query GPU offset. */
	DRM_PANTHOR_TIMESTAMP_GPU_OFFSET = 1 << 4,

	/** @DRM_PANTHOR_TIMESTAMP_GPU_CYCLE_COUNT: Query GPU cycle count. */
	DRM_PANTHOR_TIMESTAMP_GPU_CYCLE_COUNT = 1 << 5,

	/** @DRM_PANTHOR_TIMESTAMP_FREQ: Query timestamp frequency. */
	DRM_PANTHOR_TIMESTAMP_FREQ = 1 << 6,

	/** @DRM_PANTHOR_TIMESTAMP_DURATION: Return duration of time query. */
	DRM_PANTHOR_TIMESTAMP_DURATION = 1 << 7,
};
443
/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current GPU timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the GPU timestamp timer. */
	__u64 timestamp_offset;

	/**
	 * @flags: Bitmask of drm_panthor_timestamp_info_flags.
	 *
	 * If set to 0, then it is interpreted as:
	 * DRM_PANTHOR_TIMESTAMP_GPU |
	 * DRM_PANTHOR_TIMESTAMP_GPU_OFFSET |
	 * DRM_PANTHOR_TIMESTAMP_FREQ
	 *
	 * Note: these flags are exclusive to each other (only one can be used):
	 * - DRM_PANTHOR_TIMESTAMP_CPU_NONE
	 * - DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC
	 * - DRM_PANTHOR_TIMESTAMP_CPU_MONOTONIC_RAW
	 */
	__u32 flags;

	/** @duration_nsec: Duration of time query. */
	__u32 duration_nsec;

	/** @cycle_count: Value of GPU_CYCLE_COUNT. */
	__u64 cycle_count;

	/** @cpu_timestamp_sec: Seconds part of CPU timestamp. */
	__u64 cpu_timestamp_sec;

	/** @cpu_timestamp_nsec: Nanoseconds part of CPU timestamp. */
	__u64 cpu_timestamp_nsec;
};
489
/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};
506
/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};
534
/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_PANTHOR_IOCTL_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};
567
/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_PANTHOR_IOCTL_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};
578
/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};
625
/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};
669
/**
 * enum drm_panthor_vm_bind_flags - VM bind flags
 */
enum drm_panthor_vm_bind_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
	 * queue instead of being executed synchronously.
	 */
	DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
};
680
/**
 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
 */
struct drm_panthor_vm_bind {
	/** @vm_id: VM targeted by the bind request. */
	__u32 vm_id;

	/** @flags: Combination of drm_panthor_vm_bind_flags flags. */
	__u32 flags;

	/** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
	struct drm_panthor_obj_array ops;
};
694
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};
721
/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};
736
/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),

	/**
	 * @DRM_PANTHOR_BO_WB_MMAP: Force "Write-Back Cacheable" CPU mapping.
	 *
	 * CPU map the buffer object in userspace by forcing the "Write-Back
	 * Cacheable" cacheability attribute. The mapping otherwise uses the
	 * "Non-Cacheable" attribute if the GPU is not IO coherent.
	 */
	DRM_PANTHOR_BO_WB_MMAP = (1 << 1),
};
753
/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};
789
/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};
803
/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};
820
/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};
845
/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};
923
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};
934
/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This is describing the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction)
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be aligned on 64-byte.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};
986
/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};
1000
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};
1028
/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on. */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ. */
	__u32 pad;
};
1051
/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to. */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight tiler jobs in-flight, the FW
	 * will wait for render passes to finish before queuing new tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned heap GPU virtual address. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list.
	 * This is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};
1098
/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};
1113
/**
 * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
 */
struct drm_panthor_bo_set_label {
	/** @handle: Handle of the buffer object to label. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/**
	 * @label: User pointer to a NUL-terminated string.
	 *
	 * Length cannot be greater than 4096.
	 */
	__u64 label;
};
1131
/**
 * struct drm_panthor_set_user_mmio_offset - Arguments passed to
 * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
 *
 * This ioctl is only really useful if you want to support userspace
 * CPU emulation environments where the size of an unsigned long differs
 * between the host and the guest architectures.
 */
struct drm_panthor_set_user_mmio_offset {
	/**
	 * @offset: User MMIO offset to use.
	 *
	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
	 *
	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
	 * OFFSET_64BIT based on the size of an unsigned long) unless you
	 * have a very good reason to overrule this decision.
	 */
	__u64 offset;
};
1153
/**
 * enum drm_panthor_bo_sync_op_type - BO sync type
 */
enum drm_panthor_bo_sync_op_type {
	/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
	DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,

	/** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
	DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
};
1164
/**
 * struct drm_panthor_bo_sync_op - BO map sync op
 */
struct drm_panthor_bo_sync_op {
	/** @handle: Handle of the buffer object to sync. */
	__u32 handle;

	/** @type: Type of operation. One of enum drm_panthor_bo_sync_op_type. */
	__u32 type;

	/**
	 * @offset: Offset into the BO at which the sync range starts.
	 *
	 * This will be rounded down to the nearest cache line as needed.
	 */
	__u64 offset;

	/**
	 * @size: Size of the range to sync.
	 *
	 * @size + @offset will be rounded up to the nearest cache line as
	 * needed.
	 */
	__u64 size;
};
1190
/**
 * struct drm_panthor_bo_sync - BO map sync request
 */
struct drm_panthor_bo_sync {
	/**
	 * @ops: Array of struct drm_panthor_bo_sync_op sync operations.
	 */
	struct drm_panthor_obj_array ops;
};
1200
/**
 * enum drm_panthor_bo_extra_flags - Set of flags returned on a BO_QUERY_INFO request
 *
 * Those are flags reflecting BO properties that are not directly coming from the flags
 * passed at creation time, or information on BOs that were imported from other drivers.
 */
enum drm_panthor_bo_extra_flags {
	/**
	 * @DRM_PANTHOR_BO_IS_IMPORTED: BO has been imported from an external driver.
	 *
	 * Note that imported dma-buf handles are not flagged as imported if they
	 * were exported by panthor. Only buffers that are coming from other drivers
	 * (dma heaps, other GPUs, display controllers, V4L, ...) are flagged.
	 *
	 * It's also important to note that all imported BOs are mapped cached and can't
	 * be considered IO-coherent even if the GPU is. This means they require explicit
	 * syncs that must go through the DRM_PANTHOR_BO_SYNC ioctl (userland cache
	 * maintenance is not allowed in that case, because extra operations might be
	 * needed to make changes visible to the CPU/device, like buffer migration when the
	 * exporter is a GPU with its own VRAM).
	 */
	DRM_PANTHOR_BO_IS_IMPORTED = (1 << 0),
};
1224
/**
 * struct drm_panthor_bo_query_info - Query BO info
 */
struct drm_panthor_bo_query_info {
	/** @handle: Handle of the buffer object to query flags on. */
	__u32 handle;

	/**
	 * @extra_flags: Combination of enum drm_panthor_bo_extra_flags flags.
	 */
	__u32 extra_flags;

	/**
	 * @create_flags: Flags passed at creation time.
	 *
	 * Combination of enum drm_panthor_bo_flags flags.
	 * Will be zero if the buffer comes from a different driver.
	 */
	__u32 create_flags;

	/** @pad: Will be zero on return. */
	__u32 pad;
};
1248
/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx id.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)
1263
/* IOCTL numbers to be passed to ioctl() from userspace, one per Panthor ioctl. */
enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
	DRM_IOCTL_PANTHOR_BO_SYNC =
		DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
	DRM_IOCTL_PANTHOR_BO_QUERY_INFO =
		DRM_IOCTL_PANTHOR(WR, BO_QUERY_INFO, bo_query_info),
};
1300
1301#if defined(__cplusplus)
1302}
1303#endif
1304
1305#endif /* _PANTHOR_DRM_H_ */