/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
 * - 1.16 - Add contiguous VRAM allocation flag
 * - 1.17 - Add SDMA queue creation with target SDMA engine ID
 * - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
 * - 1.19 - Add a new ioctl to create secondary kfd processes
 * - 1.20 - Trap handler support for expert scheduling mode available
 * - 1.21 - Debugger support to subscribe to LDS out-of-address exceptions
 * - 1.22 - Add queue creation with metadata ring base address
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 22

struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
        __u32 minor_version;    /* from KFD */
};
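
/*
 * Example (illustrative sketch, not part of the ABI): querying the
 * interface version from user space. The "/dev/kfd" device node is the
 * usual entry point; includes and error handling are trimmed, and the
 * "kfd" fd is reused by the later examples in this file.
 *
 *      int kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *      struct kfd_ioctl_get_version_args ver = {0};
 *
 *      if (kfd >= 0 && ioctl(kfd, AMDKFD_IOC_GET_VERSION, &ver) == 0)
 *              printf("KFD ioctl version %u.%u\n",
 *                     ver.major_version, ver.minor_version);
 */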

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4

#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15

#define KFD_MIN_QUEUE_RING_SIZE 1024

struct kfd_ioctl_create_queue_args {
        __u64 ring_base_address;        /* to KFD */
        __u64 write_pointer_address;    /* to KFD */
        __u64 read_pointer_address;     /* to KFD */
        __u64 doorbell_offset;          /* from KFD */

        __u32 ring_size;                /* to KFD */
        __u32 gpu_id;                   /* to KFD */
        __u32 queue_type;               /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
        __u32 queue_id;                 /* from KFD */

        __u64 eop_buffer_address;       /* to KFD */
        __u64 eop_buffer_size;          /* to KFD */
        __u64 ctx_save_restore_address; /* to KFD */
        __u32 ctx_save_restore_size;    /* to KFD */
        __u32 ctl_stack_size;           /* to KFD */
        __u32 sdma_engine_id;           /* to KFD */
        __u32 metadata_ring_size;       /* to KFD */
};

struct kfd_ioctl_destroy_queue_args {
        __u32 queue_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_update_queue_args {
        __u64 ring_base_address;        /* to KFD */

        __u32 queue_id;                 /* to KFD */
        __u32 ring_size;                /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_cu_mask;      /* to KFD */
        __u64 cu_mask_ptr;      /* to KFD */
};
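
/*
 * Example (illustrative sketch): restricting a queue to the first 32
 * compute units. @num_cu_mask is assumed to count mask bits (i.e. 32
 * per __u32 word pointed to by @cu_mask_ptr); "queue_id" is assumed to
 * come from a prior AMDKFD_IOC_CREATE_QUEUE call.
 *
 *      __u32 cu_mask[2] = { 0xffffffff, 0x0 };
 *      struct kfd_ioctl_set_cu_mask_args args = {
 *              .queue_id = queue_id,
 *              .num_cu_mask = 2 * 32,
 *              .cu_mask_ptr = (__u64)(uintptr_t)cu_mask,
 *      };
 *
 *      ioctl(kfd, AMDKFD_IOC_SET_CU_MASK, &args);
 */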

struct kfd_ioctl_get_queue_wave_state_args {
        __u64 ctl_stack_address;        /* to KFD */
        __u32 ctl_stack_used_size;      /* from KFD */
        __u32 save_area_used_size;      /* from KFD */
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
        __u64 available;        /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};
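
/*
 * Example (illustrative sketch): querying how much memory is still
 * available for allocation on one GPU. "gpu_id" is assumed to have been
 * discovered through the KFD topology interface.
 *
 *      struct kfd_ioctl_get_available_memory_args args = { .gpu_id = gpu_id };
 *
 *      if (ioctl(kfd, AMDKFD_IOC_AVAILABLE_MEMORY, &args) == 0)
 *              printf("%llu bytes available\n",
 *                     (unsigned long long)args.available);
 */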

struct kfd_dbg_device_info_entry {
        __u64 exception_status;
        __u64 lds_base;
        __u64 lds_limit;
        __u64 scratch_base;
        __u64 scratch_limit;
        __u64 gpuvm_base;
        __u64 gpuvm_limit;
        __u32 gpu_id;
        __u32 location_id;
        __u32 vendor_id;
        __u32 device_id;
        __u32 revision_id;
        __u32 subsystem_vendor_id;
        __u32 subsystem_device_id;
        __u32 fw_version;
        __u32 gfx_target_version;
        __u32 simd_count;
        __u32 max_waves_per_simd;
        __u32 array_count;
        __u32 simd_arrays_per_engine;
        __u32 num_xcc;
        __u32 capability;
        __u32 debug_prop;
        __u32 capability2;
        __u32 pad;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

/* Misc. per process flags */
#define KFD_PROC_FLAG_MFMA_HIGH_PRECISION (1 << 0)

struct kfd_ioctl_set_memory_policy_args {
        __u64 alternate_aperture_base;  /* to KFD */
        __u64 alternate_aperture_size;  /* to KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 default_policy;           /* to KFD */
        __u32 alternate_policy;         /* to KFD */
        __u32 misc_process_flag;        /* to KFD */
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of a GPU reset, the counters are not affected.
 */

struct kfd_ioctl_get_clock_counters_args {
        __u64 gpu_clock_counter;        /* from KFD */
        __u64 cpu_clock_counter;        /* from KFD */
        __u64 system_clock_counter;     /* from KFD */
        __u64 system_clock_freq;        /* from KFD */

        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_process_device_apertures {
        __u64 lds_base;         /* from KFD */
        __u64 lds_limit;        /* from KFD */
        __u64 scratch_base;     /* from KFD */
        __u64 scratch_limit;    /* from KFD */
        __u64 gpuvm_base;       /* from KFD */
        __u64 gpuvm_limit;      /* from KFD */
        __u32 gpu_id;           /* from KFD */
        __u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
        struct kfd_process_device_apertures
                process_apertures[NUM_OF_SUPPORTED_GPUS];       /* from KFD */

        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
        __u32 num_of_nodes;
        __u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
        /* User allocated. Pointer to struct kfd_process_device_apertures
         * filled in by Kernel
         */
        __u64 kfd_process_device_apertures_ptr;
        /* to KFD - indicates amount of memory present in
         *          kfd_process_device_apertures_ptr
         * from KFD - Number of entries filled by KFD.
         */
        __u32 num_of_nodes;
        __u32 pad;
};
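
/*
 * Example (illustrative sketch) of a two-call pattern for the _NEW
 * ioctl: first query the node count (the kernel is assumed to report it
 * when @num_of_nodes is 0), then allocate and fetch the entries.
 * Includes and error handling are trimmed.
 *
 *      struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *      struct kfd_process_device_apertures *entries;
 *
 *      ioctl(kfd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *      entries = calloc(args.num_of_nodes, sizeof(*entries));
 *      args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)entries;
 *      ioctl(kfd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 */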

#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128

struct kfd_ioctl_dbg_register_args {
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

#define KFD_INVALID_FD 0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8

#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2

#define KFD_SIGNAL_EVENT_LIMIT 4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3

struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
        __u32 event_type;               /* to KFD */
        __u32 auto_reset;               /* to KFD */
        __u32 node_id;                  /* to KFD - only valid for certain
                                           event types */
        __u32 event_id;                 /* from KFD */
        __u32 event_slot_index;         /* from KFD */
};

struct kfd_ioctl_destroy_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_set_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_reset_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly;         /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
        __u32 imprecise;        /* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
        __u32 gpu_id;
        __u32 ErrorType;        /* 0 = no RAS error,
                                 * 1 = ECC_SRAM,
                                 * 2 = Link_SYNFLOOD (poison),
                                 * 3 = GPU hang (not attributable to a specific cause),
                                 * other values reserved
                                 */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
        __u32 reset_type;
        __u32 reset_cause;
        __u32 memory_lost;
        __u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
        __u64 last_event_age;   /* to and from KFD */
};

/* Event data */
struct kfd_event_data {
        union {
                /* From KFD */
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
                /* To and From KFD */
                struct kfd_hsa_signal_event_data signal_event_data;
        };
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_wait_events_args {
        __u64 events_ptr;       /* points to struct kfd_event_data array,
                                   to KFD */
        __u32 num_events;       /* to KFD */
        __u32 wait_for_all;     /* to KFD */
        __u32 timeout;          /* to KFD */
        __u32 wait_result;      /* from KFD */
};
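
/*
 * Example (illustrative sketch): waiting up to one second for a single
 * event. "event_id" is assumed to come from a prior
 * AMDKFD_IOC_CREATE_EVENT call, and the timeout is assumed to be in
 * milliseconds.
 *
 *      struct kfd_event_data ev = { .event_id = event_id };
 *      struct kfd_ioctl_wait_events_args args = {
 *              .events_ptr = (__u64)(uintptr_t)&ev,
 *              .num_events = 1,
 *              .wait_for_all = 1,
 *              .timeout = 1000,
 *      };
 *
 *      ioctl(kfd, AMDKFD_IOC_WAIT_EVENTS, &args);
 *      int signaled = (args.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE);
 */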

struct kfd_ioctl_set_scratch_backing_va_args {
        __u64 va_addr;  /* to KFD */
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
        __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
        __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_macro_tile_configs;

        __u32 gpu_id;           /* to KFD */
        __u32 gb_addr_config;   /* from KFD */
        __u32 num_banks;        /* from KFD */
        __u32 num_ranks;        /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
};

struct kfd_ioctl_set_trap_handler_args {
        __u64 tba_addr; /* to KFD */
        __u64 tma_addr; /* to KFD */
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
        __u32 drm_fd;   /* to KFD */
        __u32 gpu_id;   /* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS (1 << 23)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr: virtual address of the memory to be allocated;
 *           all later mappings on all GPUs will use this address
 * @size: size in bytes
 * @handle: buffer handle returned to user mode, used to refer to
 *          this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id: device identifier
 * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
        __u64 va_addr;          /* to KFD */
        __u64 size;             /* to KFD */
        __u64 handle;           /* from KFD */
        __u64 mmap_offset;      /* to KFD (userptr), from KFD (mmap offset) */
        __u32 gpu_id;           /* to KFD */
        __u32 flags;
};

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
        __u64 handle;   /* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle: memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices: number of devices in the array
 * @n_success: number of devices mapped successfully
 *
 * @n_success tells the caller how many devices from the start of the
 * array have mapped the buffer successfully. It can be passed into a
 * subsequent retry call to skip those devices. For the first call the
 * caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};
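
/*
 * Example (illustrative sketch): allocating 2 MiB of VRAM and mapping
 * it to one GPU, retrying the map once on failure. Because @n_success
 * is left as returned, devices that already mapped the buffer are
 * skipped on the retry, per the description above. "va" and "gpu_id"
 * are assumptions; a real allocator may need additional flags.
 *
 *      struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
 *              .va_addr = va,
 *              .size = 2 << 20,
 *              .gpu_id = gpu_id,
 *              .flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *                       KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
 *      };
 *      ioctl(kfd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc);
 *
 *      __u32 gpus[1] = { gpu_id };
 *      struct kfd_ioctl_map_memory_to_gpu_args map = {
 *              .handle = alloc.handle,
 *              .device_ids_array_ptr = (__u64)(uintptr_t)gpus,
 *              .n_devices = 1,
 *              .n_success = 0,
 *      };
 *      if (ioctl(kfd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map))
 *              ioctl(kfd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 */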

/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};

/* Allocate GWS for specific queue
 *
 * @queue_id: id of the queue that GWS is allocated for
 * @num_gws: how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             Only contiguous GWS allocation is supported.
 */
struct kfd_ioctl_alloc_queue_gws_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_gws;          /* to KFD */
        __u32 first_gws;        /* from KFD */
        __u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
        __u32 metadata_size;    /* to KFD (space allocated by user)
                                 * from KFD (actual metadata size)
                                 */
        __u32 gpu_id;           /* from KFD */
        __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
        __u64 va_addr;          /* to KFD */
        __u64 handle;           /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
        __u64 handle;           /* to KFD */
        __u32 flags;            /* to KFD */
        __u32 dmabuf_fd;        /* from KFD */
};

/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
        KFD_SMI_EVENT_NONE = 0, /* not used */
        KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
        KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
        KFD_SMI_EVENT_GPU_PRE_RESET = 3,
        KFD_SMI_EVENT_GPU_POST_RESET = 4,
        KFD_SMI_EVENT_MIGRATE_START = 5,
        KFD_SMI_EVENT_MIGRATE_END = 6,
        KFD_SMI_EVENT_PAGE_FAULT_START = 7,
        KFD_SMI_EVENT_PAGE_FAULT_END = 8,
        KFD_SMI_EVENT_QUEUE_EVICTION = 9,
        KFD_SMI_EVENT_QUEUE_RESTORE = 10,
        KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
        KFD_SMI_EVENT_PROCESS_START = 12,
        KFD_SMI_EVENT_PROCESS_END = 13,

        /*
         * Maximum event number, used as a flag bit to request events from
         * all processes. This requires super-user permission; without it,
         * no events will be received at all. Without this flag, only
         * events from the calling process are received.
         */
        KFD_SMI_EVENT_ALL_PROCESS = 64
};

/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
        KFD_MIGRATE_TRIGGER_PREFETCH,           /* Prefetch to GPU VRAM or system memory */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,      /* GPU page fault recovery */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,      /* CPU page fault recovery */
        KFD_MIGRATE_TRIGGER_TTM_EVICTION        /* TTM eviction */
};

/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
        KFD_QUEUE_EVICTION_TRIGGER_SVM,         /* SVM buffer migration */
        KFD_QUEUE_EVICTION_TRIGGER_USERPTR,     /* userptr movement */
        KFD_QUEUE_EVICTION_TRIGGER_TTM,         /* TTM move buffer */
        KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,     /* GPU suspend */
        KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,     /* CRIU checkpoint */
        KFD_QUEUE_EVICTION_CRIU_RESTORE         /* CRIU restore */
};

/* The reason for the unmap-buffer-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,       /* MMU notifier CPU buffer movement */
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,       /* MMU notifier page migration */
        KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU    /* Unmap to free the buffer */
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96

struct kfd_ioctl_smi_events_args {
        __u32 gpuid;    /* to KFD */
        __u32 anon_fd;  /* from KFD */
};

/*
 * SVM event tracing via the SMI system management interface
 *
 * Open an event file descriptor:
 *    Use ioctl AMDKFD_IOC_SMI_EVENTS, passing in a gpuid; it returns an
 *    anonymous file descriptor for receiving SMI events.
 *    When called with super-user permission, the file descriptor can be used
 *    to receive SVM events from all processes; otherwise it only receives
 *    SVM events of the calling process.
 *
 * To enable SVM events:
 *    Write a KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap mask to the event
 *    file descriptor to start recording the event to the kfifo; combine
 *    bitmap masks to enable multiple events. A new event mask overwrites the
 *    previous event mask. The
 *    KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires
 *    super-user permission to receive SVM events from all processes.
 *
 * To receive events:
 *    The application can poll the file descriptor to wait for events, then
 *    read events from the file into a buffer. Each event is a one-line
 *    string message, starting with the event id followed by event-specific
 *    information.
 *
 * To decode event information:
 *    The following event format string macros can be used with sscanf to
 *    decode the event-specific information.
 *    event triggers: the reason the event was generated, defined as enums
 *                    for the unmap, eviction and migrate events
 *    node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system
 *                    memory
 *    addr: user mode address, in pages
 *    size: in pages
 *    pid: the ID of the process generating the event
 *    ns: timestamp with nanosecond resolution; starts at system boot time
 *        but stops during suspend
 *    migrate_update: how a GPU page fault was recovered: 'M' for migrate,
 *                    'U' for update
 *    rw: 'W' for a write page fault, 'R' for a read page fault
 *    rescheduled: 'R' if the queue restore failed and was rescheduled to
 *                 try again
 *    error_code: migrate failure error code, 0 if no error
 */
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
                "%x %s\n", (reset_seq_num), (reset_cause)

#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
                "%llx:%llx\n", (bitmask), (counter)

#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
                "%x:%s\n", (pid), (task_name)

#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)

#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)

#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
                preferred_loc, migrate_trigger)\
                "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
                (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)

#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger, error_code) \
                "%lld -%d @%lx(%lx) %x->%x %d %d\n", (ns), (pid), (start), (size),\
                (from), (to), (migrate_trigger), (error_code)

#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
                "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)

#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
                "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)

#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
                "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
                (node), (unmap_trigger)

#define KFD_EVENT_FMT_PROCESS(pid, task_name)\
                "%x %s\n", (pid), (task_name)

/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
 *    all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
 */

enum kfd_criu_op {
        KFD_CRIU_OP_PROCESS_INFO,
        KFD_CRIU_OP_CHECKPOINT,
        KFD_CRIU_OP_UNPAUSE,
        KFD_CRIU_OP_RESTORE,
        KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
 * @devices:	[in/out] User pointer to memory location for devices information.
 *		This is an array of type kfd_criu_device_bucket.
 * @bos:	[in/out] User pointer to memory location for BOs information.
 *		This is an array of type kfd_criu_bo_bucket.
 * @priv_data:	[in/out] User pointer to memory location for private data
 * @priv_data_size:	[in/out] Size of priv_data in bytes
 * @num_devices:	[in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:	[in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:	[in/out] Number of objects used by process. Objects are opaque to
 *			user application.
 * @pid:	[in/out] PID of the process being checkpointed
 * @op:		[in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
        __u64 devices;          /* Used during ops: CHECKPOINT, RESTORE */
        __u64 bos;              /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data;        /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data_size;   /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_devices;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_bos;          /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_objects;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 pid;              /* Used during ops: PROCESS_INFO, RESUME */
        __u32 op;
};

struct kfd_criu_device_bucket {
        __u32 user_gpu_id;
        __u32 actual_gpu_id;
        __u32 drm_fd;
        __u32 pad;
};

struct kfd_criu_bo_bucket {
        __u64 addr;
        __u64 size;
        __u64 offset;
        __u64 restored_offset;  /* During restore, updated offset for BO */
        __u32 gpu_id;           /* This is the user_gpu_id */
        __u32 alloc_flags;
        __u32 dmabuf_fd;
        __u32 pad;
};
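
/*
 * Example (illustrative sketch) of the checkpoint half of the sequence
 * described above: PROCESS_INFO to size the buffers (this also evicts
 * the queues), CHECKPOINT to collect them, then UNPAUSE to un-evict.
 * Only the BO buffer allocation is shown; a real checkpointer (e.g.
 * the CRIU amdgpu plugin) also provides @devices and @priv_data
 * buffers. "target_pid" is an assumption.
 *
 *      struct kfd_ioctl_criu_args args = {
 *              .op = KFD_CRIU_OP_PROCESS_INFO,
 *              .pid = target_pid,
 *      };
 *
 *      ioctl(kfd, AMDKFD_IOC_CRIU_OP, &args);
 *      args.bos = (__u64)(uintptr_t)calloc(args.num_bos,
 *                                          sizeof(struct kfd_criu_bo_bucket));
 *      args.op = KFD_CRIU_OP_CHECKPOINT;
 *      ioctl(kfd, AMDKFD_IOC_CRIU_OP, &args);
 *      args.op = KFD_CRIU_OP_UNPAUSE;
 *      ioctl(kfd, AMDKFD_IOC_CRIU_OP, &args);
 */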

/* CRIU IOCTLs - END */
/**************************************************************************************************/

/* Register offset inside the remapped mmio page */
enum kfd_mmio_remap {
        KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
        KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
/* Fine grained coherency between all devices using device-scope atomics */
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
        KFD_IOCTL_SVM_OP_SET_ATTR,
        KFD_IOCTL_SVM_OP_GET_ATTR
};

/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * The definitions below are used for system memory or for leaving the
 * preferred location unspecified.
 */
enum kfd_ioctl_svm_location {
        KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
        KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
        KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
        KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
        KFD_IOCTL_SVM_ATTR_ACCESS,
        KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
        KFD_IOCTL_SVM_ATTR_NO_ACCESS,
        KFD_IOCTL_SVM_ATTR_SET_FLAGS,
        KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
        KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
        __u32 type;
        __u32 value;
};

/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means, a flag will be set in the
 * output, if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means, a flag will be set in the
 * output, if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
        __u64 start_addr;
        __u64 size;
        __u32 op;
        __u32 nattr;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[];
};
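
/*
 * Example (illustrative sketch): prefetching a range to a GPU and
 * making it GPU-accessible in one AMDKFD_IOC_SVM call. Because @attrs
 * is a flexible array, the args struct is allocated with room for the
 * attributes. "gpu_id", "addr" and "len" are assumptions.
 *
 *      int nattr = 2;
 *      struct kfd_ioctl_svm_args *args =
 *              malloc(sizeof(*args) + nattr * sizeof(args->attrs[0]));
 *
 *      args->start_addr = addr;
 *      args->size = len;
 *      args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *      args->nattr = nattr;
 *      args->attrs[0] = (struct kfd_ioctl_svm_attribute)
 *                       { KFD_IOCTL_SVM_ATTR_PREFETCH_LOC, gpu_id };
 *      args->attrs[1] = (struct kfd_ioctl_svm_attribute)
 *                       { KFD_IOCTL_SVM_ATTR_ACCESS, gpu_id };
 *      ioctl(kfd, AMDKFD_IOC_SVM, args);
 */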

/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may have been compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will prevent the mode
 * switch. All GPUs used by the process must be in the same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
        __s32 xnack_enabled;
};
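
/*
 * Example (illustrative sketch): querying the current XNACK mode
 * without changing it by passing a negative value, as described above.
 *
 *      struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };
 *
 *      if (ioctl(kfd, AMDKFD_IOC_SET_XNACK_MODE, &args) == 0)
 *              printf("XNACK %s\n",
 *                     args.xnack_enabled ? "enabled" : "disabled");
 */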

/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
        KFD_DBG_TRAP_OVERRIDE_OR = 0,
        KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
        KFD_DBG_TRAP_MASK_FP_INVALID = 1,
        KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
        KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
        KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
        KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
        KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
        KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
        KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
        KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
        KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
        KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
        KFD_DBG_TRAP_FLAG_LDS_OUT_OF_ADDR_RANGE = 4
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
        EC_NONE = 0,
        /* per queue */
        EC_QUEUE_WAVE_ABORT = 1,
        EC_QUEUE_WAVE_TRAP = 2,
        EC_QUEUE_WAVE_MATH_ERROR = 3,
        EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
        EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
        EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
        EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
        EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
        EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
        EC_QUEUE_PACKET_RESERVED = 19,
        EC_QUEUE_PACKET_UNSUPPORTED = 20,
        EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
        EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
        EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
        EC_QUEUE_PREEMPTION_ERROR = 30,
        EC_QUEUE_NEW = 31,
        /* per device */
        EC_DEVICE_QUEUE_DELETE = 32,
        EC_DEVICE_MEMORY_VIOLATION = 33,
        EC_DEVICE_RAS_ERROR = 34,
        EC_DEVICE_FATAL_HALT = 35,
        EC_DEVICE_NEW = 36,
        /* per process */
        EC_PROCESS_RUNTIME = 48,
        EC_PROCESS_DEVICE_REMOVE = 49,
        EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \
                KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \
                KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \
                KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \
                KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \
                KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \
                KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \
                KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \
                KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \
                KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \
                KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \
                KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
                KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
                KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))

/* Runtime enable states */
enum kfd_dbg_runtime_state {
        DEBUG_RUNTIME_STATE_DISABLED = 0,
        DEBUG_RUNTIME_STATE_ENABLED = 1,
        DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
        DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
        __u64 r_debug;
        __u32 runtime_state;
        __u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *      KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *      KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignored on disable)
 * @capabilities_mask - mask to notify runtime of what KFD supports
 *
 * Return - 0 on SUCCESS.
 *        - EBUSY if a runtime enable call is already pending.
 *        - EEXIST if user queues were already active prior to the call.
 *          If the process is debug enabled, runtime enable will enable debug devices and
 *          wait for the debugger process to send runtime exception EC_PROCESS_RUNTIME
 *          to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
        __u64 r_debug;
        __u32 mode_mask;
        __u32 capabilities_mask;
};

/* Queue information */
struct kfd_queue_snapshot_entry {
        __u64 exception_status;
        __u64 ring_base_address;
        __u64 write_pointer_address;
        __u64 read_pointer_address;
        __u64 ctx_save_restore_address;
        __u32 queue_id;
        __u32 gpu_id;
        __u32 ring_size;
        __u32 queue_type;
        __u32 ctx_save_restore_area_size;
        __u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT 30
#define KFD_DBG_QUEUE_INVALID_BIT 31
#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
        struct {
                __u32 control_stack_offset;
                __u32 control_stack_size;
                __u32 wave_state_offset;
                __u32 wave_state_size;
        } wave_state;
        __u32 debug_offset;
        __u32 debug_size;
        __u64 err_payload_addr;
        __u32 err_event_id;
        __u32 reserved1;
};

/*
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *          KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *          Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *         PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *         clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *          AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 *
 */
enum kfd_dbg_trap_operations {
        KFD_IOC_DBG_TRAP_ENABLE = 0,
        KFD_IOC_DBG_TRAP_DISABLE = 1,
        KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
        KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,            /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,             /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,    /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
        KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
        KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
        KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
        KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};

/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 * kfd_ioctl_dbg_trap_args to disable debug session.
 *
 * @exception_mask (IN) - exceptions to raise to the debugger
 * @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info)
 * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
 * @dbg_fd (IN) - fd KFD will use to notify the debugger of raised
 *                exceptions set in exception_mask.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies KFD's saved kfd_runtime_info to @rinfo_ptr on enable.
 *          The size of the saved runtime info is returned in @rinfo_size.
 *        - EBADF if KFD cannot get a reference to dbg_fd.
 *        - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *        - EINVAL if target process is already debug enabled.
 *
 */
struct kfd_ioctl_dbg_trap_enable_args {
        __u64 exception_mask;
        __u64 rinfo_ptr;
        __u32 rinfo_size;
        __u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 * Raises exceptions to runtime.
 *
 * @exception_mask (IN) - exceptions to raise to runtime
 * @gpu_id (IN) - target device id
 * @queue_id (IN) - target queue id
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *          If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *          AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *          All other exceptions are raised to runtime through err_payload_addr.
 *          See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 * Set new exceptions to be raised to the debugger.
 *
 * @exception_mask (IN) - new exceptions to raise to the debugger
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
        __u64 exception_mask;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 * Enable HW exceptions to raise trap.
 *
 * @override_mode (IN) - see kfd_dbg_trap_override_mode
 * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *                         IN is the override modes requested to be enabled.
 *                         OUT is referenced in Return below.
 * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *                                  IN is the override modes requested for support check.
 *                                  OUT is referenced in Return below.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Previous enablement is returned in @enable_mask.
 *          Actual override support is returned in @support_request_mask.
 *        - EINVAL if override mode is not supported.
 *        - EACCES if trap support requested is not actually supported,
 *          i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *          Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
        __u32 override_mode;
        __u32 enable_mask;
        __u32 support_request_mask;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 * Set wave launch mode.
 *
 * @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
        __u32 launch_mode;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 * Suspend queues.
 *
 * @exception_mask (IN) - raised exceptions to clear
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to suspend
 * @num_queues (IN) - number of queues to suspend in @queue_array_ptr
 * @grace_period (IN) - wave time allowance before preemption
 *                      per 1K GPU clock cycle unit
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Destruction of a suspended queue is blocked until the queue is
 * resumed. This allows the debugger to access queue information and
 * its context save area without running into a race condition on
 * queue destruction.
 * Automatically copies per queue context save area header information
 * into the save area base
 * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 * Return - Number of queues suspended on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          suspend reason.
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *          is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
        __u64 exception_mask;
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 grace_period;
};

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 * Resume queues.
 *
 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *                         to resume
 * @num_queues (IN) - number of queues to resume in @queue_array_ptr
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - Number of queues resumed on SUCCESS.
 *          KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
 *          for each queue id in @queue_array_ptr array reports unsuccessful
 *          resume reason.
 *          KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *          KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 * Sets address watch for device.
 *
 * @address (IN) - watch address to set
 * @mode (IN) - see kfd_dbg_trap_address_watch_mode
 * @mask (IN) - watch address mask
 * @gpu_id (IN) - target gpu to set watch point
 * @id (OUT) - watch id allocated
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Allocated watch ID returned to @id.
 *        - ENODEV if gpu_id not found.
 *        - ENOMEM if watch IDs cannot be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
        __u64 address;
        __u32 mode;
        __u32 mask;
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 * Clear address watch for device.
 *
 * @gpu_id (IN) - target device to clear watch point
 * @id (IN) - allocated watch id to clear
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - ENODEV if gpu_id not found.
 *        - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 * Sets flags for wave behaviour.
 *
 * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *        - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
        __u32 flags;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 * Find one or more raised exceptions. This function can return multiple
 * exceptions from a single queue or a single device with one call. To find
 * all raised exceptions, this function must be called repeatedly until it
 * returns -EAGAIN. Returned exceptions can optionally be cleared by
 * setting the corresponding bit in the @exception_mask input parameter.
 * However, clearing an exception prevents retrieving further information
 * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 * @exception_mask (IN/OUT) - exceptions to clear (IN) and raised (OUT)
 * @gpu_id (OUT) - gpu id of exceptions raised
 * @queue_id (OUT) - queue id of exceptions raised
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on raised exception found
 *          Raised exceptions found are returned in @exception_mask
 *          with reported source id returned in @gpu_id or @queue_id.
 *        - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 * Get additional info on raised exception.
 *
 * @info_ptr (IN) - pointer to exception info buffer to copy to
 * @info_size (IN/OUT) - exception info buffer size (bytes)
 * @source_id (IN) - target gpu or queue id
 * @exception_code (IN) - target exception
 * @clear_exception (IN) - clear raised @exception_code exception
 *                         (0 = false, 1 = true)
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *          bytes of memory exception data to @info_ptr.
 *          If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *          kfd_runtime_info to @info_ptr.
 *          Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
        __u64 info_ptr;
        __u32 info_size;
        __u32 source_id;
        __u32 exception_code;
        __u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 * Get queue information.
 *
 * @exception_mask (IN) - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 * @num_queues (IN/OUT) - number of queue snapshot entries
 *         The debugger specifies the size of the array allocated in @num_queues.
 *         KFD returns the number of queues that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated per
 *         entry. The debugger should use KFD_IOCTL_MINOR_VERSION to determine
 *         which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *         growing the ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot
 *         buffer in the event that it's larger than the actual
 *         kfd_queue_snapshot_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *          Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_queues;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 * Get device information.
 *
 * @exception_mask (IN) - exceptions raised to clear
 * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 * @num_devices (IN/OUT) - number of debug devices to snapshot
 *         The debugger specifies the size of the array allocated in @num_devices.
 *         KFD returns the number of devices that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 * @entry_size (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated. The
 *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
 *         in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *         ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot
 *         buffer in the event that it's larger than the actual
 *         kfd_dbg_device_info_entry.
 *
 * Generic errors apply (see kfd_dbg_trap_operations).
 * Return - 0 on SUCCESS.
 *          Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *          into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *          Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_devices;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 * @pid - target process to debug
 * @op - debug operation (see kfd_dbg_trap_operations)
 *
 * @op determines which union struct args to use.
 * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
        __u32 pid;
        __u32 op;

        union {
                struct kfd_ioctl_dbg_trap_enable_args enable;
                struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
                struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
                struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
                struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
                struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
                struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
                struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
                struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
                struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
                struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
                struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
                struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
                struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
        };
};
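
/*
 * Example (illustrative sketch): a debugger enabling a debug session on
 * a ptrace-attached target via the @op union. "target_pid" and
 * "pipe_fd" are assumptions; see the per-operation structs above for
 * error semantics.
 *
 *      struct kfd_runtime_info rinfo;
 *      struct kfd_ioctl_dbg_trap_args args = {
 *              .pid = target_pid,
 *              .op = KFD_IOC_DBG_TRAP_ENABLE,
 *              .enable = {
 *                      .exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP),
 *                      .rinfo_ptr = (__u64)(uintptr_t)&rinfo,
 *                      .rinfo_size = sizeof(rinfo),
 *                      .dbg_fd = pipe_fd,
 *              },
 *      };
 *
 *      ioctl(kfd, AMDKFD_IOC_DBG_TRAP, &args);
 */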

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)           _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)    _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)    _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)   _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION \
        AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE \
        AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE \
        AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY \
        AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
        AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES \
        AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE \
        AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT \
        AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT \
        AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT \
        AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT \
        AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS \
        AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \
        AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \
        AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
        AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
        AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
        AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG \
        AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER \
        AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
        AMDKFD_IOWR(0x14, \
                struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM \
        AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
        AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
        AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
        AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
        AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK \
        AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
        AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO \
        AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF \
        AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
        AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS \
        AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM  AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE \
        AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP \
        AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY \
        AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF \
        AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE \
        AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP \
        AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_IOC_CREATE_PROCESS \
        AMDKFD_IO(0x27)

#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x28

#endif