/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>
/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 13
struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};
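
/*
 * Illustrative usage sketch (not part of the ABI): querying the interface
 * version from user space. Assumes the standard /dev/kfd device node and
 * <fcntl.h>, <stdio.h>, <sys/ioctl.h>; error handling is abbreviated.
 *
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args args = {0};
 *
 *	if (kfd_fd >= 0 && ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */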
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
#define KFD_IOC_QUEUE_TYPE_SDMA			0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15
struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;		/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;			/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
	__u32 queue_id;			/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;		/* to KFD */
	__u64 ctx_save_restore_address;	/* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};
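
/*
 * Illustrative sketch, not a complete runtime: creating an AQL compute
 * queue. ring_buf, gpu_id and kfd_fd are hypothetical values obtained from
 * the runtime's own allocations and topology discovery; some ASICs also
 * require EOP and context save/restore buffers, omitted here.
 *
 *	struct kfd_ioctl_create_queue_args q = {0};
 *
 *	q.ring_base_address = (__u64)(uintptr_t)ring_buf; // user-allocated ring
 *	q.ring_size = 4096;				  // bytes
 *	q.gpu_id = gpu_id;
 *	q.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	q.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	q.queue_priority = 7;		// 0..KFD_MAX_QUEUE_PRIORITY
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &q) == 0)
 *		setup_doorbell(q.queue_id, q.doorbell_offset); // hypothetical helper
 */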
struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};
struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;			/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;		/* to KFD */
	__u32 queue_priority;		/* to KFD */
};
struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;	/* to KFD */
	__u64 cu_mask_ptr;	/* to KFD */
};
struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
	__u64 available;	/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
struct kfd_dbg_device_info_entry {
	__u64 exception_status;
	__u64 lds_base;
	__u64 lds_limit;
	__u64 scratch_base;
	__u64 scratch_limit;
	__u64 gpuvm_base;
	__u64 gpuvm_limit;
	__u32 gpu_id;
	__u32 location_id;
	__u32 vendor_id;
	__u32 device_id;
	__u32 revision_id;
	__u32 subsystem_vendor_id;
	__u32 subsystem_device_id;
	__u32 fw_version;
	__u32 gfx_target_version;
	__u32 simd_count;
	__u32 max_waves_per_simd;
	__u32 array_count;
	__u32 simd_arrays_per_engine;
	__u32 num_xcc;
	__u32 capability;
	__u32 debug_prop;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};
/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counters should not be affected.
 */
struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
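
/*
 * Illustrative sketch: sampling the counters for profiling. kfd_fd and
 * gpu_id are assumed from earlier setup; the conversion below assumes
 * system_clock_freq is reported in Hz.
 *
 *	struct kfd_ioctl_get_clock_counters_args c = {0};
 *
 *	c.gpu_id = gpu_id;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &c) == 0) {
 *		__u64 ns = c.system_clock_counter * 1000000000ull /
 *			   c.system_clock_freq;
 *		record_sample(ns, c.gpu_clock_counter); // hypothetical helper
 *	}
 */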
struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;	/* from KFD */
	__u64 scratch_base;	/* from KFD */
	__u64 scratch_limit;	/* from KFD */
	__u64 gpuvm_base;	/* from KFD */
	__u64 gpuvm_limit;	/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};
/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by Kernel
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 *  kfd_process_device_apertures_ptr
	 * from KFD - Number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128
struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;			/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};
#define KFD_INVALID_FD 0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS		0
#define KFD_MEM_ERR_SRAM_ECC		1
#define KFD_MEM_ERR_POISON_CONSUMED	2
#define KFD_MEM_ERR_GPU_HANG		3
struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;			/* to KFD - only valid for certain
							event types */
	__u32 event_id;			/* from KFD */
	__u32 event_slot_index;		/* from KFD */
};
struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};
struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;		/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};
/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error,
			  * 1 = ECC_SRAM,
			  * 2 = Link_SYNFLOOD (poison),
			  * 3 = GPU hang (not attributable to a specific cause),
			  * other values reserved
			  */
};
/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};
struct kfd_event_data {
	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};				/* From KFD */
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;			/* to KFD */
	__u32 pad;
};
struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;	/* pointer to struct
				   kfd_event_data array, to KFD */
	__u32 num_events;	/* to KFD */
	__u32 wait_for_all;	/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;	/* from KFD */
};
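
/*
 * Illustrative sketch: creating an auto-reset signal event and waiting on
 * it. Another thread (or the GPU, via the mmapped event page) would signal
 * it; AMDKFD_IOC_SET_EVENT is the CPU-side trigger.
 *
 *	struct kfd_ioctl_create_event_args ev = {0};
 *	struct kfd_event_data data = {0};
 *	struct kfd_ioctl_wait_events_args wait = {0};
 *
 *	ev.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ev.auto_reset = 1;
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);
 *
 *	// elsewhere, a signaling thread would use AMDKFD_IOC_SET_EVENT
 *
 *	data.event_id = ev.event_id;
 *	wait.events_ptr = (__u64)(uintptr_t)&data;
 *	wait.num_events = 1;
 *	wait.wait_for_all = 1;
 *	wait.timeout = 1000;	// milliseconds
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *	// wait.wait_result is one of KFD_IOC_WAIT_RESULT_*
 */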
struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;	/* from KFD */
	__u32 num_ranks;	/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};
struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD */
	__u64 tma_addr;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};
/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)
/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};
/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;	/* to KFD */
};
/* Map memory to one or more GPUs
 *
 * @handle:               memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices:            number of devices in the array
 * @n_success:            number of devices mapped successfully
 *
 * @n_success returns information to the caller how many devices from
 * the start of the array have mapped the buffer successfully. It can
 * be passed into a subsequent retry call to skip those devices. For
 * the first call the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
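
/*
 * Illustrative sketch: allocating VRAM and mapping it to a single GPU,
 * honoring the @n_success contract above. va and gpu_id are hypothetical
 * values from the runtime's VA manager and topology discovery.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *	struct kfd_ioctl_map_memory_to_gpu_args map = {0};
 *	__u32 gpus[1] = { gpu_id };
 *
 *	alloc.va_addr = va;
 *	alloc.size = size;
 *	alloc.gpu_id = gpu_id;
 *	alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *	ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc);
 *
 *	map.handle = alloc.handle;
 *	map.device_ids_array_ptr = (__u64)(uintptr_t)gpus;
 *	map.n_devices = 1;
 *	map.n_success = 0;	// must start at 0; on a retry, devices
 *				// before n_success are skipped
 *	ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 */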
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
/* Allocate GWS for specific queue
 *
 * @queue_id:  queue's id that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             Only contiguous GWS allocation is supported.
 */
struct kfd_ioctl_alloc_queue_gws_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_gws;		/* to KFD */
	__u32 first_gws;	/* from KFD */
	__u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;		/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};
struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
	__u64 handle;		/* to KFD */
	__u32 flags;		/* to KFD */
	__u32 dmabuf_fd;	/* from KFD */
};
/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */
	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
	KFD_SMI_EVENT_GPU_POST_RESET = 4,
	KFD_SMI_EVENT_MIGRATE_START = 5,
	KFD_SMI_EVENT_MIGRATE_END = 6,
	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

	/*
	 * Max event number, used as a flag bit to receive events from all
	 * processes. This requires superuser permission; without it, events
	 * cannot be received from any other process. Without this flag,
	 * events are received only from the calling process.
	 */
	KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
	KFD_MIGRATE_TRIGGER_PREFETCH,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
	KFD_MIGRATE_TRIGGER_TTM_EVICTION
};

enum KFD_QUEUE_EVICTION_TRIGGERS {
	KFD_QUEUE_EVICTION_TRIGGER_SVM,
	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
	KFD_QUEUE_EVICTION_TRIGGER_TTM,
	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
	KFD_QUEUE_EVICTION_CRIU_RESTORE
};

enum KFD_SVM_UNMAP_TRIGGERS {
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE	96

struct kfd_ioctl_smi_events_args {
	__u32 gpuid;	/* to KFD */
	__u32 anon_fd;	/* from KFD */
};
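
/*
 * Illustrative sketch: subscribing to SMI events. KFD returns an anonymous
 * file descriptor; an event mask is written to it (current kernels read a
 * binary __u64 -- treat this detail as an assumption) and events are then
 * read back as text records of at most KFD_SMI_EVENT_MSG_SIZE bytes.
 *
 *	struct kfd_ioctl_smi_events_args smi = {0};
 *	__u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *	char buf[KFD_SMI_EVENT_MSG_SIZE];
 *
 *	smi.gpuid = gpu_id;
 *	ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &smi);
 *	write(smi.anon_fd, &mask, sizeof(mask));  // select events of interest
 *	read(smi.anon_fd, buf, sizeof(buf));	  // blocks until an event fires
 */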
/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
 *    all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
 */

enum kfd_criu_op {
	KFD_CRIU_OP_PROCESS_INFO,
	KFD_CRIU_OP_CHECKPOINT,
	KFD_CRIU_OP_UNPAUSE,
	KFD_CRIU_OP_RESTORE,
	KFD_CRIU_OP_RESUME,
};
/**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
 * @devices:	    [in/out] User pointer to memory location for devices information.
 *		    This is an array of type kfd_criu_device_bucket.
 * @bos:	    [in/out] User pointer to memory location for BOs information.
 *		    This is an array of type kfd_criu_bo_bucket.
 * @priv_data:	    [in/out] User pointer to memory location for private data
 * @priv_data_size: [in/out] Size of priv_data in bytes
 * @num_devices:    [in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:	    [in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:    [in/out] Number of objects used by process. Objects are opaque to
 *		    user application.
 * @pid:	    [in/out] PID of the process being checkpointed
 * @op:		    [in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
	__u64 devices;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 bos;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data;	/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data_size;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_devices;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_bos;		/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_objects;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 pid;		/* Used during ops: PROCESS_INFO, RESUME */
	__u32 op;
};
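
/*
 * Illustrative checkpoint sketch following the op sequence documented
 * above (PROCESS_INFO -> CHECKPOINT -> UNPAUSE). Buffer allocation from
 * the sizes returned by PROCESS_INFO and serialization to disk are elided.
 *
 *	struct kfd_ioctl_criu_args criu = {0};
 *
 *	criu.op = KFD_CRIU_OP_PROCESS_INFO;
 *	criu.pid = target_pid;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu); // pauses and evicts queues
 *
 *	// allocate criu.devices / criu.bos / criu.priv_data from the
 *	// num_devices / num_bos / priv_data_size returned above, then:
 *	criu.op = KFD_CRIU_OP_CHECKPOINT;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu);
 *
 *	criu.op = KFD_CRIU_OP_UNPAUSE;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &criu); // un-evicts the queues
 */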
struct kfd_criu_device_bucket {
	__u32 user_gpu_id;
	__u32 actual_gpu_id;
	__u32 drm_fd;
	__u32 pad;
};

struct kfd_criu_bo_bucket {
	__u64 addr;
	__u64 size;
	__u64 offset;
	__u64 restored_offset;	/* During restore, updated offset for BO */
	__u32 gpu_id;		/* This is the user_gpu_id */
	__u32 alloc_flags;
	__u32 dmabuf_fd;
	__u32 pad;
};

/* CRIU IOCTLs - END */
/**************************************************************************************************/
/* Register offset inside the remapped mmio page */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
	KFD_IOCTL_SVM_OP_SET_ATTR,
	KFD_IOCTL_SVM_OP_GET_ATTR
};
/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * Below definitions are used for system memory or for leaving the preferred
 * location unspecified.
 */
enum kfd_ioctl_svm_location {
	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
	KFD_IOCTL_SVM_ATTR_ACCESS,
	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
	KFD_IOCTL_SVM_ATTR_GRANULARITY
};
/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
	__u32 type;
	__u32 value;
};
/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means, a flag will be set in the
 * output, if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means, a flag will be set in the
 * output, if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
	__u64 start_addr;
	__u64 size;
	__u32 op;
	__u32 nattr;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
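
/*
 * Illustrative sketch: prefetching a page-aligned SVM range to a GPU and
 * granting it access there. start, size and gpu_id are assumed valid;
 * needs <stdlib.h> for the flexible-array allocation.
 *
 *	size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *		    2 * sizeof(struct kfd_ioctl_svm_attribute);
 *	struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *
 *	args->start_addr = start;
 *	args->size = size;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 2;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpu_id;
 *	args->attrs[1].type = KFD_IOCTL_SVM_ATTR_ACCESS;
 *	args->attrs[1].value = gpu_id;
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *	free(args);
 */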
/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may have been compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will prevent the mode
 * switch. All GPUs used by the process must be in the same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
	__s32 xnack_enabled;
};
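
/*
 * Illustrative sketch: querying the current XNACK mode without changing
 * it, using the negative-input convention described above.
 *
 *	struct kfd_ioctl_set_xnack_mode_args xnack = { .xnack_enabled = -1 };
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &xnack) == 0)
 *		printf("XNACK %s\n",
 *		       xnack.xnack_enabled ? "enabled" : "disabled");
 */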
/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
	KFD_DBG_TRAP_OVERRIDE_OR = 0,
	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
	KFD_DBG_TRAP_MASK_FP_INVALID = 1,
	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
	KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
	KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};
/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};
/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
	EC_NONE = 0,
	/* per queue */
	EC_QUEUE_WAVE_ABORT = 1,
	EC_QUEUE_WAVE_TRAP = 2,
	EC_QUEUE_WAVE_MATH_ERROR = 3,
	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
	EC_QUEUE_PACKET_RESERVED = 19,
	EC_QUEUE_PACKET_UNSUPPORTED = 20,
	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
	EC_QUEUE_PREEMPTION_ERROR = 30,
	EC_QUEUE_NEW = 31,
	/* per device */
	EC_DEVICE_QUEUE_DELETE = 32,
	EC_DEVICE_MEMORY_VIOLATION = 33,
	EC_DEVICE_RAS_ERROR = 34,
	EC_DEVICE_FATAL_HALT = 35,
	EC_DEVICE_NEW = 36,
	/* per process */
	EC_PROCESS_RUNTIME = 48,
	EC_PROCESS_DEVICE_REMOVE = 49,
	EC_MAX
};
/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)	(1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) |	\
				 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |	\
				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |	\
				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |	\
				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
/* Runtime enable states */
enum kfd_dbg_runtime_state {
	DEBUG_RUNTIME_STATE_DISABLED = 0,
	DEBUG_RUNTIME_STATE_ENABLED = 1,
	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
	__u64 r_debug;
	__u32 runtime_state;
	__u32 ttmp_setup;
};
/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2
/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *	  - EBUSY if runtime enable call already pending.
 *	  - EEXIST if user queues already active prior to call.
 *	    If process is debug enabled, runtime enable will enable debug devices and
 *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
 *	    to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
	__u64 r_debug;
	__u32 mode_mask;
	__u32 capabilities_mask;
};
/* Queue information */
struct kfd_queue_snapshot_entry {
	__u64 exception_status;
	__u64 ring_base_address;
	__u64 write_pointer_address;
	__u64 read_pointer_address;
	__u64 ctx_save_restore_address;
	__u32 queue_id;
	__u32 gpu_id;
	__u32 ring_size;
	__u32 queue_type;
	__u32 ctx_save_restore_area_size;
	__u32 reserved;
};
/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT		30
#define KFD_DBG_QUEUE_INVALID_BIT	31
#define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)
/* Context save area header information */
struct kfd_context_save_area_header {
	struct {
		__u32 control_stack_offset;
		__u32 control_stack_size;
		__u32 wave_state_offset;
		__u32 wave_state_size;
	} wave_state;
	__u32 debug_offset;
	__u32 debug_size;
	__u64 err_payload_addr;
	__u32 err_event_id;
	__u32 reserved1;
};
/*
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *	    KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *	    Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *	   PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *	   clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *	    AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 */
enum kfd_dbg_trap_operations {
	KFD_IOC_DBG_TRAP_ENABLE = 0,
	KFD_IOC_DBG_TRAP_DISABLE = 1,
	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 *     Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 *     kfd_ioctl_dbg_trap_args to disable debug session.
 *
 *     @exception_mask (IN)	- exceptions to raise to the debugger
 *     @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
 *     @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes
 *     @dbg_fd	       (IN)	- fd KFD will use to notify the debugger of raised
 *				  exceptions set in exception_mask.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *	      Size of kfd_runtime saved by the KFD returned to @rinfo_size.
 *	    - EBADF if KFD cannot get a reference to dbg_fd.
 *	    - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *	    - EINVAL if target process is already debug enabled.
 */
struct kfd_ioctl_dbg_trap_enable_args {
	__u64 exception_mask;
	__u64 rinfo_ptr;
	__u32 rinfo_size;
	__u32 dbg_fd;
};
/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 *     Raises exceptions to runtime.
 *
 *     @exception_mask (IN) - exceptions to raise to runtime
 *     @gpu_id	       (IN) - target device id
 *     @queue_id       (IN) - target queue id
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - ENODEV if gpu_id not found.
 *	      If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *	      AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *	      All other exceptions are raised to runtime through err_payload_addr.
 *	      See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};
/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 *     Set new exceptions to be raised to the debugger.
 *
 *     @exception_mask (IN) - new exceptions to raise to the debugger
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
	__u64 exception_mask;
};
/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 *     Enable HW exceptions to raise trap.
 *
 *     @override_mode	     (IN)     - see kfd_dbg_trap_override_mode
 *     @enable_mask	     (IN/OUT) - reference kfd_dbg_trap_mask.
 *					IN is the override modes requested to be enabled.
 *					OUT is referenced in Return below.
 *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *					IN is the override modes requested for support check.
 *					OUT is referenced in Return below.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      Previous enablement is returned in @enable_mask.
 *	      Actual override support is returned in @support_request_mask.
 *	    - EINVAL if override mode is not supported.
 *	    - EACCES if trap support requested is not actually supported.
 *	      i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *	      Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
	__u32 override_mode;
	__u32 enable_mask;
	__u32 support_request_mask;
	__u32 pad;
};
/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 *     Set wave launch mode.
 *
 *     @mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
	__u32 launch_mode;
	__u32 pad;
};
/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 *     Suspend queues.
 *
 *     @exception_mask	(IN) - raised exceptions to clear
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			       to suspend
 *     @num_queues	(IN) - number of queues to suspend in @queue_array_ptr
 *     @grace_period	(IN) - wave time allowance before preemption
 *			       per 1K GPU clock cycle unit
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Destruction of a suspended queue is blocked until the queue is
 *     resumed. This allows the debugger to access queue information and
 *     its context save area without running into a race condition on
 *     queue destruction.
 *     Automatically copies per queue context save area header information
 *     into the save area base
 *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 *     Return - Number of queues suspended on SUCCESS.
 *	        KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *	        for each queue id in @queue_array_ptr array reports unsuccessful
 *	        suspend reason.
 *	        KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *	        KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *	        is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
	__u64 exception_mask;
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 grace_period;
};
/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 *     Resume queues.
 *
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			       to resume
 *     @num_queues	(IN) - number of queues to resume in @queue_array_ptr
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - Number of queues resumed on SUCCESS.
 *	        KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
 *	        for each queue id in @queue_array_ptr array reports unsuccessful
 *	        resume reason.
 *	        KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *	        KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 pad;
};
/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 *     Sets address watch for device.
 *
 *     @address	(IN)  - watch address to set
 *     @mode	(IN)  - see kfd_dbg_trap_address_watch_mode
 *     @mask	(IN)  - watch address mask
 *     @gpu_id	(IN)  - target gpu to set watch point
 *     @id	(OUT) - watch id allocated
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	        Allocated watch ID returned to @id.
 *	      - ENODEV if gpu_id not found.
 *	      - ENOMEM if no watch IDs can be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
	__u64 address;
	__u32 mode;
	__u32 mask;
	__u32 gpu_id;
	__u32 id;
};
/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 *     Clear address watch for device.
 *
 *     @gpu_id (IN) - target device to clear watch point
 *     @id     (IN) - allocated watch id to clear
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - ENODEV if gpu_id not found.
 *	      - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
	__u32 gpu_id;
	__u32 id;
};
/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 *     Sets flags for wave behaviour.
 *
 *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
	__u32 flags;
	__u32 pad;
};
/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 *     Find one or more raised exceptions. This function can return multiple
 *     exceptions from a single queue or a single device with one call. To find
 *     all raised exceptions, this function must be called repeatedly until it
 *     returns -EAGAIN. Returned exceptions can optionally be cleared by
 *     setting the corresponding bit in the @exception_mask input parameter.
 *     However, clearing an exception prevents retrieving further information
 *     about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 *     @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
 *     @gpu_id	       (OUT)	- gpu id of exceptions raised
 *     @queue_id       (OUT)	- queue id of exceptions raised
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on raised exception found
 *	        Raised exceptions found are returned in @exception_mask
 *	        with reported source id returned in @gpu_id or @queue_id.
 *	      - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};
/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 *     Get additional info on raised exception.
 *
 *     @info_ptr	(IN)	 - pointer to exception info buffer to copy to
 *     @info_size	(IN/OUT) - exception info buffer size (bytes)
 *     @source_id	(IN)	 - target gpu or queue id
 *     @exception_code	(IN)	 - target exception
 *     @clear_exception	(IN)	 - clear raised @exception_code exception
 *				   (0 = false, 1 = true)
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *	      bytes of memory exception data to @info_ptr.
 *	      If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *	      kfd_runtime_info to @info_ptr.
 *	      Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
	__u64 info_ptr;
	__u32 info_size;
	__u32 source_id;
	__u32 exception_code;
	__u32 clear_exception;
};
/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 *     Get queue information.
 *
 *     @exception_mask	 (IN)	  - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 *     @num_queues	 (IN/OUT) - number of queue snapshot entries
 *	   The debugger specifies the size of the array allocated in @num_queues.
 *	   KFD returns the number of queues that actually existed. If this is
 *	   larger than the size specified by the debugger, KFD will not overflow
 *	   the array allocated by the debugger.
 *
 *     @entry_size	 (IN/OUT) - size per entry in bytes
 *	   The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *	   @entry_size. KFD returns the number of bytes actually populated per
 *	   entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
 *	   which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *	   growing the ABI in a backwards compatible manner.
 *	   Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *	   event that it's larger than actual kfd_queue_snapshot_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	        Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *	        into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *	        Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_queues;
	__u32 entry_size;
};
/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 *     Get device information.
 *
 *     @exception_mask	 (IN)	  - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 *     @num_devices	 (IN/OUT) - number of debug devices to snapshot
 *	   The debugger specifies the size of the array allocated in @num_devices.
 *	   KFD returns the number of devices that actually existed. If this is
 *	   larger than the size specified by the debugger, KFD will not overflow
 *	   the array allocated by the debugger.
 *
 *     @entry_size	 (IN/OUT) - size per entry in bytes
 *	   The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *	   @entry_size. KFD returns the number of bytes actually populated. The
 *	   debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
 *	   in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *	   ABI in a backwards compatible manner.
 *	   Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *	   event that it's larger than actual kfd_dbg_device_info_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	        Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *	        into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *	        Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_devices;
	__u32 entry_size;
};
/**
 * kfd_ioctl_dbg_trap_args
 *
 *     Arguments to debug target process.
 *
 *     @pid - target process to debug
 *     @op  - debug operation (see kfd_dbg_trap_operations)
 *
 *     @op determines which union struct args to use.
 *     Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
	__u32 pid;
	__u32 op;

	union {
		struct kfd_ioctl_dbg_trap_enable_args enable;
		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
		struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
	};
};
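
/*
 * Illustrative sketch: enabling a debug session on a ptrace-attached
 * target. fifo_fd is a hypothetical descriptor (e.g. a pipe) that KFD
 * will use for exception notification; rinfo receives the saved runtime
 * info. AMDKFD_IOC_RUNTIME_ENABLE must have succeeded in the target
 * before hardware DBG_HW_OP operations are issued.
 *
 *	struct kfd_runtime_info rinfo = {0};
 *	struct kfd_ioctl_dbg_trap_args dbg = {0};
 *
 *	dbg.pid = target_pid;
 *	dbg.op = KFD_IOC_DBG_TRAP_ENABLE;
 *	dbg.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);
 *	dbg.enable.rinfo_ptr = (__u64)(uintptr_t)&rinfo;
 *	dbg.enable.rinfo_size = sizeof(rinfo);
 *	dbg.enable.dbg_fd = fifo_fd;
 *	ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &dbg);
 */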
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED	\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED	\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG		\
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14,		\
			struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK			\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS			\
		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM	AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE		\
		AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP			\
		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY		\
		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF		\
		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE		\
		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP			\
		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x27

#endif