/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */
#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KFD_INTERFACE_VERSION 2
#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;
struct kgd_mem;
enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
struct kfd_vm_fault_info {
	uint64_t	page_addr;
	uint32_t	vmid;
	uint32_t	mc_id;
	uint32_t	status;
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};

/* For getting GPU compute unit (CU) information from KGD */
struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t cu_bitmap[4][4];
};
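
/*
 * Illustrative sketch, not part of the interface: one way a consumer of
 * kfd_cu_info could recount the active CUs from the bitmap. The
 * [shader engine][shader array] indexing of cu_bitmap is an assumption
 * made only for this example.
 */
static inline uint32_t kfd_cu_info_count_active_cus(
		const struct kfd_cu_info *cu_info)
{
	uint32_t se, sh, count = 0;

	for (se = 0; se < cu_info->num_shader_engines && se < 4; se++)
		for (sh = 0; sh < cu_info->num_shader_arrays_per_engine &&
		     sh < 4; sh++)
			count += hweight32(cu_info->cu_bitmap[se][sh]);

	/* Expected to agree with the cu_active_number reported by KGD. */
	return count;
}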
/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};
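
/*
 * Illustrative sketch, not part of the interface: the total local memory is
 * the sum of the private (CPU-invisible) and public (CPU-visible) ranges
 * reported by KGD through get_local_mem_info.
 */
static inline uint64_t kfd_local_mem_total(
		const struct kfd_local_mem_info *mem_info)
{
	return mem_info->local_mem_size_private +
	       mem_info->local_mem_size_public;
}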
enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};
enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};
/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware code schedules the user
 * mode queues and the kernel queues, such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration to the cp
 * and the list of user mode queues that are currently running.
 * The DIQ is a debugging queue that dispatches debugging commands to the cp.
 * In this scheduling mode the user mode queue oversubscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and the queues "manually". This mode is
 * used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
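
/*
 * Illustrative sketch, not part of the interface: both HWS policies rely on
 * the firmware (cp) scheduler; only KFD_SCHED_POLICY_NO_HWS programs the
 * queues manually.
 */
static inline bool kfd_sched_policy_uses_hws(enum kfd_sched_policy policy)
{
	return policy == KFD_SCHED_POLICY_HWS ||
	       policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION;
}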
struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
	/* Doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 *
	 * Any doorbell number D that satisfies the following condition
	 * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
	 *
	 * KFD currently uses 1024 doorbells per process (doorbell
	 * indices 0x000-0x3ff). If doorbells 0x0e0-0x0ff and 0x2e0-0x2ff
	 * are reserved, the mask would be set to 0x1e0 and the val to 0x0e0
	 * (see the illustrative check following this structure).
	 */
	unsigned int sdma_doorbell[2][8];
	unsigned int reserved_doorbell_mask;
	unsigned int reserved_doorbell_val;
	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;
};
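
/*
 * Illustrative sketch, not part of the interface: the reservation rule
 * described above, applied to one doorbell index. With mask 0x1e0 and
 * val 0x0e0, indices 0x0e0-0x0ff and 0x2e0-0x2ff within a process's
 * 1024-doorbell range test as reserved.
 */
static inline bool kfd_doorbell_is_reserved(
		const struct kgd2kfd_shared_resources *res,
		unsigned int doorbell)
{
	return (doorbell & res->reserved_doorbell_mask) ==
		res->reserved_doorbell_val;
}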
/* Tile configuration information, returned by get_tile_config */
struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2) /* TODO */
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3) /* TODO */
/*
 * Allocation flag attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */
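
/*
 * Illustrative sketch, not part of the interface: a domain bit and any
 * attribute bits are simply OR'ed together. The combination below (writable,
 * executable VRAM) is only an example; real flag values arrive from
 * userspace as defined in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_EXAMPLE_VRAM_RWX	(ALLOC_MEM_FLAGS_VRAM | \
						 ALLOC_MEM_FLAGS_WRITABLE | \
						 ALLOC_MEM_FLAGS_EXECUTABLE)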
/**
 * struct kfd2kgd_calls
 *
 * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
 * The buffer can be used for mqds, hpds, kernel queue, fence and runlists.
 *
 * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture.
 *
 * @get_local_mem_info: Retrieves information about GPU local memory.
 *
 * @get_gpu_clock_counter: Retrieves the GPU clock counter.
 *
 * @get_max_engine_clock_in_mhz: Retrieves the maximum GPU clock in MHz.
 *
 * @alloc_pasid: Allocate a PASID.
 * @free_pasid: Free a PASID.
 *
 * @program_sh_mem_settings: Initializes memory properties such as the main
 * aperture memory type (cached / non-cached) and the secondary aperture
 * base address, size and memory type. Only used in no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * in no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Only used in no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Only used in no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree
 * by the caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @get_fw_version: Returns FW versions from the header.
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used in no cp scheduling mode.
 *
 * @get_tile_config: Returns GPU-specific tiling mode information.
 *
 * @get_cu_info: Retrieves activated CU info.
 *
 * @get_vram_usage: Returns current VRAM usage.
 *
 * @create_process_vm: Create a VM address space for a given process and GPU.
 *
 * @destroy_process_vm: Destroy a VM.
 *
 * @get_process_page_dir: Get the physical address of a VM page directory.
 *
 * @set_vm_context_page_table_base: Program the page table base for a VMID.
 *
 * @alloc_memory_of_gpu: Allocate GPUVM memory.
 *
 * @free_memory_of_gpu: Free GPUVM memory.
 *
 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
 * space. Allocates and updates page tables and page directories as
 * needed. This function may return before all page table updates have
 * completed. This allows multiple map operations (on multiple GPUs)
 * to happen concurrently. Use sync_memory to synchronize with all
 * pending updates (see the illustrative sketch following the structure
 * definition below).
 *
 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space.
 *
 * @sync_memory: Wait for pending page table updates to complete.
 *
 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access.
 * Pins the BO and maps it to kernel address space. Such BOs are never
 * evicted. The kernel virtual address remains valid until the BO is freed.
 *
 * @restore_process_bos: Restore all BOs that belong to the
 * process. This is intended for restoring memory mappings after a TTM
 * move.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID.
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID.
 *
 * @submit_ib: Submits an IB to the specified engine by inserting the
 * IB into the corresponding ring (ring type). The IB is executed with the
 * specified VMID in a user mode context.
 *
 * @get_vm_fault_info: Return information about a recent VM fault on
 * GFXv7 and v8. If multiple VM faults occurred since the last call of
 * this function, it will return information about the first of those
 * faults. On GFXv9 the VM fault information is fully contained in the IH
 * packet and this function is not needed.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @gpu_recover: Lets kgd reset the gpu after kfd detects a CPC hang.
 *
 * @set_compute_idle: Indicates that compute is idle on a device. This
 * can be used to change power profiles depending on compute activity.
 *
 * @get_hive_id: Returns the hive id of the current device, or 0 if XGMI
 * is not enabled.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
					void **mem_obj, uint64_t *gpu_addr,
					void **cpu_ptr, bool mqd_gfx9);

	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);

	void (*get_local_mem_info)(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info);
	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);

	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);

	int (*alloc_pasid)(unsigned int bits);
	void (*free_pasid)(unsigned int pasid);

	/* Register access functions */
	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
			unsigned int vmid);
	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);

	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);

	int (*hqd_sdma_dump)(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

	int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

	int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);
	int (*address_watch_disable)(struct kgd_dev *kgd);
	int (*address_watch_execute)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
	int (*wave_control_execute)(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
	uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
	bool (*get_atc_vmid_pasid_mapping_valid)(struct kgd_dev *kgd,
					uint8_t vmid);
	uint16_t (*get_atc_vmid_pasid_mapping_pasid)(struct kgd_dev *kgd,
					uint8_t vmid);

	uint16_t (*get_fw_version)(struct kgd_dev *kgd,
				enum kgd_engine_type type);
	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
				uint64_t va, uint32_t vmid);
	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

	void (*get_cu_info)(struct kgd_dev *kgd,
			struct kfd_cu_info *cu_info);
	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
	int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid,
			void **vm, void **process_info, struct dma_fence **ef);
	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
			unsigned int pasid, void **vm, void **process_info,
			struct dma_fence **ef);
	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
	void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
	uint64_t (*get_process_page_dir)(void *vm);
	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
			uint32_t vmid, uint64_t page_table_base);
	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
			uint64_t size, void *vm,
			struct kgd_mem **mem, uint64_t *offset,
			uint32_t flags);
	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void **kptr, uint64_t *size);
	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);

	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);

	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
			uint32_t vmid, uint64_t gpu_addr,
			uint32_t *ib_cmd, uint32_t ib_len);

	int (*get_vm_fault_info)(struct kgd_dev *kgd,
			struct kfd_vm_fault_info *info);
	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);

	void (*gpu_recover)(struct kgd_dev *kgd);

	void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);

	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
};
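
/*
 * Illustrative sketch, not part of the interface: map_memory_to_gpu may
 * return before its page table updates have completed, so a caller that
 * needs the mapping to be visible follows up with sync_memory, as described
 * in the comment block above. The kfd2kgd pointer and the handles passed in
 * are assumptions of the example.
 */
static inline int kfd_example_map_and_wait(const struct kfd2kgd_calls *kfd2kgd,
					   struct kgd_dev *kgd,
					   struct kgd_mem *mem, void *vm)
{
	int r;

	r = kfd2kgd->map_memory_to_gpu(kgd, mem, vm);
	if (r)
		return r;

	/* Interruptible wait for all pending page table updates. */
	return kfd2kgd->sync_memory(kgd, mem, true);
}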
/**
 * struct kgd2kfd_calls
 *
 * @exit: Notifies amdkfd that the kgd module is unloaded.
 *
 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
 *
 * @device_init: Initialize the newly probed device (if it is a device that
 * amdkfd supports).
 *
 * @device_exit: Notifies amdkfd about the removal of a kgd device.
 *
 * @suspend: Notifies amdkfd about a suspend action done to a kgd device.
 *
 * @resume: Notifies amdkfd about a resume action done to a kgd device.
 *
 * @quiesce_mm: Quiesce all user queue access to the specified MM address
 * space.
 *
 * @resume_mm: Resume user queue access to the specified MM address space.
 *
 * @schedule_evict_and_restore_process: Schedules a work item that will
 * prepare for safe eviction of KFD BOs that belong to the specified process.
 *
 * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu.
 *
 * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu.
 *
 * This structure contains callback function pointers that the kgd driver
 * uses to notify amdkfd about certain status changes.
 */
struct kgd2kfd_calls {
	void (*exit)(void);
	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
		const struct kfd2kgd_calls *f2g);
	bool (*device_init)(struct kfd_dev *kfd,
		const struct kgd2kfd_shared_resources *gpu_resources);
	void (*device_exit)(struct kfd_dev *kfd);
	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
	void (*suspend)(struct kfd_dev *kfd);
	int (*resume)(struct kfd_dev *kfd);
	int (*quiesce_mm)(struct mm_struct *mm);
	int (*resume_mm)(struct mm_struct *mm);
	int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
			struct dma_fence *fence);
	int (*pre_reset)(struct kfd_dev *kfd);
	int (*post_reset)(struct kfd_dev *kfd);
};
int kgd2kfd_init(unsigned interface_version,
		const struct kgd2kfd_calls **g2f);
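
/*
 * Illustrative sketch, not part of the interface: how a KGD driver might
 * obtain the kgd2kfd callbacks and bring up one device. The zero-on-success
 * return of kgd2kfd_init and the minimal error handling are assumptions of
 * the example; real drivers add symbol lookup, locking and teardown.
 */
static inline struct kfd_dev *kfd_example_attach(struct kgd_dev *kgd,
		struct pci_dev *pdev, const struct kfd2kgd_calls *f2g,
		const struct kgd2kfd_shared_resources *gpu_resources)
{
	const struct kgd2kfd_calls *kgd2kfd = NULL;
	struct kfd_dev *kfd;

	/* Both sides must agree on KFD_INTERFACE_VERSION. */
	if (kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd) || !kgd2kfd)
		return NULL;

	kfd = kgd2kfd->probe(kgd, pdev, f2g);	/* announce the device */
	if (!kfd)
		return NULL;

	/* Hand over the shared resources; teardown on failure is omitted. */
	if (!kgd2kfd->device_init(kfd, gpu_resources))
		return NULL;

	return kfd;
}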
#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */