/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_list_entry;
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
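/*
 * For illustration: with a 9-bit block size (a common default, assumed
 * here), AMDGPU_VM_PTE_COUNT(adev) == 1 << 9 == 512 entries, so a single
 * page table block maps 512 * 4KiB = 2MiB of address space.
 */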
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE	32768

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)
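/*
 * Illustrative note: the 5-bit fragment field is assumed to encode
 * log2(fragment size) - 12, so AMDGPU_PTE_FRAG(9) would mark a 2MiB
 * fragment (4KiB << 9). See the fragment handling in amdgpu_vm.c for the
 * authoritative encoding.
 */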
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

#define AMDGPU_PTE_MTYPE(a)	((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
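/*
 * A minimal sketch of composing a PTE flags word for a snooped system
 * page (the exact flag combination is illustrative, not mandated here):
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
 */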
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS	2
#define AMDGPU_GFXHUB		0
#define AMDGPU_MMHUB		1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE	(1ULL << 20)
/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_VA_HOLE_START	0x0000800000000000ULL
#define AMDGPU_VA_HOLE_END	0xffff800000000000ULL

/*
 * Hardware is programmed as if the hole doesn't exist with start and end
 * address values.
 *
 * This mask is used to remove the upper 16 bits of the VA and so come up
 * with the linear address value.
 */
#define AMDGPU_VA_HOLE_MASK	0x0000ffffffffffffULL
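/*
 * Illustration only: a sign-extended VA above the hole can be folded back
 * into the linear range the hardware is programmed with, e.g.
 *
 *	uint64_t linear = user_va & AMDGPU_VA_HOLE_MASK;
 *
 * where user_va is a hypothetical 48-bit canonical address.
 */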
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
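/*
 * A minimal usage sketch (assuming an amdgpu_vm_manager instance named
 * "mgr"); the update-mode bits are tested individually:
 *
 *	bool gfx_on_cpu = mgr.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX;
 *	bool cp_on_cpu  = mgr.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE;
 */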
/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm	*vm;
	struct amdgpu_bo	*bo;

	/* protected by bo being reserved */
	struct list_head	bo_list;

	/* protected by spinlock */
	struct list_head	vm_status;

	/* protected by the BO being reserved */
	bool			moved;
};
struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
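/*
 * Round-trip illustration with hypothetical pasid/addr values; note the
 * address must already fit in the low 48 bits for the packing to be
 * lossless:
 *
 *	u64 fault = AMDGPU_VM_FAULT(pasid, addr);
 *	u32 p = AMDGPU_VM_FAULT_PASID(fault);	(recovers pasid)
 *	u64 a = AMDGPU_VM_FAULT_ADDR(fault);	(page-aligned low 48 bits of addr)
 */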
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which relocated and their parent need an update */
	struct list_head	relocated;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;
	spinlock_t		moved_lock;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entity for page table updates */
	struct drm_sched_entity	entity;

	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Limit non-retry fault storms */
	unsigned int		fault_credit;

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;
};
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring			*vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;

	/* partial resident texture handling */
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
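/*
 * A minimal, hedged usage sketch (the names "adev", "vm", "bo", "va" and
 * "size" are illustrative): map a BO into a VM and tear the mapping down
 * again:
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	int r = amdgpu_vm_bo_map(adev, bo_va, va, 0, size,
 *				 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	...
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, va);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */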
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

#endif