/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
struct amdgpu_bo_list_entry;
struct amdgpu_mem_stats;
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
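/*
 * Example (illustrative): the amdgpu.vm_block_size parameter is commonly
 * 9 bits, in which case AMDGPU_VM_PTE_COUNT(adev) == 1 << 9 == 512
 * entries per page table.
 */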
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

#define AMDGPU_PTE_TMZ		(1ULL << 3)

#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)
#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)
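/*
 * Example (illustrative): the fragment field encodes a power-of-two run
 * of contiguous 4KB pages that the TLB can cache as a single entry;
 * e.g. AMDGPU_PTE_FRAG(4) marks a 4KB << 4 == 64KB fragment.
 */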
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* Flag combination to set no-retry with TF disabled */
#define AMDGPU_VM_NORETRY_FLAGS	(AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
				AMDGPU_PTE_TF)

/* Flag combination to set no-retry with TF enabled */
#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
				   AMDGPU_PTE_PRT)
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC	0
#define AMDGPU_MTYPE_CC	2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)
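/*
 * Example (illustrative): changing the memory type of a PTE means
 * clearing the MTYPE field through its mask before ORing in the new
 * value, e.g. on VEGA10:
 *
 *	flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 *	flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 */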
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
/* How much VRAM is reserved for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)
/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS	13
#define AMDGPU_GFXHUB_START	0
#define AMDGPU_MMHUB0_START	8
#define AMDGPU_MMHUB1_START	12
#define AMDGPU_GFXHUB(x)	(AMDGPU_GFXHUB_START + (x))
#define AMDGPU_MMHUB0(x)	(AMDGPU_MMHUB0_START + (x))
#define AMDGPU_MMHUB1(x)	(AMDGPU_MMHUB1_START + (x))

#define AMDGPU_IS_GFXHUB(x)	((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
#define AMDGPU_IS_MMHUB0(x)	((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
#define AMDGPU_IS_MMHUB1(x)	((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
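/*
 * Example (illustrative): hub indices are flat, so AMDGPU_GFXHUB(0) == 0,
 * AMDGPU_MMHUB0(1) == 9 and AMDGPU_MMHUB1(0) == 12, and the AMDGPU_IS_*()
 * helpers classify an index back into its hub type:
 *
 *	if (AMDGPU_IS_MMHUB0(vmhub))
 *		instance = vmhub - AMDGPU_MMHUB0_START;
 */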
/* Reserve space at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_CSA_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_CSA_START(adev)	(((adev)->vm_manager.max_pfn \
						  << AMDGPU_GPU_PAGE_SHIFT)  \
						 - AMDGPU_VA_RESERVED_CSA_SIZE)
#define AMDGPU_VA_RESERVED_SEQ64_SIZE		(2ULL << 20)
#define AMDGPU_VA_RESERVED_SEQ64_START(adev)	(AMDGPU_VA_RESERVED_CSA_START(adev) \
						 - AMDGPU_VA_RESERVED_SEQ64_SIZE)
#define AMDGPU_VA_RESERVED_TRAP_SIZE		(2ULL << 12)
#define AMDGPU_VA_RESERVED_TRAP_START(adev)	(AMDGPU_VA_RESERVED_SEQ64_START(adev) \
						 - AMDGPU_VA_RESERVED_TRAP_SIZE)
#define AMDGPU_VA_RESERVED_BOTTOM		(1ULL << 16)
#define AMDGPU_VA_RESERVED_TOP			(AMDGPU_VA_RESERVED_TRAP_SIZE + \
						 AMDGPU_VA_RESERVED_SEQ64_SIZE + \
						 AMDGPU_VA_RESERVED_CSA_SIZE)
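/*
 * Example (illustrative): for a hypothetical 48-bit address space the
 * reserved top-of-VA layout works out to
 *
 *	CSA:   [2^48 - 2MB, 2^48)
 *	seq64: [2^48 - 4MB, 2^48 - 2MB)
 *	trap:  [2^48 - 4MB - 8KB, 2^48 - 4MB)
 *
 * and AMDGPU_VA_RESERVED_TOP is the sum of the three sizes.
 */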
/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
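/*
 * Example (illustrative): the two bits are tested independently, so a
 * vm_update_mode of 3 routes both graphics and compute page table
 * updates through the CPU:
 *
 *	if (adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE)
 *		vm->use_cpu_for_update = true;
 */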
/* VMPT level enumerate, and the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base *next;

	/* protected by spinlock */
	struct list_head vm_status;

	/* protected by the BO being reserved */
	bool moved;
};
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);

	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
struct amdgpu_task_info {
	char process_name[TASK_COMM_LEN];
	char task_name[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	struct kref refcount;
};
/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: return true if page table is freed when updating
	 */
	bool table_freed;

	/**
	 * @allow_override: true for memory that is not uncached: allows MTYPE
	 * to be overridden for NUMA local memory.
	 */
	bool allow_override;
};
struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
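/*
 * Example (illustrative): a VM picks one of the two backends declared
 * further below and then performs every table update through this
 * vtable:
 *
 *	vm->update_funcs = vm->use_cpu_for_update ?
 *			   &amdgpu_vm_cpu_funcs : &amdgpu_vm_sdma_funcs;
 */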
struct amdgpu_vm_fault_info {
	/* fault address */
	uint64_t addr;
	/* fault status register */
	uint32_t status;
	/* which vmhub? gfxhub, mmhub, etc. */
	unsigned int vmhub;
};
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached va;

	/* Lock to prevent eviction while we are updating page tables
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex eviction_lock;
	bool evicting;
	unsigned int saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t status_lock;
	/* Per-VM and PT BOs which need a validation */
	struct list_head evicted;

	/* BOs for user mode queues that need a validation */
	struct list_head evicted_user;

	/* PT BOs which were relocated and whose parent needs an update */
	struct list_head relocated;

	/* per-VM BOs moved, but not yet updated in the PT */
	struct list_head moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head invalidated;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* invalidated BOs that have already been updated in the PTs */
	struct list_head done;

	/* PT BOs scheduled to be freed and zero-filled while the vm resv is not held */
	struct list_head pt_freed;
	struct work_struct pt_free_work;
	/* contains the page directory */
	struct amdgpu_vm_bo_base root;
	struct dma_fence *last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity immediate;
	struct drm_sched_entity delayed;

	/* Last finished delayed update */
	atomic64_t tlb_seq;
	struct dma_fence *last_tlb_flush;
	atomic64_t kfd_last_flushed_seq;

	/* How many times we had to re-generate the page tables */
	uint64_t generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence *last_unlocked;

	unsigned int pasid;
	bool reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs *update_funcs;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info *task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool is_compute_context;

	/* Memory partition number, -1 means any partition */
	int32_t mem_id;

	/* cached fault info */
	struct amdgpu_vm_fault_info fault_info;
};
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int first_kfd_vmid;
	bool concurrent_flush;

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint64_t max_pfn;
	uint32_t num_level;
	uint32_t block_size;
	uint32_t fragment_size;
	enum amdgpu_vm_level root_level;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_scheds;
	struct amdgpu_ring *page_fault;

	/* partial resident texture handling */
	atomic_t num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up VM of a page fault
	 */
	struct xarray pasids;
};
struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
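/*
 * Example (illustrative): these wrappers dispatch through the per-ASIC
 * vtable; this writes "count" PTEs starting at GPU address "pe", with
 * the PTE value advancing by "incr" per entry:
 *
 *	amdgpu_vm_write_pte(adev, ib, pe, value, count, incr);
 */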
extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*callback)(void *p, struct amdgpu_bo *bo),
		       void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket);
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
480 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
481 struct amdgpu_bo *bo, bool evicted);
482 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
483 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
484 struct amdgpu_bo *bo);
485 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
486 struct amdgpu_vm *vm,
487 struct amdgpu_bo *bo);
488 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
489 struct amdgpu_bo_va *bo_va,
490 uint64_t addr, uint64_t offset,
491 uint64_t size, uint64_t flags);
492 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
493 struct amdgpu_bo_va *bo_va,
494 uint64_t addr, uint64_t offset,
495 uint64_t size, uint64_t flags);
496 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
497 struct amdgpu_bo_va *bo_va,
499 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
500 struct amdgpu_vm *vm,
501 uint64_t saddr, uint64_t size);
502 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
504 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
505 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
506 struct amdgpu_bo_va *bo_va);
507 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
508 uint32_t fragment_size_default, unsigned max_level,
510 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
511 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
512 struct amdgpu_job *job);
513 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
515 struct amdgpu_task_info *
516 amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);
518 struct amdgpu_task_info *
519 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
521 void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
			int32_t xcp_id);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the tlb flush sequence number, which indicates that the VM TLBs need
 * to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}
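/*
 * Example (illustrative, "cached_seq" is a hypothetical caller-side
 * variable): comparing a previously sampled value against the current
 * sequence tells the caller whether its cached TLB state is stale:
 *
 *	if (cached_seq != amdgpu_vm_tlb_seq(vm)) {
 *		flush the VM TLBs, then:
 *		cached_seq = amdgpu_vm_tlb_seq(vm);
 *	}
 */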
/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
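/*
 * Example (illustrative): the typical pattern brackets an eviction-
 * sensitive page table update so that memory reclaim cannot recurse
 * into an MMU notifier that needs this same lock:
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	... update page tables ...
 *	amdgpu_vm_eviction_unlock(vm);
 */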
void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
				  unsigned int pasid,
				  uint64_t addr,
				  uint32_t status,
				  unsigned int vmhub);

#endif