2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
32 #include <drm/amdgpu_drm.h>
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
41 * GPUVM is similar to the legacy gart on older asics, however
42 * rather than there being a single global gart table
43 * for the entire GPU, there are multiple VM page tables active
44 * at any given time. The VM page tables can contain a mix of
45 * vram pages and system memory pages, and system memory pages
46 * can be mapped as snooped (cached system pages) or unsnooped
47 * (uncached system pages).
48 * Each VM has an ID associated with it and there is a page table
49 * associated with each VMID. When executing a command buffer,
50 * the kernel tells the ring what VMID to use for that command
51 * buffer. VMIDs are allocated dynamically as commands are submitted.
52 * The userspace drivers maintain their own address space and the kernel
53 * sets up their page tables accordingly when they submit their
54 * command buffers and a VMID is assigned.
55 * Cayman/Trinity support up to 8 active VMs at any given time;
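 *
 * As a purely illustrative sketch (assuming a Vega10-style four level setup
 * with the usual 9-bit block size and 4KB GPU pages, not a statement about
 * every ASIC), a GPU page frame number is decomposed roughly as:
 *
 *   pte_idx  = pfn & 0x1ff;           9 bits -> page table (PTB)
 *   pdb0_idx = (pfn >> 9) & 0x1ff;    9 bits -> PDB0
 *   pdb1_idx = (pfn >> 18) & 0x1ff;   9 bits -> PDB1
 *   root_idx = pfn >> 27;             remaining bits -> root directory
 *
 * which matches the per-level shifts computed by amdgpu_vm_level_shift()
 * below.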
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 START, LAST, static, amdgpu_vm_it)
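
/*
 * INTERVAL_TREE_DEFINE() above generates a small static lookup API
 * (amdgpu_vm_it_insert, amdgpu_vm_it_remove, amdgpu_vm_it_iter_first and
 * amdgpu_vm_it_iter_next) that the rest of this file uses to find
 * amdgpu_bo_va_mapping structures by GPU address range. A minimal usage
 * sketch, mirroring how the helpers are called further down:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	amdgpu_vm_it_insert(mapping, &vm->va);
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
 *		handle_overlapping_mapping(m);
 *	amdgpu_vm_it_remove(mapping, &vm->va);
 *
 * where handle_overlapping_mapping() is only a placeholder for whatever the
 * caller wants to do with each mapping that overlaps [saddr, eaddr].
 */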
69 * struct amdgpu_pte_update_params - Local structure
71 * Encapsulate some VM table update parameters to reduce
72 * the number of function parameters
75 struct amdgpu_pte_update_params {
78 * @adev: amdgpu device we do this update for
80 struct amdgpu_device *adev;
83 * @vm: optional amdgpu_vm we do this update for
88 * @src: address where to copy page table entries from
93 * @ib: indirect buffer to fill with commands
98 * @func: Function which actually does the update
100 void (*func)(struct amdgpu_pte_update_params *params,
101 struct amdgpu_bo *bo, uint64_t pe,
102 uint64_t addr, unsigned count, uint32_t incr,
107 * DMA addresses to use for mapping, used during VM update by CPU
109 dma_addr_t *pages_addr;
114 * Kernel pointer of PD/PT BO that needs to be updated,
115 * used during VM update by CPU
121 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
123 struct amdgpu_prt_cb {
126 * @adev: amdgpu device
128 struct amdgpu_device *adev;
133 struct dma_fence_cb cb;
137 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
139 * @base: base structure for tracking BO usage in a VM
140 * @vm: vm to which bo is to be added
141 * @bo: amdgpu buffer object
143 * Initialize a bo_va_base structure and add it to the appropriate lists
146 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
147 struct amdgpu_vm *vm,
148 struct amdgpu_bo *bo)
152 INIT_LIST_HEAD(&base->bo_list);
153 INIT_LIST_HEAD(&base->vm_status);
157 list_add_tail(&base->bo_list, &bo->va);
159 if (bo->tbo.type == ttm_bo_type_kernel)
160 list_move(&base->vm_status, &vm->relocated);
162 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
165 if (bo->preferred_domains &
166 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
170 * We checked all the prerequisites, but it looks like this per-VM BO
171 * is currently evicted. Add the BO to the evicted list to make sure it
172 * is validated on next VM use to avoid faults.
174 list_move_tail(&base->vm_status, &vm->evicted);
178 * amdgpu_vm_level_shift - return the addr shift for each level
180 * @adev: amdgpu_device pointer
184 * The number of bits the pfn needs to be right shifted for a level.
186 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
189 unsigned shift = 0xff;
195 shift = 9 * (AMDGPU_VM_PDB0 - level) +
196 adev->vm_manager.block_size;
202 dev_err(adev->dev, "the level%d isn't supported.\n", level);
209 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
211 * @adev: amdgpu_device pointer
215 * The number of entries in a page directory or page table.
217 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
220 unsigned shift = amdgpu_vm_level_shift(adev,
221 adev->vm_manager.root_level);
223 if (level == adev->vm_manager.root_level)
224 /* For the root directory */
225 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
226 else if (level != AMDGPU_VM_PTB)
227 /* Everything in between */
230 /* For the page tables on the leaves */
231 return AMDGPU_VM_PTE_COUNT(adev);
235 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
237 * @adev: amdgpu_device pointer
241 * The size of the BO for a page directory or page table in bytes.
243 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
245 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
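
/*
 * Back-of-the-envelope example (illustrative only, assuming the typical
 * 9-bit block size, i.e. AMDGPU_VM_PTE_COUNT == 512): a page table or
 * interior page directory BO is 512 * 8 = 4KB, and a 48-bit (256TB)
 * address space with a 27-bit root shift needs a root directory of
 * 2^36 pfns >> 27 = 512 entries, i.e. another 4KB BO.
 */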
249 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
251 * @vm: vm providing the BOs
252 * @validated: head of validation list
253 * @entry: entry to add
255 * Add the page directory to the list of BOs to
256 * validate for command submission.
258 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
259 struct list_head *validated,
260 struct amdgpu_bo_list_entry *entry)
262 entry->robj = vm->root.base.bo;
264 entry->tv.bo = &entry->robj->tbo;
265 entry->tv.shared = true;
266 entry->user_pages = NULL;
267 list_add(&entry->tv.head, validated);
271 * amdgpu_vm_validate_pt_bos - validate the page table BOs
273 * @adev: amdgpu device pointer
274 * @vm: vm providing the BOs
275 * @validate: callback to do the validation
276 * @param: parameter for the validation callback
278 * Validate the page table BOs on command submission if necessary.
283 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
284 int (*validate)(void *p, struct amdgpu_bo *bo),
287 struct ttm_bo_global *glob = adev->mman.bdev.glob;
288 struct amdgpu_vm_bo_base *bo_base, *tmp;
291 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
292 struct amdgpu_bo *bo = bo_base->bo;
295 r = validate(param, bo);
299 spin_lock(&glob->lru_lock);
300 ttm_bo_move_to_lru_tail(&bo->tbo);
302 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
303 spin_unlock(&glob->lru_lock);
306 if (bo->tbo.type != ttm_bo_type_kernel) {
307 spin_lock(&vm->moved_lock);
308 list_move(&bo_base->vm_status, &vm->moved);
309 spin_unlock(&vm->moved_lock);
311 list_move(&bo_base->vm_status, &vm->relocated);
315 spin_lock(&glob->lru_lock);
316 list_for_each_entry(bo_base, &vm->idle, vm_status) {
317 struct amdgpu_bo *bo = bo_base->bo;
322 ttm_bo_move_to_lru_tail(&bo->tbo);
324 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
326 spin_unlock(&glob->lru_lock);
332 * amdgpu_vm_ready - check VM is ready for updates
336 * Check if all VM PDs/PTs are ready for updates
339 * True if eviction list is empty.
341 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
343 return list_empty(&vm->evicted);
347 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
349 * @adev: amdgpu_device pointer
350 * @vm: VM to clear BO from
352 * @level: level this BO is at
353 * @pte_support_ats: indicate ATS support from PTE
355 * Root PD needs to be reserved when calling this.
358 * 0 on success, errno otherwise.
360 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
361 struct amdgpu_vm *vm, struct amdgpu_bo *bo,
362 unsigned level, bool pte_support_ats)
364 struct ttm_operation_ctx ctx = { true, false };
365 struct dma_fence *fence = NULL;
366 unsigned entries, ats_entries;
367 struct amdgpu_ring *ring;
368 struct amdgpu_job *job;
372 addr = amdgpu_bo_gpu_offset(bo);
373 entries = amdgpu_bo_size(bo) / 8;
375 if (pte_support_ats) {
376 if (level == adev->vm_manager.root_level) {
377 ats_entries = amdgpu_vm_level_shift(adev, level);
378 ats_entries += AMDGPU_GPU_PAGE_SHIFT;
379 ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
380 ats_entries = min(ats_entries, entries);
381 entries -= ats_entries;
383 ats_entries = entries;
390 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
392 r = reservation_object_reserve_shared(bo->tbo.resv);
396 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
400 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
407 ats_value = AMDGPU_PTE_DEFAULT_ATC;
408 if (level != AMDGPU_VM_PTB)
409 ats_value |= AMDGPU_PDE_PTE;
411 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
412 ats_entries, 0, ats_value);
413 addr += ats_entries * 8;
417 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
420 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
422 WARN_ON(job->ibs[0].length_dw > 64);
423 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
424 AMDGPU_FENCE_OWNER_UNDEFINED, false);
428 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
433 amdgpu_bo_fence(bo, fence, true);
434 dma_fence_put(fence);
437 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
438 level, pte_support_ats);
443 amdgpu_job_free(job);
450 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
452 * @adev: amdgpu_device pointer
455 * @saddr: start of the address range
456 * @eaddr: end of the address range
458 * @ats: indicate ATS support from PTE
460 * Make sure the page directories and page tables are allocated
463 * 0 on success, errno otherwise.
465 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
466 struct amdgpu_vm *vm,
467 struct amdgpu_vm_pt *parent,
468 uint64_t saddr, uint64_t eaddr,
469 unsigned level, bool ats)
471 unsigned shift = amdgpu_vm_level_shift(adev, level);
472 unsigned pt_idx, from, to;
476 if (!parent->entries) {
477 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
479 parent->entries = kvmalloc_array(num_entries,
480 sizeof(struct amdgpu_vm_pt),
481 GFP_KERNEL | __GFP_ZERO);
482 if (!parent->entries)
484 memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
487 from = saddr >> shift;
489 if (from >= amdgpu_vm_num_entries(adev, level) ||
490 to >= amdgpu_vm_num_entries(adev, level))
494 saddr = saddr & ((1 << shift) - 1);
495 eaddr = eaddr & ((1 << shift) - 1);
497 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
498 if (vm->root.base.bo->shadow)
499 flags |= AMDGPU_GEM_CREATE_SHADOW;
500 if (vm->use_cpu_for_update)
501 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
503 flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
505 /* walk over the address space and allocate the page tables */
506 for (pt_idx = from; pt_idx <= to; ++pt_idx) {
507 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
508 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
509 struct amdgpu_bo *pt;
511 if (!entry->base.bo) {
512 struct amdgpu_bo_param bp;
514 memset(&bp, 0, sizeof(bp));
515 bp.size = amdgpu_vm_bo_size(adev, level);
516 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
517 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
519 bp.type = ttm_bo_type_kernel;
521 r = amdgpu_bo_create(adev, &bp, &pt);
525 r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
527 amdgpu_bo_unref(&pt->shadow);
528 amdgpu_bo_unref(&pt);
532 if (vm->use_cpu_for_update) {
533 r = amdgpu_bo_kmap(pt, NULL);
535 amdgpu_bo_unref(&pt->shadow);
536 amdgpu_bo_unref(&pt);
541 /* Keep a reference to the root directory to avoid
542 * freeing them up in the wrong order.
544 pt->parent = amdgpu_bo_ref(parent->base.bo);
546 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
549 if (level < AMDGPU_VM_PTB) {
550 uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
551 uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
553 r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
554 sub_eaddr, level, ats);
564 * amdgpu_vm_alloc_pts - Allocate page tables.
566 * @adev: amdgpu_device pointer
567 * @vm: VM to allocate page tables for
568 * @saddr: Start address which needs to be allocated
569 * @size: Size from start address we need.
571 * Make sure the page tables are allocated.
574 * 0 on success, errno otherwise.
576 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
577 struct amdgpu_vm *vm,
578 uint64_t saddr, uint64_t size)
583 /* validate the parameters */
584 if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
587 eaddr = saddr + size - 1;
589 if (vm->pte_support_ats)
590 ats = saddr < AMDGPU_VA_HOLE_START;
592 saddr /= AMDGPU_GPU_PAGE_SIZE;
593 eaddr /= AMDGPU_GPU_PAGE_SIZE;
595 if (eaddr >= adev->vm_manager.max_pfn) {
596 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
597 eaddr, adev->vm_manager.max_pfn);
601 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
602 adev->vm_manager.root_level, ats);
606 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
608 * @adev: amdgpu_device pointer
610 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
612 const struct amdgpu_ip_block *ip_block;
613 bool has_compute_vm_bug;
614 struct amdgpu_ring *ring;
617 has_compute_vm_bug = false;
619 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
621 /* Compute has a VM bug for GFX version < 7.
622 Compute has a VM bug for GFX 8 MEC firmware version < 673. */
623 if (ip_block->version->major <= 7)
624 has_compute_vm_bug = true;
625 else if (ip_block->version->major == 8)
626 if (adev->gfx.mec_fw_version < 673)
627 has_compute_vm_bug = true;
630 for (i = 0; i < adev->num_rings; i++) {
631 ring = adev->rings[i];
632 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
633 /* only compute rings */
634 ring->has_compute_vm_bug = has_compute_vm_bug;
636 ring->has_compute_vm_bug = false;
641 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
643 * @ring: ring on which the job will be submitted
644 * @job: job to submit
647 * True if sync is needed.
649 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
650 struct amdgpu_job *job)
652 struct amdgpu_device *adev = ring->adev;
653 unsigned vmhub = ring->funcs->vmhub;
654 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
655 struct amdgpu_vmid *id;
656 bool gds_switch_needed;
657 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
661 id = &id_mgr->ids[job->vmid];
662 gds_switch_needed = ring->funcs->emit_gds_switch && (
663 id->gds_base != job->gds_base ||
664 id->gds_size != job->gds_size ||
665 id->gws_base != job->gws_base ||
666 id->gws_size != job->gws_size ||
667 id->oa_base != job->oa_base ||
668 id->oa_size != job->oa_size);
670 if (amdgpu_vmid_had_gpu_reset(adev, id))
673 return vm_flush_needed || gds_switch_needed;
677 * amdgpu_vm_flush - hardware flush the vm
679 * @ring: ring to use for flush
681 * @need_pipe_sync: is pipe sync needed
683 * Emit a VM flush when it is necessary.
686 * 0 on success, errno otherwise.
688 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
690 struct amdgpu_device *adev = ring->adev;
691 unsigned vmhub = ring->funcs->vmhub;
692 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
693 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
694 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
695 id->gds_base != job->gds_base ||
696 id->gds_size != job->gds_size ||
697 id->gws_base != job->gws_base ||
698 id->gws_size != job->gws_size ||
699 id->oa_base != job->oa_base ||
700 id->oa_size != job->oa_size);
701 bool vm_flush_needed = job->vm_needs_flush;
702 bool pasid_mapping_needed = id->pasid != job->pasid ||
703 !id->pasid_mapping ||
704 !dma_fence_is_signaled(id->pasid_mapping);
705 struct dma_fence *fence = NULL;
706 unsigned patch_offset = 0;
709 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
710 gds_switch_needed = true;
711 vm_flush_needed = true;
712 pasid_mapping_needed = true;
715 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
716 vm_flush_needed &= !!ring->funcs->emit_vm_flush;
717 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
718 ring->funcs->emit_wreg;
720 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
723 if (ring->funcs->init_cond_exec)
724 patch_offset = amdgpu_ring_init_cond_exec(ring);
727 amdgpu_ring_emit_pipeline_sync(ring);
729 if (vm_flush_needed) {
730 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
731 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
734 if (pasid_mapping_needed)
735 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
737 if (vm_flush_needed || pasid_mapping_needed) {
738 r = amdgpu_fence_emit(ring, &fence, 0);
743 if (vm_flush_needed) {
744 mutex_lock(&id_mgr->lock);
745 dma_fence_put(id->last_flush);
746 id->last_flush = dma_fence_get(fence);
747 id->current_gpu_reset_count =
748 atomic_read(&adev->gpu_reset_counter);
749 mutex_unlock(&id_mgr->lock);
752 if (pasid_mapping_needed) {
753 id->pasid = job->pasid;
754 dma_fence_put(id->pasid_mapping);
755 id->pasid_mapping = dma_fence_get(fence);
757 dma_fence_put(fence);
759 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
760 id->gds_base = job->gds_base;
761 id->gds_size = job->gds_size;
762 id->gws_base = job->gws_base;
763 id->gws_size = job->gws_size;
764 id->oa_base = job->oa_base;
765 id->oa_size = job->oa_size;
766 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
767 job->gds_size, job->gws_base,
768 job->gws_size, job->oa_base,
772 if (ring->funcs->patch_cond_exec)
773 amdgpu_ring_patch_cond_exec(ring, patch_offset);
775 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
776 if (ring->funcs->emit_switch_buffer) {
777 amdgpu_ring_emit_switch_buffer(ring);
778 amdgpu_ring_emit_switch_buffer(ring);
784 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
787 * @bo: requested buffer object
789 * Find @bo inside the requested vm.
790 * Search inside the @bo's vm list for the requested vm.
791 * Returns the found bo_va or NULL if none is found.
793 * Object has to be reserved!
796 * Found bo_va or NULL.
798 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
799 struct amdgpu_bo *bo)
801 struct amdgpu_bo_va *bo_va;
803 list_for_each_entry(bo_va, &bo->va, base.bo_list) {
804 if (bo_va->base.vm == vm) {
812 * amdgpu_vm_do_set_ptes - helper to call the right asic function
814 * @params: see amdgpu_pte_update_params definition
815 * @bo: PD/PT to update
816 * @pe: addr of the page entry
817 * @addr: dst addr to write into pe
818 * @count: number of page entries to update
819 * @incr: increase next addr by incr bytes
820 * @flags: hw access flags
822 * Traces the parameters and calls the right asic functions
823 * to setup the page table using the DMA.
825 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
826 struct amdgpu_bo *bo,
827 uint64_t pe, uint64_t addr,
828 unsigned count, uint32_t incr,
831 pe += amdgpu_bo_gpu_offset(bo);
832 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
835 amdgpu_vm_write_pte(params->adev, params->ib, pe,
836 addr | flags, count, incr);
839 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
845 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
847 * @params: see amdgpu_pte_update_params definition
848 * @bo: PD/PT to update
849 * @pe: addr of the page entry
850 * @addr: dst addr to write into pe
851 * @count: number of page entries to update
852 * @incr: increase next addr by incr bytes
853 * @flags: hw access flags
855 * Traces the parameters and calls the DMA function to copy the PTEs.
857 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
858 struct amdgpu_bo *bo,
859 uint64_t pe, uint64_t addr,
860 unsigned count, uint32_t incr,
863 uint64_t src = (params->src + (addr >> 12) * 8);
865 pe += amdgpu_bo_gpu_offset(bo);
866 trace_amdgpu_vm_copy_ptes(pe, src, count);
868 amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
872 * amdgpu_vm_map_gart - Resolve gart mapping of addr
874 * @pages_addr: optional DMA address to use for lookup
875 * @addr: the unmapped addr
877 * Look up the physical address of the page that the pte resolves
881 * The pointer for the page table entry.
883 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
887 /* page table offset */
888 result = pages_addr[addr >> PAGE_SHIFT];
890 /* in case cpu page size != gpu page size */
891 result |= addr & (~PAGE_MASK);
893 result &= 0xFFFFFFFFFFFFF000ULL;
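
/*
 * Worked example (illustrative, assuming 64KB CPU pages and 4KB GPU pages):
 * for addr = 0x15000, pages_addr[addr >> PAGE_SHIFT] yields the DMA address
 * of the containing 64KB CPU page, the OR with (addr & ~PAGE_MASK) adds the
 * 0x5000 offset inside that CPU page, and the final mask rounds the result
 * down to the 4KB GPU page boundary the PTE has to point at.
 */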
899 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
901 * @params: see amdgpu_pte_update_params definition
902 * @bo: PD/PT to update
903 * @pe: kmap addr of the page entry
904 * @addr: dst addr to write into pe
905 * @count: number of page entries to update
906 * @incr: increase next addr by incr bytes
907 * @flags: hw access flags
909 * Write count number of PT/PD entries directly.
911 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
912 struct amdgpu_bo *bo,
913 uint64_t pe, uint64_t addr,
914 unsigned count, uint32_t incr,
920 pe += (unsigned long)amdgpu_bo_kptr(bo);
922 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
924 for (i = 0; i < count; i++) {
925 value = params->pages_addr ?
926 amdgpu_vm_map_gart(params->pages_addr, addr) :
928 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
936 * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
938 * @adev: amdgpu_device pointer
940 * @owner: fence owner
943 * 0 on success, errno otherwise.
945 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
948 struct amdgpu_sync sync;
951 amdgpu_sync_create(&sync);
952 amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
953 r = amdgpu_sync_wait(&sync, true);
954 amdgpu_sync_free(&sync);
960 * amdgpu_vm_update_pde - update a single level in the hierarchy
962 * @param: parameters for the update
964 * @parent: parent directory
965 * @entry: entry to update
967 * Makes sure the requested entry in parent is up to date.
969 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
970 struct amdgpu_vm *vm,
971 struct amdgpu_vm_pt *parent,
972 struct amdgpu_vm_pt *entry)
974 struct amdgpu_bo *bo = parent->base.bo, *pbo;
975 uint64_t pde, pt, flags;
978 /* Don't update huge pages here */
982 for (level = 0, pbo = bo->parent; pbo; ++level)
985 level += params->adev->vm_manager.root_level;
986 pt = amdgpu_bo_gpu_offset(entry->base.bo);
987 flags = AMDGPU_PTE_VALID;
988 amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
989 pde = (entry - parent->entries) * 8;
991 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
992 params->func(params, bo, pde, pt, 1, 0, flags);
996 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
998 * @adev: amdgpu_device pointer
1000 * @parent: parent PD
1001 * @level: VMPT level
1003 * Mark all PD levels as invalid after an error.
1005 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1006 struct amdgpu_vm *vm,
1007 struct amdgpu_vm_pt *parent,
1010 unsigned pt_idx, num_entries;
1013 * Recurse into the subdirectories. This recursion is harmless because
1014 * we only have a maximum of 5 layers.
1016 num_entries = amdgpu_vm_num_entries(adev, level);
1017 for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1018 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1020 if (!entry->base.bo)
1023 if (!entry->base.moved)
1024 list_move(&entry->base.vm_status, &vm->relocated);
1025 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1030 * amdgpu_vm_update_directories - make sure that all directories are valid
1032 * @adev: amdgpu_device pointer
1035 * Makes sure all directories are up to date.
1038 * 0 for success, error for failure.
1040 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1041 struct amdgpu_vm *vm)
1043 struct amdgpu_pte_update_params params;
1044 struct amdgpu_job *job;
1048 if (list_empty(&vm->relocated))
1052 memset(&params, 0, sizeof(params));
1055 if (vm->use_cpu_for_update) {
1056 struct amdgpu_vm_bo_base *bo_base;
1058 list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1059 r = amdgpu_bo_kmap(bo_base->bo, NULL);
1064 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1068 params.func = amdgpu_vm_cpu_set_ptes;
1071 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1075 params.ib = &job->ibs[0];
1076 params.func = amdgpu_vm_do_set_ptes;
1079 while (!list_empty(&vm->relocated)) {
1080 struct amdgpu_vm_bo_base *bo_base, *parent;
1081 struct amdgpu_vm_pt *pt, *entry;
1082 struct amdgpu_bo *bo;
1084 bo_base = list_first_entry(&vm->relocated,
1085 struct amdgpu_vm_bo_base,
1087 bo_base->moved = false;
1088 list_del_init(&bo_base->vm_status);
1090 bo = bo_base->bo->parent;
1094 parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1096 pt = container_of(parent, struct amdgpu_vm_pt, base);
1097 entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1099 amdgpu_vm_update_pde(&params, vm, pt, entry);
1101 if (!vm->use_cpu_for_update &&
1102 (ndw - params.ib->length_dw) < 32)
1106 if (vm->use_cpu_for_update) {
1109 amdgpu_asic_flush_hdp(adev, NULL);
1110 } else if (params.ib->length_dw == 0) {
1111 amdgpu_job_free(job);
1113 struct amdgpu_bo *root = vm->root.base.bo;
1114 struct amdgpu_ring *ring;
1115 struct dma_fence *fence;
1117 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1120 amdgpu_ring_pad_ib(ring, params.ib);
1121 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1122 AMDGPU_FENCE_OWNER_VM, false);
1123 WARN_ON(params.ib->length_dw > ndw);
1124 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1129 amdgpu_bo_fence(root, fence, true);
1130 dma_fence_put(vm->last_update);
1131 vm->last_update = fence;
1134 if (!list_empty(&vm->relocated))
1140 amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1141 adev->vm_manager.root_level);
1142 amdgpu_job_free(job);
1147 * amdgpu_vm_find_entry - find the entry for an address
1149 * @p: see amdgpu_pte_update_params definition
1150 * @addr: virtual address in question
1151 * @entry: resulting entry or NULL
1152 * @parent: parent entry
1154 * Find the vm_pt entry and its parent for the given address.
1156 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1157 struct amdgpu_vm_pt **entry,
1158 struct amdgpu_vm_pt **parent)
1160 unsigned level = p->adev->vm_manager.root_level;
1163 *entry = &p->vm->root;
1164 while ((*entry)->entries) {
1165 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1168 *entry = &(*entry)->entries[addr >> shift];
1169 addr &= (1ULL << shift) - 1;
1172 if (level != AMDGPU_VM_PTB)
1177 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1179 * @p: see amdgpu_pte_update_params definition
1180 * @entry: vm_pt entry to check
1181 * @parent: parent entry
1182 * @nptes: number of PTEs updated with this operation
1183 * @dst: destination address where the PTEs should point to
1184 * @flags: access flags for the PTEs
1186 * Check if we can update the PD with a huge page.
1188 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1189 struct amdgpu_vm_pt *entry,
1190 struct amdgpu_vm_pt *parent,
1191 unsigned nptes, uint64_t dst,
1196 /* In the case of a mixed PT the PDE must point to it */
1197 if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1198 nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1199 /* Set the huge page flag to stop scanning at this PDE */
1200 flags |= AMDGPU_PDE_PTE;
1203 if (!(flags & AMDGPU_PDE_PTE)) {
1205 /* Add the entry to the relocated list to update it. */
1206 entry->huge = false;
1207 list_move(&entry->base.vm_status, &p->vm->relocated);
1213 amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1215 pde = (entry - parent->entries) * 8;
1216 if (parent->base.bo->shadow)
1217 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1218 p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
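
/*
 * Illustrative example (assuming the usual 9-bit block size, i.e. 512 PTEs
 * per page table): when all 512 PTEs of a table would map one physically
 * contiguous 512 * 4KB = 2MB range, the code above writes that range
 * directly into the PDE with AMDGPU_PDE_PTE set, so the walker never
 * descends into the page table for it.
 */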
1222 * amdgpu_vm_update_ptes - make sure that page tables are valid
1224 * @params: see amdgpu_pte_update_params definition
1225 * @start: start of GPU address range
1226 * @end: end of GPU address range
1227 * @dst: destination address to map to, the next dst inside the function
1228 * @flags: mapping flags
1230 * Update the page tables in the range @start - @end.
1233 * 0 for success, -EINVAL for failure.
1235 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1236 uint64_t start, uint64_t end,
1237 uint64_t dst, uint64_t flags)
1239 struct amdgpu_device *adev = params->adev;
1240 const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1242 uint64_t addr, pe_start;
1243 struct amdgpu_bo *pt;
1246 /* walk over the address space and update the page tables */
1247 for (addr = start; addr < end; addr += nptes,
1248 dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1249 struct amdgpu_vm_pt *entry, *parent;
1251 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1255 if ((addr & ~mask) == (end & ~mask))
1258 nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1260 amdgpu_vm_handle_huge_pages(params, entry, parent,
1262 /* We don't need to update PTEs for huge pages */
1266 pt = entry->base.bo;
1267 pe_start = (addr & mask) * 8;
1269 params->func(params, pt->shadow, pe_start, dst, nptes,
1270 AMDGPU_GPU_PAGE_SIZE, flags);
1271 params->func(params, pt, pe_start, dst, nptes,
1272 AMDGPU_GPU_PAGE_SIZE, flags);
1279 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1281 * @params: see amdgpu_pte_update_params definition
1283 * @start: first PTE to handle
1284 * @end: last PTE to handle
1285 * @dst: addr those PTEs should point to
1286 * @flags: hw mapping flags
1289 * 0 for success, -EINVAL for failure.
1291 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
1292 uint64_t start, uint64_t end,
1293 uint64_t dst, uint64_t flags)
1296 * The MC L1 TLB supports variable sized pages, based on a fragment
1297 * field in the PTE. When this field is set to a non-zero value, page
1298 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1299 * flags are considered valid for all PTEs within the fragment range
1300 * and corresponding mappings are assumed to be physically contiguous.
1302 * The L1 TLB can store a single PTE for the whole fragment,
1303 * significantly increasing the space available for translation
1304 * caching. This leads to large improvements in throughput when the
1305 * TLB is under pressure.
1307 * The L2 TLB distributes small and large fragments into two
1308 * asymmetric partitions. The large fragment cache is significantly
1309 * larger. Thus, we try to use large fragments wherever possible.
1310 * Userspace can support this by aligning virtual base address and
1311 * allocation size to the fragment size.
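 *
 * Illustrative example (not tied to a specific ASIC): for a mapping that
 * starts at GPU page 16 and spans 100 pages, ffs(16) - 1 = 4 and
 * fls64(100) - 1 = 6, so the first chunk below uses frag = 4, i.e. a
 * fragment granularity of 1 << (12 + 4) = 64KB covering 16 pages; the
 * loop then recomputes the fragment for the remainder.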
1313 unsigned max_frag = params->adev->vm_manager.fragment_size;
1316 /* system pages are not physically contiguous */
1317 if (params->src || !(flags & AMDGPU_PTE_VALID))
1318 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1320 while (start != end) {
1321 uint64_t frag_flags, frag_end;
1324 /* This intentionally wraps around if no bit is set */
1325 frag = min((unsigned)ffs(start) - 1,
1326 (unsigned)fls64(end - start) - 1);
1327 if (frag >= max_frag) {
1328 frag_flags = AMDGPU_PTE_FRAG(max_frag);
1329 frag_end = end & ~((1ULL << max_frag) - 1);
1331 frag_flags = AMDGPU_PTE_FRAG(frag);
1332 frag_end = start + (1 << frag);
1335 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1336 flags | frag_flags);
1340 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1348 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1350 * @adev: amdgpu_device pointer
1351 * @exclusive: fence we need to sync to
1352 * @pages_addr: DMA addresses to use for mapping
1354 * @start: start of mapped range
1355 * @last: last mapped entry
1356 * @flags: flags for the entries
1357 * @addr: addr to set the area to
1358 * @fence: optional resulting fence
1360 * Fill in the page table entries between @start and @last.
1363 * 0 for success, -EINVAL for failure.
1365 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1366 struct dma_fence *exclusive,
1367 dma_addr_t *pages_addr,
1368 struct amdgpu_vm *vm,
1369 uint64_t start, uint64_t last,
1370 uint64_t flags, uint64_t addr,
1371 struct dma_fence **fence)
1373 struct amdgpu_ring *ring;
1374 void *owner = AMDGPU_FENCE_OWNER_VM;
1375 unsigned nptes, ncmds, ndw;
1376 struct amdgpu_job *job;
1377 struct amdgpu_pte_update_params params;
1378 struct dma_fence *f = NULL;
1381 memset(&params, 0, sizeof(params));
1385 /* sync to everything on unmapping */
1386 if (!(flags & AMDGPU_PTE_VALID))
1387 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1389 if (vm->use_cpu_for_update) {
1390 /* params.src is used as a flag to indicate system memory */
1394 /* Wait for PT BOs to be free. PTs share the same resv. object
1397 r = amdgpu_vm_wait_pd(adev, vm, owner);
1401 params.func = amdgpu_vm_cpu_set_ptes;
1402 params.pages_addr = pages_addr;
1403 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1407 ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1409 nptes = last - start + 1;
1412 * reserve space for two commands every (1 << BLOCK_SIZE)
1413 * entries or 2k dwords (whatever is smaller)
1415 * The second command is for the shadow pagetables.
1417 if (vm->root.base.bo->shadow)
1418 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1420 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1426 /* copy commands needed */
1427 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1432 params.func = amdgpu_vm_do_copy_ptes;
1435 /* set page commands needed */
1438 /* extra commands for begin/end fragments */
1439 if (vm->root.base.bo->shadow)
1440 ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
1442 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1444 params.func = amdgpu_vm_do_set_ptes;
1447 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1451 params.ib = &job->ibs[0];
1457 /* Put the PTEs at the end of the IB. */
1458 i = ndw - nptes * 2;
1459 pte = (uint64_t *)&(job->ibs->ptr[i]);
1460 params.src = job->ibs->gpu_addr + i * 4;
1462 for (i = 0; i < nptes; ++i) {
1463 pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1464 AMDGPU_GPU_PAGE_SIZE);
1470 r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1474 r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1479 r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1483 r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1487 amdgpu_ring_pad_ib(ring, params.ib);
1488 WARN_ON(params.ib->length_dw > ndw);
1489 r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1493 amdgpu_bo_fence(vm->root.base.bo, f, true);
1494 dma_fence_put(*fence);
1499 amdgpu_job_free(job);
1504 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1506 * @adev: amdgpu_device pointer
1507 * @exclusive: fence we need to sync to
1508 * @pages_addr: DMA addresses to use for mapping
1510 * @mapping: mapped range and flags to use for the update
1511 * @flags: HW flags for the mapping
1512 * @nodes: array of drm_mm_nodes with the MC addresses
1513 * @fence: optional resulting fence
1515 * Split the mapping into smaller chunks so that each update fits
1519 * 0 for success, -EINVAL for failure.
1521 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1522 struct dma_fence *exclusive,
1523 dma_addr_t *pages_addr,
1524 struct amdgpu_vm *vm,
1525 struct amdgpu_bo_va_mapping *mapping,
1527 struct drm_mm_node *nodes,
1528 struct dma_fence **fence)
1530 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1531 uint64_t pfn, start = mapping->start;
1534 /* normally, bo_va->flags should only contain the READABLE and WRITEABLE
1535 * bits, but just in case we filter the flags here first
1537 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1538 flags &= ~AMDGPU_PTE_READABLE;
1539 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1540 flags &= ~AMDGPU_PTE_WRITEABLE;
1542 flags &= ~AMDGPU_PTE_EXECUTABLE;
1543 flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1545 flags &= ~AMDGPU_PTE_MTYPE_MASK;
1546 flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1548 if ((mapping->flags & AMDGPU_PTE_PRT) &&
1549 (adev->asic_type >= CHIP_VEGA10)) {
1550 flags |= AMDGPU_PTE_PRT;
1551 flags &= ~AMDGPU_PTE_VALID;
1554 trace_amdgpu_vm_bo_update(mapping);
1556 pfn = mapping->offset >> PAGE_SHIFT;
1558 while (pfn >= nodes->size) {
1565 dma_addr_t *dma_addr = NULL;
1566 uint64_t max_entries;
1567 uint64_t addr, last;
1570 addr = nodes->start << PAGE_SHIFT;
1571 max_entries = (nodes->size - pfn) *
1572 AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1575 max_entries = S64_MAX;
1581 max_entries = min(max_entries, 16ull * 1024ull);
1583 count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1585 uint64_t idx = pfn + count;
1587 if (pages_addr[idx] !=
1588 (pages_addr[idx - 1] + PAGE_SIZE))
1592 if (count < min_linear_pages) {
1593 addr = pfn << PAGE_SHIFT;
1594 dma_addr = pages_addr;
1596 addr = pages_addr[pfn];
1597 max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1600 } else if (flags & AMDGPU_PTE_VALID) {
1601 addr += adev->vm_manager.vram_base_offset;
1602 addr += pfn << PAGE_SHIFT;
1605 last = min((uint64_t)mapping->last, start + max_entries - 1);
1606 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1607 start, last, flags, addr,
1612 pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1613 if (nodes && nodes->size == pfn) {
1619 } while (unlikely(start != mapping->last + 1));
1625 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1627 * @adev: amdgpu_device pointer
1628 * @bo_va: requested BO and VM object
1629 * @clear: if true clear the entries
1631 * Fill in the page table entries for @bo_va.
1634 * 0 for success, -EINVAL for failure.
1636 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1637 struct amdgpu_bo_va *bo_va,
1640 struct amdgpu_bo *bo = bo_va->base.bo;
1641 struct amdgpu_vm *vm = bo_va->base.vm;
1642 struct amdgpu_bo_va_mapping *mapping;
1643 dma_addr_t *pages_addr = NULL;
1644 struct ttm_mem_reg *mem;
1645 struct drm_mm_node *nodes;
1646 struct dma_fence *exclusive, **last_update;
1655 struct ttm_dma_tt *ttm;
1658 nodes = mem->mm_node;
1659 if (mem->mem_type == TTM_PL_TT) {
1660 ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1661 pages_addr = ttm->dma_address;
1663 exclusive = reservation_object_get_excl(bo->tbo.resv);
1667 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1671 if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1672 last_update = &vm->last_update;
1674 last_update = &bo_va->last_pt_update;
1676 if (!clear && bo_va->base.moved) {
1677 bo_va->base.moved = false;
1678 list_splice_init(&bo_va->valids, &bo_va->invalids);
1680 } else if (bo_va->cleared != clear) {
1681 list_splice_init(&bo_va->valids, &bo_va->invalids);
1684 list_for_each_entry(mapping, &bo_va->invalids, list) {
1685 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1686 mapping, flags, nodes,
1692 if (vm->use_cpu_for_update) {
1695 amdgpu_asic_flush_hdp(adev, NULL);
1698 spin_lock(&vm->moved_lock);
1699 list_del_init(&bo_va->base.vm_status);
1700 spin_unlock(&vm->moved_lock);
1702 /* If the BO is not in its preferred location add it back to
1703 * the evicted list so that it gets validated again on the
1704 * next command submission.
1706 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1707 uint32_t mem_type = bo->tbo.mem.mem_type;
1709 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1710 list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1712 list_add(&bo_va->base.vm_status, &vm->idle);
1715 list_splice_init(&bo_va->invalids, &bo_va->valids);
1716 bo_va->cleared = clear;
1718 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1719 list_for_each_entry(mapping, &bo_va->valids, list)
1720 trace_amdgpu_vm_bo_mapping(mapping);
1727 * amdgpu_vm_update_prt_state - update the global PRT state
1729 * @adev: amdgpu_device pointer
1731 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1733 unsigned long flags;
1736 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1737 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1738 adev->gmc.gmc_funcs->set_prt(adev, enable);
1739 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1743 * amdgpu_vm_prt_get - add a PRT user
1745 * @adev: amdgpu_device pointer
1747 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1749 if (!adev->gmc.gmc_funcs->set_prt)
1752 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1753 amdgpu_vm_update_prt_state(adev);
1757 * amdgpu_vm_prt_put - drop a PRT user
1759 * @adev: amdgpu_device pointer
1761 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1763 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1764 amdgpu_vm_update_prt_state(adev);
1768 * amdgpu_vm_prt_cb - callback for updating the PRT status
1770 * @fence: fence for the callback
1771 * @_cb: the callback structure
1773 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1775 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1777 amdgpu_vm_prt_put(cb->adev);
1782 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1784 * @adev: amdgpu_device pointer
1785 * @fence: fence for the callback
1787 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1788 struct dma_fence *fence)
1790 struct amdgpu_prt_cb *cb;
1792 if (!adev->gmc.gmc_funcs->set_prt)
1795 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1797 /* Last resort when we are OOM */
1799 dma_fence_wait(fence, false);
1801 amdgpu_vm_prt_put(adev);
1804 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1806 amdgpu_vm_prt_cb(fence, &cb->cb);
1811 * amdgpu_vm_free_mapping - free a mapping
1813 * @adev: amdgpu_device pointer
1815 * @mapping: mapping to be freed
1816 * @fence: fence of the unmap operation
1818 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1820 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1821 struct amdgpu_vm *vm,
1822 struct amdgpu_bo_va_mapping *mapping,
1823 struct dma_fence *fence)
1825 if (mapping->flags & AMDGPU_PTE_PRT)
1826 amdgpu_vm_add_prt_cb(adev, fence);
1831 * amdgpu_vm_prt_fini - finish all prt mappings
1833 * @adev: amdgpu_device pointer
1836 * Register a cleanup callback to disable PRT support after VM dies.
1838 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1840 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1841 struct dma_fence *excl, **shared;
1842 unsigned i, shared_count;
1845 r = reservation_object_get_fences_rcu(resv, &excl,
1846 &shared_count, &shared);
1848 /* Not enough memory to grab the fence list; as a last resort,
1849 * block for all the fences to complete.
1851 reservation_object_wait_timeout_rcu(resv, true, false,
1852 MAX_SCHEDULE_TIMEOUT);
1856 /* Add a callback for each fence in the reservation object */
1857 amdgpu_vm_prt_get(adev);
1858 amdgpu_vm_add_prt_cb(adev, excl);
1860 for (i = 0; i < shared_count; ++i) {
1861 amdgpu_vm_prt_get(adev);
1862 amdgpu_vm_add_prt_cb(adev, shared[i]);
1869 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1871 * @adev: amdgpu_device pointer
1873 * @fence: optional resulting fence (unchanged if no work needed to be done
1874 * or if an error occurred)
1876 * Make sure all freed BOs are cleared in the PT.
1877 * PTs have to be reserved and mutex must be locked!
1883 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1884 struct amdgpu_vm *vm,
1885 struct dma_fence **fence)
1887 struct amdgpu_bo_va_mapping *mapping;
1888 uint64_t init_pte_value = 0;
1889 struct dma_fence *f = NULL;
1892 while (!list_empty(&vm->freed)) {
1893 mapping = list_first_entry(&vm->freed,
1894 struct amdgpu_bo_va_mapping, list);
1895 list_del(&mapping->list);
1897 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1898 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1900 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1901 mapping->start, mapping->last,
1902 init_pte_value, 0, &f);
1903 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1911 dma_fence_put(*fence);
1922 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1924 * @adev: amdgpu_device pointer
1927 * Make sure all BOs which are moved are updated in the PTs.
1932 * PTs have to be reserved!
1934 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1935 struct amdgpu_vm *vm)
1937 struct amdgpu_bo_va *bo_va, *tmp;
1938 struct list_head moved;
1942 INIT_LIST_HEAD(&moved);
1943 spin_lock(&vm->moved_lock);
1944 list_splice_init(&vm->moved, &moved);
1945 spin_unlock(&vm->moved_lock);
1947 list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
1948 struct reservation_object *resv = bo_va->base.bo->tbo.resv;
1950 /* Per VM BOs never need to be cleared in the page tables */
1951 if (resv == vm->root.base.bo->tbo.resv)
1953 /* Try to reserve the BO to avoid clearing its ptes */
1954 else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1956 /* Somebody else is using the BO right now */
1960 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1962 spin_lock(&vm->moved_lock);
1963 list_splice(&moved, &vm->moved);
1964 spin_unlock(&vm->moved_lock);
1968 if (!clear && resv != vm->root.base.bo->tbo.resv)
1969 reservation_object_unlock(resv);
1977 * amdgpu_vm_bo_add - add a bo to a specific vm
1979 * @adev: amdgpu_device pointer
1981 * @bo: amdgpu buffer object
1983 * Add @bo into the requested vm.
1984 * Add @bo to the list of bos associated with the vm
1987 * Newly added bo_va or NULL for failure
1989 * Object has to be reserved!
1991 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1992 struct amdgpu_vm *vm,
1993 struct amdgpu_bo *bo)
1995 struct amdgpu_bo_va *bo_va;
1997 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1998 if (bo_va == NULL) {
2001 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2003 bo_va->ref_count = 1;
2004 INIT_LIST_HEAD(&bo_va->valids);
2005 INIT_LIST_HEAD(&bo_va->invalids);
2012 * amdgpu_vm_bo_insert_map - insert a new mapping
2014 * @adev: amdgpu_device pointer
2015 * @bo_va: bo_va to store the address
2016 * @mapping: the mapping to insert
2018 * Insert a new mapping into all structures.
2020 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2021 struct amdgpu_bo_va *bo_va,
2022 struct amdgpu_bo_va_mapping *mapping)
2024 struct amdgpu_vm *vm = bo_va->base.vm;
2025 struct amdgpu_bo *bo = bo_va->base.bo;
2027 mapping->bo_va = bo_va;
2028 list_add(&mapping->list, &bo_va->invalids);
2029 amdgpu_vm_it_insert(mapping, &vm->va);
2031 if (mapping->flags & AMDGPU_PTE_PRT)
2032 amdgpu_vm_prt_get(adev);
2034 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2035 !bo_va->base.moved) {
2036 spin_lock(&vm->moved_lock);
2037 list_move(&bo_va->base.vm_status, &vm->moved);
2038 spin_unlock(&vm->moved_lock);
2040 trace_amdgpu_vm_bo_map(bo_va, mapping);
2044 * amdgpu_vm_bo_map - map bo inside a vm
2046 * @adev: amdgpu_device pointer
2047 * @bo_va: bo_va to store the address
2048 * @saddr: where to map the BO
2049 * @offset: requested offset in the BO
2050 * @size: BO size in bytes
2051 * @flags: attributes of pages (read/write/valid/etc.)
2053 * Add a mapping of the BO at the specified addr into the VM.
2056 * 0 for success, error for failure.
2058 * Object has to be reserved and unreserved outside!
2060 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2061 struct amdgpu_bo_va *bo_va,
2062 uint64_t saddr, uint64_t offset,
2063 uint64_t size, uint64_t flags)
2065 struct amdgpu_bo_va_mapping *mapping, *tmp;
2066 struct amdgpu_bo *bo = bo_va->base.bo;
2067 struct amdgpu_vm *vm = bo_va->base.vm;
2070 /* validate the parameters */
2071 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2072 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2075 /* make sure the object fits at this offset */
2076 eaddr = saddr + size - 1;
2077 if (saddr >= eaddr ||
2078 (bo && offset + size > amdgpu_bo_size(bo)))
2081 saddr /= AMDGPU_GPU_PAGE_SIZE;
2082 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2084 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2086 /* bo and tmp overlap, invalid addr */
2087 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2088 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2089 tmp->start, tmp->last + 1);
2093 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2097 mapping->start = saddr;
2098 mapping->last = eaddr;
2099 mapping->offset = offset;
2100 mapping->flags = flags;
2102 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2108 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2110 * @adev: amdgpu_device pointer
2111 * @bo_va: bo_va to store the address
2112 * @saddr: where to map the BO
2113 * @offset: requested offset in the BO
2114 * @size: BO size in bytes
2115 * @flags: attributes of pages (read/write/valid/etc.)
2117 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2118 * mappings as we do so.
2121 * 0 for success, error for failure.
2123 * Object has to be reserved and unreserved outside!
2125 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2126 struct amdgpu_bo_va *bo_va,
2127 uint64_t saddr, uint64_t offset,
2128 uint64_t size, uint64_t flags)
2130 struct amdgpu_bo_va_mapping *mapping;
2131 struct amdgpu_bo *bo = bo_va->base.bo;
2135 /* validate the parameters */
2136 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2137 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2140 /* make sure the object fits at this offset */
2141 eaddr = saddr + size - 1;
2142 if (saddr >= eaddr ||
2143 (bo && offset + size > amdgpu_bo_size(bo)))
2146 /* Allocate all the needed memory */
2147 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2151 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2157 saddr /= AMDGPU_GPU_PAGE_SIZE;
2158 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2160 mapping->start = saddr;
2161 mapping->last = eaddr;
2162 mapping->offset = offset;
2163 mapping->flags = flags;
2165 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2171 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2173 * @adev: amdgpu_device pointer
2174 * @bo_va: bo_va to remove the address from
2175 * @saddr: where the BO is mapped
2177 * Remove a mapping of the BO at the specified addr from the VM.
2180 * 0 for success, error for failure.
2182 * Object has to be reserved and unreserved outside!
2184 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2185 struct amdgpu_bo_va *bo_va,
2188 struct amdgpu_bo_va_mapping *mapping;
2189 struct amdgpu_vm *vm = bo_va->base.vm;
2192 saddr /= AMDGPU_GPU_PAGE_SIZE;
2194 list_for_each_entry(mapping, &bo_va->valids, list) {
2195 if (mapping->start == saddr)
2199 if (&mapping->list == &bo_va->valids) {
2202 list_for_each_entry(mapping, &bo_va->invalids, list) {
2203 if (mapping->start == saddr)
2207 if (&mapping->list == &bo_va->invalids)
2211 list_del(&mapping->list);
2212 amdgpu_vm_it_remove(mapping, &vm->va);
2213 mapping->bo_va = NULL;
2214 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2217 list_add(&mapping->list, &vm->freed);
2219 amdgpu_vm_free_mapping(adev, vm, mapping,
2220 bo_va->last_pt_update);
2226 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2228 * @adev: amdgpu_device pointer
2229 * @vm: VM structure to use
2230 * @saddr: start of the range
2231 * @size: size of the range
2233 * Remove all mappings in a range, split them as appropriate.
2236 * 0 for success, error for failure.
2238 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2239 struct amdgpu_vm *vm,
2240 uint64_t saddr, uint64_t size)
2242 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2246 eaddr = saddr + size - 1;
2247 saddr /= AMDGPU_GPU_PAGE_SIZE;
2248 eaddr /= AMDGPU_GPU_PAGE_SIZE;
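
/*
 * Illustrative example: clearing pages [0x140, 0x17f] out of a mapping that
 * covered pages [0x100, 0x1ff] leaves a "before" mapping for [0x100, 0x13f]
 * with the original offset and an "after" mapping for [0x180, 0x1ff] whose
 * buffer offset is advanced past the 0x80 skipped pages.
 */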
2250 /* Allocate all the needed memory */
2251 before = kzalloc(sizeof(*before), GFP_KERNEL);
2254 INIT_LIST_HEAD(&before->list);
2256 after = kzalloc(sizeof(*after), GFP_KERNEL);
2261 INIT_LIST_HEAD(&after->list);
2263 /* Now gather all removed mappings */
2264 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2266 /* Remember mapping split at the start */
2267 if (tmp->start < saddr) {
2268 before->start = tmp->start;
2269 before->last = saddr - 1;
2270 before->offset = tmp->offset;
2271 before->flags = tmp->flags;
2272 before->bo_va = tmp->bo_va;
2273 list_add(&before->list, &tmp->bo_va->invalids);
2276 /* Remember mapping split at the end */
2277 if (tmp->last > eaddr) {
2278 after->start = eaddr + 1;
2279 after->last = tmp->last;
2280 after->offset = tmp->offset;
2281 after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2282 after->flags = tmp->flags;
2283 after->bo_va = tmp->bo_va;
2284 list_add(&after->list, &tmp->bo_va->invalids);
2287 list_del(&tmp->list);
2288 list_add(&tmp->list, &removed);
2290 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2293 /* And free them up */
2294 list_for_each_entry_safe(tmp, next, &removed, list) {
2295 amdgpu_vm_it_remove(tmp, &vm->va);
2296 list_del(&tmp->list);
2298 if (tmp->start < saddr)
2300 if (tmp->last > eaddr)
2304 list_add(&tmp->list, &vm->freed);
2305 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2308 /* Insert partial mapping before the range */
2309 if (!list_empty(&before->list)) {
2310 amdgpu_vm_it_insert(before, &vm->va);
2311 if (before->flags & AMDGPU_PTE_PRT)
2312 amdgpu_vm_prt_get(adev);
2317 /* Insert partial mapping after the range */
2318 if (!list_empty(&after->list)) {
2319 amdgpu_vm_it_insert(after, &vm->va);
2320 if (after->flags & AMDGPU_PTE_PRT)
2321 amdgpu_vm_prt_get(adev);
2330 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2332 * @vm: the requested VM
2333 * @addr: the address
2335 * Find a mapping by its address.
2338 * The amdgpu_bo_va_mapping matching for addr or NULL
2341 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2344 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2348 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2350 * @vm: the requested vm
2351 * @ticket: CS ticket
2353 * Trace all mappings of BOs reserved during a command submission.
2355 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2357 struct amdgpu_bo_va_mapping *mapping;
2359 if (!trace_amdgpu_vm_bo_cs_enabled())
2362 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2363 mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2364 if (mapping->bo_va && mapping->bo_va->base.bo) {
2365 struct amdgpu_bo *bo;
2367 bo = mapping->bo_va->base.bo;
2368 if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2372 trace_amdgpu_vm_bo_cs(mapping);
2377 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2379 * @adev: amdgpu_device pointer
2380 * @bo_va: requested bo_va
2382 * Remove @bo_va->bo from the requested vm.
2384 * Object has to be reserved!
2386 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2387 struct amdgpu_bo_va *bo_va)
2389 struct amdgpu_bo_va_mapping *mapping, *next;
2390 struct amdgpu_vm *vm = bo_va->base.vm;
2392 list_del(&bo_va->base.bo_list);
2394 spin_lock(&vm->moved_lock);
2395 list_del(&bo_va->base.vm_status);
2396 spin_unlock(&vm->moved_lock);
2398 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2399 list_del(&mapping->list);
2400 amdgpu_vm_it_remove(mapping, &vm->va);
2401 mapping->bo_va = NULL;
2402 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2403 list_add(&mapping->list, &vm->freed);
2405 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2406 list_del(&mapping->list);
2407 amdgpu_vm_it_remove(mapping, &vm->va);
2408 amdgpu_vm_free_mapping(adev, vm, mapping,
2409 bo_va->last_pt_update);
2412 dma_fence_put(bo_va->last_pt_update);
2417 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2419 * @adev: amdgpu_device pointer
2420 * @bo: amdgpu buffer object
2421 * @evicted: is the BO evicted
2423 * Mark @bo as invalid.
2425 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2426 struct amdgpu_bo *bo, bool evicted)
2428 struct amdgpu_vm_bo_base *bo_base;
2430 /* shadow bo doesn't have bo base, its validation needs its parent */
2431 if (bo->parent && bo->parent->shadow == bo)
2434 list_for_each_entry(bo_base, &bo->va, bo_list) {
2435 struct amdgpu_vm *vm = bo_base->vm;
2436 bool was_moved = bo_base->moved;
2438 bo_base->moved = true;
2439 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2440 if (bo->tbo.type == ttm_bo_type_kernel)
2441 list_move(&bo_base->vm_status, &vm->evicted);
2443 list_move_tail(&bo_base->vm_status,
2451 if (bo->tbo.type == ttm_bo_type_kernel) {
2452 list_move(&bo_base->vm_status, &vm->relocated);
2454 spin_lock(&bo_base->vm->moved_lock);
2455 list_move(&bo_base->vm_status, &vm->moved);
2456 spin_unlock(&bo_base->vm->moved_lock);
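/*
 * Editorial note on the state lists touched above: a per-VM BO whose shared
 * reservation was evicted is placed on the VM's "evicted" list; otherwise a
 * moved page-table BO (ttm_bo_type_kernel) goes to "relocated" so the
 * directories can be rewritten, while a moved user BO goes to "moved" under
 * moved_lock so its PTEs are refreshed on the next submission.
 */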
2462 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2467 * VM page table size as power of two
2469 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2471 /* Total bits covered by PD + PTs */
2472 unsigned bits = ilog2(vm_size) + 18;
2474 /* Make sure the PD is 4K in size up to 8GB address space.
2475 Above that split equally between the PD and PTs */
2476 if (vm_size <= 8)
2477 return (bits - 9);
2478 else
2479 return ((bits + 3) / 2);
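/*
 * Worked example (editorial, illustrative values): for an 8 GB VM,
 * bits = ilog2(8) + 18 = 21 and the small-VM branch returns 21 - 9 = 12,
 * i.e. a 4K page directory with 512 entries. For a 256 GB VM,
 * bits = ilog2(256) + 18 = 26 and the result is (26 + 3) / 2 = 14,
 * splitting the bits roughly equally between the PD and the PTs.
 */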
2483 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2485 * @adev: amdgpu_device pointer
2486 * @vm_size: the default VM size if it's set to auto
2487 * @fragment_size_default: Default PTE fragment size
2488 * @max_level: max VMPT level
2489 * @max_bits: max address space size in bits
2492 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2493 uint32_t fragment_size_default, unsigned max_level,
2498 /* adjust vm size first */
2499 if (amdgpu_vm_size != -1) {
2500 unsigned max_size = 1 << (max_bits - 30);
2502 vm_size = amdgpu_vm_size;
2503 if (vm_size > max_size) {
2504 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2505 amdgpu_vm_size, max_size);
2510 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2512 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2513 if (amdgpu_vm_block_size != -1)
2514 tmp >>= amdgpu_vm_block_size - 9;
2515 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2516 adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2517 switch (adev->vm_manager.num_level) {
2519 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2522 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2525 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2528 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2530 /* block size depends on vm size and hw setup */
2531 if (amdgpu_vm_block_size != -1)
2532 adev->vm_manager.block_size =
2533 min((unsigned)amdgpu_vm_block_size, max_bits
2534 - AMDGPU_GPU_PAGE_SHIFT
2535 - 9 * adev->vm_manager.num_level);
2536 else if (adev->vm_manager.num_level > 1)
2537 adev->vm_manager.block_size = 9;
2539 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2541 if (amdgpu_vm_fragment_size == -1)
2542 adev->vm_manager.fragment_size = fragment_size_default;
2544 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2546 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2547 vm_size, adev->vm_manager.num_level + 1,
2548 adev->vm_manager.block_size,
2549 adev->vm_manager.fragment_size);
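/*
 * Worked example (editorial; all values illustrative, the module parameters
 * amdgpu_vm_size, amdgpu_vm_block_size and amdgpu_vm_fragment_size assumed
 * to be -1): with vm_size = 256 GB and max_level = 3,
 * max_pfn = 256 << 18 = 2^26 pages, fls64(2^26) - 1 = 26,
 * DIV_ROUND_UP(26, 9) - 1 = 2, so num_level = 2 (root_level would then be
 * AMDGPU_VM_PDB1 in the switch above), block_size = 9 and fragment_size
 * takes the default, giving "vm size is 256 GB, 3 levels, block size is
 * 9-bit".
 */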
2553 * amdgpu_vm_init - initialize a vm instance
2555 * @adev: amdgpu_device pointer
2557 * @vm_context: Indicates if it is a GFX or Compute context
2558 * @pasid: Process address space identifier
2563 * 0 for success, error for failure.
2565 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2566 int vm_context, unsigned int pasid)
2568 struct amdgpu_bo_param bp;
2569 struct amdgpu_bo *root;
2570 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2571 AMDGPU_VM_PTE_COUNT(adev) * 8);
2572 unsigned ring_instance;
2573 struct amdgpu_ring *ring;
2574 struct drm_sched_rq *rq;
2579 vm->va = RB_ROOT_CACHED;
2580 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2581 vm->reserved_vmid[i] = NULL;
2582 INIT_LIST_HEAD(&vm->evicted);
2583 INIT_LIST_HEAD(&vm->relocated);
2584 spin_lock_init(&vm->moved_lock);
2585 INIT_LIST_HEAD(&vm->moved);
2586 INIT_LIST_HEAD(&vm->idle);
2587 INIT_LIST_HEAD(&vm->freed);
2589 /* create scheduler entity for page table updates */
2591 ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2592 ring_instance %= adev->vm_manager.vm_pte_num_rings;
2593 ring = adev->vm_manager.vm_pte_rings[ring_instance];
2594 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2595 r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2599 vm->pte_support_ats = false;
2601 if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2602 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2603 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2605 if (adev->asic_type == CHIP_RAVEN)
2606 vm->pte_support_ats = true;
2608 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2609 AMDGPU_VM_USE_CPU_FOR_GFX);
2611 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2612 vm->use_cpu_for_update ? "CPU" : "SDMA");
2613 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2614 "CPU update of VM recommended only for large BAR system\n");
2615 vm->last_update = NULL;
2617 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2618 if (vm->use_cpu_for_update)
2619 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2620 else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
2621 flags |= AMDGPU_GEM_CREATE_SHADOW;
2623 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2624 memset(&bp, 0, sizeof(bp));
2626 bp.byte_align = align;
2627 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2629 bp.type = ttm_bo_type_kernel;
2631 r = amdgpu_bo_create(adev, &bp, &root);
2633 goto error_free_sched_entity;
2635 r = amdgpu_bo_reserve(root, true);
2637 goto error_free_root;
2639 r = amdgpu_vm_clear_bo(adev, vm, root,
2640 adev->vm_manager.root_level,
2641 vm->pte_support_ats);
2643 goto error_unreserve;
2645 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2646 amdgpu_bo_unreserve(vm->root.base.bo);
2649 unsigned long flags;
2651 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2652 r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2654 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2656 goto error_free_root;
2661 INIT_KFIFO(vm->faults);
2662 vm->fault_credit = 16;
2667 amdgpu_bo_unreserve(vm->root.base.bo);
2670 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2671 amdgpu_bo_unref(&vm->root.base.bo);
2672 vm->root.base.bo = NULL;
2674 error_free_sched_entity:
2675 drm_sched_entity_destroy(&vm->entity);
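/*
 * Illustrative sketch, not part of the original file: a typical caller (for
 * example the KMS open path) pairs amdgpu_vm_init() with amdgpu_vm_fini().
 * The surrounding function name and the pasid value are assumptions.
 */
static int example_vm_lifetime(struct amdgpu_device *adev,
                               struct amdgpu_fpriv *fpriv,
                               unsigned int pasid)
{
        int r;

        r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
        if (r)
                return r;

        /* ... the VM is now usable for command submission ... */

        amdgpu_vm_fini(adev, &fpriv->vm);
        return 0;
}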
2681 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2683 * @adev: amdgpu_device pointer
2686 * This only works on GFX VMs that don't have any BOs added and no
2687 * page tables allocated yet.
2689 * Changes the following VM parameters:
2690 * - use_cpu_for_update
2691 * - pte_supports_ats
2692 * - pasid (old PASID is released, because compute manages its own PASIDs)
2694 * Reinitializes the page directory to reflect the changed ATS
2698 * 0 for success, -errno for errors.
2700 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2702 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2705 r = amdgpu_bo_reserve(vm->root.base.bo, true);
2710 if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2715 /* Check if PD needs to be reinitialized and do it before
2716 * changing any other state, in case it fails.
2718 if (pte_support_ats != vm->pte_support_ats) {
2719 r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2720 adev->vm_manager.root_level,
2726 /* Update VM state */
2727 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2728 AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2729 vm->pte_support_ats = pte_support_ats;
2730 DRM_DEBUG_DRIVER("VM update mode is %s\n",
2731 vm->use_cpu_for_update ? "CPU" : "SDMA");
2732 WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2733 "CPU update of VM recommended only for large BAR system\n");
2736 unsigned long flags;
2738 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2739 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2740 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2745 /* Free the shadow bo for compute VM */
2746 amdgpu_bo_unref(&vm->root.base.bo->shadow);
2749 amdgpu_bo_unreserve(vm->root.base.bo);
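/*
 * Illustrative sketch, not part of the original file: handing an empty GFX
 * VM over to compute (e.g. when a compute client acquires a process VM).
 * The wrapper name is hypothetical; the call must happen before any BOs are
 * mapped or page tables are allocated, as documented above.
 */
static int example_acquire_compute_vm(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm)
{
        /* reservation of the root PD is handled inside */
        return amdgpu_vm_make_compute(adev, vm);
}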
2754 * amdgpu_vm_free_levels - free PD/PT levels
2756 * @adev: amdgpu device structure
2757 * @parent: PD/PT starting level to free
2758 * @level: level of parent structure
2760 * Free the page directory or page table level and all sub levels.
2762 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2763 struct amdgpu_vm_pt *parent,
2766 unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2768 if (parent->base.bo) {
2769 list_del(&parent->base.bo_list);
2770 list_del(&parent->base.vm_status);
2771 amdgpu_bo_unref(&parent->base.bo->shadow);
2772 amdgpu_bo_unref(&parent->base.bo);
2775 if (parent->entries)
2776 for (i = 0; i < num_entries; i++)
2777 amdgpu_vm_free_levels(adev, &parent->entries[i],
2778 level + 1);
2780 kvfree(parent->entries);
2784 * amdgpu_vm_fini - tear down a vm instance
2786 * @adev: amdgpu_device pointer
2790 * Unbind the VM and remove all bos from the vm bo list
2792 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2794 struct amdgpu_bo_va_mapping *mapping, *tmp;
2795 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2796 struct amdgpu_bo *root;
2800 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2802 /* Clear pending page faults from IH when the VM is destroyed */
2803 while (kfifo_get(&vm->faults, &fault))
2804 amdgpu_ih_clear_fault(adev, fault);
2807 unsigned long flags;
2809 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2810 idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2811 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2814 drm_sched_entity_destroy(&vm->entity);
2816 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2817 dev_err(adev->dev, "still active bo inside vm\n");
2819 rbtree_postorder_for_each_entry_safe(mapping, tmp,
2820 &vm->va.rb_root, rb) {
2821 list_del(&mapping->list);
2822 amdgpu_vm_it_remove(mapping, &vm->va);
2825 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2826 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2827 amdgpu_vm_prt_fini(adev, vm);
2828 prt_fini_needed = false;
2831 list_del(&mapping->list);
2832 amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2835 root = amdgpu_bo_ref(vm->root.base.bo);
2836 r = amdgpu_bo_reserve(root, true);
2838 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2840 amdgpu_vm_free_levels(adev, &vm->root,
2841 adev->vm_manager.root_level);
2842 amdgpu_bo_unreserve(root);
2844 amdgpu_bo_unref(&root);
2845 dma_fence_put(vm->last_update);
2846 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2847 amdgpu_vmid_free_reserved(adev, vm, i);
2851 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2853 * @adev: amdgpu_device pointer
2854 * @pasid: PASID to identify the VM
2856 * This function is expected to be called in interrupt context.
2859 * True if there was fault credit, false otherwise
2861 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2864 struct amdgpu_vm *vm;
2866 spin_lock(&adev->vm_manager.pasid_lock);
2867 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2869 /* VM not found, can't track fault credit */
2870 spin_unlock(&adev->vm_manager.pasid_lock);
2874 /* No lock needed. Only accessed by the IRQ handler */
2875 if (!vm->fault_credit) {
2876 /* Too many faults in this VM */
2877 spin_unlock(&adev->vm_manager.pasid_lock);
2882 spin_unlock(&adev->vm_manager.pasid_lock);
2887 * amdgpu_vm_manager_init - init the VM manager
2889 * @adev: amdgpu_device pointer
2891 * Initialize the VM manager structures
2893 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2897 amdgpu_vmid_mgr_init(adev);
2899 adev->vm_manager.fence_context =
2900 dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2901 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2902 adev->vm_manager.seqno[i] = 0;
2904 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2905 spin_lock_init(&adev->vm_manager.prt_lock);
2906 atomic_set(&adev->vm_manager.num_prt_users, 0);
2908 /* Unless overridden by the user, compute VM page tables are by default
2909 * updated by the CPU only on large-BAR systems
2911 #ifdef CONFIG_X86_64
2912 if (amdgpu_vm_update_mode == -1) {
2913 if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2914 adev->vm_manager.vm_update_mode =
2915 AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2917 adev->vm_manager.vm_update_mode = 0;
2919 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2921 adev->vm_manager.vm_update_mode = 0;
2924 idr_init(&adev->vm_manager.pasid_idr);
2925 spin_lock_init(&adev->vm_manager.pasid_lock);
2929 * amdgpu_vm_manager_fini - cleanup VM manager
2931 * @adev: amdgpu_device pointer
2933 * Cleanup the VM manager and free resources.
2935 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2937 WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2938 idr_destroy(&adev->vm_manager.pasid_idr);
2940 amdgpu_vmid_mgr_fini(adev);
2944 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2946 * @dev: drm device pointer
2947 * @data: drm_amdgpu_vm
2948 * @filp: drm file pointer
2951 * 0 for success, -errno for errors.
2953 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2955 union drm_amdgpu_vm *args = data;
2956 struct amdgpu_device *adev = dev->dev_private;
2957 struct amdgpu_fpriv *fpriv = filp->driver_priv;
2960 switch (args->in.op) {
2961 case AMDGPU_VM_OP_RESERVE_VMID:
2962 /* currently, we only have the requirement to reserve a vmid from the gfxhub */
2963 r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2967 case AMDGPU_VM_OP_UNRESERVE_VMID:
2968 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
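/*
 * Editorial usage sketch (userspace side, shown here only as a comment):
 * a process could reserve a VMID through libdrm roughly like
 *
 *     union drm_amdgpu_vm args = { 0 };
 *
 *     args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *     drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *
 * and release it again with AMDGPU_VM_OP_UNRESERVE_VMID. The file
 * descriptor handling is assumed; the exact libdrm helpers may differ.
 */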
2978 * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2980 * @adev: amdgpu_device pointer
2981 * @pasid: PASID identifier for VM
2982 * @task_info: task_info to fill.
2984 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
2985 struct amdgpu_task_info *task_info)
2987 struct amdgpu_vm *vm;
2989 spin_lock(&adev->vm_manager.pasid_lock);
2991 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2993 *task_info = vm->task_info;
2995 spin_unlock(&adev->vm_manager.pasid_lock);
2999 * amdgpu_vm_set_task_info - Sets VMs task info.
3001 * @vm: vm for which to set the info
3003 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3005 if (!vm->task_info.pid) {
3006 vm->task_info.pid = current->pid;
3007 get_task_comm(vm->task_info.task_name, current);
3009 if (current->group_leader->mm == current->mm) {
3010 vm->task_info.tgid = current->group_leader->pid;
3011 get_task_comm(vm->task_info.process_name, current->group_leader);