2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
32 #include <drm/amdgpu_drm.h>
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
39 * GPUVM is similar to the legacy gart on older asics, however
40 * rather than there being a single global gart table
41 * for the entire GPU, there are multiple VM page tables active
42 * at any given time. The VM page tables can contain a mix of
43 * VRAM pages and system memory pages, and system memory pages
44 * can be mapped as snooped (cached system pages) or unsnooped
45 * (uncached system pages).
46 * Each VM has an ID associated with it and there is a page table
47 * associated with each VMID. When executing a command buffer,
48 * the kernel tells the ring what VMID to use for that command
49 * buffer. VMIDs are allocated dynamically as commands are submitted.
50 * The userspace drivers maintain their own address space and the kernel
51 * sets up their page tables accordingly when they submit their
52 * command buffers and a VMID is assigned.
53 * Cayman/Trinity support up to 8 active VMs at any given time;
57 #define START(node) ((node)->start)
58 #define LAST(node) ((node)->last)
60 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
61 START, LAST, static, amdgpu_vm_it)
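/*
 * INTERVAL_TREE_DEFINE() above generates the static helpers
 * amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first()
 * and amdgpu_vm_it_iter_next(), keyed on mapping->start/mapping->last.
 * Illustrative sketch only (variable names are placeholders) of walking all
 * mappings that overlap a page range; amdgpu_vm_bo_lookup_mapping() below
 * wraps the single-address case:
 *
 *	struct amdgpu_bo_va_mapping *m;
 *	unsigned overlaps = 0;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, first_pfn, last_pfn); m;
 *	     m = amdgpu_vm_it_iter_next(m, first_pfn, last_pfn))
 *		++overlaps;
 */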
66 /* Local structure. Encapsulate some VM table update parameters to reduce
67 * the number of function parameters
69 struct amdgpu_pte_update_params {
70 /* amdgpu device we do this update for */
71 struct amdgpu_device *adev;
72 /* optional amdgpu_vm we do this update for */
74 /* address where to copy page table entries from */
76 /* indirect buffer to fill with commands */
78 /* Function which actually does the update */
79 void (*func)(struct amdgpu_pte_update_params *params,
80 struct amdgpu_bo *bo, uint64_t pe,
81 uint64_t addr, unsigned count, uint32_t incr,
83 /* The next two are used during VM update by CPU
84 * DMA addresses to use for mapping
85 * Kernel pointer of PD/PT BO that needs to be updated
87 dma_addr_t *pages_addr;
91 /* Helper to disable partial resident texture feature from a fence callback */
92 struct amdgpu_prt_cb {
93 struct amdgpu_device *adev;
94 struct dma_fence_cb cb;
97 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
103 INIT_LIST_HEAD(&base->bo_list);
104 INIT_LIST_HEAD(&base->vm_status);
108 list_add_tail(&base->bo_list, &bo->va);
110 if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
113 if (bo->preferred_domains &
114 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
118 * we checked all the prerequisites, but it looks like this per vm bo
119 * is currently evicted. add the bo to the evicted list to make sure it
120 * is validated on next vm use to avoid fault.
122 spin_lock(&vm->status_lock);
123 list_move_tail(&base->vm_status, &vm->evicted);
124 spin_unlock(&vm->status_lock);
128 * amdgpu_vm_level_shift - return the addr shift for each level
130 * @adev: amdgpu_device pointer
132 * Returns the number of bits the pfn needs to be right shifted for a level.
134 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
137 unsigned shift = 0xff;
143 shift = 9 * (AMDGPU_VM_PDB0 - level) +
144 adev->vm_manager.block_size;
150 dev_err(adev->dev, "the level%d isn't supported.\n", level);
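/*
 * Worked example, assuming the common 9-bit block_size: the shifts returned
 * are PTB -> 0, PDB0 -> 9, PDB1 -> 18 and PDB2 -> 27, i.e. how far a pfn is
 * shifted right (before masking) to obtain the entry index at that level.
 */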
157 * amdgpu_vm_num_entries - return the number of entries in a PD/PT
159 * @adev: amdgpu_device pointer
161 * Calculate the number of entries in a page directory or page table.
163 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
166 unsigned shift = amdgpu_vm_level_shift(adev,
167 adev->vm_manager.root_level);
169 if (level == adev->vm_manager.root_level)
170 /* For the root directory */
171 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
172 else if (level != AMDGPU_VM_PTB)
173 /* Everything in between */
176 /* For the page tables on the leaves */
177 return AMDGPU_VM_PTE_COUNT(adev);
181 * amdgpu_vm_bo_size - returns the size of the BOs in bytes
183 * @adev: amdgpu_device pointer
185 * Calculate the size of the BO for a page directory or page table in bytes.
187 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
189 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
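/*
 * Worked example: with a 9-bit block size a page table holds
 * AMDGPU_VM_PTE_COUNT(adev) = 512 entries of 8 bytes each, so
 * amdgpu_vm_bo_size() returns 512 * 8 = 4096 bytes for a PTB,
 * exactly one GPU page.
 */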
193 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
195 * @vm: vm providing the BOs
196 * @validated: head of validation list
197 * @entry: entry to add
199 * Add the page directory to the list of BOs to
200 * validate for command submission.
202 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
203 struct list_head *validated,
204 struct amdgpu_bo_list_entry *entry)
206 entry->robj = vm->root.base.bo;
208 entry->tv.bo = &entry->robj->tbo;
209 entry->tv.shared = true;
210 entry->user_pages = NULL;
211 list_add(&entry->tv.head, validated);
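/*
 * Illustrative sketch (not part of the driver) of how a command submission
 * typically combines amdgpu_vm_get_pd_bo() with amdgpu_vm_validate_pt_bos();
 * "validate_cb" and "param" are placeholder names:
 *
 *	struct amdgpu_bo_list_entry vm_pd;
 *	struct list_head list;
 *
 *	INIT_LIST_HEAD(&list);
 *	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 *	(reserve everything on the list, e.g. with ttm_eu_reserve_buffers())
 *	r = amdgpu_vm_validate_pt_bos(adev, vm, validate_cb, param);
 */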
215 * amdgpu_vm_validate_pt_bos - validate the page table BOs
217 * @adev: amdgpu device pointer
218 * @vm: vm providing the BOs
219 * @validate: callback to do the validation
220 * @param: parameter for the validation callback
222 * Validate the page table BOs on command submission if necessary.
224 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
225 int (*validate)(void *p, struct amdgpu_bo *bo),
228 struct ttm_bo_global *glob = adev->mman.bdev.glob;
231 spin_lock(&vm->status_lock);
232 while (!list_empty(&vm->evicted)) {
233 struct amdgpu_vm_bo_base *bo_base;
234 struct amdgpu_bo *bo;
236 bo_base = list_first_entry(&vm->evicted,
237 struct amdgpu_vm_bo_base,
239 spin_unlock(&vm->status_lock);
244 r = validate(param, bo);
248 spin_lock(&glob->lru_lock);
249 ttm_bo_move_to_lru_tail(&bo->tbo);
251 ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
252 spin_unlock(&glob->lru_lock);
255 if (bo->tbo.type == ttm_bo_type_kernel &&
256 vm->use_cpu_for_update) {
257 r = amdgpu_bo_kmap(bo, NULL);
262 spin_lock(&vm->status_lock);
263 if (bo->tbo.type != ttm_bo_type_kernel)
264 list_move(&bo_base->vm_status, &vm->moved);
266 list_move(&bo_base->vm_status, &vm->relocated);
268 spin_unlock(&vm->status_lock);
274 * amdgpu_vm_ready - check VM is ready for updates
278 * Check if all VM PDs/PTs are ready for updates
280 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
284 spin_lock(&vm->status_lock);
285 ready = list_empty(&vm->evicted);
286 spin_unlock(&vm->status_lock);
292 * amdgpu_vm_clear_bo - initially clear the PDs/PTs
294 * @adev: amdgpu_device pointer
296 * @level: level this BO is at
298 * Root PD needs to be reserved when calling this.
300 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
301 struct amdgpu_vm *vm, struct amdgpu_bo *bo,
302 unsigned level, bool pte_support_ats)
304 struct ttm_operation_ctx ctx = { true, false };
305 struct dma_fence *fence = NULL;
306 unsigned entries, ats_entries;
307 struct amdgpu_ring *ring;
308 struct amdgpu_job *job;
312 addr = amdgpu_bo_gpu_offset(bo);
313 entries = amdgpu_bo_size(bo) / 8;
315 if (pte_support_ats) {
316 if (level == adev->vm_manager.root_level) {
317 ats_entries = amdgpu_vm_level_shift(adev, level);
318 ats_entries += AMDGPU_GPU_PAGE_SHIFT;
319 ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
320 ats_entries = min(ats_entries, entries);
321 entries -= ats_entries;
323 ats_entries = entries;
330 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
332 r = reservation_object_reserve_shared(bo->tbo.resv);
336 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
340 r = amdgpu_job_alloc_with_ib(adev, 64, &job);
347 ats_value = AMDGPU_PTE_DEFAULT_ATC;
348 if (level != AMDGPU_VM_PTB)
349 ats_value |= AMDGPU_PDE_PTE;
351 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
352 ats_entries, 0, ats_value);
353 addr += ats_entries * 8;
357 amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
360 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
362 WARN_ON(job->ibs[0].length_dw > 64);
363 r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
364 AMDGPU_FENCE_OWNER_UNDEFINED, false);
368 r = amdgpu_job_submit(job, ring, &vm->entity,
369 AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
373 amdgpu_bo_fence(bo, fence, true);
374 dma_fence_put(fence);
377 return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
378 level, pte_support_ats);
383 amdgpu_job_free(job);
390 * amdgpu_vm_alloc_levels - allocate the PD/PT levels
392 * @adev: amdgpu_device pointer
394 * @saddr: start of the address range
395 * @eaddr: end of the address range
397 * Make sure the page directories and page tables are allocated
399 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
400 struct amdgpu_vm *vm,
401 struct amdgpu_vm_pt *parent,
402 uint64_t saddr, uint64_t eaddr,
403 unsigned level, bool ats)
405 unsigned shift = amdgpu_vm_level_shift(adev, level);
406 unsigned pt_idx, from, to;
410 if (!parent->entries) {
411 unsigned num_entries = amdgpu_vm_num_entries(adev, level);
413 parent->entries = kvmalloc_array(num_entries,
414 sizeof(struct amdgpu_vm_pt),
415 GFP_KERNEL | __GFP_ZERO);
416 if (!parent->entries)
418 memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
421 from = saddr >> shift;
423 if (from >= amdgpu_vm_num_entries(adev, level) ||
424 to >= amdgpu_vm_num_entries(adev, level))
428 saddr = saddr & ((1 << shift) - 1);
429 eaddr = eaddr & ((1 << shift) - 1);
431 flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
432 if (vm->use_cpu_for_update)
433 flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
435 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
436 AMDGPU_GEM_CREATE_SHADOW);
438 /* walk over the address space and allocate the page tables */
439 for (pt_idx = from; pt_idx <= to; ++pt_idx) {
440 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
441 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
442 struct amdgpu_bo *pt;
444 if (!entry->base.bo) {
445 struct amdgpu_bo_param bp;
447 memset(&bp, 0, sizeof(bp));
448 bp.size = amdgpu_vm_bo_size(adev, level);
449 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
450 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
452 bp.type = ttm_bo_type_kernel;
454 r = amdgpu_bo_create(adev, &bp, &pt);
458 r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
460 amdgpu_bo_unref(&pt->shadow);
461 amdgpu_bo_unref(&pt);
465 if (vm->use_cpu_for_update) {
466 r = amdgpu_bo_kmap(pt, NULL);
468 amdgpu_bo_unref(&pt->shadow);
469 amdgpu_bo_unref(&pt);
474 /* Keep a reference to the root directory to avoid
475 * freeing them up in the wrong order.
477 pt->parent = amdgpu_bo_ref(parent->base.bo);
479 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
480 spin_lock(&vm->status_lock);
481 list_move(&entry->base.vm_status, &vm->relocated);
482 spin_unlock(&vm->status_lock);
485 if (level < AMDGPU_VM_PTB) {
486 uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
487 uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
489 r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
490 sub_eaddr, level, ats);
500 * amdgpu_vm_alloc_pts - Allocate page tables.
502 * @adev: amdgpu_device pointer
503 * @vm: VM to allocate page tables for
504 * @saddr: Start address which needs to be allocated
505 * @size: Size from start address we need.
507 * Make sure the page tables are allocated.
509 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
510 struct amdgpu_vm *vm,
511 uint64_t saddr, uint64_t size)
516 /* validate the parameters */
517 if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
520 eaddr = saddr + size - 1;
522 if (vm->pte_support_ats)
523 ats = saddr < AMDGPU_VA_HOLE_START;
525 saddr /= AMDGPU_GPU_PAGE_SIZE;
526 eaddr /= AMDGPU_GPU_PAGE_SIZE;
528 if (eaddr >= adev->vm_manager.max_pfn) {
529 dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
530 eaddr, adev->vm_manager.max_pfn);
534 return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
535 adev->vm_manager.root_level, ats);
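/*
 * Illustrative sketch (not part of the driver) of how a new mapping is
 * usually established, roughly mirroring the GEM VA ioctl path; error
 * handling is omitted and the values are hypothetical:
 *
 *	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
 *	if (!r)
 *		r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0, size, flags);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 */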
539 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
541 * @adev: amdgpu_device pointer
543 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
545 const struct amdgpu_ip_block *ip_block;
546 bool has_compute_vm_bug;
547 struct amdgpu_ring *ring;
550 has_compute_vm_bug = false;
552 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
554 /* Compute has a VM bug for GFX version < 7.
555 Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
556 if (ip_block->version->major <= 7)
557 has_compute_vm_bug = true;
558 else if (ip_block->version->major == 8)
559 if (adev->gfx.mec_fw_version < 673)
560 has_compute_vm_bug = true;
563 for (i = 0; i < adev->num_rings; i++) {
564 ring = adev->rings[i];
565 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
566 /* only compute rings */
567 ring->has_compute_vm_bug = has_compute_vm_bug;
569 ring->has_compute_vm_bug = false;
573 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
574 struct amdgpu_job *job)
576 struct amdgpu_device *adev = ring->adev;
577 unsigned vmhub = ring->funcs->vmhub;
578 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
579 struct amdgpu_vmid *id;
580 bool gds_switch_needed;
581 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
585 id = &id_mgr->ids[job->vmid];
586 gds_switch_needed = ring->funcs->emit_gds_switch && (
587 id->gds_base != job->gds_base ||
588 id->gds_size != job->gds_size ||
589 id->gws_base != job->gws_base ||
590 id->gws_size != job->gws_size ||
591 id->oa_base != job->oa_base ||
592 id->oa_size != job->oa_size);
594 if (amdgpu_vmid_had_gpu_reset(adev, id))
597 return vm_flush_needed || gds_switch_needed;
600 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
602 return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
606 * amdgpu_vm_flush - hardware flush the vm
608 * @ring: ring to use for flush
609 * @vmid: vmid number to use
610 * @pd_addr: address of the page directory
612 * Emit a VM flush when it is necessary.
614 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
616 struct amdgpu_device *adev = ring->adev;
617 unsigned vmhub = ring->funcs->vmhub;
618 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
619 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
620 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
621 id->gds_base != job->gds_base ||
622 id->gds_size != job->gds_size ||
623 id->gws_base != job->gws_base ||
624 id->gws_size != job->gws_size ||
625 id->oa_base != job->oa_base ||
626 id->oa_size != job->oa_size);
627 bool vm_flush_needed = job->vm_needs_flush;
628 bool pasid_mapping_needed = id->pasid != job->pasid ||
629 !id->pasid_mapping ||
630 !dma_fence_is_signaled(id->pasid_mapping);
631 struct dma_fence *fence = NULL;
632 unsigned patch_offset = 0;
635 if (amdgpu_vmid_had_gpu_reset(adev, id)) {
636 gds_switch_needed = true;
637 vm_flush_needed = true;
638 pasid_mapping_needed = true;
641 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
642 vm_flush_needed &= !!ring->funcs->emit_vm_flush;
643 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
644 ring->funcs->emit_wreg;
646 if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
649 if (ring->funcs->init_cond_exec)
650 patch_offset = amdgpu_ring_init_cond_exec(ring);
653 amdgpu_ring_emit_pipeline_sync(ring);
655 if (vm_flush_needed) {
656 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
657 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
660 if (pasid_mapping_needed)
661 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
663 if (vm_flush_needed || pasid_mapping_needed) {
664 r = amdgpu_fence_emit(ring, &fence, 0);
669 if (vm_flush_needed) {
670 mutex_lock(&id_mgr->lock);
671 dma_fence_put(id->last_flush);
672 id->last_flush = dma_fence_get(fence);
673 id->current_gpu_reset_count =
674 atomic_read(&adev->gpu_reset_counter);
675 mutex_unlock(&id_mgr->lock);
678 if (pasid_mapping_needed) {
679 id->pasid = job->pasid;
680 dma_fence_put(id->pasid_mapping);
681 id->pasid_mapping = dma_fence_get(fence);
683 dma_fence_put(fence);
685 if (ring->funcs->emit_gds_switch && gds_switch_needed) {
686 id->gds_base = job->gds_base;
687 id->gds_size = job->gds_size;
688 id->gws_base = job->gws_base;
689 id->gws_size = job->gws_size;
690 id->oa_base = job->oa_base;
691 id->oa_size = job->oa_size;
692 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
693 job->gds_size, job->gws_base,
694 job->gws_size, job->oa_base,
698 if (ring->funcs->patch_cond_exec)
699 amdgpu_ring_patch_cond_exec(ring, patch_offset);
701 /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
702 if (ring->funcs->emit_switch_buffer) {
703 amdgpu_ring_emit_switch_buffer(ring);
704 amdgpu_ring_emit_switch_buffer(ring);
710 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
713 * @bo: requested buffer object
715 * Find @bo inside the requested vm.
716 * Search inside the @bo's vm list for the requested vm
717 * Returns the found bo_va or NULL if none is found
719 * Object has to be reserved!
721 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
722 struct amdgpu_bo *bo)
724 struct amdgpu_bo_va *bo_va;
726 list_for_each_entry(bo_va, &bo->va, base.bo_list) {
727 if (bo_va->base.vm == vm) {
735 * amdgpu_vm_do_set_ptes - helper to call the right asic function
737 * @params: see amdgpu_pte_update_params definition
738 * @bo: PD/PT to update
739 * @pe: addr of the page entry
740 * @addr: dst addr to write into pe
741 * @count: number of page entries to update
742 * @incr: increase next addr by incr bytes
743 * @flags: hw access flags
745 * Traces the parameters and calls the right asic functions
746 * to setup the page table using the DMA.
748 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
749 struct amdgpu_bo *bo,
750 uint64_t pe, uint64_t addr,
751 unsigned count, uint32_t incr,
754 pe += amdgpu_bo_gpu_offset(bo);
755 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
758 amdgpu_vm_write_pte(params->adev, params->ib, pe,
759 addr | flags, count, incr);
762 amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
768 * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
770 * @params: see amdgpu_pte_update_params definition
771 * @bo: PD/PT to update
772 * @pe: addr of the page entry
773 * @addr: dst addr to write into pe
774 * @count: number of page entries to update
775 * @incr: increase next addr by incr bytes
776 * @flags: hw access flags
778 * Traces the parameters and calls the DMA function to copy the PTEs.
780 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
781 struct amdgpu_bo *bo,
782 uint64_t pe, uint64_t addr,
783 unsigned count, uint32_t incr,
786 uint64_t src = (params->src + (addr >> 12) * 8);
788 pe += amdgpu_bo_gpu_offset(bo);
789 trace_amdgpu_vm_copy_ptes(pe, src, count);
791 amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
795 * amdgpu_vm_map_gart - Resolve gart mapping of addr
797 * @pages_addr: optional DMA address to use for lookup
798 * @addr: the unmapped addr
800 * Look up the physical address of the page that the pte resolves
801 * to and return the pointer for the page table entry.
803 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
807 /* page table offset */
808 result = pages_addr[addr >> PAGE_SHIFT];
810 /* in case cpu page size != gpu page size*/
811 result |= addr & (~PAGE_MASK);
813 result &= 0xFFFFFFFFFFFFF000ULL;
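/*
 * Illustrative example for amdgpu_vm_map_gart(), assuming a kernel built
 * with 64K CPU pages and 4K GPU pages: addr = 0x15000 looks up
 * pages_addr[1] (the second CPU page), ORs in the in-page offset 0x5000,
 * and the final mask drops the low 12 bits so the result stays aligned to
 * the 4K GPU page containing the address. With 4K CPU pages the OR and the
 * mask are effectively no-ops.
 */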
819 * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
821 * @params: see amdgpu_pte_update_params definition
822 * @bo: PD/PT to update
823 * @pe: kmap addr of the page entry
824 * @addr: dst addr to write into pe
825 * @count: number of page entries to update
826 * @incr: increase next addr by incr bytes
827 * @flags: hw access flags
829 * Write count number of PT/PD entries directly.
831 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
832 struct amdgpu_bo *bo,
833 uint64_t pe, uint64_t addr,
834 unsigned count, uint32_t incr,
840 pe += (unsigned long)amdgpu_bo_kptr(bo);
842 trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
844 for (i = 0; i < count; i++) {
845 value = params->pages_addr ?
846 amdgpu_vm_map_gart(params->pages_addr, addr) :
848 amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
854 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
857 struct amdgpu_sync sync;
860 amdgpu_sync_create(&sync);
861 amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
862 r = amdgpu_sync_wait(&sync, true);
863 amdgpu_sync_free(&sync);
869 * amdgpu_vm_update_pde - update a single level in the hierarchy
871 * @param: parameters for the update
873 * @parent: parent directory
874 * @entry: entry to update
876 * Makes sure the requested entry in parent is up to date.
878 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
879 struct amdgpu_vm *vm,
880 struct amdgpu_vm_pt *parent,
881 struct amdgpu_vm_pt *entry)
883 struct amdgpu_bo *bo = parent->base.bo, *pbo;
884 uint64_t pde, pt, flags;
887 /* Don't update huge pages here */
891 for (level = 0, pbo = bo->parent; pbo; ++level)
894 level += params->adev->vm_manager.root_level;
895 pt = amdgpu_bo_gpu_offset(entry->base.bo);
896 flags = AMDGPU_PTE_VALID;
897 amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
898 pde = (entry - parent->entries) * 8;
900 params->func(params, bo->shadow, pde, pt, 1, 0, flags);
901 params->func(params, bo, pde, pt, 1, 0, flags);
905 * amdgpu_vm_invalidate_level - mark all PD levels as invalid
909 * Mark all PD levels as invalid after an error.
911 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
912 struct amdgpu_vm *vm,
913 struct amdgpu_vm_pt *parent,
916 unsigned pt_idx, num_entries;
919 * Recurse into the subdirectories. This recursion is harmless because
920 * we only have a maximum of 5 layers.
922 num_entries = amdgpu_vm_num_entries(adev, level);
923 for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
924 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
929 spin_lock(&vm->status_lock);
930 if (list_empty(&entry->base.vm_status))
931 list_add(&entry->base.vm_status, &vm->relocated);
932 spin_unlock(&vm->status_lock);
933 amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
938 * amdgpu_vm_update_directories - make sure that all directories are valid
940 * @adev: amdgpu_device pointer
943 * Makes sure all directories are up to date.
944 * Returns 0 for success, error for failure.
946 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
947 struct amdgpu_vm *vm)
949 struct amdgpu_pte_update_params params;
950 struct amdgpu_job *job;
954 if (list_empty(&vm->relocated))
958 memset(&params, 0, sizeof(params));
961 if (vm->use_cpu_for_update) {
962 r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
966 params.func = amdgpu_vm_cpu_set_ptes;
969 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
973 params.ib = &job->ibs[0];
974 params.func = amdgpu_vm_do_set_ptes;
977 spin_lock(&vm->status_lock);
978 while (!list_empty(&vm->relocated)) {
979 struct amdgpu_vm_bo_base *bo_base, *parent;
980 struct amdgpu_vm_pt *pt, *entry;
981 struct amdgpu_bo *bo;
983 bo_base = list_first_entry(&vm->relocated,
984 struct amdgpu_vm_bo_base,
986 list_del_init(&bo_base->vm_status);
987 spin_unlock(&vm->status_lock);
989 bo = bo_base->bo->parent;
991 spin_lock(&vm->status_lock);
995 parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
997 pt = container_of(parent, struct amdgpu_vm_pt, base);
998 entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1000 amdgpu_vm_update_pde(&params, vm, pt, entry);
1002 spin_lock(&vm->status_lock);
1003 if (!vm->use_cpu_for_update &&
1004 (ndw - params.ib->length_dw) < 32)
1007 spin_unlock(&vm->status_lock);
1009 if (vm->use_cpu_for_update) {
1012 amdgpu_asic_flush_hdp(adev, NULL);
1013 } else if (params.ib->length_dw == 0) {
1014 amdgpu_job_free(job);
1016 struct amdgpu_bo *root = vm->root.base.bo;
1017 struct amdgpu_ring *ring;
1018 struct dma_fence *fence;
1020 ring = container_of(vm->entity.sched, struct amdgpu_ring,
1023 amdgpu_ring_pad_ib(ring, params.ib);
1024 amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1025 AMDGPU_FENCE_OWNER_VM, false);
1026 WARN_ON(params.ib->length_dw > ndw);
1027 r = amdgpu_job_submit(job, ring, &vm->entity,
1028 AMDGPU_FENCE_OWNER_VM, &fence);
1032 amdgpu_bo_fence(root, fence, true);
1033 dma_fence_put(vm->last_update);
1034 vm->last_update = fence;
1037 if (!list_empty(&vm->relocated))
1043 amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1044 adev->vm_manager.root_level);
1045 amdgpu_job_free(job);
1050 * amdgpu_vm_get_entry - find the entry for an address
1052 * @p: see amdgpu_pte_update_params definition
1053 * @addr: virtual address in question
1054 * @entry: resulting entry or NULL
1055 * @parent: parent entry
1057 * Find the vm_pt entry and its parent for the given address.
1059 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1060 struct amdgpu_vm_pt **entry,
1061 struct amdgpu_vm_pt **parent)
1063 unsigned level = p->adev->vm_manager.root_level;
1066 *entry = &p->vm->root;
1067 while ((*entry)->entries) {
1068 unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1071 *entry = &(*entry)->entries[addr >> shift];
1072 addr &= (1ULL << shift) - 1;
1075 if (level != AMDGPU_VM_PTB)
1080 * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1082 * @p: see amdgpu_pte_update_params definition
1083 * @entry: vm_pt entry to check
1084 * @parent: parent entry
1085 * @nptes: number of PTEs updated with this operation
1086 * @dst: destination address where the PTEs should point to
1087 * @flags: access flags for the PTEs
1089 * Check if we can update the PD with a huge page.
1091 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1092 struct amdgpu_vm_pt *entry,
1093 struct amdgpu_vm_pt *parent,
1094 unsigned nptes, uint64_t dst,
1099 /* In the case of a mixed PT the PDE must point to it*/
1100 if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1101 nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1102 /* Set the huge page flag to stop scanning at this PDE */
1103 flags |= AMDGPU_PDE_PTE;
1106 if (!(flags & AMDGPU_PDE_PTE)) {
1108 /* Add the entry to the relocated list to update it. */
1109 entry->huge = false;
1110 spin_lock(&p->vm->status_lock);
1111 list_move(&entry->base.vm_status, &p->vm->relocated);
1112 spin_unlock(&p->vm->status_lock);
1118 amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1120 pde = (entry - parent->entries) * 8;
1121 if (parent->base.bo->shadow)
1122 p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1123 p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
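/*
 * Worked example for the huge page handling above (a sketch of the intent,
 * assuming a 9-bit block size): one PTB covers 512 * 4K = 2M. When a single
 * update spans all 512 PTEs of a PTB in one linear stretch (nptes ==
 * AMDGPU_VM_PTE_COUNT and no GART copy source) on Vega10 or later, the PDE
 * for that PTB is written with AMDGPU_PDE_PTE and points straight at the 2M
 * region, so the walker stops a level early and the PTEs themselves do not
 * need updating (see the huge page check in amdgpu_vm_update_ptes below).
 */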
1127 * amdgpu_vm_update_ptes - make sure that page tables are valid
1129 * @params: see amdgpu_pte_update_params definition
1131 * @start: start of GPU address range
1132 * @end: end of GPU address range
1133 * @dst: destination address to map to, the next dst inside the function
1134 * @flags: mapping flags
1136 * Update the page tables in the range @start - @end.
1137 * Returns 0 for success, -EINVAL for failure.
1139 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1140 uint64_t start, uint64_t end,
1141 uint64_t dst, uint64_t flags)
1143 struct amdgpu_device *adev = params->adev;
1144 const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1146 uint64_t addr, pe_start;
1147 struct amdgpu_bo *pt;
1150 /* walk over the address space and update the page tables */
1151 for (addr = start; addr < end; addr += nptes,
1152 dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1153 struct amdgpu_vm_pt *entry, *parent;
1155 amdgpu_vm_get_entry(params, addr, &entry, &parent);
1159 if ((addr & ~mask) == (end & ~mask))
1162 nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1164 amdgpu_vm_handle_huge_pages(params, entry, parent,
1166 /* We don't need to update PTEs for huge pages */
1170 pt = entry->base.bo;
1171 pe_start = (addr & mask) * 8;
1173 params->func(params, pt->shadow, pe_start, dst, nptes,
1174 AMDGPU_GPU_PAGE_SIZE, flags);
1175 params->func(params, pt, pe_start, dst, nptes,
1176 AMDGPU_GPU_PAGE_SIZE, flags);
1183 * amdgpu_vm_frag_ptes - add fragment information to PTEs
1185 * @params: see amdgpu_pte_update_params definition
1187 * @start: first PTE to handle
1188 * @end: last PTE to handle
1189 * @dst: addr those PTEs should point to
1190 * @flags: hw mapping flags
1191 * Returns 0 for success, -EINVAL for failure.
1193 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
1194 uint64_t start, uint64_t end,
1195 uint64_t dst, uint64_t flags)
1198 * The MC L1 TLB supports variable sized pages, based on a fragment
1199 * field in the PTE. When this field is set to a non-zero value, page
1200 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1201 * flags are considered valid for all PTEs within the fragment range
1202 * and corresponding mappings are assumed to be physically contiguous.
1204 * The L1 TLB can store a single PTE for the whole fragment,
1205 * significantly increasing the space available for translation
1206 * caching. This leads to large improvements in throughput when the
1207 * TLB is under pressure.
1209 * The L2 TLB distributes small and large fragments into two
1210 * asymmetric partitions. The large fragment cache is significantly
1211 * larger. Thus, we try to use large fragments wherever possible.
1212 * Userspace can support this by aligning virtual base address and
1213 * allocation size to the fragment size.
1215 unsigned max_frag = params->adev->vm_manager.fragment_size;
1218 /* system pages are not physically contiguous */
1219 if (params->src || !(flags & AMDGPU_PTE_VALID))
1220 return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1222 while (start != end) {
1223 uint64_t frag_flags, frag_end;
1226 /* This intentionally wraps around if no bit is set */
1227 frag = min((unsigned)ffs(start) - 1,
1228 (unsigned)fls64(end - start) - 1);
1229 if (frag >= max_frag) {
1230 frag_flags = AMDGPU_PTE_FRAG(max_frag);
1231 frag_end = end & ~((1ULL << max_frag) - 1);
1233 frag_flags = AMDGPU_PTE_FRAG(frag);
1234 frag_end = start + (1 << frag);
1237 r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1238 flags | frag_flags);
1242 dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
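/*
 * Worked example for the fragment loop above, assuming fragment_size == 9
 * (2M fragments): for start = 0x200 and end = 0x400 (GPU page numbers),
 * ffs(0x200) - 1 = 9 and fls64(0x200) - 1 = 9, so frag = 9 >= max_frag and
 * the whole 512-page range is written with AMDGPU_PTE_FRAG(9); the L1 TLB
 * can then cover it with a single entry.
 */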
1250 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1252 * @adev: amdgpu_device pointer
1253 * @exclusive: fence we need to sync to
1254 * @pages_addr: DMA addresses to use for mapping
1256 * @start: start of mapped range
1257 * @last: last mapped entry
1258 * @flags: flags for the entries
1259 * @addr: addr to set the area to
1260 * @fence: optional resulting fence
1262 * Fill in the page table entries between @start and @last.
1263 * Returns 0 for success, -EINVAL for failure.
1265 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1266 struct dma_fence *exclusive,
1267 dma_addr_t *pages_addr,
1268 struct amdgpu_vm *vm,
1269 uint64_t start, uint64_t last,
1270 uint64_t flags, uint64_t addr,
1271 struct dma_fence **fence)
1273 struct amdgpu_ring *ring;
1274 void *owner = AMDGPU_FENCE_OWNER_VM;
1275 unsigned nptes, ncmds, ndw;
1276 struct amdgpu_job *job;
1277 struct amdgpu_pte_update_params params;
1278 struct dma_fence *f = NULL;
1281 memset(&params, 0, sizeof(params));
1285 /* sync to everything on unmapping */
1286 if (!(flags & AMDGPU_PTE_VALID))
1287 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1289 if (vm->use_cpu_for_update) {
1290 /* params.src is used as a flag to indicate system memory */
1294 /* Wait for PT BOs to be free. PTs share the same resv. object
1297 r = amdgpu_vm_wait_pd(adev, vm, owner);
1301 params.func = amdgpu_vm_cpu_set_ptes;
1302 params.pages_addr = pages_addr;
1303 return amdgpu_vm_frag_ptes(&params, start, last + 1,
1307 ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1309 nptes = last - start + 1;
1312 * reserve space for two commands every (1 << BLOCK_SIZE)
1313 * entries or 2k dwords (whatever is smaller)
1315 * The second command is for the shadow pagetables.
1317 if (vm->root.base.bo->shadow)
1318 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1320 ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1326 /* copy commands needed */
1327 ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1332 params.func = amdgpu_vm_do_copy_ptes;
1335 /* set page commands needed */
1338 /* extra commands for begin/end fragments */
1339 ndw += 2 * 10 * adev->vm_manager.fragment_size;
1341 params.func = amdgpu_vm_do_set_ptes;
1344 r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1348 params.ib = &job->ibs[0];
1354 /* Put the PTEs at the end of the IB. */
1355 i = ndw - nptes * 2;
1356 pte = (uint64_t *)&(job->ibs->ptr[i]);
1357 params.src = job->ibs->gpu_addr + i * 4;
1359 for (i = 0; i < nptes; ++i) {
1360 pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1361 AMDGPU_GPU_PAGE_SIZE);
1367 r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1371 r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1376 r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1380 r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1384 amdgpu_ring_pad_ib(ring, params.ib);
1385 WARN_ON(params.ib->length_dw > ndw);
1386 r = amdgpu_job_submit(job, ring, &vm->entity,
1387 AMDGPU_FENCE_OWNER_VM, &f);
1391 amdgpu_bo_fence(vm->root.base.bo, f, true);
1392 dma_fence_put(*fence);
1397 amdgpu_job_free(job);
1402 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1404 * @adev: amdgpu_device pointer
1405 * @exclusive: fence we need to sync to
1406 * @pages_addr: DMA addresses to use for mapping
1408 * @mapping: mapped range and flags to use for the update
1409 * @flags: HW flags for the mapping
1410 * @nodes: array of drm_mm_nodes with the MC addresses
1411 * @fence: optional resulting fence
1413 * Split the mapping into smaller chunks so that each update fits
1415 * Returns 0 for success, -EINVAL for failure.
1417 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1418 struct dma_fence *exclusive,
1419 dma_addr_t *pages_addr,
1420 struct amdgpu_vm *vm,
1421 struct amdgpu_bo_va_mapping *mapping,
1423 struct drm_mm_node *nodes,
1424 struct dma_fence **fence)
1426 unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1427 uint64_t pfn, start = mapping->start;
1430 /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits here,
1431 * but just in case something else slipped in, we filter the flags first
1433 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1434 flags &= ~AMDGPU_PTE_READABLE;
1435 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1436 flags &= ~AMDGPU_PTE_WRITEABLE;
1438 flags &= ~AMDGPU_PTE_EXECUTABLE;
1439 flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1441 flags &= ~AMDGPU_PTE_MTYPE_MASK;
1442 flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1444 if ((mapping->flags & AMDGPU_PTE_PRT) &&
1445 (adev->asic_type >= CHIP_VEGA10)) {
1446 flags |= AMDGPU_PTE_PRT;
1447 flags &= ~AMDGPU_PTE_VALID;
1450 trace_amdgpu_vm_bo_update(mapping);
1452 pfn = mapping->offset >> PAGE_SHIFT;
1454 while (pfn >= nodes->size) {
1461 dma_addr_t *dma_addr = NULL;
1462 uint64_t max_entries;
1463 uint64_t addr, last;
1466 addr = nodes->start << PAGE_SHIFT;
1467 max_entries = (nodes->size - pfn) *
1468 (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1471 max_entries = S64_MAX;
1477 max_entries = min(max_entries, 16ull * 1024ull);
1478 for (count = 1; count < max_entries; ++count) {
1479 uint64_t idx = pfn + count;
1481 if (pages_addr[idx] !=
1482 (pages_addr[idx - 1] + PAGE_SIZE))
1486 if (count < min_linear_pages) {
1487 addr = pfn << PAGE_SHIFT;
1488 dma_addr = pages_addr;
1490 addr = pages_addr[pfn];
1491 max_entries = count;
1494 } else if (flags & AMDGPU_PTE_VALID) {
1495 addr += adev->vm_manager.vram_base_offset;
1496 addr += pfn << PAGE_SHIFT;
1499 last = min((uint64_t)mapping->last, start + max_entries - 1);
1500 r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1501 start, last, flags, addr,
1506 pfn += last - start + 1;
1507 if (nodes && nodes->size == pfn) {
1513 } while (unlikely(start != mapping->last + 1));
1519 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1521 * @adev: amdgpu_device pointer
1522 * @bo_va: requested BO and VM object
1523 * @clear: if true clear the entries
1525 * Fill in the page table entries for @bo_va.
1526 * Returns 0 for success, -EINVAL for failure.
1528 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1529 struct amdgpu_bo_va *bo_va,
1532 struct amdgpu_bo *bo = bo_va->base.bo;
1533 struct amdgpu_vm *vm = bo_va->base.vm;
1534 struct amdgpu_bo_va_mapping *mapping;
1535 dma_addr_t *pages_addr = NULL;
1536 struct ttm_mem_reg *mem;
1537 struct drm_mm_node *nodes;
1538 struct dma_fence *exclusive, **last_update;
1542 if (clear || !bo_va->base.bo) {
1547 struct ttm_dma_tt *ttm;
1549 mem = &bo_va->base.bo->tbo.mem;
1550 nodes = mem->mm_node;
1551 if (mem->mem_type == TTM_PL_TT) {
1552 ttm = container_of(bo_va->base.bo->tbo.ttm,
1553 struct ttm_dma_tt, ttm);
1554 pages_addr = ttm->dma_address;
1556 exclusive = reservation_object_get_excl(bo->tbo.resv);
1560 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1564 if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1565 last_update = &vm->last_update;
1567 last_update = &bo_va->last_pt_update;
1569 if (!clear && bo_va->base.moved) {
1570 bo_va->base.moved = false;
1571 list_splice_init(&bo_va->valids, &bo_va->invalids);
1573 } else if (bo_va->cleared != clear) {
1574 list_splice_init(&bo_va->valids, &bo_va->invalids);
1577 list_for_each_entry(mapping, &bo_va->invalids, list) {
1578 r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1579 mapping, flags, nodes,
1585 if (vm->use_cpu_for_update) {
1588 amdgpu_asic_flush_hdp(adev, NULL);
1591 spin_lock(&vm->status_lock);
1592 list_del_init(&bo_va->base.vm_status);
1594 /* If the BO is not in its preferred location add it back to
1595 * the evicted list so that it gets validated again on the
1596 * next command submission.
1598 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
1599 !(bo->preferred_domains &
1600 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
1601 list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1602 spin_unlock(&vm->status_lock);
1604 list_splice_init(&bo_va->invalids, &bo_va->valids);
1605 bo_va->cleared = clear;
1607 if (trace_amdgpu_vm_bo_mapping_enabled()) {
1608 list_for_each_entry(mapping, &bo_va->valids, list)
1609 trace_amdgpu_vm_bo_mapping(mapping);
1616 * amdgpu_vm_update_prt_state - update the global PRT state
1618 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1620 unsigned long flags;
1623 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1624 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1625 adev->gmc.gmc_funcs->set_prt(adev, enable);
1626 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1630 * amdgpu_vm_prt_get - add a PRT user
1632 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1634 if (!adev->gmc.gmc_funcs->set_prt)
1637 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1638 amdgpu_vm_update_prt_state(adev);
1642 * amdgpu_vm_prt_put - drop a PRT user
1644 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1646 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1647 amdgpu_vm_update_prt_state(adev);
1651 * amdgpu_vm_prt_cb - callback for updating the PRT status
1653 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1655 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1657 amdgpu_vm_prt_put(cb->adev);
1662 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1664 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1665 struct dma_fence *fence)
1667 struct amdgpu_prt_cb *cb;
1669 if (!adev->gmc.gmc_funcs->set_prt)
1672 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1674 /* Last resort when we are OOM */
1676 dma_fence_wait(fence, false);
1678 amdgpu_vm_prt_put(adev);
1681 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1683 amdgpu_vm_prt_cb(fence, &cb->cb);
1688 * amdgpu_vm_free_mapping - free a mapping
1690 * @adev: amdgpu_device pointer
1692 * @mapping: mapping to be freed
1693 * @fence: fence of the unmap operation
1695 * Free a mapping and make sure we decrease the PRT usage count if applicable.
1697 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1698 struct amdgpu_vm *vm,
1699 struct amdgpu_bo_va_mapping *mapping,
1700 struct dma_fence *fence)
1702 if (mapping->flags & AMDGPU_PTE_PRT)
1703 amdgpu_vm_add_prt_cb(adev, fence);
1708 * amdgpu_vm_prt_fini - finish all prt mappings
1710 * @adev: amdgpu_device pointer
1713 * Register a cleanup callback to disable PRT support after VM dies.
1715 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1717 struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1718 struct dma_fence *excl, **shared;
1719 unsigned i, shared_count;
1722 r = reservation_object_get_fences_rcu(resv, &excl,
1723 &shared_count, &shared);
1725 /* Not enough memory to grab the fence list, as last resort
1726 * block for all the fences to complete.
1728 reservation_object_wait_timeout_rcu(resv, true, false,
1729 MAX_SCHEDULE_TIMEOUT);
1733 /* Add a callback for each fence in the reservation object */
1734 amdgpu_vm_prt_get(adev);
1735 amdgpu_vm_add_prt_cb(adev, excl);
1737 for (i = 0; i < shared_count; ++i) {
1738 amdgpu_vm_prt_get(adev);
1739 amdgpu_vm_add_prt_cb(adev, shared[i]);
1746 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1748 * @adev: amdgpu_device pointer
1750 * @fence: optional resulting fence (unchanged if no work needed to be done
1751 * or if an error occurred)
1753 * Make sure all freed BOs are cleared in the PT.
1754 * Returns 0 for success.
1756 * PTs have to be reserved and mutex must be locked!
1758 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1759 struct amdgpu_vm *vm,
1760 struct dma_fence **fence)
1762 struct amdgpu_bo_va_mapping *mapping;
1763 uint64_t init_pte_value = 0;
1764 struct dma_fence *f = NULL;
1767 while (!list_empty(&vm->freed)) {
1768 mapping = list_first_entry(&vm->freed,
1769 struct amdgpu_bo_va_mapping, list);
1770 list_del(&mapping->list);
1772 if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1773 init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1775 r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1776 mapping->start, mapping->last,
1777 init_pte_value, 0, &f);
1778 amdgpu_vm_free_mapping(adev, vm, mapping, f);
1786 dma_fence_put(*fence);
1797 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1799 * @adev: amdgpu_device pointer
1801 * @sync: sync object to add fences to
1803 * Make sure all BOs which are moved are updated in the PTs.
1804 * Returns 0 for success.
1806 * PTs have to be reserved!
1808 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1809 struct amdgpu_vm *vm)
1814 spin_lock(&vm->status_lock);
1815 while (!list_empty(&vm->moved)) {
1816 struct amdgpu_bo_va *bo_va;
1817 struct reservation_object *resv;
1819 bo_va = list_first_entry(&vm->moved,
1820 struct amdgpu_bo_va, base.vm_status);
1821 spin_unlock(&vm->status_lock);
1823 resv = bo_va->base.bo->tbo.resv;
1825 /* Per VM BOs never need to be cleared in the page tables */
1826 if (resv == vm->root.base.bo->tbo.resv)
1828 /* Try to reserve the BO to avoid clearing its ptes */
1829 else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1831 /* Somebody else is using the BO right now */
1835 r = amdgpu_vm_bo_update(adev, bo_va, clear);
1839 if (!clear && resv != vm->root.base.bo->tbo.resv)
1840 reservation_object_unlock(resv);
1842 spin_lock(&vm->status_lock);
1844 spin_unlock(&vm->status_lock);
1850 * amdgpu_vm_bo_add - add a bo to a specific vm
1852 * @adev: amdgpu_device pointer
1854 * @bo: amdgpu buffer object
1856 * Add @bo into the requested vm.
1857 * Add @bo to the list of bos associated with the vm
1858 * Returns newly added bo_va or NULL for failure
1860 * Object has to be reserved!
1862 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1863 struct amdgpu_vm *vm,
1864 struct amdgpu_bo *bo)
1866 struct amdgpu_bo_va *bo_va;
1868 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1869 if (bo_va == NULL) {
1872 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1874 bo_va->ref_count = 1;
1875 INIT_LIST_HEAD(&bo_va->valids);
1876 INIT_LIST_HEAD(&bo_va->invalids);
1883 * amdgpu_vm_bo_insert_map - insert a new mapping
1885 * @adev: amdgpu_device pointer
1886 * @bo_va: bo_va to store the address
1887 * @mapping: the mapping to insert
1889 * Insert a new mapping into all structures.
1891 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1892 struct amdgpu_bo_va *bo_va,
1893 struct amdgpu_bo_va_mapping *mapping)
1895 struct amdgpu_vm *vm = bo_va->base.vm;
1896 struct amdgpu_bo *bo = bo_va->base.bo;
1898 mapping->bo_va = bo_va;
1899 list_add(&mapping->list, &bo_va->invalids);
1900 amdgpu_vm_it_insert(mapping, &vm->va);
1902 if (mapping->flags & AMDGPU_PTE_PRT)
1903 amdgpu_vm_prt_get(adev);
1905 if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1906 spin_lock(&vm->status_lock);
1907 if (list_empty(&bo_va->base.vm_status))
1908 list_add(&bo_va->base.vm_status, &vm->moved);
1909 spin_unlock(&vm->status_lock);
1911 trace_amdgpu_vm_bo_map(bo_va, mapping);
1915 * amdgpu_vm_bo_map - map bo inside a vm
1917 * @adev: amdgpu_device pointer
1918 * @bo_va: bo_va to store the address
1919 * @saddr: where to map the BO
1920 * @offset: requested offset in the BO
1921 * @flags: attributes of pages (read/write/valid/etc.)
1923 * Add a mapping of the BO at the specified addr into the VM.
1924 * Returns 0 for success, error for failure.
1926 * Object has to be reserved and unreserved outside!
1928 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1929 struct amdgpu_bo_va *bo_va,
1930 uint64_t saddr, uint64_t offset,
1931 uint64_t size, uint64_t flags)
1933 struct amdgpu_bo_va_mapping *mapping, *tmp;
1934 struct amdgpu_bo *bo = bo_va->base.bo;
1935 struct amdgpu_vm *vm = bo_va->base.vm;
1938 /* validate the parameters */
1939 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1940 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1943 /* make sure the object fits at this offset */
1944 eaddr = saddr + size - 1;
1945 if (saddr >= eaddr ||
1946 (bo && offset + size > amdgpu_bo_size(bo)))
1949 saddr /= AMDGPU_GPU_PAGE_SIZE;
1950 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1952 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1954 /* bo and tmp overlap, invalid addr */
1955 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1956 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1957 tmp->start, tmp->last + 1);
1961 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1965 mapping->start = saddr;
1966 mapping->last = eaddr;
1967 mapping->offset = offset;
1968 mapping->flags = flags;
1970 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
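/*
 * Worked example for amdgpu_vm_bo_map(): saddr, offset and size are byte
 * values and must be GPU page aligned; internally they are converted to GPU
 * page numbers. A hypothetical call mapping 1M of a BO at VA 0x100000,
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *
 * ends up with mapping->start = 0x100 and mapping->last = 0x1ff.
 */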
1976 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1978 * @adev: amdgpu_device pointer
1979 * @bo_va: bo_va to store the address
1980 * @saddr: where to map the BO
1981 * @offset: requested offset in the BO
1982 * @flags: attributes of pages (read/write/valid/etc.)
1984 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1985 * mappings as we do so.
1986 * Returns 0 for success, error for failure.
1988 * Object has to be reserved and unreserved outside!
1990 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1991 struct amdgpu_bo_va *bo_va,
1992 uint64_t saddr, uint64_t offset,
1993 uint64_t size, uint64_t flags)
1995 struct amdgpu_bo_va_mapping *mapping;
1996 struct amdgpu_bo *bo = bo_va->base.bo;
2000 /* validate the parameters */
2001 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2002 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2005 /* make sure the object fits at this offset */
2006 eaddr = saddr + size - 1;
2007 if (saddr >= eaddr ||
2008 (bo && offset + size > amdgpu_bo_size(bo)))
2011 /* Allocate all the needed memory */
2012 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2016 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2022 saddr /= AMDGPU_GPU_PAGE_SIZE;
2023 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2025 mapping->start = saddr;
2026 mapping->last = eaddr;
2027 mapping->offset = offset;
2028 mapping->flags = flags;
2030 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2036 * amdgpu_vm_bo_unmap - remove bo mapping from vm
2038 * @adev: amdgpu_device pointer
2039 * @bo_va: bo_va to remove the address from
2040 * @saddr: where the BO is mapped
2042 * Remove a mapping of the BO at the specified addr from the VM.
2043 * Returns 0 for success, error for failure.
2045 * Object has to be reserved and unreserved outside!
2047 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2048 struct amdgpu_bo_va *bo_va,
2051 struct amdgpu_bo_va_mapping *mapping;
2052 struct amdgpu_vm *vm = bo_va->base.vm;
2055 saddr /= AMDGPU_GPU_PAGE_SIZE;
2057 list_for_each_entry(mapping, &bo_va->valids, list) {
2058 if (mapping->start == saddr)
2062 if (&mapping->list == &bo_va->valids) {
2065 list_for_each_entry(mapping, &bo_va->invalids, list) {
2066 if (mapping->start == saddr)
2070 if (&mapping->list == &bo_va->invalids)
2074 list_del(&mapping->list);
2075 amdgpu_vm_it_remove(mapping, &vm->va);
2076 mapping->bo_va = NULL;
2077 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2080 list_add(&mapping->list, &vm->freed);
2082 amdgpu_vm_free_mapping(adev, vm, mapping,
2083 bo_va->last_pt_update);
2089 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2091 * @adev: amdgpu_device pointer
2092 * @vm: VM structure to use
2093 * @saddr: start of the range
2094 * @size: size of the range
2096 * Remove all mappings in a range, split them as appropriate.
2097 * Returns 0 for success, error for failure.
2099 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2100 struct amdgpu_vm *vm,
2101 uint64_t saddr, uint64_t size)
2103 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2107 eaddr = saddr + size - 1;
2108 saddr /= AMDGPU_GPU_PAGE_SIZE;
2109 eaddr /= AMDGPU_GPU_PAGE_SIZE;
2111 /* Allocate all the needed memory */
2112 before = kzalloc(sizeof(*before), GFP_KERNEL);
2115 INIT_LIST_HEAD(&before->list);
2117 after = kzalloc(sizeof(*after), GFP_KERNEL);
2122 INIT_LIST_HEAD(&after->list);
2124 /* Now gather all removed mappings */
2125 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2127 /* Remember mapping split at the start */
2128 if (tmp->start < saddr) {
2129 before->start = tmp->start;
2130 before->last = saddr - 1;
2131 before->offset = tmp->offset;
2132 before->flags = tmp->flags;
2133 list_add(&before->list, &tmp->list);
2136 /* Remember mapping split at the end */
2137 if (tmp->last > eaddr) {
2138 after->start = eaddr + 1;
2139 after->last = tmp->last;
2140 after->offset = tmp->offset;
2141 after->offset += after->start - tmp->start;
2142 after->flags = tmp->flags;
2143 list_add(&after->list, &tmp->list);
2146 list_del(&tmp->list);
2147 list_add(&tmp->list, &removed);
2149 tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2152 /* And free them up */
2153 list_for_each_entry_safe(tmp, next, &removed, list) {
2154 amdgpu_vm_it_remove(tmp, &vm->va);
2155 list_del(&tmp->list);
2157 if (tmp->start < saddr)
2159 if (tmp->last > eaddr)
2163 list_add(&tmp->list, &vm->freed);
2164 trace_amdgpu_vm_bo_unmap(NULL, tmp);
2167 /* Insert partial mapping before the range */
2168 if (!list_empty(&before->list)) {
2169 amdgpu_vm_it_insert(before, &vm->va);
2170 if (before->flags & AMDGPU_PTE_PRT)
2171 amdgpu_vm_prt_get(adev);
2176 /* Insert partial mapping after the range */
2177 if (!list_empty(&after->list)) {
2178 amdgpu_vm_it_insert(after, &vm->va);
2179 if (after->flags & AMDGPU_PTE_PRT)
2180 amdgpu_vm_prt_get(adev);
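/*
 * Worked example for amdgpu_vm_bo_clear_mappings(): clearing GPU pages
 * 0x180-0x27f out of an existing mapping that covers pages 0x100-0x2ff
 * splits it into a "before" mapping for 0x100-0x17f (same offset) and an
 * "after" mapping for 0x280-0x2ff (offset advanced accordingly), while the
 * covered middle part is moved to vm->freed to be cleared later.
 */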
2189 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2191 * @vm: the requested VM
2193 * Find a mapping by its address.
2195 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2198 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2202 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2204 * @adev: amdgpu_device pointer
2205 * @bo_va: requested bo_va
2207 * Remove @bo_va->bo from the requested vm.
2209 * Object has to be reserved!
2211 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2212 struct amdgpu_bo_va *bo_va)
2214 struct amdgpu_bo_va_mapping *mapping, *next;
2215 struct amdgpu_vm *vm = bo_va->base.vm;
2217 list_del(&bo_va->base.bo_list);
2219 spin_lock(&vm->status_lock);
2220 list_del(&bo_va->base.vm_status);
2221 spin_unlock(&vm->status_lock);
2223 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2224 list_del(&mapping->list);
2225 amdgpu_vm_it_remove(mapping, &vm->va);
2226 mapping->bo_va = NULL;
2227 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2228 list_add(&mapping->list, &vm->freed);
2230 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2231 list_del(&mapping->list);
2232 amdgpu_vm_it_remove(mapping, &vm->va);
2233 amdgpu_vm_free_mapping(adev, vm, mapping,
2234 bo_va->last_pt_update);
2237 dma_fence_put(bo_va->last_pt_update);
2242 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2244 * @adev: amdgpu_device pointer
2246 * @bo: amdgpu buffer object
2248 * Mark @bo as invalid.
2250 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2251 struct amdgpu_bo *bo, bool evicted)
2253 struct amdgpu_vm_bo_base *bo_base;
2255 /* shadow bo doesn't have bo base, its validation needs its parent */
2256 if (bo->parent && bo->parent->shadow == bo)
2259 list_for_each_entry(bo_base, &bo->va, bo_list) {
2260 struct amdgpu_vm *vm = bo_base->vm;
2262 bo_base->moved = true;
2263 if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2264 spin_lock(&bo_base->vm->status_lock);
2265 if (bo->tbo.type == ttm_bo_type_kernel)
2266 list_move(&bo_base->vm_status, &vm->evicted);
2268 list_move_tail(&bo_base->vm_status,
2270 spin_unlock(&bo_base->vm->status_lock);
2274 if (bo->tbo.type == ttm_bo_type_kernel) {
2275 spin_lock(&bo_base->vm->status_lock);
2276 if (list_empty(&bo_base->vm_status))
2277 list_add(&bo_base->vm_status, &vm->relocated);
2278 spin_unlock(&bo_base->vm->status_lock);
2282 spin_lock(&bo_base->vm->status_lock);
2283 if (list_empty(&bo_base->vm_status))
2284 list_add(&bo_base->vm_status, &vm->moved);
2285 spin_unlock(&bo_base->vm->status_lock);
2289 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2291 /* Total bits covered by PD + PTs */
2292 unsigned bits = ilog2(vm_size) + 18;
2294 /* Make sure the PD is 4K in size up to 8GB address space.
2295 Above that, split equally between PD and PTs */
2299 return ((bits + 3) / 2);
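/*
 * Worked example for amdgpu_vm_get_block_size() with hypothetical sizes:
 * a 256GB VM covers ilog2(256) + 18 = 26 bits of 4K pages, which is above
 * the 8GB limit, so the bits split as (26 + 3) / 2 = 14 for each PT and
 * 26 - 14 = 12 for the PD. An 8GB VM (21 bits) instead keeps the PD at 4K
 * (9 bits, 512 entries) and uses the remaining 12 bits per PT.
 */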
/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default vm size if it's set auto
 * @fragment_size_default: fragment size to use if not overridden by the user
 * @max_level: maximum number of page-table levels the hardware supports
 * @max_bits: size of the address space in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits)
{
        uint64_t tmp;

        /* adjust vm size first */
        if (amdgpu_vm_size != -1) {
                unsigned max_size = 1 << (max_bits - 30);

                vm_size = amdgpu_vm_size;
                if (vm_size > max_size) {
                        dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
                                 amdgpu_vm_size, max_size);
                        vm_size = max_size;
                }
        }

        adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

        tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
        if (amdgpu_vm_block_size != -1)
                tmp >>= amdgpu_vm_block_size - 9;
        tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
        adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
        switch (adev->vm_manager.num_level) {
        case 3:
                adev->vm_manager.root_level = AMDGPU_VM_PDB2;
                break;
        case 2:
                adev->vm_manager.root_level = AMDGPU_VM_PDB1;
                break;
        case 1:
                adev->vm_manager.root_level = AMDGPU_VM_PDB0;
                break;
        default:
                dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
        }
        /* block size depends on vm size and hw setup */
        if (amdgpu_vm_block_size != -1)
                adev->vm_manager.block_size =
                        min((unsigned)amdgpu_vm_block_size, max_bits
                            - AMDGPU_GPU_PAGE_SHIFT
                            - 9 * adev->vm_manager.num_level);
        else if (adev->vm_manager.num_level > 1)
                adev->vm_manager.block_size = 9;
        else
                adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

        if (amdgpu_vm_fragment_size == -1)
                adev->vm_manager.fragment_size = fragment_size_default;
        else
                adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

        DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
                 vm_size, adev->vm_manager.num_level + 1,
                 adev->vm_manager.block_size,
                 adev->vm_manager.fragment_size);
}
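/*
 * Worked example for amdgpu_vm_adjust_size() (the parameter values here are
 * hypothetical, chosen only to illustrate the arithmetic): with
 * vm_size = 262144 (256 TB), max_level = 3, max_bits = 48 and the
 * amdgpu_vm_size/block_size/fragment_size module parameters all left at -1:
 *
 *   max_pfn    = 262144 << 18 = 2^36 pages of 4K, i.e. a 48-bit space
 *   tmp        = DIV_ROUND_UP(fls64(2^36) - 1, 9) - 1 = 3
 *   num_level  = min(3, 3) = 3, so root_level = AMDGPU_VM_PDB2
 *   block_size = 9 (num_level > 1), fragment_size = fragment_size_default
 *
 * i.e. a 4-level walk (PDB2/PDB1/PDB0/PTB) with 512-entry page tables.
 */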
/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
 * @pasid: Process address space identifier
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid)
{
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *root;
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                                   AMDGPU_VM_PTE_COUNT(adev) * 8);
        unsigned ring_instance;
        struct amdgpu_ring *ring;
        struct drm_sched_rq *rq;
        unsigned long size;
        uint64_t flags;
        int r, i;

        vm->va = RB_ROOT_CACHED;
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = NULL;
        spin_lock_init(&vm->status_lock);
        INIT_LIST_HEAD(&vm->evicted);
        INIT_LIST_HEAD(&vm->relocated);
        INIT_LIST_HEAD(&vm->moved);
        INIT_LIST_HEAD(&vm->freed);

        /* create scheduler entity for page table updates */
        ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
        ring_instance %= adev->vm_manager.vm_pte_num_rings;
        ring = adev->vm_manager.vm_pte_rings[ring_instance];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        r = drm_sched_entity_init(&ring->sched, &vm->entity, rq, NULL);
        if (r)
                return r;

        vm->pte_support_ats = false;

        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                            AMDGPU_VM_USE_CPU_FOR_COMPUTE);

                if (adev->asic_type == CHIP_RAVEN)
                        vm->pte_support_ats = true;
        } else {
                vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                            AMDGPU_VM_USE_CPU_FOR_GFX);
        }
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
        WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
                  "CPU update of VM recommended only for large BAR system\n");
        vm->last_update = NULL;

        flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        if (vm->use_cpu_for_update)
                flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else
                flags |= AMDGPU_GEM_CREATE_SHADOW;

        size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = align;
        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
        bp.flags = flags;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
        r = amdgpu_bo_create(adev, &bp, &root);
        if (r)
                goto error_free_sched_entity;

        r = amdgpu_bo_reserve(root, true);
        if (r)
                goto error_free_root;

        r = amdgpu_vm_clear_bo(adev, vm, root,
                               adev->vm_manager.root_level,
                               vm->pte_support_ats);
        if (r)
                goto error_unreserve;

        amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
        amdgpu_bo_unreserve(vm->root.base.bo);

        if (pasid) {
                unsigned long flags;

                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
                r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
                              GFP_ATOMIC);
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
                if (r < 0)
                        goto error_free_root;

                vm->pasid = pasid;
        }

        INIT_KFIFO(vm->faults);
        vm->fault_credit = 16;

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(vm->root.base.bo);

error_free_root:
        amdgpu_bo_unref(&vm->root.base.bo->shadow);
        amdgpu_bo_unref(&vm->root.base.bo);
        vm->root.base.bo = NULL;

error_free_sched_entity:
        drm_sched_entity_fini(&ring->sched, &vm->entity);

        return r;
}
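/*
 * Illustrative lifetime of a VM (a simplified sketch, not code copied from
 * the driver; error handling and locking in the real callers differ):
 *
 *      r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *      ...                   (bind BOs, submit command buffers)
 *      amdgpu_vm_fini(adev, &fpriv->vm);
 *
 * A compute process is expected to take a freshly initialized GFX VM and
 * convert it with amdgpu_vm_make_compute() below, before any BOs are added
 * or page tables are allocated.
 */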
/**
 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
 *
 * This only works on GFX VMs that don't have any BOs added and no
 * page tables allocated yet.
 *
 * Changes the following VM parameters:
 * - use_cpu_for_update
 * - pte_support_ats
 * - pasid (old PASID is released, because compute manages its own PASIDs)
 *
 * Reinitializes the page directory to reflect the changed ATS
 * setting. May leave behind an unused shadow BO for the page
 * directory when switching from SDMA updates to CPU updates.
 *
 * Returns 0 for success, -errno for errors.
 */
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
        int r;

        r = amdgpu_bo_reserve(vm->root.base.bo, true);
        if (r)
                return r;

        /* Sanity checks */
        if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
                r = -EINVAL;
                goto error;
        }

        /* Check if PD needs to be reinitialized and do it before
         * changing any other state, in case it fails.
         */
        if (pte_support_ats != vm->pte_support_ats) {
                r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
                                       adev->vm_manager.root_level,
                                       pte_support_ats);
                if (r)
                        goto error;
        }

        /* Update VM state */
        vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
                                    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
        vm->pte_support_ats = pte_support_ats;
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
        WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
                  "CPU update of VM recommended only for large BAR system\n");

        if (vm->pasid) {
                unsigned long flags;

                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
                idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

                vm->pasid = 0;
        }

error:
        amdgpu_bo_unreserve(vm->root.base.bo);
        return r;
}
/**
 * amdgpu_vm_free_levels - free PD/PT levels
 *
 * @adev: amdgpu device structure
 * @parent: PD/PT starting level to free
 * @level: level of parent structure
 *
 * Free the page directory or page table level and all sub levels.
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
                                  struct amdgpu_vm_pt *parent,
                                  unsigned level)
{
        unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

        if (parent->base.bo) {
                list_del(&parent->base.bo_list);
                list_del(&parent->base.vm_status);
                amdgpu_bo_unref(&parent->base.bo->shadow);
                amdgpu_bo_unref(&parent->base.bo);
        }

        if (parent->entries)
                for (i = 0; i < num_entries; i++)
                        amdgpu_vm_free_levels(adev, &parent->entries[i],
                                              level + 1);

        kvfree(parent->entries);
}
/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Unbind the VM and remove all BOs from the VM's BO list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        struct amdgpu_bo_va_mapping *mapping, *tmp;
        bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
        struct amdgpu_bo *root;
        u64 fault;
        int i, r;

        amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

        /* Clear pending page faults from IH when the VM is destroyed */
        while (kfifo_get(&vm->faults, &fault))
                amdgpu_ih_clear_fault(adev, fault);

        if (vm->pasid) {
                unsigned long flags;

                spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
                idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
        }

        drm_sched_entity_fini(vm->entity.sched, &vm->entity);

        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
        }
        rbtree_postorder_for_each_entry_safe(mapping, tmp,
                                             &vm->va.rb_root, rb) {
                list_del(&mapping->list);
                amdgpu_vm_it_remove(mapping, &vm->va);
                kfree(mapping);
        }
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
                if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
                        amdgpu_vm_prt_fini(adev, vm);
                        prt_fini_needed = false;
                }

                list_del(&mapping->list);
                amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
        }

        root = amdgpu_bo_ref(vm->root.base.bo);
        r = amdgpu_bo_reserve(root, true);
        if (r) {
                dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
        } else {
                amdgpu_vm_free_levels(adev, &vm->root,
                                      adev->vm_manager.root_level);
                amdgpu_bo_unreserve(root);
        }
        amdgpu_bo_unref(&root);
        dma_fence_put(vm->last_update);
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context.
 * Returns true if there was fault credit, false otherwise.
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
                                  unsigned int pasid)
{
        struct amdgpu_vm *vm;

        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
        if (!vm) {
                /* VM not found, can't track fault credit */
                spin_unlock(&adev->vm_manager.pasid_lock);
                return true;
        }

        /* No lock needed. Only accessed by IRQ handler */
        if (!vm->fault_credit) {
                /* Too many faults in this VM */
                spin_unlock(&adev->vm_manager.pasid_lock);
                return false;
        }

        vm->fault_credit--;
        spin_unlock(&adev->vm_manager.pasid_lock);
        return true;
}
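/*
 * Hedged usage sketch (illustrative, not copied from the interrupt handler):
 * a fault path in the IH code is expected to look roughly like
 *
 *      if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
 *              return;         (VM already flooded with faults, drop it)
 *
 * so a single VM gets at most fault_credit faults processed (the credit is
 * initialized to 16 in amdgpu_vm_init() above) before further ones are
 * dropped by the caller.
 */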
/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures.
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
        unsigned i;

        amdgpu_vmid_mgr_init(adev);

        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;

        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        spin_lock_init(&adev->vm_manager.prt_lock);
        atomic_set(&adev->vm_manager.num_prt_users, 0);

        /* Unless overridden by the user, compute VM page tables are only
         * updated by the CPU on large BAR systems by default.
         */
#ifdef CONFIG_X86_64
        if (amdgpu_vm_update_mode == -1) {
                if (amdgpu_vm_is_large_bar(adev))
                        adev->vm_manager.vm_update_mode =
                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
                else
                        adev->vm_manager.vm_update_mode = 0;
        } else
                adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
        adev->vm_manager.vm_update_mode = 0;
#endif

        idr_init(&adev->vm_manager.pasid_idr);
        spin_lock_init(&adev->vm_manager.pasid_lock);
}
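/*
 * Editorial note on the CONFIG_X86_64 block above: CPU-based page-table
 * updates require the kernel to write the page-table BOs in VRAM through
 * the PCI BAR, which is only practical when all of VRAM is CPU visible
 * (a "large BAR" system); everywhere else, and on non-x86_64 kernels, the
 * manager defaults to SDMA updates (mode 0).
 */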
/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
        WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
        idr_destroy(&adev->vm_manager.pasid_idr);

        amdgpu_vmid_mgr_fini(adev);
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        union drm_amdgpu_vm *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        int r;

        switch (args->in.op) {
        case AMDGPU_VM_OP_RESERVE_VMID:
                /* currently we only need to reserve a VMID from the gfxhub */
                r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
                if (r)
                        return r;
                break;
        case AMDGPU_VM_OP_UNRESERVE_VMID:
                amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);