2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
29 #include <drm/amdgpu_drm.h>
31 #include "amdgpu_trace.h"
35 * GPUVM is similar to the legacy gart on older asics, however
36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix of
39 * vram pages and system memory pages, and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table
43 * associated with each VMID. When executing a command buffer,
44 * the kernel tells the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel
47 * sets up their page tables accordingly when they submit their
48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time;
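 *
 * A minimal sketch (not part of the driver) of how a GPU virtual address
 * maps onto this two-level layout, assuming 4KB GPU pages and the
 * amdgpu_vm_block_size module parameter used throughout this file; the
 * helper name is hypothetical and only meant to make the translation
 * concrete.
 */

static void example_decompose_gpu_va(uint64_t va, unsigned block_size)
{
	uint64_t pfn = va >> 12;	/* 4KB GPU page frame number */
	uint64_t pde_idx = pfn >> block_size;	/* which page table (PD entry) */
	uint64_t pte_idx = pfn & ((1ULL << block_size) - 1);	/* entry inside that table */

	printk(KERN_DEBUG "va 0x%llx -> pde %llu, pte %llu\n",
	       (unsigned long long)va,
	       (unsigned long long)pde_idx,
	       (unsigned long long)pte_idx);
}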
54 * amdgpu_vm_num_pdes - return the number of page directory entries
56 * @adev: amdgpu_device pointer
58 * Calculate the number of page directory entries (cayman+).
60 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
62 return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
66 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
68 * @adev: amdgpu_device pointer
70 * Calculate the size of the page directory in bytes (cayman+).
72 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
74 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
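/*
 * Worked example (illustrative numbers only): if max_pfn were 1 << 21
 * (an 8GB address space with 4KB pages) and amdgpu_vm_block_size were 9,
 * amdgpu_vm_num_pdes() would return 1 << 12 = 4096 entries and, at 8 bytes
 * per PDE, amdgpu_vm_directory_size() would return 32KB (already page
 * aligned).
 */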
78 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
80 * @vm: vm providing the BOs
81 * @validated: head of validation list
82 * @entry: entry to add
84 * Add the page directory to the list of BOs to
85 * validate for command submission.
87 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
88 struct list_head *validated,
89 struct amdgpu_bo_list_entry *entry)
91 entry->robj = vm->page_directory;
93 entry->tv.bo = &vm->page_directory->tbo;
94 entry->tv.shared = true;
95 list_add(&entry->tv.head, validated);
99 * amdgpu_vm_get_pt_bos - add the vm PT BOs to a duplicates list
101 * @vm: vm providing the BOs
102 * @duplicates: head of duplicates list
104 * Add the page table BOs to the BO duplicates list
105 * for command submission.
107 void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
111 /* add the vm page table to the list */
112 for (i = 0; i <= vm->max_pde_used; ++i) {
113 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
118 list_add(&entry->tv.head, duplicates);
124 * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
126 * @adev: amdgpu device instance
127 * @vm: vm providing the BOs
129 * Move the PT BOs to the tail of the LRU.
131 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
132 struct amdgpu_vm *vm)
134 struct ttm_bo_global *glob = adev->mman.bdev.glob;
137 spin_lock(&glob->lru_lock);
138 for (i = 0; i <= vm->max_pde_used; ++i) {
139 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
144 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
146 spin_unlock(&glob->lru_lock);
150 * amdgpu_vm_grab_id - allocate the next free VMID
152 * @vm: vm to allocate id for
153 * @ring: ring we want to submit job to
154 * @sync: sync object where we add dependencies
156 * Allocate an id for the vm, adding fences to the sync obj as necessary.
158 * Global mutex must be locked!
160 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
161 struct amdgpu_sync *sync)
163 struct fence *best[AMDGPU_MAX_RINGS] = {};
164 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
165 struct amdgpu_device *adev = ring->adev;
167 unsigned choices[2] = {};
170 /* check if the id is still valid */
172 unsigned id = vm_id->id;
175 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
176 if (owner == (long)vm) {
177 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
182 /* we definitely need to flush */
183 vm_id->pd_gpu_addr = ~0ll;
185 /* skip over VMID 0, since it is the system VM */
186 for (i = 1; i < adev->vm_manager.nvm; ++i) {
187 struct fence *fence = adev->vm_manager.ids[i].active;
188 struct amdgpu_ring *fring;
191 /* found a free one */
193 trace_amdgpu_vm_grab_id(i, ring->idx);
197 fring = amdgpu_ring_from_fence(fence);
198 if (best[fring->idx] == NULL ||
199 fence_is_later(best[fring->idx], fence)) {
200 best[fring->idx] = fence;
201 choices[fring == ring ? 0 : 1] = i;
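/*
 * choices[0] remembers a candidate VMID whose last use was on this ring,
 * choices[1] one that was last used on another ring. The loop below
 * prefers the same-ring candidate, since syncing to a fence from the ring
 * we are about to submit to is essentially free, and adds the old owner's
 * fence to the sync object before handing out the ID.
 */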
205 for (i = 0; i < 2; ++i) {
209 fence = adev->vm_manager.ids[choices[i]].active;
210 vm_id->id = choices[i];
212 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
213 return amdgpu_sync_fence(ring->adev, sync, fence);
217 /* should never happen */
223 * amdgpu_vm_flush - hardware flush the vm
225 * @ring: ring to use for flush
226 * @vm: vm we want to flush
227 * @updates: last vm update that we waited for
229 * Flush the vm (cayman+).
231 * Global and local mutex must be locked!
233 void amdgpu_vm_flush(struct amdgpu_ring *ring,
234 struct amdgpu_vm *vm,
235 struct fence *updates)
237 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
238 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
239 struct fence *flushed_updates = vm_id->flushed_updates;
242 if (!flushed_updates)
247 is_later = fence_is_later(updates, flushed_updates);
249 if (pd_addr != vm_id->pd_gpu_addr || is_later) {
250 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
252 vm_id->flushed_updates = fence_get(updates);
253 fence_put(flushed_updates);
255 vm_id->pd_gpu_addr = pd_addr;
256 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
261 * amdgpu_vm_fence - remember fence for vm
263 * @adev: amdgpu_device pointer
264 * @vm: vm we want to fence
265 * @fence: fence to remember
267 * Fence the vm (cayman+).
268 * Set the fence used to protect page table and id.
270 * Global and local mutex must be locked!
272 void amdgpu_vm_fence(struct amdgpu_device *adev,
273 struct amdgpu_vm *vm,
276 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
277 unsigned vm_id = vm->ids[ring->idx].id;
279 fence_put(adev->vm_manager.ids[vm_id].active);
280 adev->vm_manager.ids[vm_id].active = fence_get(fence);
281 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
285 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
288 * @bo: requested buffer object
290 * Find @bo inside the requested vm (cayman+).
291 * Search inside the @bo's vm list for the requested vm.
292 * Returns the found bo_va or NULL if none is found.
294 * Object has to be reserved!
296 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
297 struct amdgpu_bo *bo)
299 struct amdgpu_bo_va *bo_va;
301 list_for_each_entry(bo_va, &bo->va, bo_list) {
302 if (bo_va->vm == vm) {
310 * amdgpu_vm_update_pages - helper to call the right asic function
312 * @adev: amdgpu_device pointer
313 * @ib: indirect buffer to fill with commands
314 * @pe: addr of the page entry
315 * @addr: dst addr to write into pe
316 * @count: number of page entries to update
317 * @incr: increase next addr by incr bytes
318 * @flags: hw access flags
319 * @gtt_flags: GTT hw access flags
321 * Traces the parameters and calls the right asic functions
322 * to setup the page table using the DMA.
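 *
 * Three cases are handled: system pages whose flags match the GTT flags
 * can be copied straight out of the GART table with amdgpu_vm_copy_pte(),
 * other system mappings (or very short runs) are written inline with
 * amdgpu_vm_write_pte(), and everything else is generated on the fly with
 * amdgpu_vm_set_pte_pde().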
324 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
325 struct amdgpu_ib *ib,
326 uint64_t pe, uint64_t addr,
327 unsigned count, uint32_t incr,
328 uint32_t flags, uint32_t gtt_flags)
330 trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
332 if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
333 uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
334 amdgpu_vm_copy_pte(adev, ib, pe, src, count);
336 } else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
337 amdgpu_vm_write_pte(adev, ib, pe, addr,
341 amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
346 int amdgpu_vm_free_job(struct amdgpu_job *job)
349 for (i = 0; i < job->num_ibs; i++)
350 amdgpu_ib_free(job->adev, &job->ibs[i]);
356 * amdgpu_vm_clear_bo - initially clear the page dir/table
358 * @adev: amdgpu_device pointer
361 * The BO must be reserved before calling this function.
363 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
364 struct amdgpu_bo *bo)
366 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
367 struct fence *fence = NULL;
368 struct amdgpu_ib *ib;
373 r = reservation_object_reserve_shared(bo->tbo.resv);
377 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
381 addr = amdgpu_bo_gpu_offset(bo);
382 entries = amdgpu_bo_size(bo) / 8;
384 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
388 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
394 amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
395 amdgpu_vm_pad_ib(adev, ib);
396 WARN_ON(ib->length_dw > 64);
397 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
399 AMDGPU_FENCE_OWNER_VM,
402 amdgpu_bo_fence(bo, fence, true);
404 if (amdgpu_enable_scheduler)
408 amdgpu_ib_free(adev, ib);
416 * amdgpu_vm_map_gart - get the physical address of a gart page
418 * @adev: amdgpu_device pointer
419 * @addr: the unmapped addr
421 * Look up the physical address of the page that the pte resolves
423 * Returns the physical address of the page.
425 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
429 /* page table offset */
430 result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
432 /* in case cpu page size != gpu page size */
433 result |= addr & (~PAGE_MASK);
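/*
 * Illustrative example (page sizes assumed): with 64KB CPU pages and 4KB
 * GPU pages, a GPU page at addr 0x23000 looks up pages_addr[0x2] to get
 * the DMA address of the CPU page and ORs the low 16 bits (0x3000) back
 * in to select the right 4KB chunk inside it; with equal page sizes the
 * OR is a no-op.
 */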
439 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
441 * @adev: amdgpu_device pointer
443 * @start: start of GPU address range
444 * @end: end of GPU address range
446 * Allocates new page tables if necessary
447 * and updates the page directory (cayman+).
448 * Returns 0 for success, error for failure.
450 * Global and local mutex must be locked!
452 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
453 struct amdgpu_vm *vm)
455 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
456 struct amdgpu_bo *pd = vm->page_directory;
457 uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
458 uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
459 uint64_t last_pde = ~0, last_pt = ~0;
460 unsigned count = 0, pt_idx, ndw;
461 struct amdgpu_ib *ib;
462 struct fence *fence = NULL;
469 /* assume the worst case */
470 ndw += vm->max_pde_used * 6;
472 /* update too big for an IB */
476 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
480 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
487 /* walk over the address space and update the page directory */
488 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
489 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
495 pt = amdgpu_bo_gpu_offset(bo);
496 if (vm->page_tables[pt_idx].addr == pt)
498 vm->page_tables[pt_idx].addr = pt;
500 pde = pd_addr + pt_idx * 8;
501 if (((last_pde + 8 * count) != pde) ||
502 ((last_pt + incr * count) != pt)) {
505 amdgpu_vm_update_pages(adev, ib, last_pde,
506 last_pt, count, incr,
507 AMDGPU_PTE_VALID, 0);
519 amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
520 incr, AMDGPU_PTE_VALID, 0);
522 if (ib->length_dw != 0) {
523 amdgpu_vm_pad_ib(adev, ib);
524 amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
525 WARN_ON(ib->length_dw > ndw);
526 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
528 AMDGPU_FENCE_OWNER_VM,
533 amdgpu_bo_fence(pd, fence, true);
534 fence_put(vm->page_directory_fence);
535 vm->page_directory_fence = fence_get(fence);
539 if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
540 amdgpu_ib_free(adev, ib);
547 amdgpu_ib_free(adev, ib);
553 * amdgpu_vm_frag_ptes - add fragment information to PTEs
555 * @adev: amdgpu_device pointer
556 * @ib: IB for the update
557 * @pe_start: first PTE to handle
558 * @pe_end: last PTE to handle
559 * @addr: addr those PTEs should point to
560 * @flags: hw mapping flags
561 * @gtt_flags: GTT hw mapping flags
563 * Global and local mutex must be locked!
565 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
566 struct amdgpu_ib *ib,
567 uint64_t pe_start, uint64_t pe_end,
568 uint64_t addr, uint32_t flags,
572 * The MC L1 TLB supports variable sized pages, based on a fragment
573 * field in the PTE. When this field is set to a non-zero value, page
574 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
575 * flags are considered valid for all PTEs within the fragment range
576 * and corresponding mappings are assumed to be physically contiguous.
578 * The L1 TLB can store a single PTE for the whole fragment,
579 * significantly increasing the space available for translation
580 * caching. This leads to large improvements in throughput when the
581 * TLB is under pressure.
583 * The L2 TLB distributes small and large fragments into two
584 * asymmetric partitions. The large fragment cache is significantly
585 * larger. Thus, we try to use large fragments wherever possible.
586 * Userspace can support this by aligning virtual base address and
587 * allocation size to the fragment size.
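 *
 * A worked example with the values used below (illustrative numbers only):
 * with 8-byte PTEs and 4KB GPU pages a 64KB fragment spans 16 PTEs, i.e.
 * 0x80 bytes of PTE space, which is where frag_align = 0x80 comes from.
 * For pe_start = 0x1008 and pe_end = 0x1238, frag_start rounds up to
 * 0x1080 and frag_end rounds down to 0x1200, leaving a head written with
 * plain 4KB PTEs, a middle written with the 64KB fragment flag set and a
 * tail written with plain 4KB PTEs again.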
590 /* SI and newer are optimized for 64KB */
591 uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
592 uint64_t frag_align = 0x80;
594 uint64_t frag_start = ALIGN(pe_start, frag_align);
595 uint64_t frag_end = pe_end & ~(frag_align - 1);
599 /* system pages are not physically contiguous */
600 if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
601 (frag_start >= frag_end)) {
603 count = (pe_end - pe_start) / 8;
604 amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
605 AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
609 /* handle the 4K area at the beginning */
610 if (pe_start != frag_start) {
611 count = (frag_start - pe_start) / 8;
612 amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
613 AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
614 addr += AMDGPU_GPU_PAGE_SIZE * count;
617 /* handle the area in the middle */
618 count = (frag_end - frag_start) / 8;
619 amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
620 AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
623 /* handle the 4K area at the end */
624 if (frag_end != pe_end) {
625 addr += AMDGPU_GPU_PAGE_SIZE * count;
626 count = (pe_end - frag_end) / 8;
627 amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
628 AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
633 * amdgpu_vm_update_ptes - make sure that page tables are valid
635 * @adev: amdgpu_device pointer
637 * @start: start of GPU address range
638 * @end: end of GPU address range
639 * @dst: destination address to map to
640 * @flags: mapping flags
642 * Update the page tables in the range @start - @end (cayman+).
644 * Global and local mutex must be locked!
646 static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
647 struct amdgpu_vm *vm,
648 struct amdgpu_ib *ib,
649 uint64_t start, uint64_t end,
650 uint64_t dst, uint32_t flags,
653 uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
654 uint64_t last_pte = ~0, last_dst = ~0;
655 void *owner = AMDGPU_FENCE_OWNER_VM;
659 /* sync to everything on unmapping */
660 if (!(flags & AMDGPU_PTE_VALID))
661 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
663 /* walk over the address space and update the page tables */
664 for (addr = start; addr < end; ) {
665 uint64_t pt_idx = addr >> amdgpu_vm_block_size;
666 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
671 amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
672 r = reservation_object_reserve_shared(pt->tbo.resv);
676 if ((addr & ~mask) == (end & ~mask))
679 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
681 pte = amdgpu_bo_gpu_offset(pt);
682 pte += (addr & mask) * 8;
684 if ((last_pte + 8 * count) != pte) {
687 amdgpu_vm_frag_ptes(adev, ib, last_pte,
688 last_pte + 8 * count,
701 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
705 amdgpu_vm_frag_ptes(adev, ib, last_pte,
706 last_pte + 8 * count,
707 last_dst, flags, gtt_flags);
714 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
716 * @adev: amdgpu_device pointer
718 * @mapping: mapped range and flags to use for the update
719 * @addr: addr to set the area to
720 * @gtt_flags: flags as they are used for GTT
721 * @fence: optional resulting fence
723 * Fill in the page table entries for @mapping.
724 * Returns 0 for success, -EINVAL for failure.
726 * Objects have to be reserved and the mutex must be locked!
728 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
729 struct amdgpu_vm *vm,
730 struct amdgpu_bo_va_mapping *mapping,
731 uint64_t addr, uint32_t gtt_flags,
732 struct fence **fence)
734 struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
735 unsigned nptes, ncmds, ndw;
736 uint32_t flags = gtt_flags;
737 struct amdgpu_ib *ib;
738 struct fence *f = NULL;
741 /* normally bo_va->flags only contains the READABLE and WRITEABLE bits,
742 * but we filter the flags here first just in case
744 if (!(mapping->flags & AMDGPU_PTE_READABLE))
745 flags &= ~AMDGPU_PTE_READABLE;
746 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
747 flags &= ~AMDGPU_PTE_WRITEABLE;
749 trace_amdgpu_vm_bo_update(mapping);
751 nptes = mapping->it.last - mapping->it.start + 1;
754 * reserve space for one command every (1 << BLOCK_SIZE)
755 * entries or 2k dwords (whatever is smaller)
757 ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
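/*
 * Example of the arithmetic (illustrative numbers): with
 * amdgpu_vm_block_size = 9, min(9, 11) = 9, so a mapping of nptes = 1500
 * entries reserves ncmds = (1500 >> 9) + 1 = 3 commands' worth of space.
 */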
762 if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
763 /* only copy commands needed */
766 } else if (flags & AMDGPU_PTE_SYSTEM) {
767 /* header for write data commands */
770 /* body of write data command */
774 /* set page commands needed */
777 /* two extra commands for begin/end of fragment */
781 /* update too big for an IB */
785 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
789 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
797 r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
798 mapping->it.last + 1, addr + mapping->offset,
802 amdgpu_ib_free(adev, ib);
807 amdgpu_vm_pad_ib(adev, ib);
808 WARN_ON(ib->length_dw > ndw);
809 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
811 AMDGPU_FENCE_OWNER_VM,
816 amdgpu_bo_fence(vm->page_directory, f, true);
819 *fence = fence_get(f);
822 if (!amdgpu_enable_scheduler) {
823 amdgpu_ib_free(adev, ib);
829 amdgpu_ib_free(adev, ib);
835 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
837 * @adev: amdgpu_device pointer
838 * @bo_va: requested BO and VM object
841 * Fill in the page table entries for @bo_va.
842 * Returns 0 for success, -EINVAL for failure.
844 * Objects have to be reserved and the mutex must be locked!
846 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
847 struct amdgpu_bo_va *bo_va,
848 struct ttm_mem_reg *mem)
850 struct amdgpu_vm *vm = bo_va->vm;
851 struct amdgpu_bo_va_mapping *mapping;
857 addr = (u64)mem->start << PAGE_SHIFT;
858 if (mem->mem_type != TTM_PL_TT)
859 addr += adev->vm_manager.vram_base_offset;
864 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
866 spin_lock(&vm->status_lock);
867 if (!list_empty(&bo_va->vm_status))
868 list_splice_init(&bo_va->valids, &bo_va->invalids);
869 spin_unlock(&vm->status_lock);
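/*
 * A non-empty vm_status means the BO was invalidated (e.g. it moved), so
 * mappings that were valid before have to be rewritten as well; splicing
 * ->valids onto ->invalids makes the loop below update every mapping, and
 * they are all moved back to ->valids once done.
 */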
871 list_for_each_entry(mapping, &bo_va->invalids, list) {
872 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
873 flags, &bo_va->last_pt_update);
878 if (trace_amdgpu_vm_bo_mapping_enabled()) {
879 list_for_each_entry(mapping, &bo_va->valids, list)
880 trace_amdgpu_vm_bo_mapping(mapping);
882 list_for_each_entry(mapping, &bo_va->invalids, list)
883 trace_amdgpu_vm_bo_mapping(mapping);
886 spin_lock(&vm->status_lock);
887 list_splice_init(&bo_va->invalids, &bo_va->valids);
888 list_del_init(&bo_va->vm_status);
890 list_add(&bo_va->vm_status, &vm->cleared);
891 spin_unlock(&vm->status_lock);
897 * amdgpu_vm_clear_freed - clear freed BOs in the PT
899 * @adev: amdgpu_device pointer
902 * Make sure all freed BOs are cleared in the PT.
903 * Returns 0 for success.
905 * PTs have to be reserved and mutex must be locked!
907 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
908 struct amdgpu_vm *vm)
910 struct amdgpu_bo_va_mapping *mapping;
913 spin_lock(&vm->freed_lock);
914 while (!list_empty(&vm->freed)) {
915 mapping = list_first_entry(&vm->freed,
916 struct amdgpu_bo_va_mapping, list);
917 list_del(&mapping->list);
918 spin_unlock(&vm->freed_lock);
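/*
 * freed_lock is dropped here because amdgpu_vm_bo_update_mapping()
 * allocates memory and submits an IB and may therefore sleep; the freed
 * list is re-checked under the lock on the next loop iteration.
 */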
919 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
924 spin_lock(&vm->freed_lock);
926 spin_unlock(&vm->freed_lock);
933 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
935 * @adev: amdgpu_device pointer
938 * Make sure all invalidated BOs are cleared in the PT.
939 * Returns 0 for success.
941 * PTs have to be reserved and mutex must be locked!
943 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
944 struct amdgpu_vm *vm, struct amdgpu_sync *sync)
946 struct amdgpu_bo_va *bo_va = NULL;
949 spin_lock(&vm->status_lock);
950 while (!list_empty(&vm->invalidated)) {
951 bo_va = list_first_entry(&vm->invalidated,
952 struct amdgpu_bo_va, vm_status);
953 spin_unlock(&vm->status_lock);
954 mutex_lock(&bo_va->mutex);
955 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
956 mutex_unlock(&bo_va->mutex);
960 spin_lock(&vm->status_lock);
962 spin_unlock(&vm->status_lock);
965 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
971 * amdgpu_vm_bo_add - add a bo to a specific vm
973 * @adev: amdgpu_device pointer
975 * @bo: amdgpu buffer object
977 * Add @bo into the requested vm (cayman+).
978 * Add @bo to the list of bos associated with the vm
979 * Returns newly added bo_va or NULL for failure
981 * Object has to be reserved!
983 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
984 struct amdgpu_vm *vm,
985 struct amdgpu_bo *bo)
987 struct amdgpu_bo_va *bo_va;
989 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
995 bo_va->ref_count = 1;
996 INIT_LIST_HEAD(&bo_va->bo_list);
997 INIT_LIST_HEAD(&bo_va->valids);
998 INIT_LIST_HEAD(&bo_va->invalids);
999 INIT_LIST_HEAD(&bo_va->vm_status);
1000 mutex_init(&bo_va->mutex);
1001 list_add_tail(&bo_va->bo_list, &bo->va);
1007 * amdgpu_vm_bo_map - map bo inside a vm
1009 * @adev: amdgpu_device pointer
1010 * @bo_va: bo_va to store the address
1011 * @saddr: where to map the BO
1012 * @offset: requested offset in the BO
1013 * @flags: attributes of pages (read/write/valid/etc.)
1015 * Add a mapping of the BO at the specified addr into the VM.
1016 * Returns 0 for success, error for failure.
1018 * Object has to be reserved and unreserved outside!
1020 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1021 struct amdgpu_bo_va *bo_va,
1022 uint64_t saddr, uint64_t offset,
1023 uint64_t size, uint32_t flags)
1025 struct amdgpu_bo_va_mapping *mapping;
1026 struct amdgpu_vm *vm = bo_va->vm;
1027 struct interval_tree_node *it;
1028 unsigned last_pfn, pt_idx;
1032 /* validate the parameters */
1033 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1034 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1037 /* make sure the object fits at this offset */
1038 eaddr = saddr + size - 1;
1039 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1042 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1043 if (last_pfn >= adev->vm_manager.max_pfn) {
1044 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1045 last_pfn, adev->vm_manager.max_pfn);
1049 saddr /= AMDGPU_GPU_PAGE_SIZE;
1050 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1052 spin_lock(&vm->it_lock);
1053 it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1054 spin_unlock(&vm->it_lock);
1056 struct amdgpu_bo_va_mapping *tmp;
1057 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1058 /* bo and tmp overlap, invalid addr */
1059 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1060 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1061 tmp->it.start, tmp->it.last + 1);
1066 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1072 INIT_LIST_HEAD(&mapping->list);
1073 mapping->it.start = saddr;
1074 mapping->it.last = eaddr;
1075 mapping->offset = offset;
1076 mapping->flags = flags;
1078 mutex_lock(&bo_va->mutex);
1079 list_add(&mapping->list, &bo_va->invalids);
1080 mutex_unlock(&bo_va->mutex);
1081 spin_lock(&vm->it_lock);
1082 interval_tree_insert(&mapping->it, &vm->va);
1083 spin_unlock(&vm->it_lock);
1084 trace_amdgpu_vm_bo_map(bo_va, mapping);
1086 /* Make sure the page tables are allocated */
1087 saddr >>= amdgpu_vm_block_size;
1088 eaddr >>= amdgpu_vm_block_size;
1090 BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1092 if (eaddr > vm->max_pde_used)
1093 vm->max_pde_used = eaddr;
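/*
 * Worked example (illustrative, assuming amdgpu_vm_block_size = 9):
 * mapping 8MB at GPU VA 0x40000000 gives page numbers 0x40000 to 0x407ff,
 * so the loop below allocates (or reuses) page tables 0x200 through 0x203,
 * each covering 512 entries / 2MB of address space.
 */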
1095 /* walk over the address space and allocate the page tables */
1096 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1097 struct reservation_object *resv = vm->page_directory->tbo.resv;
1098 struct amdgpu_bo_list_entry *entry;
1099 struct amdgpu_bo *pt;
1101 entry = &vm->page_tables[pt_idx].entry;
1105 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1106 AMDGPU_GPU_PAGE_SIZE, true,
1107 AMDGPU_GEM_DOMAIN_VRAM,
1108 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1113 /* Keep a reference to the page directory so that it is not
1114 * freed before its page tables (wrong order).
1116 pt->parent = amdgpu_bo_ref(vm->page_directory);
1118 r = amdgpu_vm_clear_bo(adev, pt);
1120 amdgpu_bo_unref(&pt);
1125 entry->priority = 0;
1126 entry->tv.bo = &entry->robj->tbo;
1127 entry->tv.shared = true;
1128 vm->page_tables[pt_idx].addr = 0;
1134 list_del(&mapping->list);
1135 spin_lock(&vm->it_lock);
1136 interval_tree_remove(&mapping->it, &vm->va);
1137 spin_unlock(&vm->it_lock);
1138 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1146 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1148 * @adev: amdgpu_device pointer
1149 * @bo_va: bo_va to remove the address from
1150 * @saddr: where the BO is mapped
1152 * Remove a mapping of the BO at the specified addr from the VM.
1153 * Returns 0 for success, error for failure.
1155 * Object has to be reserved and unreserved outside!
1157 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1158 struct amdgpu_bo_va *bo_va,
1161 struct amdgpu_bo_va_mapping *mapping;
1162 struct amdgpu_vm *vm = bo_va->vm;
1165 saddr /= AMDGPU_GPU_PAGE_SIZE;
1166 mutex_lock(&bo_va->mutex);
1167 list_for_each_entry(mapping, &bo_va->valids, list) {
1168 if (mapping->it.start == saddr)
1172 if (&mapping->list == &bo_va->valids) {
1175 list_for_each_entry(mapping, &bo_va->invalids, list) {
1176 if (mapping->it.start == saddr)
1180 if (&mapping->list == &bo_va->invalids) {
1181 mutex_unlock(&bo_va->mutex);
1185 mutex_unlock(&bo_va->mutex);
1186 list_del(&mapping->list);
1187 spin_lock(&vm->it_lock);
1188 interval_tree_remove(&mapping->it, &vm->va);
1189 spin_unlock(&vm->it_lock);
1190 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1193 spin_lock(&vm->freed_lock);
1194 list_add(&mapping->list, &vm->freed);
1195 spin_unlock(&vm->freed_lock);
1204 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1206 * @adev: amdgpu_device pointer
1207 * @bo_va: requested bo_va
1209 * Remove @bo_va->bo from the requested vm (cayman+).
1211 * Object has to be reserved!
1213 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1214 struct amdgpu_bo_va *bo_va)
1216 struct amdgpu_bo_va_mapping *mapping, *next;
1217 struct amdgpu_vm *vm = bo_va->vm;
1219 list_del(&bo_va->bo_list);
1221 spin_lock(&vm->status_lock);
1222 list_del(&bo_va->vm_status);
1223 spin_unlock(&vm->status_lock);
1225 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1226 list_del(&mapping->list);
1227 spin_lock(&vm->it_lock);
1228 interval_tree_remove(&mapping->it, &vm->va);
1229 spin_unlock(&vm->it_lock);
1230 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1231 spin_lock(&vm->freed_lock);
1232 list_add(&mapping->list, &vm->freed);
1233 spin_unlock(&vm->freed_lock);
1235 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1236 list_del(&mapping->list);
1237 spin_lock(&vm->it_lock);
1238 interval_tree_remove(&mapping->it, &vm->va);
1239 spin_unlock(&vm->it_lock);
1242 fence_put(bo_va->last_pt_update);
1243 mutex_destroy(&bo_va->mutex);
1248 * amdgpu_vm_bo_invalidate - mark the bo as invalid
1250 * @adev: amdgpu_device pointer
1252 * @bo: amdgpu buffer object
1254 * Mark @bo as invalid (cayman+).
1256 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1257 struct amdgpu_bo *bo)
1259 struct amdgpu_bo_va *bo_va;
1261 list_for_each_entry(bo_va, &bo->va, bo_list) {
1262 spin_lock(&bo_va->vm->status_lock);
1263 if (list_empty(&bo_va->vm_status))
1264 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1265 spin_unlock(&bo_va->vm->status_lock);
1270 * amdgpu_vm_init - initialize a vm instance
1272 * @adev: amdgpu_device pointer
1275 * Init @vm fields (cayman+).
1277 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1279 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1280 AMDGPU_VM_PTE_COUNT * 8);
1281 unsigned pd_size, pd_entries;
1284 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1286 vm->ids[i].flushed_updates = NULL;
1289 spin_lock_init(&vm->status_lock);
1290 INIT_LIST_HEAD(&vm->invalidated);
1291 INIT_LIST_HEAD(&vm->cleared);
1292 INIT_LIST_HEAD(&vm->freed);
1293 spin_lock_init(&vm->it_lock);
1294 spin_lock_init(&vm->freed_lock);
1295 pd_size = amdgpu_vm_directory_size(adev);
1296 pd_entries = amdgpu_vm_num_pdes(adev);
1298 /* allocate page table array */
1299 vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1300 if (vm->page_tables == NULL) {
1301 DRM_ERROR("Cannot allocate memory for page table array\n");
1305 vm->page_directory_fence = NULL;
1307 r = amdgpu_bo_create(adev, pd_size, align, true,
1308 AMDGPU_GEM_DOMAIN_VRAM,
1309 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1310 NULL, NULL, &vm->page_directory);
1313 r = amdgpu_bo_reserve(vm->page_directory, false);
1315 amdgpu_bo_unref(&vm->page_directory);
1316 vm->page_directory = NULL;
1319 r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1320 amdgpu_bo_unreserve(vm->page_directory);
1322 amdgpu_bo_unref(&vm->page_directory);
1323 vm->page_directory = NULL;
1331 * amdgpu_vm_fini - tear down a vm instance
1333 * @adev: amdgpu_device pointer
1336 * Tear down @vm (cayman+).
1337 * Unbind the VM and remove all bos from the vm bo list
1339 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1341 struct amdgpu_bo_va_mapping *mapping, *tmp;
1344 if (!RB_EMPTY_ROOT(&vm->va)) {
1345 dev_err(adev->dev, "still active bo inside vm\n");
1347 rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1348 list_del(&mapping->list);
1349 interval_tree_remove(&mapping->it, &vm->va);
1352 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1353 list_del(&mapping->list);
1357 for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1358 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1359 drm_free_large(vm->page_tables);
1361 amdgpu_bo_unref(&vm->page_directory);
1362 fence_put(vm->page_directory_fence);
1363 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1364 unsigned id = vm->ids[i].id;
1366 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1368 fence_put(vm->ids[i].flushed_updates);
1374 * amdgpu_vm_manager_fini - cleanup VM manager
1376 * @adev: amdgpu_device pointer
1378 * Cleanup the VM manager and free resources.
1380 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1384 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1385 fence_put(adev->vm_manager.ids[i].active);