1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2022 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_drv.h>
27 #include "amdgpu_trace.h"
28 #include "amdgpu_vm.h"
31 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
33 struct amdgpu_vm_pt_cursor {
35 struct amdgpu_vm_bo_base *parent;
36 struct amdgpu_vm_bo_base *entry;
41 * amdgpu_vm_pt_level_shift - return the addr shift for each level
43 * @adev: amdgpu_device pointer
47 * The number of bits the pfn needs to be right shifted for a level.
49 static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
56 return 9 * (AMDGPU_VM_PDB0 - level) +
57 adev->vm_manager.block_size;
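/*
 * Illustrative note (not from the driver source): with the common
 * block_size of 9 (512 PTEs per page table), this works out to a shift
 * of 9 for AMDGPU_VM_PDB0, 18 for PDB1 and 27 for PDB2, i.e. each
 * directory level indexes 9 more address bits than the level below it,
 * while AMDGPU_VM_PTB uses a shift of 0.
 */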
66 * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
68 * @adev: amdgpu_device pointer
72 * The number of entries in a page directory or page table.
74 static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
79 shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
80 if (level == adev->vm_manager.root_level)
81 /* For the root directory */
82 return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
84 else if (level != AMDGPU_VM_PTB)
85 /* Everything in between */
88 /* For the page tables on the leaves */
89 return AMDGPU_VM_PTE_COUNT(adev);
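/*
 * Sketch of the arithmetic (illustrative numbers, not from the source):
 * assuming a 48-bit address space (max_pfn = 1 << 36 with 4 KiB pages),
 * block_size 9 and the root at AMDGPU_VM_PDB2 (root shift 27), the root
 * PD gets round_up(1 << 36, 1 << 27) >> 27 = 512 entries; intermediate
 * directories always hold 512 entries and the page tables hold
 * AMDGPU_VM_PTE_COUNT(adev) entries (512 for block_size 9).
 */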
93 * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
95 * @adev: amdgpu_device pointer
99 * The mask to extract the entry number of a PD/PT from an address.
101 static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
104 if (level <= adev->vm_manager.root_level)
106 else if (level != AMDGPU_VM_PTB)
109 return AMDGPU_VM_PTE_COUNT(adev) - 1;
113 * amdgpu_vm_pt_size - returns the size of the page table in bytes
115 * @adev: amdgpu_device pointer
119 * The size of the BO for a page directory or page table in bytes.
121 static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
124 return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
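/*
 * Example (assuming 512 entries): 512 * 8 bytes = 4096 bytes, so a
 * directory or page table BO is typically a single 4 KiB GPU page.
 */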
128 * amdgpu_vm_pt_parent - get the parent page directory
130 * @pt: child page table
132 * Helper to get the parent entry for the child page table. NULL if we are at
133 * the root page directory.
135 static struct amdgpu_vm_bo_base *
136 amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
138 struct amdgpu_bo *parent = pt->bo->parent;
143 return parent->vm_bo;
147 * amdgpu_vm_pt_start - start PD/PT walk
149 * @adev: amdgpu_device pointer
150 * @vm: amdgpu_vm structure
151 * @start: start address of the walk
152 * @cursor: state to initialize
154 * Initialize an amdgpu_vm_pt_cursor to start a walk.
156 static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
157 struct amdgpu_vm *vm, uint64_t start,
158 struct amdgpu_vm_pt_cursor *cursor)
161 cursor->parent = NULL;
162 cursor->entry = &vm->root;
163 cursor->level = adev->vm_manager.root_level;
167 * amdgpu_vm_pt_descendant - go to child node
169 * @adev: amdgpu_device pointer
170 * @cursor: current state
172 * Walk to the child node of the current node.
174 * True if the walk was possible, false otherwise.
176 static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
177 struct amdgpu_vm_pt_cursor *cursor)
179 unsigned int mask, shift, idx;
181 if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
185 mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
186 shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
189 idx = (cursor->pfn >> shift) & mask;
190 cursor->parent = cursor->entry;
191 cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
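/*
 * Illustrative example (hypothetical numbers): descending from a PDB0
 * directory with block_size 9 gives shift = 9 and mask = 0x1ff, so for
 * cursor->pfn = 0x12345 the child index is (0x12345 >> 9) & 0x1ff = 0x91
 * and the cursor moves to entries[0x91] of the current directory.
 */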
196 * amdgpu_vm_pt_sibling - go to sibling node
198 * @adev: amdgpu_device pointer
199 * @cursor: current state
201 * Walk to the sibling node of the current node.
203 * True if the walk was possible, false otherwise.
205 static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
206 struct amdgpu_vm_pt_cursor *cursor)
209 unsigned int shift, num_entries;
210 struct amdgpu_bo_vm *parent;
212 /* Root doesn't have a sibling */
216 /* Go to our parent and see if we have a sibling */
217 shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
218 num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
219 parent = to_amdgpu_bo_vm(cursor->parent->bo);
221 if (cursor->entry == &parent->entries[num_entries - 1])
224 cursor->pfn += 1ULL << shift;
225 cursor->pfn &= ~((1ULL << shift) - 1);
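/*
 * Example (assuming block_size 9): stepping from one page table to its
 * sibling under a PDB0 directory uses the parent shift of 9, so the pfn
 * advances by 512 pages (2 MiB of address space with 4 KiB pages) and
 * is aligned down to that 512-page boundary.
 */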
231 * amdgpu_vm_pt_ancestor - go to parent node
233 * @cursor: current state
235 * Walk to the parent node of the current node.
237 * True if the walk was possible, false otherwise.
239 static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
245 cursor->entry = cursor->parent;
246 cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
251 * amdgpu_vm_pt_next - get next PD/PT in hierarchy
253 * @adev: amdgpu_device pointer
254 * @cursor: current state
256 * Walk the PD/PT tree to the next node.
258 static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
259 struct amdgpu_vm_pt_cursor *cursor)
261 /* First try a newborn child */
262 if (amdgpu_vm_pt_descendant(adev, cursor))
265 /* If that didn't work, try to find a sibling */
266 while (!amdgpu_vm_pt_sibling(adev, cursor)) {
267 /* No sibling, go to our parents and grandparents */
268 if (!amdgpu_vm_pt_ancestor(cursor)) {
276 * amdgpu_vm_pt_first_dfs - start a depth-first search
278 * @adev: amdgpu_device structure
279 * @vm: amdgpu_vm structure
280 * @start: optional cursor to start with
281 * @cursor: state to initialize
283 * Starts a depth-first traversal of the PD/PT tree.
285 static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
286 struct amdgpu_vm *vm,
287 struct amdgpu_vm_pt_cursor *start,
288 struct amdgpu_vm_pt_cursor *cursor)
293 amdgpu_vm_pt_start(adev, vm, 0, cursor);
295 while (amdgpu_vm_pt_descendant(adev, cursor))
300 * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
302 * @start: starting point for the search
303 * @entry: current entry
306 * True when the search should continue, false otherwise.
308 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
309 struct amdgpu_vm_bo_base *entry)
311 return entry && (!start || entry != start->entry);
315 * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
317 * @adev: amdgpu_device structure
318 * @cursor: current state
320 * Move the cursor to the next node in a depth-first search.
322 static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
323 struct amdgpu_vm_pt_cursor *cursor)
329 cursor->entry = NULL;
330 else if (amdgpu_vm_pt_sibling(adev, cursor))
331 while (amdgpu_vm_pt_descendant(adev, cursor))
334 amdgpu_vm_pt_ancestor(cursor);
338 * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
340 #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
341 for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
342 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
343 amdgpu_vm_pt_continue_dfs((start), (entry)); \
344 (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
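/*
 * Usage sketch (hypothetical helper, not part of the driver): count the
 * allocated page directories/tables below a VM root.
 *
 *	static unsigned int count_allocated_pts(struct amdgpu_device *adev,
 *						struct amdgpu_vm *vm)
 *	{
 *		struct amdgpu_vm_pt_cursor cursor;
 *		struct amdgpu_vm_bo_base *entry;
 *		unsigned int count = 0;
 *
 *		for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
 *			if (entry->bo)
 *				count++;
 *		return count;
 *	}
 *
 * The cursor is advanced before @entry is handed to the loop body, so
 * the body may free the current entry, as amdgpu_vm_pt_free_root() does.
 */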
347 * amdgpu_vm_pt_clear - initially clear the PDs/PTs
349 * @adev: amdgpu_device pointer
350 * @vm: VM to clear BO from
352 * @immediate: use an immediate update
354 * Root PD needs to be reserved when calling this.
357 * 0 on success, errno otherwise.
359 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
360 struct amdgpu_bo_vm *vmbo, bool immediate)
362 unsigned int level = adev->vm_manager.root_level;
363 struct ttm_operation_ctx ctx = { true, false };
364 struct amdgpu_vm_update_params params;
365 struct amdgpu_bo *ancestor = &vmbo->bo;
366 unsigned int entries;
367 struct amdgpu_bo *bo = &vmbo->bo;
371 /* Figure out our place in the hierarchy */
372 if (ancestor->parent) {
374 while (ancestor->parent->parent) {
376 ancestor = ancestor->parent;
380 entries = amdgpu_bo_size(bo) / 8;
382 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
387 struct amdgpu_bo *shadow = vmbo->shadow;
389 r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
394 if (!drm_dev_enter(adev_to_drm(adev), &idx))
397 r = vm->update_funcs->map_table(vmbo);
401 memset(&params, 0, sizeof(params));
404 params.immediate = immediate;
406 r = vm->update_funcs->prepare(&params, NULL);
412 uint64_t value = 0, flags = 0;
413 if (adev->asic_type >= CHIP_VEGA10) {
414 if (level != AMDGPU_VM_PTB) {
415 /* Handle leaf PDEs as PTEs */
416 flags |= AMDGPU_PDE_PTE_FLAG(adev);
417 amdgpu_gmc_get_vm_pde(adev, level,
420 /* Workaround for fault priority problem on GMC9 */
421 flags = AMDGPU_PTE_EXECUTABLE;
425 r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
430 r = vm->update_funcs->commit(&params, NULL);
437 * amdgpu_vm_pt_create - create bo for PD/PT
439 * @adev: amdgpu_device pointer
441 * @level: the page table level
442 * @immediate: use an immediate update
443 * @vmbo: pointer to the buffer object pointer
444 * @xcp_id: GPU partition id
446 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
447 int level, bool immediate, struct amdgpu_bo_vm **vmbo,
450 struct amdgpu_bo_param bp;
451 struct amdgpu_bo *bo;
452 struct dma_resv *resv;
453 unsigned int num_entries;
456 memset(&bp, 0, sizeof(bp));
458 bp.size = amdgpu_vm_pt_size(adev, level);
459 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
461 if (!adev->gmc.is_app_apu)
462 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
464 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
466 bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
467 bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
468 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
470 if (level < AMDGPU_VM_PTB)
471 num_entries = amdgpu_vm_pt_num_entries(adev, level);
475 bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
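/*
 * Note (illustrative arithmetic): struct_size() sizes the allocation for
 * the trailing entries[] array, roughly sizeof(struct amdgpu_bo_vm) +
 * num_entries * sizeof((*vmbo)->entries[0]), so a directory's bookkeeping
 * entries live in the same allocation as its amdgpu_bo_vm.
 */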
477 if (vm->use_cpu_for_update)
478 bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
480 bp.type = ttm_bo_type_kernel;
481 bp.no_wait_gpu = immediate;
482 bp.xcp_id_plus1 = xcp_id + 1;
485 bp.resv = vm->root.bo->tbo.base.resv;
487 r = amdgpu_bo_create_vm(adev, &bp, vmbo);
492 if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
493 (*vmbo)->shadow = NULL;
498 WARN_ON(dma_resv_lock(bo->tbo.base.resv,
501 memset(&bp, 0, sizeof(bp));
502 bp.size = amdgpu_vm_pt_size(adev, level);
503 bp.domain = AMDGPU_GEM_DOMAIN_GTT;
504 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
505 bp.type = ttm_bo_type_kernel;
506 bp.resv = bo->tbo.base.resv;
507 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
508 bp.xcp_id_plus1 = xcp_id + 1;
510 r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
513 dma_resv_unlock(bo->tbo.base.resv);
516 amdgpu_bo_unref(&bo);
520 amdgpu_bo_add_to_shadow_list(*vmbo);
526 * amdgpu_vm_pt_alloc - Allocate a specific page table
528 * @adev: amdgpu_device pointer
529 * @vm: VM to allocate page tables for
530 * @cursor: Which page table to allocate
531 * @immediate: use an immediate update
533 * Make sure a specific page table or directory is allocated.
536 * 1 if page table needed to be allocated, 0 if page table was already
537 * allocated, negative errno if an error occurred.
539 static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
540 struct amdgpu_vm *vm,
541 struct amdgpu_vm_pt_cursor *cursor,
544 struct amdgpu_vm_bo_base *entry = cursor->entry;
545 struct amdgpu_bo *pt_bo;
546 struct amdgpu_bo_vm *pt;
552 amdgpu_vm_eviction_unlock(vm);
553 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
554 vm->root.bo->xcp_id);
555 amdgpu_vm_eviction_lock(vm);
559 /* Keep a reference to the root directory to avoid
560 * freeing them up in the wrong order.
563 pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
564 amdgpu_vm_bo_base_init(entry, vm, pt_bo);
565 r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
572 amdgpu_bo_unref(&pt->shadow);
573 amdgpu_bo_unref(&pt_bo);
578 * amdgpu_vm_pt_free - free one PD/PT
580 * @entry: PDE to free
582 static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
584 struct amdgpu_bo *shadow;
589 entry->bo->vm_bo = NULL;
590 shadow = amdgpu_bo_shadowed(entry->bo);
592 ttm_bo_set_bulk_move(&shadow->tbo, NULL);
593 amdgpu_bo_unref(&shadow);
595 ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
597 spin_lock(&entry->vm->status_lock);
598 list_del(&entry->vm_status);
599 spin_unlock(&entry->vm->status_lock);
600 amdgpu_bo_unref(&entry->bo);
603 void amdgpu_vm_pt_free_work(struct work_struct *work)
605 struct amdgpu_vm_bo_base *entry, *next;
606 struct amdgpu_vm *vm;
609 vm = container_of(work, struct amdgpu_vm, pt_free_work);
611 spin_lock(&vm->status_lock);
612 list_splice_init(&vm->pt_freed, &pt_freed);
613 spin_unlock(&vm->status_lock);
615 /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
616 amdgpu_bo_reserve(vm->root.bo, true);
618 list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
619 amdgpu_vm_pt_free(entry);
621 amdgpu_bo_unreserve(vm->root.bo);
625 * amdgpu_vm_pt_free_list - free PD/PT levels
627 * @adev: amdgpu device structure
628 * @params: see amdgpu_vm_update_params definition
630 * Free the page directory objects saved in the flush list
632 void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
633 struct amdgpu_vm_update_params *params)
635 struct amdgpu_vm_bo_base *entry, *next;
636 struct amdgpu_vm *vm = params->vm;
637 bool unlocked = params->unlocked;
639 if (list_empty(&params->tlb_flush_waitlist))
643 spin_lock(&vm->status_lock);
644 list_splice_init(&params->tlb_flush_waitlist, &vm->pt_freed);
645 spin_unlock(&vm->status_lock);
646 schedule_work(&vm->pt_free_work);
650 list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
651 amdgpu_vm_pt_free(entry);
655 * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
657 * @params: parameters for the update
658 * @cursor: first PT entry to start the depth-first search from, must be non-NULL
660 * This list will be freed after TLB flush.
662 static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
663 struct amdgpu_vm_pt_cursor *cursor)
665 struct amdgpu_vm_pt_cursor seek;
666 struct amdgpu_vm_bo_base *entry;
668 spin_lock(&params->vm->status_lock);
669 for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
670 if (entry && entry->bo)
671 list_move(&entry->vm_status, &params->tlb_flush_waitlist);
674 /* enter start node now */
675 list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
676 spin_unlock(&params->vm->status_lock);
680 * amdgpu_vm_pt_free_root - free root PD
681 * @adev: amdgpu device structure
682 * @vm: amdgpu vm structure
684 * Free the root page directory and everything below it.
686 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
688 struct amdgpu_vm_pt_cursor cursor;
689 struct amdgpu_vm_bo_base *entry;
691 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
693 amdgpu_vm_pt_free(entry);
698 * amdgpu_vm_pde_update - update a single level in the hierarchy
700 * @params: parameters for the update
701 * @entry: entry to update
703 * Makes sure the requested entry in parent is up to date.
705 int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
706 struct amdgpu_vm_bo_base *entry)
708 struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
709 struct amdgpu_bo *bo, *pbo;
710 struct amdgpu_vm *vm = params->vm;
711 uint64_t pde, pt, flags;
714 if (WARN_ON(!parent))
718 for (level = 0, pbo = bo->parent; pbo; ++level)
721 level += params->adev->vm_manager.root_level;
722 amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
723 pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
724 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
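/*
 * Worked example (illustrative): the PDE location is the byte offset of
 * @entry within its parent directory, so an entry at index 5 lives at
 * pde = 5 * 8 = 40 bytes into the parent's BO, and the update writes a
 * single 8-byte PDE there.
 */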
729 * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
731 * @adev: amdgpu_device pointer
732 * @flags: pointer to PTE flags
734 * Update PTE no-retry flags when TF is enabled.
736 static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
740 * Update no-retry flags with the corresponding TF
741 * no-retry combination.
743 if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
744 *flags &= ~AMDGPU_VM_NORETRY_FLAGS;
745 *flags |= adev->gmc.noretry_flags;
750 * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
752 * Make sure to set the right flags for the PTEs at the desired level.
754 static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
755 struct amdgpu_bo_vm *pt,
757 uint64_t pe, uint64_t addr,
758 unsigned int count, uint32_t incr,
761 struct amdgpu_device *adev = params->adev;
763 if (level != AMDGPU_VM_PTB) {
764 flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
765 amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
767 } else if (adev->asic_type >= CHIP_VEGA10 &&
768 !(flags & AMDGPU_PTE_VALID) &&
769 !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
771 /* Workaround for fault priority problem on GMC9 */
772 flags |= AMDGPU_PTE_EXECUTABLE;
776 * Update no-retry flags to use the no-retry flag combination
777 * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
778 * does not work when TF is enabled. So, replace them with
779 * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
782 if (level == AMDGPU_VM_PTB)
783 amdgpu_vm_pte_update_noretry_flags(adev, &flags);
785 /* APUs mapping system memory may need different MTYPEs on different
786 * NUMA nodes. Only do this for contiguous ranges that can be assumed
787 * to be on the same NUMA node.
789 if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
790 adev->gmc.gmc_funcs->override_vm_pte_flags &&
791 num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
792 amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
794 params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
799 * amdgpu_vm_pte_fragment - get fragment for PTEs
801 * @params: see amdgpu_vm_update_params definition
802 * @start: first PTE to handle
803 * @end: last PTE to handle
804 * @flags: hw mapping flags
805 * @frag: resulting fragment size
806 * @frag_end: end of this fragment
808 * Returns the first possible fragment for the start and end address.
810 static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
811 uint64_t start, uint64_t end, uint64_t flags,
812 unsigned int *frag, uint64_t *frag_end)
815 * The MC L1 TLB supports variable sized pages, based on a fragment
816 * field in the PTE. When this field is set to a non-zero value, page
817 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
818 * flags are considered valid for all PTEs within the fragment range
819 * and corresponding mappings are assumed to be physically contiguous.
821 * The L1 TLB can store a single PTE for the whole fragment,
822 * significantly increasing the space available for translation
823 * caching. This leads to large improvements in throughput when the
824 * TLB is under pressure.
826 * The L2 TLB distributes small and large fragments into two
827 * asymmetric partitions. The large fragment cache is significantly
828 * larger. Thus, we try to use large fragments wherever possible.
829 * Userspace can support this by aligning virtual base address and
830 * allocation size to the fragment size.
832 * Starting with Vega10 the fragment size only controls the L1. The L2
833 * is now fed directly with small/huge/giant pages from the walker.
835 unsigned int max_frag;
837 if (params->adev->asic_type < CHIP_VEGA10)
838 max_frag = params->adev->vm_manager.fragment_size;
842 /* system pages are not contiguous */
843 if (params->pages_addr) {
849 /* This intentionally wraps around if no bit is set */
850 *frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
851 if (*frag >= max_frag) {
853 *frag_end = end & ~((1ULL << max_frag) - 1);
855 *frag_end = start + (1 << *frag);
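/*
 * Worked example (illustrative numbers only): for start pfn 0x200 and
 * end pfn 0x1000 the code picks
 *   *frag = min(ffs(0x200) - 1, fls64(0xe00) - 1) = min(9, 11) = 9,
 * i.e. a 2 MiB fragment (1 << (12 + 9) bytes), and since 9 is below
 * max_frag on current ASICs the first fragment ends at
 *   *frag_end = 0x200 + (1 << 9) = 0x400.
 */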
860 * amdgpu_vm_ptes_update - make sure that page tables are valid
862 * @params: see amdgpu_vm_update_params definition
863 * @start: start of GPU address range
864 * @end: end of GPU address range
865 * @dst: destination address to map to, the next dst inside the function
866 * @flags: mapping flags
868 * Update the page tables in the range @start - @end.
871 * 0 for success, -EINVAL for failure.
873 int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
874 uint64_t start, uint64_t end,
875 uint64_t dst, uint64_t flags)
877 struct amdgpu_device *adev = params->adev;
878 struct amdgpu_vm_pt_cursor cursor;
879 uint64_t frag_start = start, frag_end;
883 /* figure out the initial fragment */
884 amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
887 /* walk over the address space and update the PTs */
888 amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
889 while (cursor.pfn < end) {
890 unsigned int shift, parent_shift, mask;
891 uint64_t incr, entry_end, pe_start;
892 struct amdgpu_bo *pt;
894 if (!params->unlocked) {
895 /* make sure that the page tables covering the
896 * address range are actually allocated
898 r = amdgpu_vm_pt_alloc(params->adev, params->vm,
899 &cursor, params->immediate);
904 shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
905 parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
906 if (params->unlocked) {
907 /* Unlocked updates are only allowed on the leaves */
908 if (amdgpu_vm_pt_descendant(adev, &cursor))
910 } else if (adev->asic_type < CHIP_VEGA10 &&
911 (flags & AMDGPU_PTE_VALID)) {
912 /* No huge page support before GMC v9 */
913 if (cursor.level != AMDGPU_VM_PTB) {
914 if (!amdgpu_vm_pt_descendant(adev, &cursor))
918 } else if (frag < shift) {
919 /* We can't use this level when the fragment size is
920 * smaller than the address shift. Go to the next
921 * child entry and try again.
923 if (amdgpu_vm_pt_descendant(adev, &cursor))
925 } else if (frag >= parent_shift) {
926 /* If the fragment size is even larger than the parent
927 * shift we should go up one level and check it again.
929 if (!amdgpu_vm_pt_ancestor(&cursor))
934 pt = cursor.entry->bo;
936 /* We need all PDs and PTs for mapping something, */
937 if (flags & AMDGPU_PTE_VALID)
940 /* but unmapping something can happen at a higher
943 if (!amdgpu_vm_pt_ancestor(&cursor))
946 pt = cursor.entry->bo;
947 shift = parent_shift;
948 frag_end = max(frag_end, ALIGN(frag_start + 1,
952 /* Looks good so far, calculate parameters for the update */
953 incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
954 mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
955 pe_start = ((cursor.pfn >> shift) & mask) * 8;
956 entry_end = ((uint64_t)mask + 1) << shift;
957 entry_end += cursor.pfn & ~(entry_end - 1);
958 entry_end = min(entry_end, end);
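/*
 * Worked example (illustrative numbers): at the PTB level with
 * block_size 9 we get shift = 0 and mask = 0x1ff, so for
 * cursor.pfn = 0x345 the update starts at byte offset
 * pe_start = (0x345 & 0x1ff) * 8 = 0xa28 inside the PT and entry_end
 * becomes 0x400, i.e. the walk stops at the end of this PT's coverage
 * or at @end, whichever comes first.
 */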
961 struct amdgpu_vm *vm = params->vm;
962 uint64_t upd_end = min(entry_end, frag_end);
963 unsigned int nptes = (upd_end - frag_start) >> shift;
964 uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
966 /* This can happen when we set higher level PDs to
967 * silent to stop fault floods.
969 nptes = max(nptes, 1u);
971 trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
972 min(nptes, 32u), dst, incr,
974 vm->task_info ? vm->task_info->tgid : 0,
975 vm->immediate.fence_context);
976 amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
977 cursor.level, pe_start, dst,
978 nptes, incr, upd_flags);
980 pe_start += nptes * 8;
983 frag_start = upd_end;
984 if (frag_start >= frag_end) {
985 /* figure out the next fragment */
986 amdgpu_vm_pte_fragment(params, frag_start, end,
987 flags, &frag, &frag_end);
991 } while (frag_start < entry_end);
993 if (amdgpu_vm_pt_descendant(adev, &cursor)) {
994 /* Free all child entries.
995 * Update the tables with the flags and addresses and free up subsequent
996 * tables in the case of huge pages or freed up areas.
997 * This is the maximum you can free, because all other page tables are not
998 * completely covered by the range and so potentially still in use.
1000 while (cursor.pfn < frag_start) {
1001 /* Make sure previous mapping is freed */
1002 if (cursor.entry->bo) {
1003 params->needs_flush = true;
1004 amdgpu_vm_pt_add_list(params, &cursor);
1006 amdgpu_vm_pt_next(adev, &cursor);
1009 } else if (frag >= shift) {
1010 /* or just move on to the next on the same level. */
1011 amdgpu_vm_pt_next(adev, &cursor);
1019 * amdgpu_vm_pt_map_tables - make the BO of the root PD CPU accessible
1020 * @adev: amdgpu device structure
1021 * @vm: amdgpu vm structure
1023 * Make the root page directory and everything below it CPU accessible.
1025 int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1027 struct amdgpu_vm_pt_cursor cursor;
1028 struct amdgpu_vm_bo_base *entry;
1030 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
1032 struct amdgpu_bo_vm *bo;
1036 bo = to_amdgpu_bo_vm(entry->bo);
1037 r = vm->update_funcs->map_table(bo);