diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d4510807a692db603c02f66a0b6d19d829e938cb..6fc16eecf2dce5fc448afaa1681fbb086259ea35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -946,57 +946,42 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
                                        unsigned nptes, uint64_t dst,
                                        uint64_t flags)
 {
-       bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
        uint64_t pd_addr, pde;
 
        /* In the case of a mixed PT the PDE must point to it */
-       if (p->adev->asic_type < CHIP_VEGA10 ||
-           nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
-           p->src ||
-           !(flags & AMDGPU_PTE_VALID)) {
-
-               dst = amdgpu_bo_gpu_offset(entry->base.bo);
-               flags = AMDGPU_PTE_VALID;
-       } else {
+       if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
+           nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
                /* Set the huge page flag to stop scanning at this PDE */
                flags |= AMDGPU_PDE_PTE;
        }
 
-       if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
+       if (!(flags & AMDGPU_PDE_PTE)) {
+               if (entry->huge) {
+                       /* Add the entry to the relocated list to update it. */
+                       entry->huge = false;
+                       spin_lock(&p->vm->status_lock);
+                       list_move(&entry->base.vm_status, &p->vm->relocated);
+                       spin_unlock(&p->vm->status_lock);
+               }
                return;
-       entry->huge = !!(flags & AMDGPU_PDE_PTE);
+       }
 
+       entry->huge = true;
        amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
                               &dst, &flags);
 
-       if (use_cpu_update) {
-               /* In case a huge page is replaced with a system
-                * memory mapping, p->pages_addr != NULL and
-                * amdgpu_vm_cpu_set_ptes would try to translate dst
-                * through amdgpu_vm_map_gart. But dst is already a
-                * GPU address (of the page table). Disable
-                * amdgpu_vm_map_gart temporarily.
-                */
-               dma_addr_t *tmp;
-
-               tmp = p->pages_addr;
-               p->pages_addr = NULL;
-
+       if (p->func == amdgpu_vm_cpu_set_ptes) {
                pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-               pde = pd_addr + (entry - parent->entries) * 8;
-               amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
-
-               p->pages_addr = tmp;
        } else {
                if (parent->base.bo->shadow) {
                        pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
                        pde = pd_addr + (entry - parent->entries) * 8;
-                       amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+                       p->func(p, pde, dst, 1, 0, flags);
                }
                pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-               pde = pd_addr + (entry - parent->entries) * 8;
-               amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
        }
+       pde = pd_addr + (entry - parent->entries) * 8;
+       p->func(p, pde, dst, 1, 0, flags);
 }
 
 /**
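
Both the shadow write and the final p->func() call in the hunk above land on the same destination formula: the entry's index within the parent's entries[] array, times 8 bytes per PDE. A minimal standalone sketch of that offset computation; the struct below is a stand-in, not the real amdgpu_vm_pt:

#include <stdint.h>

/* Stand-in for struct amdgpu_vm_pt; only the array layout matters here. */
struct pt_entry_stub { uint64_t state; };

/* Byte address of an entry's PDE: index in the directory times 8,
 * mirroring "pde = pd_addr + (entry - parent->entries) * 8" above. */
static uint64_t pde_address(uint64_t pd_addr,
                            const struct pt_entry_stub *entry,
                            const struct pt_entry_stub *entries)
{
        return pd_addr + (uint64_t)(entry - entries) * 8;
}
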
@@ -1208,12 +1193,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        /* padding, etc. */
        ndw = 64;
 
-       /* one PDE write for each huge page */
-       if (vm->root.base.bo->shadow)
-               ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
-       else
-               ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
-
        if (pages_addr) {
                /* copy commands needed */
                ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
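
The removed lines reserved command-stream dwords (ndw) for one PDE write per possible huge page, doubled when the root PD has a shadow; with the PDE now written directly from amdgpu_vm_handle_huge_pages() via p->func, that reservation is no longer needed. The old estimate as standalone arithmetic, under assumed values (nptes = 512 and a VM block size of 9; both are illustrative, not taken from this diff):

#include <stdio.h>

int main(void)
{
        unsigned nptes = 512;          /* assumed: PTEs touched by the update */
        unsigned block_size = 9;       /* assumed: adev->vm_manager.block_size */
        unsigned pde_writes = (nptes >> block_size) + 1;

        printf("ndw, no shadow:   %u\n", pde_writes * 6);      /* 12 */
        printf("ndw, with shadow: %u\n", pde_writes * 6 * 2);  /* 24 */
        return 0;
}
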
@@ -1288,8 +1267,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
        amdgpu_job_free(job);
-       amdgpu_vm_invalidate_level(adev, vm, &vm->root,
-                                  adev->vm_manager.root_level);
        return r;
 }
 
@@ -1700,18 +1677,31 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->moved)) {
                struct amdgpu_bo_va *bo_va;
+               struct reservation_object *resv;
 
                bo_va = list_first_entry(&vm->moved,
                        struct amdgpu_bo_va, base.vm_status);
                spin_unlock(&vm->status_lock);
 
+               resv = bo_va->base.bo->tbo.resv;
+
                /* Per VM BOs never need to be cleared in the page tables */
-               clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+               if (resv == vm->root.base.bo->tbo.resv)
+                       clear = false;
+               /* Try to reserve the BO to avoid clearing its ptes */
+               else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+                       clear = false;
+               /* Somebody else is using the BO right now */
+               else
+                       clear = true;
 
                r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;
 
+               if (!clear && resv != vm->root.base.bo->tbo.resv)
+                       reservation_object_unlock(resv);
+
                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);
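
The hunk above turns the old unconditional resv comparison into a three-way decision: BOs sharing the root reservation are already protected, other BOs are opportunistically trylocked (skipped under amdgpu_vm_debug), and only contended BOs fall back to clear = true; a successful trylock is then paired with the reservation_object_unlock() after amdgpu_vm_bo_update(). The same ladder as a standalone sketch, with a pthread trylock standing in for reservation_object_trylock() and a hypothetical helper name:

#include <pthread.h>
#include <stdbool.h>

/* Returns true when the PTEs must be cleared.  On a false return via the
 * trylock branch, the caller now owns *resv and must unlock it later. */
static bool need_clear(pthread_mutex_t *resv, pthread_mutex_t *vm_resv,
                       bool debug)
{
        if (resv == vm_resv)
                return false;  /* per-VM BO: covered by the VM reservation */
        if (!debug && pthread_mutex_trylock(resv) == 0)
                return false;  /* reserved the BO, its PTEs can be kept */
        return true;           /* somebody else holds the BO right now */
}
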
@@ -2492,17 +2482,21 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
 
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-       spin_unlock(&adev->vm_manager.pasid_lock);
-       if (!vm)
+       if (!vm) {
                /* VM not found, can't track fault credit */
+               spin_unlock(&adev->vm_manager.pasid_lock);
                return true;
+       }
 
        /* No lock needed. Only accessed by IRQ handler */
-       if (!vm->fault_credit)
+       if (!vm->fault_credit) {
                /* Too many faults in this VM */
+               spin_unlock(&adev->vm_manager.pasid_lock);
                return false;
+       }
 
        vm->fault_credit--;
+       spin_unlock(&adev->vm_manager.pasid_lock);
        return true;
 }
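
The change above closes a window in which the old code dropped pasid_lock immediately after idr_find() and then dereferenced the VM it found, leaving the IRQ path racing against VM teardown; the fixed version holds the lock across the lookup and every access to vm->fault_credit, unlocking exactly once on each return path. The same shape as a standalone sketch (a pthread mutex and a stub lookup stand in for the kernel spinlock and IDR; all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct vm_stub { unsigned fault_credit; };

/* Stand-in for idr_find(&mgr->pasid_idr, pasid); must run under the lock. */
static struct vm_stub *pasid_lookup(unsigned pasid)
{
        (void)pasid;
        return NULL;
}

static bool fault_credit_ok(pthread_mutex_t *pasid_lock, unsigned pasid)
{
        struct vm_stub *vm;
        bool ret;

        pthread_mutex_lock(pasid_lock);
        vm = pasid_lookup(pasid);
        if (!vm)
                ret = true;             /* VM not found, nothing to track */
        else if (!vm->fault_credit)
                ret = false;            /* too many faults in this VM */
        else {
                vm->fault_credit--;     /* safe: lock still held */
                ret = true;
        }
        pthread_mutex_unlock(pasid_lock);  /* one unlock for every path */
        return ret;
}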
 