Merge drm/drm-next into drm-misc-next

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0ffdf847cad0a2e7c4575be981c8b72666c3c9dd..4a3e3f72e12774bfd7ec951d5a17d2d6d7b50918 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+
 #include <linux/dma-fence-array.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/idr.h>
@@ -1717,6 +1718,51 @@ error_unlock:
        return r;
 }
 
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
+                               uint64_t *gtt_mem, uint64_t *cpu_mem)
+{
+       struct amdgpu_bo_va *bo_va, *tmp;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       spin_lock(&vm->invalidated_lock);
+       list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+                               gtt_mem, cpu_mem);
+       }
+       spin_unlock(&vm->invalidated_lock);
+}
+
 /**
  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
  *
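
For context, the new helper walks every per-VM status list (idle, evicted, relocated, moved, invalidated, done) and has amdgpu_bo_get_memory() add each BO's size to the caller-provided counters, so a consumer must zero the totals first. A minimal, hypothetical caller sketch (not part of this patch; "m" is assumed to be a seq_file from a debugfs-style dump) might look like:

	/* Hypothetical caller sketch: aggregate per-VM memory usage.
	 * The counters must start at zero because amdgpu_vm_get_memory()
	 * only accumulates into them.
	 */
	uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;

	amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
	seq_printf(m, "vram mem:\t%llu KiB\n", vram_mem / 1024ULL);
	seq_printf(m, "gtt mem:\t%llu KiB\n", gtt_mem / 1024ULL);
	seq_printf(m, "cpu mem:\t%llu KiB\n", cpu_mem / 1024ULL);
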
@@ -3148,6 +3193,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 {
        unsigned i;
 
+       /* Concurrent flushes are only possible starting with Vega10 and
+        * are broken on Navi10 and Navi14.
+        */
+       adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+                                             adev->asic_type == CHIP_NAVI10 ||
+                                             adev->asic_type == CHIP_NAVI14);
        amdgpu_vmid_mgr_init(adev);
 
        adev->vm_manager.fence_context =
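
The new concurrent_flush flag is intended to be consulted wherever a VM flush is scheduled: pre-Vega10 ASICs, plus Navi10 and Navi14, must serialize their flushes. A hypothetical helper (name and placement assumed, not part of this patch) shows the intended check:

	/* Hypothetical helper sketch: returns true when this ASIC cannot
	 * run VM flushes concurrently and must serialize them instead.
	 */
	static bool amdgpu_vm_flush_needs_serialization(struct amdgpu_device *adev)
	{
		return !adev->vm_manager.concurrent_flush;
	}
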