diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f0e6fafd093856f05b7adf457c34003d8281a688..0768c868698365785a3ebc36fdcf92c773d2a4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -300,7 +300,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
 {
        spin_lock(&vm_bo->vm->invalidated_lock);
-       list_del_init(&vm_bo->vm_status);
+       list_move(&vm_bo->vm_status, &vm_bo->vm->done);
        spin_unlock(&vm_bo->vm->invalidated_lock);
 }
 
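The hunk above stops dropping a finished BO from its per-VM state list and instead moves it onto a new "done" list (initialized and consumed by the debugfs dump added later in this patch). Below is a minimal, self-contained userspace sketch of the list_move() pattern; it reimplements just enough of the kernel's struct list_head for illustration and is not the real <linux/list.h>.

	#include <stdio.h>

	struct list_head { struct list_head *prev, *next; };

	static void list_init(struct list_head *h) { h->prev = h->next = h; }

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	static void list_add(struct list_head *e, struct list_head *h)
	{
		e->next = h->next;
		e->prev = h;
		h->next->prev = e;
		h->next = e;
	}

	/* Unlink from whatever list the entry is on, relink at the head of
	 * another - what amdgpu_vm_bo_done() now does with vm->done. */
	static void list_move(struct list_head *e, struct list_head *h)
	{
		list_del(e);
		list_add(e, h);
	}

	int main(void)
	{
		struct list_head invalidated, done, bo;

		list_init(&invalidated);
		list_init(&done);
		list_add(&bo, &invalidated);

		list_move(&bo, &done);	/* BO is now tracked on "done" */
		printf("done list %s\n", done.next == &bo ? "holds BO" : "empty");
		return 0;
	}
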
@@ -1502,6 +1502,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 
                        pt = cursor.entry->base.bo;
                        shift = parent_shift;
+                       frag_end = max(frag_end, ALIGN(frag_start + 1,
+                                  1ULL << shift));
                }
 
                /* Looks good so far, calculate parameters for the update */
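The clamp added above guarantees forward progress when the walk pops back up to a parent level: frag_end is raised to at least the next boundary aligned to 1ULL << shift beyond frag_start, so the inner update loop cannot stall. A worked example of the arithmetic; frag_start and shift are made-up sample values:

	#include <stdint.h>
	#include <stdio.h>

	/* Same rounding the kernel ALIGN() macro does for a power-of-two a. */
	#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
	#define MAX_U64(a, b)	 ((a) > (b) ? (a) : (b))

	int main(void)
	{
		uint64_t frag_start = 0x12345, frag_end = 0x12345; /* stalled */
		unsigned int shift = 9; /* one parent entry covers 2^9 pages */

		frag_end = MAX_U64(frag_end,
				   ALIGN_POW2(frag_start + 1, 1ULL << shift));

		/* frag_end is now 0x12400, strictly past frag_start. */
		printf("frag_end = 0x%llx\n", (unsigned long long)frag_end);
		return 0;
	}
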
@@ -1513,19 +1515,26 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                entry_end = min(entry_end, end);
 
                do {
+                       struct amdgpu_vm *vm = params->vm;
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;
+                       uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
 
                        /* This can happen when we set higher level PDs to
                         * silent to stop fault floods.
                         */
                        nptes = max(nptes, 1u);
+
+                       trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+                                                   nptes, dst, incr, upd_flags,
+                                                   vm->task_info.pid,
+                                                   vm->immediate.fence_context);
                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
-                                              flags | AMDGPU_PTE_FRAG(frag));
+                                              upd_flags);
 
                        pe_start += nptes * 8;
-                       dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+                       dst += nptes * incr;
 
                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
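Advancing dst by nptes * incr replaces a stride hard-coded as AMDGPU_GPU_PAGE_SIZE << shift with whatever increment the caller actually selected. The two agree in the common case, which the sketch below illustrates; the page-size constant and sample values are assumptions for the example:

	#include <stdint.h>
	#include <stdio.h>

	#define GPU_PAGE_SIZE 4096ULL	/* stand-in for AMDGPU_GPU_PAGE_SIZE */

	int main(void)
	{
		unsigned int nptes = 3, shift = 9;
		uint64_t incr = GPU_PAGE_SIZE << shift;	/* caller's stride */

		uint64_t old_adv = (uint64_t)nptes * GPU_PAGE_SIZE << shift;
		uint64_t new_adv = nptes * incr;

		/* Identical here, but the new form follows incr even when it
		 * no longer matches GPU_PAGE_SIZE << shift. */
		printf("old %llu, new %llu\n",
		       (unsigned long long)old_adv,
		       (unsigned long long)new_adv);
		return 0;
	}
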
@@ -1561,7 +1570,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
- * @adev: amdgpu_device pointer
+ * @adev: amdgpu_device pointer of the VM
+ * @bo_adev: amdgpu_device pointer of the mapped BO
  * @vm: requested vm
  * @immediate: immediate submission in a page fault
  * @unlocked: unlocked invalidation during MM callback
@@ -1569,7 +1579,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
- * @addr: addr to set the area to
+ * @offset: offset into nodes and pages_addr
+ * @nodes: array of drm_mm_nodes with the MC addresses
  * @pages_addr: DMA addresses to use for mapping
  * @fence: optional resulting fence
  *
@@ -1579,15 +1590,18 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+                                      struct amdgpu_device *bo_adev,
                                       struct amdgpu_vm *vm, bool immediate,
                                       bool unlocked, struct dma_resv *resv,
                                       uint64_t start, uint64_t last,
-                                      uint64_t flags, uint64_t addr,
+                                      uint64_t flags, uint64_t offset,
+                                      struct drm_mm_node *nodes,
                                       dma_addr_t *pages_addr,
                                       struct dma_fence **fence)
 {
        struct amdgpu_vm_update_params params;
        enum amdgpu_sync_mode sync_mode;
+       uint64_t pfn;
        int r;
 
        memset(&params, 0, sizeof(params));
@@ -1605,6 +1619,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        else
                sync_mode = AMDGPU_SYNC_EXPLICIT;
 
+       pfn = offset >> PAGE_SHIFT;
+       if (nodes) {
+               while (pfn >= nodes->size) {
+                       pfn -= nodes->size;
+                       ++nodes;
+               }
+       }
+
        amdgpu_vm_eviction_lock(vm);
        if (vm->evicting) {
                r = -EBUSY;
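The prologue added above turns the byte offset into a page index and then walks the drm_mm_node array until that index falls inside the node containing it (node sizes are counted in pages). The same walk over a plain array, as a self-contained sketch; struct mm_node is a stand-in for struct drm_mm_node with only the field the walk needs:

	#include <stdint.h>
	#include <stdio.h>

	struct mm_node { uint64_t size; /* node size in pages */ };

	int main(void)
	{
		struct mm_node nodes[] = { { 16 }, { 8 }, { 32 } };
		struct mm_node *node = nodes;
		uint64_t pfn = 20;	/* page offset into the whole range */

		while (pfn >= node->size) {
			pfn -= node->size;
			++node;
		}

		/* pfn 20 lands 4 pages into the second node (16 + 8 + 32). */
		printf("node %td, offset %llu pages\n",
		       node - nodes, (unsigned long long)pfn);
		return 0;
	}
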
@@ -1623,105 +1645,47 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (r)
                goto error_unlock;
 
-       r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
-       if (r)
-               goto error_unlock;
-
-       r = vm->update_funcs->commit(&params, fence);
-
-error_unlock:
-       amdgpu_vm_eviction_unlock(vm);
-       return r;
-}
-
-/**
- * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
- *
- * @adev: amdgpu_device pointer
- * @resv: fences we need to sync to
- * @pages_addr: DMA addresses to use for mapping
- * @vm: requested vm
- * @mapping: mapped range and flags to use for the update
- * @flags: HW flags for the mapping
- * @bo_adev: amdgpu_device pointer that bo actually been allocated
- * @nodes: array of drm_mm_nodes with the MC addresses
- * @fence: optional resulting fence
- *
- * Split the mapping into smaller chunks so that each update fits
- * into a SDMA IB.
- *
- * Returns:
- * 0 for success, -EINVAL for failure.
- */
-static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-                                     struct dma_resv *resv,
-                                     dma_addr_t *pages_addr,
-                                     struct amdgpu_vm *vm,
-                                     struct amdgpu_bo_va_mapping *mapping,
-                                     uint64_t flags,
-                                     struct amdgpu_device *bo_adev,
-                                     struct drm_mm_node *nodes,
-                                     struct dma_fence **fence)
-{
-       unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
-       uint64_t pfn, start = mapping->start;
-       int r;
-
-       /* Normally, bo_va->flags only contains the READABLE and WRITEABLE
-        * bits here, but filter the flags just in case.
-        */
-       if (!(mapping->flags & AMDGPU_PTE_READABLE))
-               flags &= ~AMDGPU_PTE_READABLE;
-       if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
-               flags &= ~AMDGPU_PTE_WRITEABLE;
-
-       /* Apply ASIC specific mapping flags */
-       amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
-
-       trace_amdgpu_vm_bo_update(mapping);
-
-       pfn = mapping->offset >> PAGE_SHIFT;
-       if (nodes) {
-               while (pfn >= nodes->size) {
-                       pfn -= nodes->size;
-                       ++nodes;
-               }
-       }
-
        do {
-               dma_addr_t *dma_addr = NULL;
-               uint64_t max_entries;
-               uint64_t addr, last;
+               uint64_t tmp, num_entries, addr;
 
-               max_entries = mapping->last - start + 1;
+
+               num_entries = last - start + 1;
                if (nodes) {
                        addr = nodes->start << PAGE_SHIFT;
-                       max_entries = min((nodes->size - pfn) *
-                               AMDGPU_GPU_PAGES_IN_CPU_PAGE, max_entries);
+                       num_entries = min((nodes->size - pfn) *
+                               AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries);
                } else {
                        addr = 0;
                }
 
                if (pages_addr) {
-                       uint64_t count;
+                       bool contiguous = true;
 
-                       for (count = 1;
-                            count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
-                            ++count) {
-                               uint64_t idx = pfn + count;
+                       if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
+                               uint64_t count;
 
-                               if (pages_addr[idx] !=
-                                   (pages_addr[idx - 1] + PAGE_SIZE))
-                                       break;
+                               contiguous = pages_addr[pfn + 1] ==
+                                       pages_addr[pfn] + PAGE_SIZE;
+
+                               tmp = num_entries /
+                                       AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+                               for (count = 2; count < tmp; ++count) {
+                                       uint64_t idx = pfn + count;
+
+                                       if (contiguous != (pages_addr[idx] ==
+                                           pages_addr[idx - 1] + PAGE_SIZE))
+                                               break;
+                               }
+                               num_entries = count *
+                                       AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }
 
-                       if (count < min_linear_pages) {
+                       if (!contiguous) {
                                addr = pfn << PAGE_SHIFT;
-                               dma_addr = pages_addr;
+                               params.pages_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count *
-                                       AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+                               params.pages_addr = NULL;
                        }
 
                } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
@@ -1729,23 +1693,25 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        addr += pfn << PAGE_SHIFT;
                }
 
-               last = start + max_entries - 1;
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
-                                               start, last, flags, addr,
-                                               dma_addr, fence);
+               tmp = start + num_entries;
+               r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags);
                if (r)
-                       return r;
+                       goto error_unlock;
 
-               pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+               pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
                }
-               start = last + 1;
+               start = tmp;
 
-       } while (unlikely(start != mapping->last + 1));
+       } while (unlikely(start != last + 1));
 
-       return 0;
+       r = vm->update_funcs->commit(&params, fence);
+
+error_unlock:
+       amdgpu_vm_eviction_unlock(vm);
+       return r;
 }
 
 /**
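The core of the merged loop above is the contiguity check over pages_addr: it samples whether the first pair of DMA addresses is adjacent, extends the run for as long as that property keeps holding (or keeps failing), and then either maps the whole run from a single base address or falls back to per-page translation through params.pages_addr. A userspace sketch of that run classification, assuming one GPU page per CPU page (AMDGPU_GPU_PAGES_IN_CPU_PAGE == 1) and at least two entries:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096ULL

	/* Length of the leading run that is uniformly contiguous or
	 * uniformly scattered; num_entries must be >= 2. */
	static uint64_t run_length(const uint64_t *pages_addr, uint64_t pfn,
				   uint64_t num_entries, bool *contiguous)
	{
		uint64_t count;

		*contiguous = pages_addr[pfn + 1] == pages_addr[pfn] + PAGE_SIZE;
		for (count = 2; count < num_entries; ++count) {
			uint64_t idx = pfn + count;
			bool adjacent = pages_addr[idx] ==
					pages_addr[idx - 1] + PAGE_SIZE;

			if (adjacent != *contiguous)
				break;	/* the property flipped: run ends */
		}
		return count;
	}

	int main(void)
	{
		/* Three adjacent pages, then a jump. */
		uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000 };
		bool contiguous;
		uint64_t n = run_length(pages, 0, 4, &contiguous);

		printf("%s run of %llu pages\n",
		       contiguous ? "contiguous" : "scattered",
		       (unsigned long long)n);
		return 0;
	}
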
@@ -1823,9 +1789,26 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
        }
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
-                                              mapping, flags, bo_adev, nodes,
-                                              last_update);
+               uint64_t update_flags = flags;
+
+               /* Normally, bo_va->flags only contains the READABLE and
+                * WRITEABLE bits here, but filter the flags just in case.
+                */
+               if (!(mapping->flags & AMDGPU_PTE_READABLE))
+                       update_flags &= ~AMDGPU_PTE_READABLE;
+               if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+                       update_flags &= ~AMDGPU_PTE_WRITEABLE;
+
+               /* Apply ASIC specific mapping flags */
+               amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+
+               trace_amdgpu_vm_bo_update(mapping);
+
+               r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
+                                               resv, mapping->start,
+                                               mapping->last, update_flags,
+                                               mapping->offset, nodes,
+                                               pages_addr, last_update);
                if (r)
                        return r;
        }
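The permission filtering that previously lived in the deleted amdgpu_vm_bo_split_mapping() now runs once per mapping here: the hardware PTE flags are masked down to whatever the mapping itself grants before the update is submitted. A minimal sketch of the mask pattern; the bit positions are invented for the example, not the real AMDGPU_PTE_* encoding:

	#include <stdint.h>
	#include <stdio.h>

	#define PTE_READABLE	(1ULL << 1)
	#define PTE_WRITEABLE	(1ULL << 2)

	int main(void)
	{
		uint64_t flags = PTE_READABLE | PTE_WRITEABLE; /* HW allows */
		uint64_t mapping_flags = PTE_READABLE;	/* the VA grants */
		uint64_t update_flags = flags;

		if (!(mapping_flags & PTE_READABLE))
			update_flags &= ~PTE_READABLE;
		if (!(mapping_flags & PTE_WRITEABLE))
			update_flags &= ~PTE_WRITEABLE;

		/* Read-only result: the mapping never granted write. */
		printf("update_flags = 0x%llx\n",
		       (unsigned long long)update_flags);
		return 0;
	}
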
@@ -2033,9 +2016,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                    mapping->start < AMDGPU_GMC_HOLE_START)
                        init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-               r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
-                                               mapping->start, mapping->last,
-                                               init_pte_value, 0, NULL, &f);
+               r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
+                                               resv, mapping->start,
+                                               mapping->last, init_pte_value,
+                                               0, NULL, NULL, &f);
                amdgpu_vm_free_mapping(adev, vm, mapping, f);
                if (r) {
                        dma_fence_put(f);
@@ -2154,7 +2138,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 
 
 /**
- * amdgpu_vm_bo_insert_mapping - insert a new mapping
+ * amdgpu_vm_bo_insert_map - insert a new mapping
  *
  * @adev: amdgpu_device pointer
  * @bo_va: bo_va to store the address
@@ -2795,7 +2779,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  * 0 for success, error for failure.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                  int vm_context, unsigned int pasid)
+                  int vm_context, u32 pasid)
 {
        struct amdgpu_bo_param bp;
        struct amdgpu_bo *root;
@@ -2811,7 +2795,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        INIT_LIST_HEAD(&vm->invalidated);
        spin_lock_init(&vm->invalidated_lock);
        INIT_LIST_HEAD(&vm->freed);
-
+       INIT_LIST_HEAD(&vm->done);
 
        /* create scheduler entities for page table updates */
        r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
@@ -2966,7 +2950,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
  * 0 for success, -errno for errors.
  */
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                          unsigned int pasid)
+                          u32 pasid)
 {
        bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
        int r;
@@ -3264,7 +3248,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  * @pasid: PASID identifier for VM
  * @task_info: task_info to fill.
  */
-void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                         struct amdgpu_task_info *task_info)
 {
        struct amdgpu_vm *vm;
@@ -3308,7 +3292,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
  * Try to gracefully handle a VM fault. Return true if the fault was handled and
  * shouldn't be reported any more.
  */
-bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                            uint64_t addr)
 {
        struct amdgpu_bo *root;
@@ -3363,8 +3347,9 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
                value = 0;
        }
 
-       r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
-                                       addr + 1, flags, value, NULL, NULL);
+       r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
+                                       addr, flags, value, NULL, NULL,
+                                       NULL);
        if (r)
                goto error_unlock;
 
@@ -3380,3 +3365,99 @@ error_unref:
 
        return false;
 }
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * amdgpu_debugfs_vm_bo_info - print BO info for the VM
+ *
+ * @vm: Requested VM for printing BO info
+ * @m: debugfs file
+ *
+ * Print BO information in the debugfs file for the VM
+ */
+void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
+{
+       struct amdgpu_bo_va *bo_va, *tmp;
+       u64 total_idle = 0;
+       u64 total_evicted = 0;
+       u64 total_relocated = 0;
+       u64 total_moved = 0;
+       u64 total_invalidated = 0;
+       u64 total_done = 0;
+       unsigned int total_idle_objs = 0;
+       unsigned int total_evicted_objs = 0;
+       unsigned int total_relocated_objs = 0;
+       unsigned int total_moved_objs = 0;
+       unsigned int total_invalidated_objs = 0;
+       unsigned int total_done_objs = 0;
+       unsigned int id = 0;
+
+       seq_puts(m, "\tIdle BOs:\n");
+       list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       total_idle_objs = id;
+       id = 0;
+
+       seq_puts(m, "\tEvicted BOs:\n");
+       list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       total_evicted_objs = id;
+       id = 0;
+
+       seq_puts(m, "\tRelocated BOs:\n");
+       list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       total_relocated_objs = id;
+       id = 0;
+
+       seq_puts(m, "\tMoved BOs:\n");
+       list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       total_moved_objs = id;
+       id = 0;
+
+       seq_puts(m, "\tInvalidated BOs:\n");
+       spin_lock(&vm->invalidated_lock);
+       list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       total_invalidated_objs = id;
+       id = 0;
+
+       seq_puts(m, "\tDone BOs:\n");
+       list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
+               if (!bo_va->base.bo)
+                       continue;
+               total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+       }
+       spin_unlock(&vm->invalidated_lock);
+       total_done_objs = id;
+
+       seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
+                  total_idle_objs);
+       seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
+                  total_evicted_objs);
+       seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
+                  total_relocated_objs);
+       seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
+                  total_moved_objs);
+       seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
+                  total_invalidated_objs);
+       seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
+                  total_done_objs);
+}
+#endif
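
The dump added above walks each per-VM state list (taking invalidated_lock around the two lists it protects) and accumulates a byte total plus an object count per state. One plausible way to expose it is a debugfs seq_file show callback; the sketch below is hypothetical wiring, not part of this patch: the assumption that the file's private data points at a struct amdgpu_fpriv is invented for illustration, and the real integration lives elsewhere in amdgpu's debugfs code.

	/* Hypothetical kernel-side wiring, assuming the debugfs file was
	 * created with a struct amdgpu_fpriv as its private data. */
	static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
	{
		struct amdgpu_fpriv *fpriv = m->private;	/* assumed */
		struct amdgpu_vm *vm = &fpriv->vm;

		seq_printf(m, "pasid:\t%u\n", vm->pasid);
		amdgpu_debugfs_vm_bo_info(vm, m);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);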