diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8745d4cc7ae0d93729aac5437db95006f05ad8da..f68b7cdc370a8694bb489e97c82d27e490e3c3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -200,19 +200,29 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
-                    struct amdgpu_fence *updates)
+                    struct fence *updates)
 {
        uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-       struct amdgpu_fence *flushed_updates = vm_id->flushed_updates;
+       struct fence *flushed_updates = vm_id->flushed_updates;
+       bool is_earlier = false;
+
+       if (flushed_updates && updates) {
+               BUG_ON(flushed_updates->context != updates->context);
+               is_earlier = (updates->seqno - flushed_updates->seqno <=
+                             INT_MAX) ? true : false;
+       }
 
        if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-           (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) {
+           is_earlier) {
 
                trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-               vm_id->flushed_updates = amdgpu_fence_ref(
-                       amdgpu_fence_later(flushed_updates, updates));
-               amdgpu_fence_unref(&flushed_updates);
+               if (is_earlier) {
+                       vm_id->flushed_updates = fence_get(updates);
+                       fence_put(flushed_updates);
+               }
+               if (!flushed_updates)
+                       vm_id->flushed_updates = fence_get(updates);
                vm_id->pd_gpu_addr = pd_addr;
                amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
        }
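
A note on the open-coded check above (editor's illustration, not part of the patch): fence->seqno is a 32-bit counter, so "updates->seqno - flushed_updates->seqno" is evaluated modulo 2^32. As long as the two fences were issued less than 2^31 steps apart, the difference is <= INT_MAX exactly when updates was issued at or after flushed_updates, even if the counter wrapped in between; that is what the is_earlier flag encodes. A minimal stand-alone sketch of the idea:

	#include <stdbool.h>
	#include <stdint.h>
	#include <limits.h>

	/* wrap-safe "a was issued at or after b" test for 32-bit sequence numbers */
	static bool seq_at_or_after(uint32_t a, uint32_t b)
	{
		return a - b <= INT_MAX;	/* difference taken modulo 2^32 */
	}

	/* e.g. seq_at_or_after(3, 0xfffffffeu) is true: 3 - 0xfffffffe wraps to 5 */
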
@@ -306,8 +316,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
        }
 }
 
-static int amdgpu_vm_free_job(
-       struct amdgpu_cs_parser *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
 {
        int i;
        for (i = 0; i < sched_job->num_ibs; i++)
@@ -316,14 +325,6 @@ static int amdgpu_vm_free_job(
        return 0;
 }
 
-static int amdgpu_vm_run_job(
-       struct amdgpu_cs_parser *sched_job)
-{
-       amdgpu_bo_fence(sched_job->job_param.vm.bo,
-                       &sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
-       return 0;
-}
-
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -334,7 +335,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_bo *bo)
 {
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-       struct amdgpu_cs_parser *sched_job = NULL;
+       struct fence *fence = NULL;
        struct amdgpu_ib *ib;
        unsigned entries;
        uint64_t addr;
@@ -368,38 +369,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
        amdgpu_vm_pad_ib(adev, ib);
        WARN_ON(ib->length_dw > 64);
-
+       r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+                                                &amdgpu_vm_free_job,
+                                                AMDGPU_FENCE_OWNER_VM,
+                                                &fence);
+       if (!r)
+               amdgpu_bo_fence(bo, fence, true);
+       fence_put(fence);
        if (amdgpu_enable_scheduler) {
-               int r;
-               uint64_t v_seq;
-               sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-                                                   adev->kernel_ctx, ib, 1);
-               if(!sched_job)
-                       goto error_free;
-               sched_job->job_param.vm.bo = bo;
-               sched_job->run_job = amdgpu_vm_run_job;
-               sched_job->free_job = amdgpu_vm_free_job;
-               v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-               ib->sequence = v_seq;
-               amd_sched_push_job(ring->scheduler,
-                                  &adev->kernel_ctx->rings[ring->idx].c_entity,
-                                  sched_job);
-               r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                       v_seq,
-                                       false,
-                                       -1);
-               if (r)
-                       DRM_ERROR("emit timeout\n");
-
                amdgpu_bo_unreserve(bo);
                return 0;
-       } else {
-               r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-               if (r)
-                       goto error_free;
-               amdgpu_bo_fence(bo, &ib->fence->base, true);
        }
-
 error_free:
        amdgpu_ib_free(adev, ib);
        kfree(ib);
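
The rewritten path above relies on a simple reference convention for the fence returned through the helper's out parameter: the caller owns exactly one reference to it. A hedged sketch of that convention (submit_ib_and_get_fence() is a hypothetical stand-in, not a real amdgpu function):

	struct fence *fence = NULL;
	int r;

	r = submit_ib_and_get_fence(&fence);		/* hypothetical; returns a fence the caller must put */
	if (!r)
		amdgpu_bo_fence(bo, fence, true);	/* the reservation object takes its own reference */
	fence_put(fence);				/* drop the local reference */

Because fence_put(NULL) is a no-op, the error and success paths can share the final put without an extra branch.
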
@@ -456,7 +436,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct amdgpu_ib *ib;
-       struct amdgpu_cs_parser *sched_job = NULL;
+       struct fence *fence = NULL;
 
        int r;
 
@@ -518,37 +498,17 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                amdgpu_vm_pad_ib(adev, ib);
                amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
                WARN_ON(ib->length_dw > ndw);
+               r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+                                                        &amdgpu_vm_free_job,
+                                                        AMDGPU_FENCE_OWNER_VM,
+                                                        &fence);
+               if (r)
+                       goto error_free;
 
-               if (amdgpu_enable_scheduler) {
-                       int r;
-                       uint64_t v_seq;
-                       sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-                                                           adev->kernel_ctx,
-                                                           ib, 1);
-                       if(!sched_job)
-                               goto error_free;
-                       sched_job->job_param.vm.bo = pd;
-                       sched_job->run_job = amdgpu_vm_run_job;
-                       sched_job->free_job = amdgpu_vm_free_job;
-                       v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-                       ib->sequence = v_seq;
-                       amd_sched_push_job(ring->scheduler,
-                                          &adev->kernel_ctx->rings[ring->idx].c_entity,
-                                          sched_job);
-                       r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                               v_seq,
-                                               false,
-                                               -1);
-                       if (r)
-                               DRM_ERROR("emit timeout\n");
-               } else {
-                       r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-                       if (r) {
-                               amdgpu_ib_free(adev, ib);
-                               return r;
-                       }
-                       amdgpu_bo_fence(pd, &ib->fence->base, true);
-               }
+               amdgpu_bo_fence(pd, fence, true);
+               fence_put(vm->page_directory_fence);
+               vm->page_directory_fence = fence_get(fence);
+               fence_put(fence);
        }
 
        if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -559,11 +519,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
        return 0;
 
 error_free:
-       if (sched_job)
-               kfree(sched_job);
        amdgpu_ib_free(adev, ib);
        kfree(ib);
-       return -ENOMEM;
+       return r;
 }
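
The page directory update now also caches its fence on the VM. The three calls follow the usual replace-a-cached-fence idiom; an annotated restatement (the comments are the editor's, not from the patch):

	fence_put(vm->page_directory_fence);		/* drop whatever was cached before, NULL-safe  */
	vm->page_directory_fence = fence_get(fence);	/* the VM keeps its own long-lived reference   */
	fence_put(fence);				/* release the submit path's local reference   */

Keeping that reference around presumably lets later command submissions wait for the directory update to complete before the page directory is used.
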
 
 /**
@@ -669,9 +627,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 {
        uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
        uint64_t last_pte = ~0, last_dst = ~0;
+       void *owner = AMDGPU_FENCE_OWNER_VM;
        unsigned count = 0;
        uint64_t addr;
 
+       /* sync to everything on unmapping */
+       if (!(flags & AMDGPU_PTE_VALID))
+               owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -680,8 +643,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
                uint64_t pte;
                int r;
 
-               amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
-                                AMDGPU_FENCE_OWNER_VM);
+               amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
                r = reservation_object_reserve_shared(pt->tbo.resv);
                if (r)
                        return r;
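
The owner variable introduced in the previous hunk decides how much amdgpu_sync_resv() has to wait for. As I read that helper, it skips shared fences whose owner matches the owner passed in, so the selection works out as sketched below (comments are the editor's reading, not text from the patch):

	void *owner = AMDGPU_FENCE_OWNER_VM;		/* mapping: skip our own, already ordered VM updates */

	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;	/* unmapping: matches nothing, wait for every fence  */

	amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);

This also appears to be why amdgpu_vm_bo_update_mapping() further down can drop its explicit loop that synced to every ring's last_id_use fence before an unmap.
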
@@ -737,7 +699,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
                                uint64_t start, uint64_t end,
-                               struct amdgpu_fence *fence)
+                               struct fence *fence)
 {
        unsigned i;
 
@@ -745,23 +707,9 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
        end >>= amdgpu_vm_block_size;
 
        for (i = start; i <= end; ++i)
-               amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
+               amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
-static int amdgpu_vm_bo_update_mapping_run_job(
-       struct amdgpu_cs_parser *sched_job)
-{
-       struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
-       amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
-                           sched_job->job_param.vm_mapping.start,
-                           sched_job->job_param.vm_mapping.last + 1,
-                           sched_job->ibs[sched_job->num_ibs -1].fence);
-       if (fence) {
-               amdgpu_fence_unref(fence);
-               *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
-       }
-       return 0;
-}
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
@@ -781,13 +729,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm,
                                       struct amdgpu_bo_va_mapping *mapping,
                                       uint64_t addr, uint32_t gtt_flags,
-                                      struct amdgpu_fence **fence)
+                                      struct fence **fence)
 {
        struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
        unsigned nptes, ncmds, ndw;
        uint32_t flags = gtt_flags;
        struct amdgpu_ib *ib;
-       struct amdgpu_cs_parser *sched_job = NULL;
+       struct fence *f = NULL;
        int r;
 
        /* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -846,17 +794,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        ib->length_dw = 0;
 
-       if (!(flags & AMDGPU_PTE_VALID)) {
-               unsigned i;
-
-               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       struct amdgpu_fence *f = vm->ids[i].last_id_use;
-                       r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
-                       if (r)
-                               return r;
-               }
-       }
-
        r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
                                  mapping->it.last + 1, addr + mapping->offset,
                                  flags, gtt_flags);
@@ -869,56 +806,30 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        amdgpu_vm_pad_ib(adev, ib);
        WARN_ON(ib->length_dw > ndw);
+       r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+                                                &amdgpu_vm_free_job,
+                                                AMDGPU_FENCE_OWNER_VM,
+                                                &f);
+       if (r)
+               goto error_free;
 
-       if (amdgpu_enable_scheduler) {
-               int r;
-               uint64_t v_seq;
-               sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
-                                                   adev->kernel_ctx, ib, 1);
-               if(!sched_job)
-                       goto error_free;
-               sched_job->job_param.vm_mapping.vm = vm;
-               sched_job->job_param.vm_mapping.start = mapping->it.start;
-               sched_job->job_param.vm_mapping.last = mapping->it.last;
-               sched_job->job_param.vm_mapping.fence = fence;
-               sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
-               sched_job->free_job = amdgpu_vm_free_job;
-               v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
-               ib->sequence = v_seq;
-               amd_sched_push_job(ring->scheduler,
-                                  &adev->kernel_ctx->rings[ring->idx].c_entity,
-                                  sched_job);
-               r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
-                                       v_seq,
-                                       false,
-                                       -1);
-               if (r)
-                       DRM_ERROR("emit timeout\n");
-       } else {
-               r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
-               if (r) {
-                       amdgpu_ib_free(adev, ib);
-                       return r;
-               }
-
-               amdgpu_vm_fence_pts(vm, mapping->it.start,
-                                   mapping->it.last + 1, ib->fence);
-               if (fence) {
-                       amdgpu_fence_unref(fence);
-                       *fence = amdgpu_fence_ref(ib->fence);
-               }
-
+       amdgpu_vm_fence_pts(vm, mapping->it.start,
+                           mapping->it.last + 1, f);
+       if (fence) {
+               fence_put(*fence);
+               *fence = fence_get(f);
+       }
+       fence_put(f);
+       if (!amdgpu_enable_scheduler) {
                amdgpu_ib_free(adev, ib);
                kfree(ib);
        }
        return 0;
 
 error_free:
-       if (sched_job)
-               kfree(sched_job);
        amdgpu_ib_free(adev, ib);
        kfree(ib);
-       return -ENOMEM;
+       return r;
 }
 
 /**
@@ -966,6 +877,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        }
 
        spin_lock(&vm->status_lock);
+       list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
        if (!mem)
                list_add(&bo_va->vm_status, &vm->cleared);
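
list_splice_init() moves every entry of the source list onto the destination and reinitialises the source, so after a successful update all of the bo_va's mappings are reachable through valids and the invalids list is empty again. A tiny illustration of those semantics (not from the patch):

	#include <linux/list.h>

	LIST_HEAD(valids);
	LIST_HEAD(invalids);

	/* ... not-yet-applied mappings queued on invalids ... */

	list_splice_init(&invalids, &valids);	/* move everything over in O(1) */
	WARN_ON(!list_empty(&invalids));	/* source is an empty, reusable list head */
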
@@ -1038,7 +950,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        spin_unlock(&vm->status_lock);
 
        if (bo_va)
-               r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+               r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
 
        return r;
 }
@@ -1187,7 +1099,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
-                                    AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+                                    AMDGPU_GEM_DOMAIN_VRAM,
+                                    AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+                                    NULL, &pt);
                if (r)
                        goto error_free;
 
@@ -1318,7 +1232,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                kfree(mapping);
        }
 
-       amdgpu_fence_unref(&bo_va->last_pt_update);
+       fence_put(bo_va->last_pt_update);
        kfree(bo_va);
 
        mutex_unlock(&vm->mutex);
@@ -1384,8 +1298,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                return -ENOMEM;
        }
 
+       vm->page_directory_fence = NULL;
+
        r = amdgpu_bo_create(adev, pd_size, align, true,
-                            AMDGPU_GEM_DOMAIN_VRAM, 0,
+                            AMDGPU_GEM_DOMAIN_VRAM,
+                            AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
                             NULL, &vm->page_directory);
        if (r)
                return r;
@@ -1432,9 +1349,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        kfree(vm->page_tables);
 
        amdgpu_bo_unref(&vm->page_directory);
+       fence_put(vm->page_directory_fence);
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-               amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+               fence_put(vm->ids[i].flushed_updates);
                amdgpu_fence_unref(&vm->ids[i].last_id_use);
        }
 