*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
- struct amdgpu_fence *updates)
+ struct fence *updates)
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
- struct amdgpu_fence *flushed_updates = vm_id->flushed_updates;
+ struct fence *flushed_updates = vm_id->flushed_updates;
+ bool is_earlier = false;
+
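+	/* both fences are expected to come from the same context (see the
+	 * BUG_ON below), so the seqnos can be compared directly; the
+	 * unsigned subtraction keeps the check valid across wrap-around
+	 */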
+ if (flushed_updates && updates) {
+ BUG_ON(flushed_updates->context != updates->context);
+ is_earlier = (updates->seqno - flushed_updates->seqno <=
+ INT_MAX) ? true : false;
+ }
if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
- (updates && amdgpu_fence_is_earlier(flushed_updates, updates))) {
+ is_earlier) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
- vm_id->flushed_updates = amdgpu_fence_ref(
- amdgpu_fence_later(flushed_updates, updates));
- amdgpu_fence_unref(&flushed_updates);
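+		/* remember the newest page table update fence, so we only
+		 * flush again for updates newer than it
+		 */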
+ if (is_earlier) {
+ vm_id->flushed_updates = fence_get(updates);
+ fence_put(flushed_updates);
+ }
+ if (!flushed_updates)
+ vm_id->flushed_updates = fence_get(updates);
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
}
}
}
-static int amdgpu_vm_free_job(
- struct amdgpu_cs_parser *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
{
int i;
for (i = 0; i < sched_job->num_ibs; i++)
return 0;
}
-static int amdgpu_vm_run_job(
- struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_bo_fence(sched_job->job_param.vm.bo,
- sched_job->ibs[sched_job->num_ibs -1].fence, true);
- return 0;
-}
-
/**
* amdgpu_vm_clear_bo - initially clear the page dir/table
*
struct amdgpu_bo *bo)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
- struct amdgpu_cs_parser *sched_job = NULL;
+ struct fence *fence = NULL;
struct amdgpu_ib *ib;
unsigned entries;
uint64_t addr;
amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
amdgpu_vm_pad_ib(adev, ib);
WARN_ON(ib->length_dw > 64);
-
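+	/* submit the clear through the kernel IB submit helper and fence
+	 * the BO with the resulting fence before dropping our reference
+	 */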
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (!r)
+ amdgpu_bo_fence(bo, fence, true);
+ fence_put(fence);
if (amdgpu_enable_scheduler) {
- int r;
- uint64_t v_seq;
- sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
- adev->kernel_ctx, ib, 1);
- if(!sched_job)
- goto error_free;
- sched_job->job_param.vm.bo = bo;
- sched_job->run_job = amdgpu_vm_run_job;
- sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
- amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx->rings[ring->idx].c_entity,
- sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
- v_seq,
- true,
- -1);
- if (r)
- DRM_ERROR("emit timeout\n");
-
amdgpu_bo_unreserve(bo);
return 0;
- } else {
- r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
- if (r)
- goto error_free;
- amdgpu_bo_fence(bo, ib->fence, true);
}
-
error_free:
amdgpu_ib_free(adev, ib);
kfree(ib);
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_ib *ib;
- struct amdgpu_cs_parser *sched_job = NULL;
+ struct fence *fence = NULL;
int r;
amdgpu_vm_pad_ib(adev, ib);
amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &fence);
+ if (r)
+ goto error_free;
- if (amdgpu_enable_scheduler) {
- int r;
- uint64_t v_seq;
- sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
- adev->kernel_ctx,
- ib, 1);
- if(!sched_job)
- goto error_free;
- sched_job->job_param.vm.bo = pd;
- sched_job->run_job = amdgpu_vm_run_job;
- sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
- amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx->rings[ring->idx].c_entity,
- sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
- v_seq,
- true,
- -1);
- if (r)
- DRM_ERROR("emit timeout\n");
- } else {
- r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, ib);
- return r;
- }
- amdgpu_bo_fence(pd, ib->fence, true);
- }
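+	/* fence the page directory and remember the last directory update,
+	 * so later submissions can be synced against it
+	 */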
+ amdgpu_bo_fence(pd, fence, true);
+ fence_put(vm->page_directory_fence);
+ vm->page_directory_fence = fence_get(fence);
+ fence_put(fence);
}
if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
return 0;
error_free:
- if (sched_job)
- kfree(sched_job);
amdgpu_ib_free(adev, ib);
kfree(ib);
- return -ENOMEM;
+ return r;
}
/**
{
uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
+ void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned count = 0;
uint64_t addr;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
uint64_t pte;
int r;
- amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
r = reservation_object_reserve_shared(pt->tbo.resv);
if (r)
return r;
*/
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
uint64_t start, uint64_t end,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
unsigned i;
amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
}
-static int amdgpu_vm_bo_update_mapping_run_job(
- struct amdgpu_cs_parser *sched_job)
-{
- struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
- amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
- sched_job->job_param.vm_mapping.start,
- sched_job->job_param.vm_mapping.last + 1,
- sched_job->ibs[sched_job->num_ibs -1].fence);
- if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
- }
- return 0;
-}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t addr, uint32_t gtt_flags,
- struct amdgpu_fence **fence)
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
unsigned nptes, ncmds, ndw;
uint32_t flags = gtt_flags;
struct amdgpu_ib *ib;
- struct amdgpu_cs_parser *sched_job = NULL;
+ struct fence *f = NULL;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
ib->length_dw = 0;
- if (!(flags & AMDGPU_PTE_VALID)) {
- unsigned i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_fence *f = vm->ids[i].last_id_use;
- r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
- if (r)
- return r;
- }
- }
-
r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
mapping->it.last + 1, addr + mapping->offset,
flags, gtt_flags);
amdgpu_vm_pad_ib(adev, ib);
WARN_ON(ib->length_dw > ndw);
+ r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+ &amdgpu_vm_free_job,
+ AMDGPU_FENCE_OWNER_VM,
+ &f);
+ if (r)
+ goto error_free;
- if (amdgpu_enable_scheduler) {
- int r;
- uint64_t v_seq;
- sched_job = amdgpu_cs_parser_create(adev, AMDGPU_FENCE_OWNER_VM,
- adev->kernel_ctx, ib, 1);
- if(!sched_job)
- goto error_free;
- sched_job->job_param.vm_mapping.vm = vm;
- sched_job->job_param.vm_mapping.start = mapping->it.start;
- sched_job->job_param.vm_mapping.last = mapping->it.last;
- sched_job->job_param.vm_mapping.fence = fence;
- sched_job->run_job = amdgpu_vm_bo_update_mapping_run_job;
- sched_job->free_job = amdgpu_vm_free_job;
- v_seq = atomic64_inc_return(&adev->kernel_ctx->rings[ring->idx].c_entity.last_queued_v_seq);
- sched_job->uf.sequence = v_seq;
- amd_sched_push_job(ring->scheduler,
- &adev->kernel_ctx->rings[ring->idx].c_entity,
- sched_job);
- r = amd_sched_wait_emit(&adev->kernel_ctx->rings[ring->idx].c_entity,
- v_seq,
- true,
- -1);
- if (r)
- DRM_ERROR("emit timeout\n");
- } else {
- r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
- if (r) {
- amdgpu_ib_free(adev, ib);
- return r;
- }
-
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, ib->fence);
- if (fence) {
- amdgpu_fence_unref(fence);
- *fence = amdgpu_fence_ref(ib->fence);
- }
-
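+	/* fence all page tables covered by the mapping and, if requested,
+	 * hand a reference to the update fence back to the caller
+	 */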
+ amdgpu_vm_fence_pts(vm, mapping->it.start,
+ mapping->it.last + 1, f);
+ if (fence) {
+ fence_put(*fence);
+ *fence = fence_get(f);
+ }
+ fence_put(f);
+ if (!amdgpu_enable_scheduler) {
amdgpu_ib_free(adev, ib);
kfree(ib);
}
return 0;
error_free:
- if (sched_job)
- kfree(sched_job);
amdgpu_ib_free(adev, ib);
kfree(ib);
- return -ENOMEM;
+ return r;
}
/**
addr = 0;
}
- if (addr == bo_va->addr)
- return 0;
-
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- list_for_each_entry(mapping, &bo_va->mappings, list) {
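+	/* if the BO was invalidated, the previously valid mappings need to
+	 * be updated as well, so move them over to the invalid list first
+	 */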
+ spin_lock(&vm->status_lock);
+ if (!list_empty(&bo_va->vm_status))
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ spin_unlock(&vm->status_lock);
+
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
flags, &bo_va->last_pt_update);
if (r)
return r;
}
- bo_va->addr = addr;
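+	/* everything was updated, so the mappings become valid again and a
+	 * bo_va without backing memory is tracked on the cleared list
+	 */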
spin_lock(&vm->status_lock);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->vm_status);
+ if (!mem)
+ list_add(&bo_va->vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
return 0;
spin_unlock(&vm->status_lock);
if (bo_va)
- r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
return r;
}
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->mappings);
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->mappings);
+ list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_map(bo_va, mapping);
- bo_va->addr = 0;
-
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
eaddr >>= amdgpu_vm_block_size;
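+	/* page tables are only accessed by the GPU, so they can live in
+	 * VRAM that is not CPU accessible
+	 */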
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, &pt);
if (r)
goto error_free;
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->vm;
+ bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- list_for_each_entry(mapping, &bo_va->mappings, list) {
+ list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
}
- if (&mapping->list == &bo_va->mappings) {
- amdgpu_bo_unreserve(bo_va->bo);
- return -ENOENT;
+ if (&mapping->list == &bo_va->valids) {
+ valid = false;
+
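+	/* not found among the valid mappings, check the invalid ones */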
+ list_for_each_entry(mapping, &bo_va->invalids, list) {
+ if (mapping->it.start == saddr)
+ break;
+ }
+
+ if (&mapping->list == &bo_va->invalids) {
+ amdgpu_bo_unreserve(bo_va->bo);
+ return -ENOENT;
+ }
}
mutex_lock(&vm->mutex);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr) {
- /* clear the old address */
+ if (valid)
list_add(&mapping->list, &vm->freed);
- } else {
+ else
kfree(mapping);
- }
mutex_unlock(&vm->mutex);
amdgpu_bo_unreserve(bo_va->bo);
list_del(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
- list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
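+	/* valid mappings still have PTEs set up and go onto the freed list
+	 * for clearing; invalid ones can simply be freed
+	 */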
+ list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
interval_tree_remove(&mapping->it, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (bo_va->addr)
- list_add(&mapping->list, &vm->freed);
- else
- kfree(mapping);
+ list_add(&mapping->list, &vm->freed);
+ }
+ list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+ list_del(&mapping->list);
+ interval_tree_remove(&mapping->it, &vm->va);
+ kfree(mapping);
}
- amdgpu_fence_unref(&bo_va->last_pt_update);
+
+ fence_put(bo_va->last_pt_update);
kfree(bo_va);
mutex_unlock(&vm->mutex);
struct amdgpu_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->addr) {
- spin_lock(&bo_va->vm->status_lock);
- list_del(&bo_va->vm_status);
+ spin_lock(&bo_va->vm->status_lock);
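+		/* only queue the bo_va if it isn't already on a status list */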
+ if (list_empty(&bo_va->vm_status))
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
- }
+ spin_unlock(&bo_va->vm->status_lock);
}
}
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
pd_size = amdgpu_vm_directory_size(adev);
return -ENOMEM;
}
+ vm->page_directory_fence = NULL;
+
r = amdgpu_bo_create(adev, pd_size, align, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
NULL, &vm->page_directory);
if (r)
return r;
kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory);
+ fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+ fence_put(vm->ids[i].flushed_updates);
amdgpu_fence_unref(&vm->ids[i].last_id_use);
}