}
}
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
{
int i;
- for (i = 0; i < sched_job->num_ibs; i++)
- amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
- kfree(sched_job->ibs);
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
return 0;
}
{
uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
+ void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned count = 0;
uint64_t addr;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size;
uint64_t pte;
int r;
- amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
r = reservation_object_reserve_shared(pt->tbo.resv);
if (r)
return r;
return 0;
}
-/**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
- uint64_t start, uint64_t end,
- struct fence *fence)
-{
- unsigned i;
-
- start >>= amdgpu_vm_block_size;
- end >>= amdgpu_vm_block_size;
-
- for (i = start; i <= end; ++i)
- amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
ib->length_dw = 0;
- if (!(flags & AMDGPU_PTE_VALID)) {
- unsigned i;
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_fence *f = vm->ids[i].last_id_use;
- r = amdgpu_sync_fence(adev, &ib->sync, &f->base);
- if (r)
- return r;
- }
- }
-
r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
mapping->it.last + 1, addr + mapping->offset,
flags, gtt_flags);
if (r)
goto error_free;
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, f);
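+ /* all page tables share the page directory's reservation object,
+  * so a single fence on the directory now covers every page table */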
+ amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
fence_put(*fence);
*fence = fence_get(f);
int r;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
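+ /* mem->start is in pages; cast to 64 bit before shifting so the
+  * byte offset cannot be truncated */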
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_TT)
addr += adev->vm_manager.vram_base_offset;
} else {
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+ struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo)
/* drop mutex to allocate and clear page table */
mutex_unlock(&vm->mutex);
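+ /* the new page table shares the page directory's reservation
+  * object, which must be held while creating the BO */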
+ ww_mutex_lock(&resv->lock, NULL);
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, resv, &pt);
+ ww_mutex_unlock(&resv->lock);
if (r)
goto error_free;
vm->page_directory_fence = NULL;
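+ /* the page directory is only written through the ring, so
+  * CPU visible VRAM is not required */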
r = amdgpu_bo_create(adev, pd_size, align, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, &vm->page_directory);
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, NULL, &vm->page_directory);
if (r)
return r;