struct amdgpu_bo_list_entry *list;
unsigned i, idx;
+ mutex_lock(&vm->mutex);
list = drm_malloc_ab(vm->max_pde_used + 2,
sizeof(struct amdgpu_bo_list_entry));
- if (!list)
+ if (!list) {
+ mutex_unlock(&vm->mutex);
return NULL;
+ }
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
list[idx].tv.shared = true;
list_add(&list[idx++].tv.head, head);
}
+ mutex_unlock(&vm->mutex);
return list;
}
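/*
 * Editorial sketch: the hunk above takes vm->mutex before building the
 * buffer-object list and releases it on both the allocation-failure path
 * and the normal return.  Below is a minimal user-space illustration of
 * that lock/allocate/unlock-on-every-path shape, using pthreads and
 * hypothetical names (struct demo_vm, demo_get_list); it is not amdgpu
 * code, only the pattern.
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_vm {
	pthread_mutex_t mutex;
	unsigned int max_used;
};

static int *demo_get_list(struct demo_vm *vm)
{
	int *list;

	pthread_mutex_lock(&vm->mutex);
	list = calloc(vm->max_used + 2, sizeof(*list));
	if (!list) {
		pthread_mutex_unlock(&vm->mutex);	/* drop the lock on the error path too */
		return NULL;
	}
	/* ... fill the list while the mutex protects vm state ... */
	pthread_mutex_unlock(&vm->mutex);
	return list;
}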
if (r)
return r;
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ return r;
+
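/*
 * Editorial sketch: reservation_object_reserve_shared() pre-allocates room
 * for one more shared fence on the BO's reservation object, so that the
 * shared-fence add done later in this patch (amdgpu_bo_fence(..., true))
 * cannot fail for lack of memory.  A minimal user-space illustration of the
 * same "reserve first, append cannot fail" pattern; struct fence_list and
 * its helpers are hypothetical, not amdgpu API.
 */
#include <errno.h>
#include <stdlib.h>

struct fence_list {
	void **slots;
	unsigned int count, capacity;
};

/* may fail: grow the array while failure is still easy to unwind */
static int fence_list_reserve(struct fence_list *list)
{
	void **bigger;

	if (list->count < list->capacity)
		return 0;
	bigger = realloc(list->slots, (list->capacity + 1) * sizeof(*bigger));
	if (!bigger)
		return -ENOMEM;
	list->slots = bigger;
	list->capacity++;
	return 0;
}

/* cannot fail: a slot was reserved up front */
static void fence_list_add(struct fence_list *list, void *fence)
{
	list->slots[list->count++] = fence;
}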
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto error_unreserve;
if (r)
goto error_free;
- amdgpu_bo_fence(bo, ib.fence, false);
+ amdgpu_bo_fence(bo, ib.fence, true);
error_free:
amdgpu_ib_free(adev, &ib);
return r;
}
- amdgpu_bo_fence(pd, ib.fence, false);
+ amdgpu_bo_fence(pd, ib.fence, true);
}
amdgpu_ib_free(adev, &ib);
* PTs have to be reserved and mutex must be locked!
*/
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo_va *bo_va = NULL;
int r;
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
+ if (bo_va)
+ amdgpu_sync_fence(sync, bo_va->last_pt_update);
return 0;
}
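/*
 * Editorial sketch: with bo_va initialised to NULL, the loop over the
 * invalidated list (elided above) leaves bo_va pointing at the last entry
 * it flushed, and only then is its last_pt_update fence added to the sync
 * object, so a following submission waits for the page-table updates.
 * A minimal stand-alone illustration of that "remember the last processed
 * element" pattern, with hypothetical names:
 */
#include <stddef.h>
#include <stdio.h>

struct demo_entry {
	int token;	/* stands in for the entry's last-update fence */
};

int main(void)
{
	struct demo_entry entries[] = { { 1 }, { 2 }, { 3 } };
	struct demo_entry *last = NULL;
	size_t i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		last = &entries[i];	/* process the entry, remember it */

	if (last)	/* the list may have been empty */
		printf("wait on token %d\n", last->token);
	return 0;
}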
uint64_t eaddr;
int r;
+ /* validate the parameters */
+ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+ size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
+ amdgpu_bo_unreserve(bo_va->bo);
+ return -EINVAL;
+ }
+
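/*
 * Editorial sketch: the new check rejects a mapping whose address, offset
 * or size is not GPU-page aligned, or whose size is zero.  With a
 * power-of-two page size, "value & (page_size - 1)" is non-zero exactly
 * when the value is misaligned.  Stand-alone illustration assuming a
 * hypothetical 4 KiB page size; EXAMPLE_PAGE_* are not amdgpu definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u
#define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE_SIZE - 1)

static bool mapping_params_valid(uint64_t saddr, uint64_t offset, uint64_t size)
{
	return !(saddr & EXAMPLE_PAGE_MASK) &&
	       !(offset & EXAMPLE_PAGE_MASK) &&
	       size != 0 && !(size & EXAMPLE_PAGE_MASK);
}

int main(void)
{
	printf("%d\n", mapping_params_valid(0x1000, 0, 0x2000));	/* 1: aligned */
	printf("%d\n", mapping_params_valid(0x1234, 0, 0x2000));	/* 0: unaligned address */
	printf("%d\n", mapping_params_valid(0x1000, 0, 0));		/* 0: zero size */
	return 0;
}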
/* make sure object fit at this offset */
eaddr = saddr + size;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {