 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
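The ttm_bo_mem_put() call becomes ttm_resource_free() as part of TTM's rename of ttm_mem_reg to ttm_resource. As a rough sketch of the new helper (simplified; the exact callback name shifted between put_node and free during the rework, see ttm_resource.c for the real body), it hands the resource back to the manager that allocated it:

	void ttm_resource_free(struct ttm_buffer_object *bo,
			       struct ttm_resource *res)
	{
		struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, res->mem_type);

		if (man->func->free)
			man->func->free(man, res);	/* return space to the manager */
		res->mm_node = NULL;
	}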
 static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 				    unsigned long size, u32 domain)
 {
-	struct ttm_mem_type_manager *man = NULL;
+	struct ttm_resource_manager *man = NULL;
 
 	/*
 	 * If GTT is part of requested domains the check must succeed to
 	 * allow fall back to GTT
 	 */
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
-		man = &adev->mman.bdev.man[TTM_PL_TT];
+		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
 		if (size < (man->size << PAGE_SHIFT))
 			return true;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-		man = &adev->mman.bdev.man[TTM_PL_VRAM];
+		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
 		if (size < (man->size << PAGE_SHIFT))
 			return true;
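Direct indexing into the bdev->man[] array is replaced by the ttm_manager_type() accessor, which decouples callers from how TTM stores its managers and lets drivers register their own. Conceptually it is just a lookup; a sketch (the name of the backing field is illustrative, as the storage moved from an embedded array to driver-registered pointers over the course of the rework):

	static inline struct ttm_resource_manager *
	ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
	{
		return bdev->man_drv[mem_type];	/* per-type manager pointer */
	}

Note that man->size here counts pages, hence the << PAGE_SHIFT before comparing against a byte size.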
 	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
 				  amdgpu_bo_size(shadow), NULL, fence,
-				  true, false);
+				  true, false, false);
 }
 
 /**
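The extra false appended to the amdgpu_copy_buffer() call corresponds to the tmz flag added to that function for encrypted (TMZ) buffer copies; shadow restores do not need it. After that change the prototype reads roughly:

	int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
			       uint64_t dst_offset, uint32_t byte_count,
			       struct dma_resv *resv, struct dma_fence **fence,
			       bool direct_submit, bool vm_needs_flush,
			       bool tmz);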
 		bo->pin_count++;
 
 		if (max_offset != 0) {
-			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
+			u64 domain_start = amdgpu_ttm_domain_start(adev,
+								   mem_type);
 			WARN_ON_ONCE(max_offset <
 				     (amdgpu_bo_gpu_offset(bo) - domain_start));
 		}
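With the per-manager gpu_offset field gone, amdgpu derives a domain's base address itself via amdgpu_ttm_domain_start(). A sketch of that helper, modeled on the version added to amdgpu_ttm.c:

	uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev,
					 uint32_t type)
	{
		switch (type) {
		case TTM_PL_TT:
			return adev->gmc.gart_start;	/* GTT aperture base */
		case TTM_PL_VRAM:
			return adev->gmc.vram_start;	/* VRAM aperture base */
		}

		return 0;
	}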
  */
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   bool evict,
-			   struct ttm_mem_reg *new_mem)
+			   struct ttm_resource *new_mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = &bo->mem;
 
 	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
-	return amdgpu_gmc_sign_extend(bo->tbo.offset);
+	return amdgpu_bo_gpu_offset_no_check(bo);
+}
+
+/**
+ * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns:
+ * current GPU offset of the object without raising warnings.
+ */
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	uint64_t offset;
+
+	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
+		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+
+	return amdgpu_gmc_sign_extend(offset);
 }
 
 /**
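amdgpu_bo_gpu_offset_no_check() rebuilds the address that bo->tbo.offset used to cache: the placement's page offset within its domain, shifted to bytes, plus the domain base from amdgpu_ttm_domain_start(). The final amdgpu_gmc_sign_extend() canonicalizes addresses above the GMC hole; a sketch, assuming the usual hole constants from amdgpu_gmc.h:

	static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
	{
		/* AMDGPU_GMC_HOLE_START/END bound the 48-bit canonical
		 * address hole; addresses at or above the hole get their
		 * upper bits set. */
		if (addr >= AMDGPU_GMC_HOLE_START)
			addr |= AMDGPU_GMC_HOLE_END;

		return addr;
	}

For example, a hypothetical BO with mem.start == 0x10 placed in VRAM starting at 0x8000000000 would, with 4 KiB pages, yield (0x10 << 12) + 0x8000000000 = 0x8000010000.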