		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				  TTM_PL_FLAG_VRAM;
+		places[c].mem_type = TTM_PL_VRAM;
+		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
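
For reference, the ttm_place layout these hunks target: the memory domain
moves out of the flags word into a field of its own. A sketch of the
structure as assumed after this series (comments mine):

/* Placement descriptor: where a buffer may live and with what caching. */
struct ttm_place {
	unsigned	fpfn;		/* first acceptable page frame number */
	unsigned	lpfn;		/* last acceptable pfn, 0 = no upper limit */
	uint32_t	mem_type;	/* TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM, ... */
	uint32_t	flags;		/* only caching/placement modifiers remain */
};
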
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_TT;
+		places[c].mem_type = TTM_PL_TT;
+		places[c].flags = 0;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = 0;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		places[c].mem_type = AMDGPU_PL_GDS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		places[c].mem_type = AMDGPU_PL_GWS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		places[c].mem_type = AMDGPU_PL_OA;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
		c++;
	}
	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = TTM_PL_MASK_CACHING;
		c++;
	}
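
The final block keeps the system-domain fallback; TTM_PL_MASK_CACHING is
simply the union of the caching flags (cached, uncached and write-combined),
so any caching mode is acceptable there. A minimal caller sketch, assuming
the usual amdgpu_bo_placement_from_domain()/ttm_bo_validate() flow (abo and
error handling elided):

/* Ask for VRAM with GTT as fallback, then let TTM (re)validate the BO. */
struct ttm_operation_ctx ctx = { .interruptible = true };
int r;

amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
				     AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
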
	if (r)
		return r;

+	if ((*bo_ptr) == NULL)
+		return 0;
+
	/*
	 * Remove the original mem node and create a new one at the request
	 * position.
	 */
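
The new early return is, as far as this excerpt shows, there to cover the
convenience path where the helper is asked for a zero-sized BO: assuming
amdgpu_bo_create_reserved() behaves as sketched below, it can succeed while
leaving *bo_ptr NULL, so the caller must not touch the (nonexistent) buffer.

/* Assumed behavior inside amdgpu_bo_create_reserved() (sketch): */
if (!size) {
	amdgpu_bo_unref(bo_ptr);	/* drops any old BO, *bo_ptr becomes NULL */
	return 0;			/* success, but nothing was created */
}
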
	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
-	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
+	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
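
adev_to_drm() replaces open-coded adev->ddev dereferences throughout the
driver. At the time of this change it is presumably just a trivial accessor
along these lines (sketch; later kernels embed drm_device in amdgpu_device
and return &adev->ddev instead):

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return adev->ddev;	/* indirection hidden behind one helper */
}
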
	amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
	}
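
The same substitution pattern recurs across the series: instead of probing a
TTM_PL_FLAG_* bit in the resulting placement mask, callers now compare the
explicit memory type. Side by side (do_vram_work() is a hypothetical
stand-in):

/* Before: domain encoded as a flag bit in the placement mask. */
if (bo->tbo.mem.placement & TTM_PL_FLAG_VRAM)
	do_vram_work();

/* After: domain is an enumerated memory type, compared directly. */
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
	do_vram_work();
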
/**
- * amdgpu_bo_move_notify - notification about a BO being released
+ * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not be leaked before the
 * memory is released.
 */
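
release_notify is one of the optional ttm_bo_driver callbacks; amdgpu hooks
it up so that buffers created with AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE
are cleared before their VRAM is handed back. Roughly (sketch, other
callbacks omitted):

static struct ttm_bo_driver amdgpu_bo_driver = {
	/* ... other callbacks ... */
	.release_notify = &amdgpu_bo_release_notify,
};
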