	placement->num_placement = c;
	placement->placement = places;
-
-	placement->num_busy_placement = c;
-	placement->busy_placement = places;
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats)
{
	uint64_t size = amdgpu_bo_size(bo);
+	struct drm_gem_object *obj;
	unsigned int domain;
+	bool shared;

	/* Abort if the BO doesn't currently have a backing store */
	if (!bo->tbo.resource)
		return;
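+	/* A BO counts as shared when its GEM object has been exported as a
+	 * dma-buf or is referenced by more than one handle, which is what
+	 * drm_gem_object_is_shared_for_memory_stats() checks.
+	 */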
+	obj = &bo->tbo.base;
+	shared = drm_gem_object_is_shared_for_memory_stats(obj);
+
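+	/* Attribute the size to whichever domain currently backs the BO */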
	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		stats->vram += size;
		if (amdgpu_bo_in_cpu_visible_vram(bo))
			stats->visible_vram += size;
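+		/* shared BOs are additionally reported in the *_shared counters */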
+		if (shared)
+			stats->vram_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		stats->gtt += size;
+		if (shared)
+			stats->gtt_shared += size;
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		stats->cpu += size;
+		if (shared)
+			stats->cpu_shared += size;
		break;
	}
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);
	/* Avoid costly evictions; only set GTT as a busy placement */
-	abo->placement.num_busy_placement = 1;
-	abo->placement.busy_placement = &abo->placements[1];
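+	/* TTM_PL_FLAG_DESIRED: VRAM is only used when no eviction is needed,
+	 * otherwise TTM falls back to the GTT placement.
+	 */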
+	abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))