#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+ return false;
+
+ return true;
+}
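+
+/*
+ * Usage sketch (illustrative, not taken verbatim from this file):
+ * amdgpu_bo_create() is expected to consult this helper before allocating
+ * a shadow BO, roughly:
+ *
+ *	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev))
+ *		r = amdgpu_bo_create_shadow(adev, size, byte_align, *bo_ptr);
+ */
+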
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ if (bo->kfd_bo)
+ amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+
amdgpu_bo_kunmap(bo);
+ if (bo->gem_base.import_attach)
+ drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
if (!list_empty(&bo->shadow_list)) {
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
places[c].lpfn = 0;
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
if (flags & AMDGPU_GEM_CREATE_SHADOW)
- places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
else
places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT;
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: pointer to the BO; used to initialize BOs embedded in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use, and returns it still
* reserved.
*
+ * Note: a new BO is created only if @bo_ptr points to NULL; otherwise the
+ * existing BO is reused.
+ *
* Returns 0 on success, negative error code otherwise.
*/
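+/*
+ * Typical use (illustrative sketch; size and domain are examples): the BO
+ * comes back reserved, so the caller can fill it through @cpu_addr before
+ * unreserving it:
+ *
+ *	struct amdgpu_bo *bo = NULL;
+ *	u64 gpu_addr;
+ *	void *cpu_addr;
+ *
+ *	r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
+ *				      AMDGPU_GEM_DOMAIN_VRAM, &bo,
+ *				      &gpu_addr, &cpu_addr);
+ *	if (!r) {
+ *		memset(cpu_addr, 0, PAGE_SIZE);
+ *		amdgpu_bo_unreserve(bo);
+ *	}
+ */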
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
int r;
if (!*bo_ptr) {
- r = amdgpu_bo_create(adev, size, align, true, domain,
+ r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo_ptr);
+ ttm_bo_type_kernel, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: pointer to the BO; used to initialize BOs embedded in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use.
*
+ * Note: a new BO is created only if @bo_ptr points to NULL; otherwise the
+ * existing BO is reused.
+ *
* Returns 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
*cpu_addr = NULL;
}
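+
+/*
+ * Pairing sketch (illustrative; "size", "bo", "gpu_addr" and "cpu_addr" are
+ * placeholders): amdgpu_bo_create_kernel() returns the BO unreserved and is
+ * normally matched with amdgpu_bo_free_kernel():
+ *
+ *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
+ *				    &gpu_addr, &cpu_addr);
+ *	...
+ *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
+ */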
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+/*
+ * Validate that the BO size is smaller than the total memory in the
+ * requested domain
+ */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ unsigned long size, u32 domain)
+{
+ struct ttm_mem_type_manager *man = NULL;
+
+ /*
+ * If GTT is part of the requested domains the check must succeed to
+ * allow falling back to GTT
+ */
+ if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+ man = &adev->mman.bdev.man[TTM_PL_TT];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+ if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+ man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+ if (size < (man->size << PAGE_SHIFT))
+ return true;
+ else
+ goto fail;
+ }
+
+ /* TODO: add more domain checks, such as AMDGPU_GEM_DOMAIN_CPU */
+ return true;
+
+fail:
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+ man->size << PAGE_SHIFT);
+ return false;
+}
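+
+/*
+ * Example (illustrative numbers): with an 8 GB GTT domain, a 12 GB request
+ * that includes AMDGPU_GEM_DOMAIN_GTT is rejected here (amdgpu_bo_do_create()
+ * then returns -ENOMEM), while a VRAM-only request is checked against the
+ * VRAM manager size instead.
+ */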
+
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
- uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
+ struct ttm_operation_ctx ctx = {
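+ /* kernel allocations are uninterruptible */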
+ .interruptible = (type != ttm_bo_type_kernel),
+ .no_wait_gpu = false,
+ .resv = resv,
+ .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+ };
struct amdgpu_bo *bo;
- enum ttm_bo_type type;
unsigned long page_align;
- u64 initial_bytes_moved, bytes_moved;
size_t acc_size;
+ u32 domains;
int r;
page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else if (sg) {
- type = ttm_bo_type_sg;
- } else {
- type = ttm_bo_type_device;
- }
+ if (!amdgpu_bo_validate_size(adev, size, domain))
+ return -ENOMEM;
+
*bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
- if (unlikely(r)) {
- kfree(bo);
- return r;
- }
+ drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
bo->allowed_domains = bo->preferred_domains;
- if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ if (type != ttm_bo_type_kernel &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags;
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, domain);
-
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- /* Kernel allocation are uninterruptible */
+ domains = bo->preferred_domains;
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domains);
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- if (unlikely(r != 0))
+ &bo->placement, page_align, &ctx, acc_size,
+ NULL, resv, &amdgpu_ttm_bo_destroy);
+
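+ /*
+ * On failure (unless interrupted by a signal), first retry without the
+ * CPU-visible VRAM requirement, then retry with the full set of allowed
+ * domains.
+ */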
+ if (unlikely(r && r != -ERESTARTSYS)) {
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ } else if (domains != bo->preferred_domains) {
+ domains = bo->allowed_domains;
+ goto retry;
+ }
+ }
+ if (unlikely(r))
return r;
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+ bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+ ctx.bytes_moved);
else
- amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (kernel)
+ if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
if (bo->shadow)
return 0;
- r = amdgpu_bo_do_create(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
- NULL, bo->tbo.resv, 0,
- &bo->shadow);
+ ttm_bo_type_kernel,
+ bo->tbo.resv, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
return r;
}
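+
+/*
+ * Call-site sketch (illustrative; "size" and "bo" are placeholders): callers
+ * pass an explicit ttm_bo_type and an optional reservation object:
+ *
+ *	r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ *			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ *			     ttm_bo_type_device, NULL, &bo);
+ */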
-/* init_value will only take effect when flags contains
- * AMDGPU_GEM_CREATE_VRAM_CLEARED.
- */
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
- uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
- parent_flags, sg, resv, init_value, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+ parent_flags, type, resv, bo_ptr);
if (r)
return r;
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
uint32_t domain;
int r;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
goto retry;
u64 *gpu_addr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (domain != amdgpu_mem_type_to_domain(mem_type))
+ if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
bo->pin_count++;
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto error;
+ }
+
bo->pin_count = 1;
- if (gpu_addr != NULL) {
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
+ if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- }
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
"GDDR4",
"GDDR5",
"HBM",
- "DDR3"
+ "DDR3",
+ "DDR4",
};
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->mc.aper_base,
- adev->mc.aper_size);
+ arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ adev->gmc.aper_size);
/* Add an MTRR for the VRAM */
- adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
- adev->mc.aper_size);
+ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+ adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
- adev->mc.mc_vram_size >> 20,
- (unsigned long long)adev->mc.aper_size >> 20);
+ adev->gmc.mc_vram_size >> 20,
+ (unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
- adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
+ adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
- arch_phys_wc_del(adev->mc.vram_mtrr);
- arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo;
unsigned long offset, size;
int r;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->mc.visible_vram_size)
+ if ((offset + size) <= adev->gmc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
abo->placement.num_busy_placement = 1;
abo->placement.busy_placement = &abo->placements[1];
- r = ttm_bo_validate(bo, &abo->placement, false, false);
+ r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r != 0))
return r;
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
- (offset + size) > adev->mc.visible_vram_size)
+ (offset + size) > adev->gmc.visible_vram_size)
return -EINVAL;
return 0;
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_ttm_is_bound(bo->tbo.ttm));
+ !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
!bo->pin_count);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);