linux.git blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
Merge tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux
index ea25164e7f4b21ea4d0f9875ef32bbfb09edfa9c..fac4b6067efd6377f3a2681b9fd050878db21c13 100644
 #include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+       if (adev->flags & AMD_IS_APU)
+               return false;
+
+       if (amdgpu_gpu_recovery == 0 ||
+           (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+               return false;
+
+       return true;
+}
 
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
+       if (bo->kfd_bo)
+               amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+
        amdgpu_bo_kunmap(bo);
 
+       if (bo->gem_base.import_attach)
+               drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
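
The new amdgpu_need_backup() helper ties shadow-BO creation to the amdgpu_gpu_recovery module parameter (0 = disabled, 1 = enabled, -1 = auto, i.e. enabled only under SR-IOV): backups are skipped on APUs and whenever recovery could never consume them. The shape of the call site in amdgpu_bo_create(), as a minimal sketch:

    if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev))
            r = amdgpu_bo_create_shadow(adev, size, byte_align, *bo_ptr);
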
@@ -71,7 +89,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
        u32 c = 0;
 
        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
-               unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+               unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 
                places[c].fpfn = 0;
                places[c].lpfn = 0;
@@ -91,7 +109,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                if (flags & AMDGPU_GEM_CREATE_SHADOW)
-                       places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+                       places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
                else
                        places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
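
Besides the mc -> gmc rename, shadow BOs stay capped to the GART-addressable range via lpfn. In a ttm_place, fpfn and lpfn bound the page-frame window a buffer may occupy within a domain, with 0 meaning unbounded; an illustrative placement (the 256 MiB cap is a made-up value):

    struct ttm_place place = {
            .fpfn  = 0,                             /* no lower bound */
            .lpfn  = (256ULL << 20) >> PAGE_SHIFT,  /* cap at 256 MiB */
            .flags = TTM_PL_FLAG_TT,
    };
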
@@ -157,13 +175,15 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
  * @size: size for the new BO
  * @align: alignment for the new BO
  * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures (see note below)
  * @gpu_addr: GPU addr of the pinned BO
  * @cpu_addr: optional CPU address mapping
  *
  * Allocates and pins a BO for kernel internal use, and returns it still
  * reserved.
  *
+ * Note: a new BO is created only when *bo_ptr is NULL, otherwise it is reused.
+ *
  * Returns 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
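
The reuse semantics matter for callers that embed the BO pointer in a long-lived structure: only a NULL *bo_ptr triggers an allocation. A minimal usage sketch (error handling trimmed, the buffer's purpose is hypothetical):

    struct amdgpu_bo *bo = NULL;    /* NULL => a new BO is created */
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM, &bo,
                                  &gpu_addr, &cpu_ptr);
    if (!r)
            amdgpu_bo_unreserve(bo);        /* the BO comes back reserved */
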
@@ -175,10 +195,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
        int r;
 
        if (!*bo_ptr) {
-               r = amdgpu_bo_create(adev, size, align, true, domain,
+               r = amdgpu_bo_create(adev, size, align, domain,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                                    NULL, NULL, 0, bo_ptr);
+                                    ttm_bo_type_kernel, NULL, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
@@ -226,12 +246,14 @@ error_free:
  * @size: size for the new BO
  * @align: alignment for the new BO
  * @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures (see note below)
  * @gpu_addr: GPU addr of the pinned BO
  * @cpu_addr: optional CPU address mapping
  *
  * Allocates and pins a BO for kernel internal use.
  *
+ * Note: a new BO is created only when *bo_ptr is NULL, otherwise it is reused.
+ *
  * Returns 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
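
amdgpu_bo_create_kernel() is the variant that returns the BO unreserved; it pairs with amdgpu_bo_free_kernel(), which also NULLs the pointer so a later create call allocates afresh. A sketch around a hypothetical scratch page:

    struct amdgpu_bo *scratch = NULL;
    u64 scratch_gpu;
    void *scratch_cpu;

    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT, &scratch,
                                &scratch_gpu, &scratch_cpu);
    /* ... use scratch_cpu / scratch_gpu ... */
    amdgpu_bo_free_kernel(&scratch, &scratch_gpu, &scratch_cpu);
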
@@ -281,31 +303,68 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                *cpu_addr = NULL;
 }
 
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
-                              unsigned long size, int byte_align,
-                              bool kernel, u32 domain, u64 flags,
-                              struct sg_table *sg,
+/* Validate that the BO size fits in the requested domain's total memory */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+                                         unsigned long size, u32 domain)
+{
+       struct ttm_mem_type_manager *man = NULL;
+
+       /*
+        * If GTT is part of the requested domains the check must succeed
+        * to allow falling back to GTT
+        */
+       if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+               man = &adev->mman.bdev.man[TTM_PL_TT];
+
+               if (size < (man->size << PAGE_SHIFT))
+                       return true;
+               else
+                       goto fail;
+       }
+
+       if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+               man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+               if (size < (man->size << PAGE_SHIFT))
+                       return true;
+               else
+                       goto fail;
+       }
+
+
+       /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+       return true;
+
+fail:
+       DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                 man->size << PAGE_SHIFT);
+       return false;
+}
+
+static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+                              int byte_align, u32 domain,
+                              u64 flags, enum ttm_bo_type type,
                               struct reservation_object *resv,
-                              uint64_t init_value,
                               struct amdgpu_bo **bo_ptr)
 {
+       struct ttm_operation_ctx ctx = {
+               .interruptible = (type != ttm_bo_type_kernel),
+               .no_wait_gpu = false,
+               .resv = resv,
+               .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+       };
        struct amdgpu_bo *bo;
-       enum ttm_bo_type type;
        unsigned long page_align;
-       u64 initial_bytes_moved, bytes_moved;
        size_t acc_size;
+       u32 domains;
        int r;
 
        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);
 
-       if (kernel) {
-               type = ttm_bo_type_kernel;
-       } else if (sg) {
-               type = ttm_bo_type_sg;
-       } else {
-               type = ttm_bo_type_device;
-       }
+       if (!amdgpu_bo_validate_size(adev, size, domain))
+               return -ENOMEM;
+
        *bo_ptr = NULL;
 
        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
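
Two changes land here. The ttm_operation_ctx bundles what used to be loose ttm_bo_init_reserved() arguments: kernel-type BOs wait uninterruptibly, and TTM_OPT_FLAG_ALLOW_RES_EVICT lets TTM evict other BOs sharing the same reservation object (per-VM BOs). And amdgpu_bo_validate_size() now rejects requests that cannot fit in any requested domain; GTT is tested first, so a VRAM|GTT request only has to fit in GTT, because a VRAM failure can still fall back. A self-contained model of that ordering (stand-in domain bits and sizes, not driver values):

    #include <stdbool.h>

    #define DOMAIN_VRAM 0x1
    #define DOMAIN_GTT  0x2

    static bool validate_size(unsigned long long size, unsigned domain,
                              unsigned long long gtt_total,
                              unsigned long long vram_total)
    {
            if (domain & DOMAIN_GTT)        /* checked first: fallback target */
                    return size < gtt_total;
            if (domain & DOMAIN_VRAM)
                    return size < vram_total;
            return true;                    /* other domains: not checked yet */
    }

    /* validate_size(512 MiB, VRAM|GTT, gtt=1 GiB, vram=256 MiB) -> true
     * validate_size(512 MiB, VRAM,     gtt=1 GiB, vram=256 MiB) -> false */
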
@@ -314,11 +373,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
-       r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
-       if (unlikely(r)) {
-               kfree(bo);
-               return r;
-       }
+       drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
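
drm_gem_object_init() also allocates a shmem file as backing store; TTM-managed BOs bring their own backing pages, so that file was pure overhead. drm_gem_private_object_init() skips it and cannot fail, which is why the error path disappears:

    /* before: shmem backing allocated, initialization can fail */
    r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
    /* after: no shmem backing, no failure case */
    drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
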
@@ -328,7 +383,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->preferred_domains;
-       if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+       if (type != ttm_bo_type_kernel &&
+           bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
        bo->flags = flags;
@@ -362,33 +418,41 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
        bo->tbo.bdev = &adev->mman.bdev;
-       amdgpu_ttm_placement_from_domain(bo, domain);
-
-       initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-       /* Kernel allocation are uninterruptible */
+       domains = bo->preferred_domains;
+retry:
+       amdgpu_ttm_placement_from_domain(bo, domains);
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
-                                &bo->placement, page_align, !kernel, NULL,
-                                acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
-       if (unlikely(r != 0))
+                                &bo->placement, page_align, &ctx, acc_size,
+                                NULL, resv, &amdgpu_ttm_bo_destroy);
+
+       if (unlikely(r && r != -ERESTARTSYS)) {
+               if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                       bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       goto retry;
+               } else if (domains != bo->allowed_domains) {
+                       domains = bo->allowed_domains;
+                       goto retry;
+               }
+       }
+       if (unlikely(r))
                return r;
 
-       bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-                     initial_bytes_moved;
-       if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-           bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
-               amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+           bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+               amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+                                            ctx.bytes_moved);
        else
-               amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+               amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
-       if (kernel)
+       if (type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
 
        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct dma_fence *fence;
 
-               r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
+               r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;
 
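
Allocation failures now degrade step by step instead of bubbling up immediately; -ERESTARTSYS is exempt because it marks an interrupted wait that must reach userspace so the syscall can restart. The first retry drops AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, which confines VRAM placement to the CPU-visible window; the second widens from the preferred to the allowed domains (typically adding GTT). Note also that ctx.bytes_moved replaces the manual before/after reads of adev->num_bytes_moved. The ladder, sketched with a hypothetical try_alloc() helper:

    domains = bo->preferred_domains;
    retry:
            r = try_alloc(bo, domains);             /* hypothetical helper */
            if (r && r != -ERESTARTSYS) {
                    if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                            /* 1st fallback: any VRAM, not just the visible window */
                            bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
                            goto retry;
                    }
                    if (domains != bo->allowed_domains) {
                            /* 2nd fallback: e.g. VRAM|GTT instead of VRAM alone */
                            domains = bo->allowed_domains;
                            goto retry;
                    }
            }
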
@@ -425,12 +489,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        if (bo->shadow)
                return 0;
 
-       r = amdgpu_bo_do_create(adev, size, byte_align, true,
-                               AMDGPU_GEM_DOMAIN_GTT,
+       r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
                                AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                                AMDGPU_GEM_CREATE_SHADOW,
-                               NULL, bo->tbo.resv, 0,
-                               &bo->shadow);
+                               ttm_bo_type_kernel,
+                               bo->tbo.resv, &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
@@ -441,22 +504,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        return r;
 }
 
-/* init_value will only take effect when flags contains
- * AMDGPU_GEM_CREATE_VRAM_CLEARED.
- */
-int amdgpu_bo_create(struct amdgpu_device *adev,
-                    unsigned long size, int byte_align,
-                    bool kernel, u32 domain, u64 flags,
-                    struct sg_table *sg,
+int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+                    int byte_align, u32 domain,
+                    u64 flags, enum ttm_bo_type type,
                     struct reservation_object *resv,
-                    uint64_t init_value,
                     struct amdgpu_bo **bo_ptr)
 {
        uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
        int r;
 
-       r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
-                               parent_flags, sg, resv, init_value, bo_ptr);
+       r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+                               parent_flags, type, resv, bo_ptr);
        if (r)
                return r;
 
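
Callers now pass the TTM BO type explicitly instead of a kernel bool plus an sg table, and init_value is gone: VRAM_CLEARED buffers are simply zero-filled. A hypothetical allocation under the new signature:

    r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                         AMDGPU_GEM_CREATE_VRAM_CLEARED,
                         ttm_bo_type_device, NULL, &bo);
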
@@ -511,6 +569,7 @@ err:
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        uint32_t domain;
        int r;
 
@@ -521,7 +580,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
@@ -632,6 +691,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 *gpu_addr)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -647,7 +707,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;
 
-               if (domain != amdgpu_mem_type_to_domain(mem_type))
+               if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;
 
                bo->pin_count++;
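
The pin-time check is relaxed from domain equality to set membership: re-pinning only fails if the current placement lies outside the requested domain mask, so a BO already pinned in GTT can be pinned again with VRAM|GTT. An illustration of the difference (values chosen for demonstration):

    u32 cur = amdgpu_mem_type_to_domain(TTM_PL_TT); /* == AMDGPU_GEM_DOMAIN_GTT */
    u32 req = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;

    /* old: (req != cur)  -> -EINVAL, although GTT was acceptable */
    /* new: !(req & cur)  -> false, so the pin succeeds           */
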
@@ -682,21 +742,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }
 
+       r = amdgpu_ttm_alloc_gart(&bo->tbo);
+       if (unlikely(r)) {
+               dev_err(adev->dev, "%p bind failed\n", bo);
+               goto error;
+       }
+
        bo->pin_count = 1;
-       if (gpu_addr != NULL) {
-               r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-               if (unlikely(r)) {
-                       dev_err(adev->dev, "%p bind failed\n", bo);
-                       goto error;
-               }
+       if (gpu_addr != NULL)
                *gpu_addr = amdgpu_bo_gpu_offset(bo);
-       }
+
+       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
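
GART binding is no longer tied to the caller wanting a GPU address back: amdgpu_ttm_alloc_gart() runs on every pin, and the pin-size accounting is recomputed from the placement the BO actually landed in rather than the domain that was requested, so a VRAM|GTT pin that fell back to GTT is charged to the GTT counter. A pin that ignores the address, as a sketch:

    /* GART setup happens regardless; NULL just skips returning the address */
    r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
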
@@ -717,6 +779,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (!bo->pin_count) {
@@ -730,7 +793,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
@@ -766,31 +829,32 @@ static const char *amdgpu_vram_names[] = {
        "GDDR4",
        "GDDR5",
        "HBM",
-       "DDR3"
+       "DDR3",
+       "DDR4",
 };
 
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* reserve PAT memory space to WC for VRAM */
-       arch_io_reserve_memtype_wc(adev->mc.aper_base,
-                                  adev->mc.aper_size);
+       arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+                                  adev->gmc.aper_size);
 
        /* Add an MTRR for the VRAM */
-       adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
-                                             adev->mc.aper_size);
+       adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+                                             adev->gmc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
-               adev->mc.mc_vram_size >> 20,
-               (unsigned long long)adev->mc.aper_size >> 20);
+                adev->gmc.mc_vram_size >> 20,
+                (unsigned long long)adev->gmc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
-                adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
+                adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
        return amdgpu_ttm_init(adev);
 }
 
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
-       arch_phys_wc_del(adev->mc.vram_mtrr);
-       arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
+       arch_phys_wc_del(adev->gmc.vram_mtrr);
+       arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
 }
 
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -902,6 +966,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;
@@ -919,7 +984,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
-       if ((offset + size) <= adev->mc.visible_vram_size)
+       if ((offset + size) <= adev->gmc.visible_vram_size)
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
@@ -935,14 +1000,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];
 
-       r = ttm_bo_validate(bo, &abo->placement, false, false);
+       r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r != 0))
                return r;
 
        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
-           (offset + size) > adev->mc.visible_vram_size)
+           (offset + size) > adev->gmc.visible_vram_size)
                return -EINVAL;
 
        return 0;
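
On a CPU fault touching VRAM beyond the visible window, the BO is revalidated with its first placement capped to visible VRAM and GTT as the sole busy fallback, so eviction pressure moves it to GTT rather than thrashing VRAM. What the placement set up above amounts to, annotated:

    /* placements[0]: VRAM, lpfn capped to the CPU-visible window       */
    /* placements[1]: GTT; as the only busy_placement, this is where    */
    /*                the BO lands if visible VRAM would need evictions */
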
@@ -980,7 +1045,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-                    !amdgpu_ttm_is_bound(bo->tbo.ttm));
+                    !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
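
amdgpu_gtt_mgr_has_gart_addr() replaces amdgpu_ttm_is_bound() because a GTT BO can now hold space in the GTT manager without having a GART address yet; its GPU offset is only meaningful once a mapping exists. A safe pattern, as a sketch:

    r = amdgpu_ttm_alloc_gart(&bo->tbo);    /* make sure a GART address exists */
    if (!r)
            addr = amdgpu_bo_gpu_offset(bo);
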