]> Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
Merge tag 'pm-part2-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael...
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_object.c
index ff6f842655d16c1e452167bddb654a40324eb4eb..5c4c3e0d527be64386dcab0951727642f64d73db 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+/*
+ * amdgpu_need_backup - decide whether BO backups are required for this device
+ * @adev: amdgpu device pointer
+ *
+ * Backups are never used on APUs.  Otherwise the decision follows the
+ * amdgpu_gpu_recovery module parameter: 0 disables recovery (so no backup
+ * is needed), and -1 ("auto") enables recovery only under SR-IOV — on
+ * bare metal the auto setting also disables backups.
+ *
+ * Returns true if buffer backups should be created, false otherwise.
+ * NOTE(review): "backup" presumably refers to shadow BOs used for GPU
+ * reset recovery — confirm against the callers of this helper.
+ */
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+       if (adev->flags & AMD_IS_APU)
+               return false;
+
+       if (amdgpu_gpu_recovery == 0 ||
+           (amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))
+               return false;
+
+       return true;
+}
+
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -281,6 +293,44 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                *cpu_addr = NULL;
 }
 
+/*
+ * amdgpu_bo_validate_size - check that a BO fits in its requested domain(s)
+ * @adev:   amdgpu device pointer
+ * @size:   requested allocation size in bytes
+ * @domain: bitmask of AMDGPU_GEM_DOMAIN_* placement domains
+ *
+ * Returns true when @size is smaller than the total size of a memory
+ * domain the BO may be placed in, false otherwise.  man->size is in
+ * pages, hence the << PAGE_SHIFT conversion before comparing.
+ */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+                                         unsigned long size, u32 domain)
+{
+       struct ttm_mem_type_manager *man = NULL;
+
+       /*
+        * If GTT is part of requested domains the check must succeed to
+        * allow fall back to GTT
+        */
+       if (domain & AMDGPU_GEM_DOMAIN_GTT) {
+               man = &adev->mman.bdev.man[TTM_PL_TT];
+
+               if (size < (man->size << PAGE_SHIFT))
+                       return true;
+               else
+                       goto fail;
+       }
+
+       /* No GTT fallback possible: the BO must fit entirely in VRAM */
+       if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
+               man = &adev->mman.bdev.man[TTM_PL_VRAM];
+
+               if (size < (man->size << PAGE_SHIFT))
+                       return true;
+               else
+                       goto fail;
+       }
+
+
+       /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
+       return true;
+
+fail:
+       /* man is non-NULL here: both goto sites set it first */
+       DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                 man->size << PAGE_SHIFT);
+       return false;
+}
+
 static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                               unsigned long size, int byte_align,
                               bool kernel, u32 domain, u64 flags,
@@ -289,16 +339,24 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                               uint64_t init_value,
                               struct amdgpu_bo **bo_ptr)
 {
+       struct ttm_operation_ctx ctx = {
+               .interruptible = !kernel,
+               .no_wait_gpu = false,
+               .allow_reserved_eviction = true,
+               .resv = resv
+       };
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
-       u64 initial_bytes_moved, bytes_moved;
        size_t acc_size;
        int r;
 
        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);
 
+       if (!amdgpu_bo_validate_size(adev, size, domain))
+               return -ENOMEM;
+
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
@@ -364,22 +422,19 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        bo->tbo.bdev = &adev->mman.bdev;
        amdgpu_ttm_placement_from_domain(bo, domain);
 
-       initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-       /* Kernel allocation are uninterruptible */
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
-                                &bo->placement, page_align, !kernel, NULL,
+                                &bo->placement, page_align, &ctx, NULL,
                                 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0))
                return r;
 
-       bytes_moved = atomic64_read(&adev->num_bytes_moved) -
-                     initial_bytes_moved;
        if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
-               amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+               amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+                                            ctx.bytes_moved);
        else
-               amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
+               amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
        if (kernel)
                bo->tbo.priority = 1;
@@ -511,6 +566,7 @@ err:
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+       struct ttm_operation_ctx ctx = { false, false };
        uint32_t domain;
        int r;
 
@@ -521,7 +577,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
        amdgpu_ttm_placement_from_domain(bo, domain);
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
@@ -632,6 +688,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 *gpu_addr)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -682,13 +739,13 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
 
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p pin failed\n", bo);
                goto error;
        }
 
-       r = amdgpu_ttm_bind(&bo->tbo);
+       r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p bind failed\n", bo);
                goto error;
@@ -719,6 +776,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        int r, i;
 
        if (!bo->pin_count) {
@@ -732,7 +790,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r)) {
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
@@ -904,6 +962,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo *abo;
        unsigned long offset, size;
        int r;
@@ -937,7 +996,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        abo->placement.num_busy_placement = 1;
        abo->placement.busy_placement = &abo->placements[1];
 
-       r = ttm_bo_validate(bo, &abo->placement, false, false);
+       r = ttm_bo_validate(bo, &abo->placement, &ctx);
        if (unlikely(r != 0))
                return r;
 
This page took 0.039954 seconds and 4 git commands to generate.