Merge tag 'drm-misc-next-2018-11-07' of git://anongit.freedesktop.org/drm/drm-misc...

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b0e14a3d54efd44e86580f816f41f05f16368b08..cf768acb51dce3f43efc48779d29ecdeda971846 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
  *
  */
 
-static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
-{
-       if (adev->flags & AMD_IS_APU)
-               return false;
-
-       if (amdgpu_gpu_recovery == 0 ||
-           (amdgpu_gpu_recovery == -1  && !amdgpu_sriov_vf(adev)))
-               return false;
-
-       return true;
-}
-
 /**
  * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
  *
@@ -163,10 +151,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
-               if (flags & AMDGPU_GEM_CREATE_SHADOW)
-                       places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-               else
-                       places[c].lpfn = 0;
+               places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
@@ -253,6 +238,11 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
        bool free = false;
        int r;
 
+       if (!size) {
+               amdgpu_bo_unref(bo_ptr);
+               return 0;
+       }
+
        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = align;
@@ -346,7 +336,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
        if (r)
                return r;
 
-       amdgpu_bo_unreserve(*bo_ptr);
+       if (*bo_ptr)
+               amdgpu_bo_unreserve(*bo_ptr);
 
        return 0;
 }
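
Note, not part of the patch: with the two hunks above, amdgpu_bo_create_reserved() treats a zero-sized request as a successful no-op (any old reference in *bo_ptr is dropped and the pointer comes back NULL), and amdgpu_bo_create_kernel() only unreserves a BO that actually exists. A minimal caller-side sketch under those assumptions; the helper name and "optional_size" are made up, and it also assumes amdgpu_bo_free_kernel() keeps tolerating a NULL BO:

	static int example_alloc_optional_buffer(struct amdgpu_device *adev,
						 unsigned long optional_size)
	{
		struct amdgpu_bo *bo = NULL;
		u64 gpu_addr = 0;
		void *cpu_ptr = NULL;
		int r;

		/* optional_size may legitimately be 0 for this IP block. */
		r = amdgpu_bo_create_kernel(adev, optional_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &bo, &gpu_addr, &cpu_ptr);
		if (r)
			return r;

		if (bo)		/* bo stays NULL when optional_size was 0 */
			memset(cpu_ptr, 0, optional_size);

		amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
		return 0;
	}
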
@@ -436,7 +427,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        int r;
 
        page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-       size = ALIGN(size, PAGE_SIZE);
+       if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
+                         AMDGPU_GEM_DOMAIN_OA))
+               size <<= PAGE_SHIFT;
+       else
+               size = ALIGN(size, PAGE_SIZE);
 
        if (!amdgpu_bo_validate_size(adev, size, bp->domain))
                return -ENOMEM;
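
As I read the hunk above, bp->size now means something different for GDS/GWS/OA BOs: it is taken as a count of hardware allocation units and shifted up by PAGE_SHIFT, so TTM accounts one page per unit, while ordinary BOs keep the plain byte-size page alignment. A sketch of that size derivation; the helper name is invented, only the arithmetic mirrors the hunk:

	static unsigned long example_bo_byte_size(u32 domain, unsigned long size)
	{
		/* GDS/GWS/OA sizes arrive in HW allocation units, not bytes;
		 * one TTM page then stands for one unit. */
		if (domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
			      AMDGPU_GEM_DOMAIN_OA))
			return size << PAGE_SHIFT;

		/* Everything else is a normal byte-sized, page-aligned BO. */
		return ALIGN(size, PAGE_SIZE);
	}
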
@@ -451,7 +446,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                return -ENOMEM;
        drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
        INIT_LIST_HEAD(&bo->shadow_list);
-       INIT_LIST_HEAD(&bo->va);
+       bo->vm_bo = NULL;
        bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
                bp->domain;
        bo->allowed_domains = bo->preferred_domains;
@@ -541,7 +536,7 @@ fail_unreserve:
 }
 
 static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
-                                  unsigned long size, int byte_align,
+                                  unsigned long size,
                                   struct amdgpu_bo *bo)
 {
        struct amdgpu_bo_param bp;
@@ -552,7 +547,6 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 
        memset(&bp, 0, sizeof(bp));
        bp.size = size;
-       bp.byte_align = byte_align;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                AMDGPU_GEM_CREATE_SHADOW;
@@ -563,7 +557,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
-               list_add_tail(&bo->shadow_list, &adev->shadow_list);
+               list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }
 
@@ -596,12 +590,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        if (r)
                return r;
 
-       if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
+       if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
                if (!bp->resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));
 
-               r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
+               r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
                if (!bp->resv)
                        reservation_object_unlock((*bo_ptr)->tbo.resv);
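
With amdgpu_bo_need_backup() gone, AMDGPU_GEM_CREATE_SHADOW now produces a GTT shadow on every dGPU regardless of the amdgpu_gpu_recovery module parameter; only APUs skip it. A sketch of a caller requesting such a BO; the amdgpu_bo_param fields follow the pattern visible in the hunks above, with .type being an assumption on my part:

	static int example_create_shadowed_bo(struct amdgpu_device *adev,
					      unsigned long size,
					      struct amdgpu_bo **bo)
	{
		struct amdgpu_bo_param bp;

		memset(&bp, 0, sizeof(bp));
		bp.size = size;
		bp.byte_align = PAGE_SIZE;
		bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
		bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
			   AMDGPU_GEM_CREATE_SHADOW;
		bp.type = ttm_bo_type_kernel;
		bp.resv = NULL;

		/* On an APU the SHADOW flag is quietly ignored; on a dGPU
		 * (*bo)->shadow is created in GTT and linked into
		 * adev->shadow_list. */
		return amdgpu_bo_create(adev, &bp, bo);
	}
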
@@ -646,7 +640,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
 
-       r = reservation_object_reserve_shared(bo->tbo.resv);
+       r = reservation_object_reserve_shared(bo->tbo.resv, 1);
        if (r)
                goto err;
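
The extra argument to reservation_object_reserve_shared() comes from a wider dma-buf/reservation change in this cycle: callers now declare up front how many shared fence slots they are about to fill. A minimal sketch of the resulting pattern; the helper is hypothetical, the calls are the same ones used in the hunk above:

	static int example_add_shared_fence(struct amdgpu_bo *bo,
					    struct dma_fence *fence)
	{
		/* Reserve exactly one shared slot before publishing the fence. */
		int r = reservation_object_reserve_shared(bo->tbo.resv, 1);

		if (r)
			return r;

		amdgpu_bo_fence(bo, fence, true);	/* fills the slot reserved above */
		return 0;
	}
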
 
@@ -695,13 +689,10 @@ retry:
 }
 
 /**
- * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be restored
- * @resv: reservation object with embedded fence
+ * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
+ *
+ * @shadow: &amdgpu_bo shadow to be restored
  * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
  *
  * Copies a buffer object's shadow content back to the object.
  * This is used for recovering a buffer from its shadow in case of a gpu
@@ -710,36 +701,19 @@ retry:
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
-                                 struct amdgpu_ring *ring,
-                                 struct amdgpu_bo *bo,
-                                 struct reservation_object *resv,
-                                 struct dma_fence **fence,
-                                 bool direct)
+int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
 {
-       struct amdgpu_bo *shadow = bo->shadow;
-       uint64_t bo_addr, shadow_addr;
-       int r;
-
-       if (!shadow)
-               return -EINVAL;
+       struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
+       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+       uint64_t shadow_addr, parent_addr;
 
-       bo_addr = amdgpu_bo_gpu_offset(bo);
-       shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+       shadow_addr = amdgpu_bo_gpu_offset(shadow);
+       parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
 
-       r = reservation_object_reserve_shared(bo->tbo.resv);
-       if (r)
-               goto err;
-
-       r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
-                              amdgpu_bo_size(bo), resv, fence,
-                              direct, false);
-       if (!r)
-               amdgpu_bo_fence(bo, *fence, true);
-
-err:
-       return r;
+       return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
+                                 amdgpu_bo_size(shadow), NULL, fence,
+                                 true, false);
 }
 
 /**
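
The rewritten helper no longer takes a ring, reservation object or submission flag; it derives the ring from the device, always submits directly, and copies from the shadow back to its parent. A hedged sketch of how a VRAM-recovery path might drive it, loosely modeled on the device-reset code and simplified (fence chaining and timeout handling trimmed, function name invented):

	static int example_recover_vram(struct amdgpu_device *adev)
	{
		struct dma_fence *fence = NULL, *next = NULL;
		struct amdgpu_bo *shadow;
		int r = 0;

		mutex_lock(&adev->shadow_list_lock);
		list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
			/* Skip evicted shadows and parents not resident in VRAM. */
			if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
			    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
				continue;

			r = amdgpu_bo_restore_shadow(shadow, &next);
			if (r)
				break;

			dma_fence_put(fence);
			fence = next;
		}
		mutex_unlock(&adev->shadow_list_lock);

		if (fence)
			dma_fence_wait(fence, false);
		dma_fence_put(fence);

		return r;
	}
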
@@ -1019,10 +993,12 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-       if (0 && (adev->flags & AMD_IS_APU)) {
+#ifndef CONFIG_HIBERNATION
+       if (adev->flags & AMD_IS_APU) {
                /* Useless to evict on IGP chips */
                return 0;
        }
+#endif
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 
@@ -1360,15 +1336,13 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-                    !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
-                    !bo->pin_count);
+                    !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
-       return bo->tbo.offset;
+       return amdgpu_gmc_sign_extend(bo->tbo.offset);
 }
 
 /**
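
The last hunk stops warning about kernel BOs accessed without holding the reservation lock and, more importantly, returns a sign-extended MC address. Roughly, addresses in the upper half of the 48-bit GMC space are promoted to canonical 64-bit form; the sketch below is my reading of what amdgpu_gmc_sign_extend() amounts to, with the hole boundaries written as literals rather than the driver's own macros:

	static inline uint64_t example_sign_extend(uint64_t addr)
	{
		/* 48-bit MC space: anything at or above the hole start is
		 * extended so 64-bit comparisons and arithmetic stay sane. */
		if (addr >= 0x0000800000000000ULL)
			addr |= 0xffff800000000000ULL;

		return addr;
	}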