diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index f6d503432a9ef966b2748acfd801ff04730cad6e..8d8c39be612953d43272ef34c87ece8fcd628b4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
 
 /**
  * DOC: amdgpu_object
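
Note: the new include is presumably what declares amdgpu_vram_mgr_set_cleared(), which this patch starts calling from the release path in the final hunk below.
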
@@ -153,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 
-               if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+               if (abo->tbo.type == ttm_bo_type_kernel &&
+                   flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
                        places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
                c++;
        }
 
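Note: only kernel BOs now request contiguous VRAM in their initial placement; userspace BOs keep the AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS flag, but it only takes effect when the BO is pinned (see the amdgpu_bo_pin_restricted hunk below). A minimal sketch of the resulting policy as a hypothetical helper (the function name is illustrative, not part of the patch):

/* Contiguous VRAM is requested at create time only for kernel BOs;
 * user BOs get TTM_PL_FLAG_CONTIGUOUS later, at pin time.
 */
static bool contiguous_at_create(struct amdgpu_bo *abo, u64 flags)
{
        return abo->tbo.type == ttm_bo_type_kernel &&
               (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS);
}
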
@@ -173,6 +176,12 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                        abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
                        AMDGPU_PL_PREEMPT : TTM_PL_TT;
                places[c].flags = 0;
+               /*
+                * When GTT is just an alternative to VRAM, make sure that we
+                * only use it as a fallback and still try to fill up VRAM first.
+                */
+               if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+                       places[c].flags |= TTM_PL_FLAG_FALLBACK;
                c++;
        }
 
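Note: TTM_PL_FLAG_FALLBACK marks a placement that validation should skip at first and use only when the preferred placements cannot be satisfied, so a VRAM|GTT BO now fills up VRAM before spilling into GTT. A hedged usage sketch, assuming the usual amdgpu_bo_param flow (fields abbreviated):

struct amdgpu_bo_param bp = {
        .size = size,
        .domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
        /* remaining fields elided */
};
struct amdgpu_bo *bo;
int r = amdgpu_bo_create(adev, &bp, &bo);
/* With this patch, the GTT placement of such a BO carries
 * TTM_PL_FLAG_FALLBACK, so TTM tries VRAM first instead of treating
 * both domains as equally good.
 */
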
@@ -595,8 +604,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        if (!amdgpu_bo_support_uswc(bo->flags))
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
-       if (adev->ras_enabled)
-               bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+       bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 
        bo->tbo.bdev = &adev->mman.bdev;
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
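
Note the behavioral change here: wipe-on-release used to apply only when RAS was enabled, but the cleared-block tracking this series introduces is presumably only sound if freed VRAM is always zeroed, so the flag is now set unconditionally for every BO.
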
@@ -629,7 +637,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
            bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                struct dma_fence *fence;
 
-               r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+               r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;
 
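Note: amdgpu_fill_buffer() writes the whole BO unconditionally, while amdgpu_ttm_clear_buffer() can skip ranges whose backing blocks the VRAM manager already marked as cleared. A rough sketch of such a skip loop, assuming the amdgpu_res_cursor helpers and an amdgpu_res_cleared() predicate (assumptions, not shown in this hunk):

struct amdgpu_res_cursor cursor;

amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
        u64 size = cursor.size;

        if (amdgpu_res_cleared(&cursor)) {
                /* zeroed when its previous owner released it */
                amdgpu_res_next(&cursor, size);
                continue;
        }
        /* ...queue an SDMA fill of zeros for this range... */
        amdgpu_res_next(&cursor, size);
}
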
@@ -759,7 +767,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
        return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
                                  amdgpu_bo_size(shadow), NULL, fence,
-                                 true, false, false);
+                                 true, false, 0);
 }
 
 /**
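
Note: the last amdgpu_copy_buffer() parameter apparently changed from a bool (TMZ) into a flags word, hence the bare `false` becoming `0`. A caller that needs a protected copy would then pass a flag bit instead; an assumed example (the AMDGPU_COPY_FLAGS_TMZ name is a guess, not confirmed by this hunk):

r = amdgpu_copy_buffer(ring, src_addr, dst_addr, num_bytes, NULL,
                       &fence, true /* direct_submit */,
                       false /* vm_needs_flush */,
                       AMDGPU_COPY_FLAGS_TMZ);
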
@@ -961,6 +969,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
+
+               if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+                   bo->placements[i].mem_type == TTM_PL_VRAM)
+                       bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
        }
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
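
Note: this is the counterpart of the first placement hunk: the contiguous create flag is translated into TTM_PL_FLAG_CONTIGUOUS only when a user BO is actually pinned, e.g. for scanout, leaving unpinned BOs free to occupy scattered VRAM blocks. A typical pin path, for illustration, using the standard amdgpu helpers:

r = amdgpu_bo_reserve(abo, false);
if (r)
        return r;
/* Only here does AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS force a
 * contiguous VRAM placement for a user BO.
 */
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(abo);
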
@@ -1366,8 +1378,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
        if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
                return;
 
-       r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
+       r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
        if (!WARN_ON(r)) {
+               amdgpu_vram_mgr_set_cleared(bo->resource);
                amdgpu_bo_fence(abo, fence, false);
                dma_fence_put(fence);
        }
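
Note: released buffers are now cleared to zero rather than filled with AMDGPU_POISON, and amdgpu_vram_mgr_set_cleared() records that, so the next allocation of those blocks can skip the clear in amdgpu_ttm_clear_buffer() above. A minimal sketch of what that call plausibly does, inferred from its name rather than taken from this diff:

void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
{
        struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);

        /* Remember that the backing VRAM is zeroed so the buddy blocks
         * can be returned to the pool flagged as cleared when the
         * resource is freed.
         */
        vres->flags |= DRM_BUDDY_CLEARED;
}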