Merge drm/drm-next into drm-misc-next
author    Maxime Ripard <[email protected]>
          Tue, 23 Apr 2024 06:48:56 +0000 (08:48 +0200)
committer Maxime Ripard <[email protected]>
          Tue, 23 Apr 2024 06:48:56 +0000 (08:48 +0200)
Maíra needs a backmerge to apply v3d patches, and Danilo needs one for
some nouveau patches.

Signed-off-by: Maxime Ripard <[email protected]>
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

index e011a326fe8627d47f4602533aa2dac55d0ff72a,92af057dbf6d50cf6c746d423eed5bb384c3cdcf..492aebc44e5141fe8b47e0b35f2126d8a4273476
@@@ -39,7 -39,6 +39,7 @@@
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  #include "amdgpu_amdkfd.h"
 +#include "amdgpu_vram_mgr.h"
  
  /**
   * DOC: amdgpu_object
@@@ -602,7 -601,8 +602,7 @@@ int amdgpu_bo_create(struct amdgpu_devi
        if (!amdgpu_bo_support_uswc(bo->flags))
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
  
 -      if (adev->ras_enabled)
 -              bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 +      bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
  
        bo->tbo.bdev = &adev->mman.bdev;
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
                return r;
  
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-           bo->tbo.resource->mem_type == TTM_PL_VRAM &&
-           amdgpu_bo_in_cpu_visible_vram(bo))
+           amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
  
        if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                struct dma_fence *fence;
  
 -              r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
 +              r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
                if (unlikely(r))
                        goto fail_unreserve;
  
@@@ -1278,23 -1277,25 +1277,25 @@@ void amdgpu_bo_move_notify(struct ttm_b
  void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                          struct amdgpu_mem_stats *stats)
  {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_resource *res = bo->tbo.resource;
        uint64_t size = amdgpu_bo_size(bo);
        struct drm_gem_object *obj;
        unsigned int domain;
        bool shared;
  
        /* Abort if the BO doesn't currently have a backing store */
-       if (!bo->tbo.resource)
+       if (!res)
                return;
  
        obj = &bo->tbo.base;
        shared = drm_gem_object_is_shared_for_memory_stats(obj);
  
-       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+       domain = amdgpu_mem_type_to_domain(res->mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                stats->vram += size;
-               if (amdgpu_bo_in_cpu_visible_vram(bo))
+               if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                        stats->visible_vram += size;
                if (shared)
                        stats->vram_shared += size;
@@@ -1365,9 -1366,8 +1366,9 @@@ void amdgpu_bo_release_notify(struct tt
        if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
                return;
  
 -      r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
 +      r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
        if (!WARN_ON(r)) {
 +              amdgpu_vram_mgr_set_cleared(bo->resource);
                amdgpu_bo_fence(abo, fence, false);
                dma_fence_put(fence);
        }
@@@ -1396,10 -1396,7 +1397,7 @@@ vm_fault_t amdgpu_bo_fault_reserve_noti
        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
  
-       if (bo->resource->mem_type != TTM_PL_VRAM)
-               return 0;
-       if (amdgpu_bo_in_cpu_visible_vram(abo))
+       if (amdgpu_res_cpu_visible(adev, bo->resource))
                return 0;
  
        /* Can't move a pinned BO to visible VRAM */
  
        /* this should never happen */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
-           !amdgpu_bo_in_cpu_visible_vram(abo))
+           !amdgpu_res_cpu_visible(adev, bo->resource))
                return VM_FAULT_SIGBUS;
  
        ttm_bo_move_to_lru_tail_unlocked(bo);
@@@ -1586,6 -1583,7 +1584,7 @@@ uint32_t amdgpu_bo_get_preferred_domain
   */
  u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
  {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct dma_buf_attachment *attachment;
        struct dma_buf *dma_buf;
        const char *placement;
  
        if (dma_resv_trylock(bo->tbo.base.resv)) {
                unsigned int domain;
                domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
                switch (domain) {
                case AMDGPU_GEM_DOMAIN_VRAM:
-                       if (amdgpu_bo_in_cpu_visible_vram(bo))
+                       if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
                                placement = "VRAM VISIBLE";
                        else
                                placement = "VRAM";
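
The amdgpu_object.c hunks above switch release-time wiping from the
AMDGPU_POISON pattern to a zero fill and record the wipe via
amdgpu_vram_mgr_set_cleared(), so a later allocation of the same VRAM can
skip a redundant clear. A minimal standalone sketch of that
clear-on-free / skip-on-alloc idea (mock types and helpers, not kernel
code):

/*
 * Standalone sketch, not kernel code: buffers are wiped with zeros on
 * release and the backing range is marked "cleared", so a later
 * allocation that wants zeroed memory can skip the memset.  struct
 * mock_block, mock_free() and mock_alloc() are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mock_block {
	unsigned char mem[64];
	bool cleared;		/* analogous to the per-block cleared state */
};

static void mock_free(struct mock_block *b)
{
	/* wipe with zeros on release (was a poison pattern before) */
	memset(b->mem, 0, sizeof(b->mem));
	b->cleared = true;	/* remember the wipe, cf. amdgpu_vram_mgr_set_cleared() */
}

static void mock_alloc(struct mock_block *b, bool want_cleared)
{
	/* skip the clear when the block is already known to be zeroed */
	if (want_cleared && !b->cleared)
		memset(b->mem, 0, sizeof(b->mem));
	b->cleared = false;	/* the new owner may dirty it */
}

int main(void)
{
	struct mock_block b = { .cleared = false };

	mock_free(&b);		/* clear-on-free marks the block */
	mock_alloc(&b, true);	/* this allocation skips the memset */
	printf("first byte after realloc: %u\n", b.mem[0]);
	return 0;
}

The payoff is that buffers requested as cleared cost nothing extra when
their backing memory was already wiped at free time.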
index f0b42b5673571b3db8db1c7b14ec87e16749a7c2,1d71729e3f6bcef2c02f9e1ce252dc6cd6461b94..6b48bcf53ce96045794094d90cd5365d0b259858
@@@ -133,7 -133,7 +133,7 @@@ static void amdgpu_evict_flags(struct t
  
                } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
-                          amdgpu_bo_in_cpu_visible_vram(abo)) {
+                          amdgpu_res_cpu_visible(adev, bo->resource)) {
  
                        /* Try evicting to the CPU inaccessible part of VRAM
                         * first, but only set GTT as busy placement, so this
@@@ -378,12 -378,11 +378,12 @@@ static int amdgpu_move_blit(struct ttm_
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
  
 -              r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
 -                                      false);
 +              r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
 +                                     false);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
 +                      amdgpu_vram_mgr_set_cleared(bo->resource);
                        dma_fence_put(fence);
                        fence = wipe_fence;
                }
@@@ -404,40 -403,55 +404,55 @@@ error
        return r;
  }
  
- /*
-  * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
+ /**
+  * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+  * @adev: amdgpu device
+  * @res: the resource to check
   *
-  * Called by amdgpu_bo_move()
+  * Returns: true if the full resource is CPU visible, false otherwise.
   */
- static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-                               struct ttm_resource *mem)
+ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                             struct ttm_resource *res)
  {
-       u64 mem_size = (u64)mem->size;
        struct amdgpu_res_cursor cursor;
-       u64 end;
  
-       if (mem->mem_type == TTM_PL_SYSTEM ||
-           mem->mem_type == TTM_PL_TT)
+       if (!res)
+               return false;
+       if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+           res->mem_type == AMDGPU_PL_PREEMPT)
                return true;
-       if (mem->mem_type != TTM_PL_VRAM)
+       if (res->mem_type != TTM_PL_VRAM)
                return false;
  
-       amdgpu_res_first(mem, 0, mem_size, &cursor);
-       end = cursor.start + cursor.size;
+       amdgpu_res_first(res, 0, res->size, &cursor);
        while (cursor.remaining) {
+               if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
+                       return false;
                amdgpu_res_next(&cursor, cursor.size);
+       }
  
-               if (!cursor.remaining)
-                       break;
+       return true;
+ }
  
-               /* ttm_resource_ioremap only supports contiguous memory */
-               if (end != cursor.start)
-                       return false;
+ /*
+  * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+  *
+  * Called by amdgpu_bo_move()
+  */
+ static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+                               struct ttm_resource *mem)
+ {
+       if (!amdgpu_res_cpu_visible(adev, mem))
+               return false;
  
-               end = cursor.start + cursor.size;
-       }
+       /* ttm_resource_ioremap only supports contiguous memory */
+       if (mem->mem_type == TTM_PL_VRAM &&
+           !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+               return false;
  
-       return end <= adev->gmc.visible_vram_size;
+       return true;
  }
  
  /*
@@@ -530,8 -544,8 +545,8 @@@ static int amdgpu_bo_move(struct ttm_bu
  
        if (r) {
                /* Check that all memory is CPU accessible */
-               if (!amdgpu_mem_visible(adev, old_mem) ||
-                   !amdgpu_mem_visible(adev, new_mem)) {
+               if (!amdgpu_res_copyable(adev, old_mem) ||
+                   !amdgpu_res_copyable(adev, new_mem)) {
                        pr_err("Move buffer fallback to memcpy unavailable\n");
                        return r;
                }
@@@ -558,7 -572,6 +573,6 @@@ static int amdgpu_ttm_io_mem_reserve(st
                                     struct ttm_resource *mem)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       size_t bus_size = (size_t)mem->size;
  
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
-               /* check if it's visible */
-               if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
-                       return -EINVAL;
  
                if (adev->mman.aper_base_kaddr &&
                    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
@@@ -2216,71 -2226,6 +2227,71 @@@ static int amdgpu_ttm_fill_mem(struct a
        return 0;
  }
  
 +/**
 + * amdgpu_ttm_clear_buffer - clear memory buffers
 + * @bo: amdgpu buffer object
 + * @resv: reservation object
 + * @fence: dma_fence associated with the operation
 + *
 + * Clear the memory buffer resource.
 + *
 + * Returns:
 + * 0 for success or a negative error code on failure.
 + */
 +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 +                          struct dma_resv *resv,
 +                          struct dma_fence **fence)
 +{
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 +      struct amdgpu_res_cursor cursor;
 +      u64 addr;
 +      int r;
 +
 +      if (!adev->mman.buffer_funcs_enabled)
 +              return -EINVAL;
 +
 +      if (!fence)
 +              return -EINVAL;
 +
 +      *fence = dma_fence_get_stub();
 +
 +      amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 +
 +      mutex_lock(&adev->mman.gtt_window_lock);
 +      while (cursor.remaining) {
 +              struct dma_fence *next = NULL;
 +              u64 size;
 +
 +              if (amdgpu_res_cleared(&cursor)) {
 +                      amdgpu_res_next(&cursor, cursor.size);
 +                      continue;
 +              }
 +
 +              /* Never clear more than 256MiB at once to avoid timeouts */
 +              size = min(cursor.size, 256ULL << 20);
 +
 +              r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
 +                                        1, ring, false, &size, &addr);
 +              if (r)
 +                      goto err;
 +
 +              r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
 +                                      &next, true, true);
 +              if (r)
 +                      goto err;
 +
 +              dma_fence_put(*fence);
 +              *fence = next;
 +
 +              amdgpu_res_next(&cursor, size);
 +      }
 +err:
 +      mutex_unlock(&adev->mman.gtt_window_lock);
 +
 +      return r;
 +}
 +
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
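
The amdgpu_ttm_clear_buffer() helper added above walks the resource with
an amdgpu_res_cursor, skips ranges the allocator already tracked as
cleared, and caps each fill job at 256 MiB so no single submission can
run into a timeout. A standalone sketch of that loop shape (simplified
cursor; the cleared flag is a stand-in for amdgpu_res_cleared(), not
kernel code):

/*
 * Standalone sketch, not kernel code: chunked clearing with a cursor.
 */
#include <stdint.h>
#include <stdio.h>

struct cursor {
	uint64_t start;
	uint64_t remaining;
	int cleared;		/* stand-in for amdgpu_res_cleared() */
};

static void clear_range(uint64_t start, uint64_t size)
{
	printf("clear [%llu, %llu)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
}

int main(void)
{
	struct cursor cur = { .start = 0, .remaining = 600ULL << 20 };

	while (cur.remaining) {
		/* never clear more than 256 MiB in one submission */
		uint64_t size = cur.remaining;

		if (size > (256ULL << 20))
			size = 256ULL << 20;

		if (!cur.cleared)	/* skip already-cleared ranges */
			clear_range(cur.start, size);

		cur.start += size;
		cur.remaining -= size;
	}
	return 0;
}

In the kernel version each chunk's fence replaces the previous one, so
the caller only ever waits on the last clear submitted.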
index b404d89d52e55ba5be67769e39472908d6b1ca40,32cf6b6f6efd96873c294648714f2c78f6ff9ec3..4f5e70ee9ad0de3de1fb78825331b434195422e4
@@@ -38,6 -38,8 +38,6 @@@
  #define AMDGPU_GTT_MAX_TRANSFER_SIZE  512
  #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS       2
  
 -#define AMDGPU_POISON 0xd0bed0be
 -
  extern const struct attribute_group amdgpu_vram_mgr_attr_group;
  extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
  
@@@ -137,6 -139,9 +137,9 @@@ int amdgpu_vram_mgr_reserve_range(struc
  int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start);
  
+ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+                           struct ttm_resource *res);
  int amdgpu_ttm_init(struct amdgpu_device *adev);
  void amdgpu_ttm_fini(struct amdgpu_device *adev);
  void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
@@@ -153,9 -158,6 +156,9 @@@ int amdgpu_ttm_copy_mem_to_mem(struct a
                               uint64_t size, bool tmz,
                               struct dma_resv *resv,
                               struct dma_fence **f);
 +int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 +                          struct dma_resv *resv,
 +                          struct dma_fence **fence);
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
                        struct dma_resv *resv,
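
The amdgpu_res_cpu_visible() helper exported in this header replaces the
old per-BO amdgpu_bo_in_cpu_visible_vram() check: instead of assuming a
buffer is one contiguous chunk, it iterates every fragment of the
resource and only reports visibility if all of them end below the
visible-VRAM (BAR) limit. A standalone sketch of that predicate over a
hypothetical fragment array (the real code walks an amdgpu_res_cursor):

/*
 * Standalone sketch, not kernel code: CPU visibility over fragments.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fragment {
	uint64_t start;
	uint64_t size;
};

static bool res_cpu_visible(const struct fragment *frags, size_t n,
			    uint64_t visible_vram_size)
{
	for (size_t i = 0; i < n; i++) {
		/* every fragment must fit under the aperture limit */
		if (frags[i].start + frags[i].size >= visible_vram_size)
			return false;
	}
	return true;
}

int main(void)
{
	const struct fragment frags[] = {
		{ .start = 0,         .size = 1 << 20 },
		{ .start = 255 << 20, .size = 2 << 20 },	/* crosses the limit */
	};

	printf("%s\n", res_cpu_visible(frags, 2, 256ULL << 20) ?
	       "visible" : "not visible");
	return 0;
}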