Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
Merge tag 'armsoc-dt64' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_object.c
index be80a4a68d7b05e63d22ff68a310830d7f1477cc..cb89fff863c04c9536ec6b9667d58abb06c1b5a1 100644 (file)
@@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 
        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
-               unsigned lpfn = 0;
-
-               /* This forces a reallocation if the flag wasn't set before */
-               if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
-                       lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
 
                places[c].fpfn = 0;
-               places[c].lpfn = lpfn;
+               places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
+
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+
+               if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+                       places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
                c++;
        }
 
@@ -395,32 +394,18 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocation are uninterruptible */
 
-       if (!resv) {
-               bool locked;
-
-               reservation_object_init(&bo->tbo.ttm_resv);
-               locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock);
-               WARN_ON(!locked);
-       }
-
        initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-       r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, page_align, !kernel, NULL,
-                       acc_size, sg, resv ? resv : &bo->tbo.ttm_resv,
-                       &amdgpu_ttm_bo_destroy);
+       r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+                                &bo->placement, page_align, !kernel, NULL,
+                                acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        amdgpu_cs_report_moved_bytes(adev,
                atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
 
-       if (unlikely(r != 0)) {
-               if (!resv)
-                       ww_mutex_unlock(&bo->tbo.resv->lock);
+       if (unlikely(r != 0))
                return r;
-       }
 
-       bo->tbo.priority = ilog2(bo->tbo.num_pages);
        if (kernel)
-               bo->tbo.priority *= 2;
-       bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1));
+               bo->tbo.priority = 1;
 
        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
@@ -436,7 +421,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                dma_fence_put(fence);
        }
        if (!resv)
-               ww_mutex_unlock(&bo->tbo.resv->lock);
+               amdgpu_bo_unreserve(bo);
        *bo_ptr = bo;
 
        trace_amdgpu_bo_create(bo);
@@ -665,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;
 
+       /* A shared bo cannot be migrated to VRAM */
+       if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+               return -EINVAL;
+
        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;
 
@@ -827,7 +816,10 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
-       if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       if (adev->family <= AMDGPU_FAMILY_CZ &&
+           AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;
 
        bo->tiling_flags = tiling_flags;
@@ -939,8 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        /* TODO: figure out how to map scattered VRAM to the CPU */
-       if ((offset + size) <= adev->mc.visible_vram_size &&
-           (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+       if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;
 
        /* Can't move a pinned BO to visible VRAM */
@@ -948,7 +939,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                return -EINVAL;
 
        /* hurrah the memory is not visible ! */
-       abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
        lpfn =  adev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < abo->placement.num_placement; i++) {
This page took 0.046779 seconds and 4 git commands to generate.