Merge drm/drm-next into drm-misc-next
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b382e9453a66546d86e519da002ba0d7c4e6315e..b7a2070d90af2332e1164e4aca4c84633f20fe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -76,7 +76,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
                kfree(ubo->metadata);
        }
 
-       kfree(bo);
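+       /* the BO is allocated with kvzalloc() in amdgpu_bo_create(), so it
+        * must be released with kvfree()
+        */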
+       kvfree(bo);
 }
 
 /**
@@ -364,14 +364,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
        if (cpu_addr)
                amdgpu_bo_kunmap(*bo_ptr);
 
-       ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
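+       /* drop the current backing store so the BO can be re-validated below
+        * with fpfn/lpfn restricted to the requested @offset
+        */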
+       ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
 
        for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
                (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
                (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
        }
        r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-                            &(*bo_ptr)->tbo.mem, &ctx);
+                            &(*bo_ptr)->tbo.resource, &ctx);
        if (r)
                goto error;
 
@@ -541,7 +541,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
        *bo_ptr = NULL;
-       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
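+       /* bo_ptr_size can be large (e.g. a &amdgpu_bo_vm trailing many
+        * amdgpu_vm_pt entries), so allow the allocation to fall back to
+        * vmalloc
+        */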
+       bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -575,15 +575,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                return r;
 
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-           bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-           bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+           bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+           bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
                                             ctx.bytes_moved);
        else
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
        if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-           bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+           bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                struct dma_fence *fence;
 
                r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -614,35 +614,6 @@ fail_unreserve:
        return r;
 }
 
-int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
-                           unsigned long size,
-                           struct amdgpu_bo *bo)
-{
-       struct amdgpu_bo_param bp;
-       int r;
-
-       if (bo->shadow)
-               return 0;
-
-       memset(&bp, 0, sizeof(bp));
-       bp.size = size;
-       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
-       bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-       bp.type = ttm_bo_type_kernel;
-       bp.resv = bo->tbo.base.resv;
-       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
-       r = amdgpu_bo_create(adev, &bp, &bo->shadow);
-       if (!r) {
-               bo->shadow->parent = amdgpu_bo_ref(bo);
-               mutex_lock(&adev->shadow_list_lock);
-               list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
-               mutex_unlock(&adev->shadow_list_lock);
-       }
-
-       return r;
-}
-
 /**
  * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
  * @adev: amdgpu device object
@@ -670,6 +641,38 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
        *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
        return r;
 }
+
+/**
+ * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @vmbo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used for GPUVM.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+                       struct amdgpu_bo_param *bp,
+                       struct amdgpu_bo_vm **vmbo_ptr)
+{
+       struct amdgpu_bo *bo_ptr;
+       int r;
+
+       /* The caller determines bo_ptr_size, which depends on the number of
+        * amdgpu_vm_pt entries.
+        */
+       BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+       r = amdgpu_bo_create(adev, bp, &bo_ptr);
+       if (r)
+               return r;
+
+       *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+       return r;
+}
+
 /**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
@@ -704,6 +707,22 @@ retry:
        return r;
 }
 
+/**
+ * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
+ *
+ * @bo: BO that will be inserted into the shadow list
+ *
+ * Insert a BO into the shadow list.
+ */
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       mutex_lock(&adev->shadow_list_lock);
+       list_add_tail(&bo->shadow_list, &adev->shadow_list);
+       mutex_unlock(&adev->shadow_list_lock);
+}
+
 /**
  * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
  *
@@ -758,12 +777,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
                return 0;
        }
 
-       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
-                                               MAX_SCHEDULE_TIMEOUT);
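+       /* wait for the exclusive fence (e.g. a pending move or clear) to
+        * signal before creating the CPU mapping
+        */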
+       r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
+                                 MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
 
-       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
        if (r)
                return r;
 
@@ -886,8 +905,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
        if (bo->tbo.pin_count) {
-               uint32_t mem_type = bo->tbo.mem.mem_type;
-               uint32_t mem_flags = bo->tbo.mem.placement;
+               uint32_t mem_type = bo->tbo.resource->mem_type;
+               uint32_t mem_flags = bo->tbo.resource->placement;
 
                if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
                        return -EINVAL;
@@ -937,7 +956,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
        ttm_bo_pin(&bo->tbo);
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -989,11 +1008,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
        if (bo->tbo.base.import_attach)
                dma_buf_unpin(bo->tbo.base.import_attach);
 
-       if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+       if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
                atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
                             &adev->visible_pin_size);
-       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+       } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
                atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
 }
@@ -1074,10 +1093,6 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
-       if (!adev->gmc.xgmi.connected_to_cpu) {
-               arch_phys_wc_del(adev->gmc.vram_mtrr);
-               arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
-       }
 }
 
 /**
@@ -1230,7 +1245,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo;
-       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = bo->resource;
 
        if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;
@@ -1241,7 +1256,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
        amdgpu_bo_kunmap(abo);
 
        if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-           bo->mem.mem_type != TTM_PL_SYSTEM)
+           bo->resource->mem_type != TTM_PL_SYSTEM)
                dma_buf_move_notify(abo->tbo.base.dma_buf);
 
        /* remember the eviction */
@@ -1261,7 +1276,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
        unsigned int domain;
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                *vram_mem += amdgpu_bo_size(bo);
@@ -1303,7 +1318,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
        if (bo->base.resv == &bo->base._resv)
                amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-       if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+       if (bo->resource->mem_type != TTM_PL_VRAM ||
            !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
                return;
 
@@ -1340,10 +1355,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        /* Remember that this BO was accessed by the CPU */
        abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-       if (bo->mem.mem_type != TTM_PL_VRAM)
+       if (bo->resource->mem_type != TTM_PL_VRAM)
                return 0;
 
-       offset = bo->mem.start << PAGE_SHIFT;
+       offset = bo->resource->start << PAGE_SHIFT;
        if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
                return 0;
 
@@ -1366,9 +1381,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        else if (unlikely(r))
                return VM_FAULT_SIGBUS;
 
-       offset = bo->mem.start << PAGE_SHIFT;
+       offset = bo->resource->start << PAGE_SHIFT;
        /* this should never happen */
-       if (bo->mem.mem_type == TTM_PL_VRAM &&
+       if (bo->resource->mem_type == TTM_PL_VRAM &&
            (offset + bo->base.size) > adev->gmc.visible_vram_size)
                return VM_FAULT_SIGBUS;
 
@@ -1453,11 +1468,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+       WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
                     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-       WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+       WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+       WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
                     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
        return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1475,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        uint64_t offset;
 
-       offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-                amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+       offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+                amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
        return amdgpu_gmc_sign_extend(offset);
 }
@@ -1529,7 +1544,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
        unsigned int pin_count;
        u64 size;
 
-       domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+       domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
        switch (domain) {
        case AMDGPU_GEM_DOMAIN_VRAM:
                placement = "VRAM";