drm/amdgpu/amdgpu: use "*" adjacent to data name

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e11c5d69843dbabc42a2784c798541b24441d465..69ebd07f3eeee8a8e752f9e23c480674529a9f30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ    (size_t)128
 
-
-/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
- * memory request.
- *
- * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
- * @type: The type of memory requested
- * @man: The memory type manager for each domain
- *
- * This is called by ttm_bo_init_mm() when a buffer object is being
- * initialized.
- */
-static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-                               struct ttm_mem_type_manager *man)
+static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
+                                   unsigned int type,
+                                   uint64_t size_in_page)
 {
-       struct amdgpu_device *adev;
-
-       adev = amdgpu_ttm_adev(bdev);
-
-       switch (type) {
-       case TTM_PL_SYSTEM:
-               /* System memory */
-               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_MASK_CACHING;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               break;
-       case TTM_PL_TT:
-               /* GTT memory  */
-               man->func = &amdgpu_gtt_mgr_func;
-               man->available_caching = TTM_PL_MASK_CACHING;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-               break;
-       case TTM_PL_VRAM:
-               /* "On-card" video ram */
-               man->func = &amdgpu_vram_mgr_func;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                            TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-               man->default_caching = TTM_PL_FLAG_WC;
-               break;
-       case AMDGPU_PL_GDS:
-       case AMDGPU_PL_GWS:
-       case AMDGPU_PL_OA:
-               /* On-chip GDS memory*/
-               man->func = &ttm_bo_manager_func;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED;
-               man->available_caching = TTM_PL_FLAG_UNCACHED;
-               man->default_caching = TTM_PL_FLAG_UNCACHED;
-               break;
-       default:
-               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-               return -EINVAL;
-       }
-       return 0;
+       return ttm_range_man_init(&adev->mman.bdev, type,
+                                 TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
+                                 false, size_in_page);
 }
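
The helper above replaces the per-type switch in the removed
amdgpu_init_mem_type(): the three fixed-size on-chip pools (GDS, GWS, OA)
now share one ttm_range_man_init() wrapper and always use uncached
placements. A minimal usage sketch, mirroring the calls added further
down in this diff:

        r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
        if (r) {
                DRM_ERROR("Failed initializing GDS heap.\n");
                return r;
        }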
 
 /**
@@ -231,9 +183,9 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
  * Assign the memory from new_mem to the memory of the buffer object bo.
  */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
-                            struct ttm_mem_reg *new_mem)
+                            struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
 
        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
@@ -250,7 +202,7 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
  */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
                                    struct drm_mm_node *mm_node,
-                                   struct ttm_mem_reg *mem)
+                                   struct ttm_resource *mem)
 {
        uint64_t addr = 0;
 
@@ -270,7 +222,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
  * @offset: The offset to look up; on return, the offset within the found node.
  *
  */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
                                               uint64_t *offset)
 {
        struct drm_mm_node *mm_node = mem->mm_node;
@@ -298,7 +250,7 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
  * the physical address for local memory.
  */
 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
-                                struct ttm_mem_reg *mem,
+                                struct ttm_resource *mem,
                                 struct drm_mm_node *mm_node,
                                 unsigned num_pages, uint64_t offset,
                                 unsigned window, struct amdgpu_ring *ring,
@@ -521,9 +473,9 @@ error:
  * help move buffers to and from VRAM.
  */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
-                           bool evict, bool no_wait_gpu,
-                           struct ttm_mem_reg *new_mem,
-                           struct ttm_mem_reg *old_mem)
+                           bool evict,
+                           struct ttm_resource *new_mem,
+                           struct ttm_resource *old_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
@@ -582,10 +534,10 @@ error:
  */
 static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;
@@ -619,7 +571,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
        }
 
        /* blit VRAM to GTT */
-       r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
+       r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -627,7 +579,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
        /* move BO (in tmp_mem) to new_mem */
        r = ttm_bo_move_ttm(bo, ctx, new_mem);
 out_cleanup:
-       ttm_bo_mem_put(bo, &tmp_mem);
+       ttm_resource_free(bo, &tmp_mem);
        return r;
 }
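
amdgpu_move_vram_ram() cannot blit straight into arbitrary system pages,
so it evicts in two hops: blit VRAM into a temporary GTT resource, then
let TTM move that into the final placement. A condensed sketch of the
pattern (ttm_bo_mem_space() and the bind step live in the elided portion
of the function; this is a simplification, not the full body):

        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);   /* reserve GTT */
        if (unlikely(r))
                return r;
        r = amdgpu_move_blit(bo, evict, &tmp_mem, old_mem);    /* VRAM -> GTT */
        if (!r)
                r = ttm_bo_move_ttm(bo, ctx, new_mem);         /* GTT -> dest */
        ttm_resource_free(bo, &tmp_mem);                       /* always release */
        return r;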
 
@@ -638,10 +590,10 @@ out_cleanup:
  */
 static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
-                               struct ttm_mem_reg *new_mem)
+                               struct ttm_resource *new_mem)
 {
-       struct ttm_mem_reg *old_mem = &bo->mem;
-       struct ttm_mem_reg tmp_mem;
+       struct ttm_resource *old_mem = &bo->mem;
+       struct ttm_resource tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;
@@ -669,12 +621,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
        }
 
        /* copy to VRAM */
-       r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
+       r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
 out_cleanup:
-       ttm_bo_mem_put(bo, &tmp_mem);
+       ttm_resource_free(bo, &tmp_mem);
        return r;
 }
 
@@ -684,7 +636,7 @@ out_cleanup:
  * Called by amdgpu_bo_move()
  */
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
-                              struct ttm_mem_reg *mem)
+                              struct ttm_resource *mem)
 {
        struct drm_mm_node *nodes = mem->mm_node;
 
@@ -694,7 +646,7 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
        if (mem->mem_type != TTM_PL_VRAM)
                return false;
 
-       /* ttm_mem_reg_ioremap only supports contiguous memory */
+       /* ttm_resource_ioremap only supports contiguous memory */
        if (nodes->size != mem->num_pages)
                return false;
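
The contiguity test above exists because ttm_resource_ioremap() can only
map one physically contiguous range. The rest of the function is elided
here; presumably it just bounds-checks the node against the CPU-visible
window, along the lines of:

        return ((nodes->start + nodes->size) << PAGE_SHIFT) <=
                adev->gmc.visible_vram_size;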
 
@@ -709,11 +661,11 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
  */
 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
-                         struct ttm_mem_reg *new_mem)
+                         struct ttm_resource *new_mem)
 {
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
-       struct ttm_mem_reg *old_mem = &bo->mem;
+       struct ttm_resource *old_mem = &bo->mem;
        int r;
 
        /* Can't move a pinned BO */
@@ -758,7 +710,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
        } else {
-               r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+               r = amdgpu_move_blit(bo, evict,
                                     new_mem, old_mem);
        }
 
@@ -795,19 +747,12 @@ memcpy:
  *
  * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
  */
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
 {
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct drm_mm_node *mm_node = mem->mm_node;
+       size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 
-       mem->bus.addr = NULL;
-       mem->bus.offset = 0;
-       mem->bus.size = mem->num_pages << PAGE_SHIFT;
-       mem->bus.base = 0;
-       mem->bus.is_iomem = false;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
@@ -817,11 +762,11 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
-               if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
+               if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
                        return -EINVAL;
                /* Only physically contiguous buffers apply. In a contiguous
                 * buffer, size of the first mm_node would match the number of
-                * pages in ttm_mem_reg.
+                * pages in ttm_resource.
                 */
                if (adev->mman.aper_base_kaddr &&
                    (mm_node->size == mem->num_pages))
@@ -1042,7 +987,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 }
 
 /**
- * amdgpu_ttm_tt_pin_userptr -         prepare the sg table with the user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
  *
  * Called by amdgpu_ttm_backend_bind()
  **/
@@ -1076,6 +1021,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
 release_sg:
        kfree(ttm->sg);
+       ttm->sg = NULL;
        return r;
 }
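
Nulling ttm->sg after kfree() closes a double-free window: a later
teardown path may free ttm->sg again, and kfree(NULL) is a harmless
no-op while a stale pointer would be freed twice. The general idiom
(obj/ptr are placeholders, not names from this file):

        kfree(obj->ptr);
        obj->ptr = NULL;        /* a later kfree(obj->ptr) is now a no-op */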
 
@@ -1166,10 +1112,10 @@ gart_bind_fail:
  * This handles binding GTT memory to the device address space.
  */
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
-                                  struct ttm_mem_reg *bo_mem)
+                                  struct ttm_resource *bo_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
-       struct amdgpu_ttm_tt *gtt = (void*)ttm;
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
        uint64_t flags;
        int r = 0;
 
@@ -1210,14 +1156,19 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object
+ * amdgpu_ttm_alloc_gart - Make sure a buffer object is accessible
+ * through either the AGP or the GART aperture.
+ *
+ * If the BO is accessible through the AGP aperture, use the AGP
+ * aperture to access it; otherwise allocate logical space in the
+ * GART aperture and map the BO there.
  */
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct ttm_operation_ctx ctx = { false, false };
-       struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
-       struct ttm_mem_reg tmp;
+       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+       struct ttm_resource tmp;
        struct ttm_placement placement;
        struct ttm_place placements;
        uint64_t addr, flags;
@@ -1254,11 +1205,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
                gtt->offset = (u64)tmp.start << PAGE_SHIFT;
                r = amdgpu_ttm_gart_bind(adev, bo, flags);
                if (unlikely(r)) {
-                       ttm_bo_mem_put(bo, &tmp);
+                       ttm_resource_free(bo, &tmp);
                        return r;
                }
 
-               ttm_bo_mem_put(bo, &bo->mem);
+               ttm_resource_free(bo, &bo->mem);
                bo->mem = tmp;
        }
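
Note the ordering in the hunk above: the GART bind is attempted on the
temporary resource first, and bo->mem is swapped only on success, so a
failed bind leaves the BO's original placement intact. A hedged caller
sketch (amdgpu typically invokes this right after pinning a kernel BO):

        r = amdgpu_ttm_alloc_gart(&bo->tbo);
        if (unlikely(r))
                return r;
        /* the BO is now reachable by the GPU via the AGP or GART aperture */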
 
@@ -1457,21 +1408,26 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
  * task
  *
- * @ttm: The ttm_tt object to bind this userptr object to
+ * @bo: The ttm_buffer_object to bind this userptr to
  * @addr:  The address in the current tasks VM space to use
  * @flags: Requirements of userptr object.
  *
  * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
  * to current task
  */
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
-                             uint32_t flags)
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+                             uint64_t addr, uint32_t flags)
 {
-       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_tt *gtt;
 
-       if (gtt == NULL)
-               return -EINVAL;
+       if (!bo->ttm) {
+               /* TODO: We want a separate TTM object type for userptrs */
+               bo->ttm = amdgpu_ttm_tt_create(bo, 0);
+               if (bo->ttm == NULL)
+                       return -ENOMEM;
+       }
 
+       gtt = (void *)bo->ttm;
        gtt->userptr = addr;
        gtt->userflags = flags;
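
Since the ttm_tt is now created on demand, callers can hand over a fresh
BO without a populated bo->ttm. The expected call-site shape (a sketch;
the argument names are assumed, not taken from this diff):

        r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
        if (r)
                goto release_object;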
 
@@ -1557,7 +1513,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
  *
  * Figure out the flags to use for a VM PDE (Page Directory Entry).
  */
-uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
 {
        uint64_t flags = 0;
 
@@ -1583,7 +1539,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
  * Figure out the flags to use for a VM PTE (Page Table Entry).
  */
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
-                                struct ttm_mem_reg *mem)
+                                struct ttm_resource *mem)
 {
        uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
 
@@ -1741,7 +1697,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
        .ttm_tt_create = &amdgpu_ttm_tt_create,
        .ttm_tt_populate = &amdgpu_ttm_tt_populate,
        .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-       .init_mem_type = &amdgpu_init_mem_type,
        .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
        .evict_flags = &amdgpu_evict_flags,
        .move = &amdgpu_bo_move,
@@ -1767,8 +1722,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
  */
 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
 {
-       amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
-               NULL, &adev->fw_vram_usage.va);
+       amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
+               NULL, &adev->mman.fw_vram_usage_va);
 }
 
 /**
@@ -1782,19 +1737,19 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
        uint64_t vram_size = adev->gmc.visible_vram_size;
 
-       adev->fw_vram_usage.va = NULL;
-       adev->fw_vram_usage.reserved_bo = NULL;
+       adev->mman.fw_vram_usage_va = NULL;
+       adev->mman.fw_vram_usage_reserved_bo = NULL;
 
-       if (adev->fw_vram_usage.size == 0 ||
-           adev->fw_vram_usage.size > vram_size)
+       if (adev->mman.fw_vram_usage_size == 0 ||
+           adev->mman.fw_vram_usage_size > vram_size)
                return 0;
 
        return amdgpu_bo_create_kernel_at(adev,
-                                         adev->fw_vram_usage.start_offset,
-                                         adev->fw_vram_usage.size,
+                                         adev->mman.fw_vram_usage_start_offset,
+                                         adev->mman.fw_vram_usage_size,
                                          AMDGPU_GEM_DOMAIN_VRAM,
-                                         &adev->fw_vram_usage.reserved_bo,
-                                         &adev->fw_vram_usage.va);
+                                         &adev->mman.fw_vram_usage_reserved_bo,
+                                         &adev->mman.fw_vram_usage_va);
 }
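
amdgpu_bo_create_kernel_at() pins a kernel BO at a fixed VRAM offset and
optionally hands back a CPU mapping, which is exactly what a
firmware-dictated reservation needs. A minimal sketch with made-up
offset/size values for illustration:

        struct amdgpu_bo *bo = NULL;
        void *cpu_addr = NULL;
        int r;

        r = amdgpu_bo_create_kernel_at(adev, 0x100000 /* offset */,
                                       SZ_1M /* size */,
                                       AMDGPU_GEM_DOMAIN_VRAM,
                                       &bo, &cpu_addr);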
 
 /*
@@ -1826,7 +1781,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
        memset(ctx, 0, sizeof(*ctx));
 
        ctx->c2p_train_data_offset =
-               ALIGN((adev->gmc.mc_vram_size - adev->discovery_tmr_size - SZ_1M), SZ_1M);
+               ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
        ctx->p2c_train_data_offset =
                (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
        ctx->train_data_size =
@@ -1865,10 +1820,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
         * Otherwise, fall back to the legacy approach and reserve the TMR block
         * for IP discovery data and G6 memory training data, respectively.
         */
-       adev->discovery_tmr_size =
+       adev->mman.discovery_tmr_size =
                amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
-       if (!adev->discovery_tmr_size)
-               adev->discovery_tmr_size = DISCOVERY_TMR_OFFSET;
+       if (!adev->mman.discovery_tmr_size)
+               adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
 
        if (mem_train_support) {
                /* reserve vram for mem train according to TMR location */
@@ -1888,14 +1843,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
        }
 
        ret = amdgpu_bo_create_kernel_at(adev,
-                               adev->gmc.real_vram_size - adev->discovery_tmr_size,
-                               adev->discovery_tmr_size,
+                               adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+                               adev->mman.discovery_tmr_size,
                                AMDGPU_GEM_DOMAIN_VRAM,
-                               &adev->discovery_memory,
+                               &adev->mman.discovery_memory,
                                NULL);
        if (ret) {
                DRM_ERROR("alloc tmr failed(%d)!\n", ret);
-               amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+               amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
                return ret;
        }
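
The reservation above implies the following VRAM layout, with the IP
discovery TMR pinned at the very top of memory (a sketch inferred from
the offsets in this hunk):

        /*
         *  0                                              real_vram_size
         *  |------------------ VRAM ------------|-- discovery TMR --|
         *                                       ^
         *                     real_vram_size - discovery_tmr_size
         */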
 
@@ -1916,15 +1871,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        uint64_t gtt_size;
        int r;
        u64 vis_vram_limit;
-       void *stolen_vga_buf;
 
        mutex_init(&adev->mman.gtt_window_lock);
 
        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&adev->mman.bdev,
                               &amdgpu_bo_driver,
-                              adev->ddev->anon_inode->i_mapping,
-                              adev->ddev->vma_offset_manager,
+                              adev_to_drm(adev)->anon_inode->i_mapping,
+                              adev_to_drm(adev)->vma_offset_manager,
                               dma_addressing_limited(adev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1936,8 +1890,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        adev->mman.bdev.no_retry = true;
 
        /* Initialize VRAM pool with all of VRAM divided into pages */
-       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
-                               adev->gmc.real_vram_size >> PAGE_SHIFT);
+       r = amdgpu_vram_mgr_init(adev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
@@ -1970,7 +1923,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * If IP discovery is enabled, a block of memory should be
         * reserved for IP discovery.
         */
-       if (adev->discovery_bin) {
+       if (adev->mman.discovery_bin) {
                r = amdgpu_ttm_reserve_tmr(adev);
                if (r)
                        return r;
@@ -1980,10 +1933,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
         * This is used for VGA emulation and pre-OS scanout buffers to
         * avoid display artifacts while transitioning between pre-OS
         * and driver.  */
-       r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM,
-                                   &adev->stolen_vga_memory,
-                                   NULL, &stolen_vga_buf);
+       r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
+                                      AMDGPU_GEM_DOMAIN_VRAM,
+                                      &adev->mman.stolen_vga_memory,
+                                      NULL);
+       if (r)
+               return r;
+       r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+                                      adev->mman.stolen_extended_size,
+                                      AMDGPU_GEM_DOMAIN_VRAM,
+                                      &adev->mman.stolen_extended_memory,
+                                      NULL);
        if (r)
                return r;
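
Splitting the pre-OS reservation into two BOs keeps the classic VGA
region at offset 0 separate from the extended stolen range that follows
it; the implied layout (offsets taken from the two calls above):

        /*
         *  [0, stolen_vga_size)                        stolen VGA memory
         *  [stolen_vga_size,
         *   stolen_vga_size + stolen_extended_size)    stolen extended memory
         */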
 
@@ -2004,7 +1964,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                gtt_size = (uint64_t)amdgpu_gtt_size << 20;
 
        /* Initialize GTT memory pool */
-       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
+       r = amdgpu_gtt_mgr_init(adev, gtt_size);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
@@ -2013,22 +1973,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                 (unsigned)(gtt_size / (1024 * 1024)));
 
        /* Initialize various on-chip memory pools */
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
-                          adev->gds.gds_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
        if (r) {
                DRM_ERROR("Failed initializing GDS heap.\n");
                return r;
        }
 
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
-                          adev->gds.gws_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
        if (r) {
                DRM_ERROR("Failed initializing gws heap.\n");
                return r;
        }
 
-       r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
-                          adev->gds.oa_size);
+       r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
        if (r) {
                DRM_ERROR("Failed initializing oa heap.\n");
                return r;
@@ -2042,9 +1999,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
  */
 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
 {
-       void *stolen_vga_buf;
        /* return the VGA stolen memory (if any) back to VRAM */
-       amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
+       if (!adev->mman.keep_stolen_vga_memory)
+               amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
 }
 
 /**
@@ -2056,19 +2014,22 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                return;
 
        amdgpu_ttm_training_reserve_vram_fini(adev);
+       /* return the stolen vga memory back to VRAM */
+       if (adev->mman.keep_stolen_vga_memory)
+               amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
        /* return the IP Discovery TMR memory back to VRAM */
-       amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
        amdgpu_ttm_fw_reserve_vram_fini(adev);
 
        if (adev->mman.aper_base_kaddr)
                iounmap(adev->mman.aper_base_kaddr);
        adev->mman.aper_base_kaddr = NULL;
 
-       ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
-       ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
-       ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
+       amdgpu_vram_mgr_fini(adev);
+       amdgpu_gtt_mgr_fini(adev);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+       ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
        ttm_bo_device_release(&adev->mman.bdev);
        adev->mman.initialized = false;
        DRM_INFO("amdgpu: ttm finalized\n");
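
Teardown mirrors init in reverse: the driver-private VRAM/GTT managers
and the three on-chip range managers are finalized while the TTM device
is still alive, and ttm_bo_device_release() comes last. The removed
ttm_bo_clean_mm() calls are presumably no longer needed because each
manager now tears down its own resources in its _fini path.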
@@ -2085,11 +2046,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
  */
 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
-       struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        uint64_t size;
        int r;
 
-       if (!adev->mman.initialized || adev->in_gpu_reset ||
+       if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
            adev->mman.buffer_funcs_enabled == enable)
                return;
 
@@ -2100,7 +2061,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                ring = adev->mman.buffer_funcs_ring;
                sched = &ring->sched;
                r = drm_sched_entity_init(&adev->mman.entity,
-                                         DRM_SCHED_PRIORITY_KERNEL, &sched,
+                                         DRM_SCHED_PRIORITY_KERNEL, &sched,
                                          1, NULL);
                if (r) {
                        DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
@@ -2125,7 +2086,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        struct drm_file *file_priv = filp->private_data;
-       struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
 
        if (adev == NULL)
                return -EINVAL;
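
drm_to_adev() and adev_to_drm() replace the direct dev_private and
adev->ddev dereferences used throughout this file. At this point in the
tree they are presumably still thin wrappers over the old fields, along
the lines of:

        static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
        {
                return ddev->dev_private;
        }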
@@ -2306,8 +2267,8 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        unsigned ttm_pl = (uintptr_t)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = dev->dev_private;
-       struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
        struct drm_printer p = drm_seq_file_printer(m);
 
        man->func->debug(man, &p);
@@ -2597,7 +2558,7 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
        unsigned count;
 
-       struct drm_minor *minor = adev->ddev->primary;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *ent, *root = minor->debugfs_root;
 
        for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {