// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;
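
/*
 * Try to insert @node with a start address aligned to @align_pages.
 * Returns -ENOSPC when that alignment is incompatible with the buffer's
 * own page_alignment, letting the caller fall back to a smaller alignment.
 */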
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= mem->page_alignment &&
	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}
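
/*
 * Allocate a range of VRAM for @bo, preferring start addresses aligned to
 * a transparent huge page boundary so CPU mappings of the buffer can be
 * built with huge page-table entries.
 */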
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;
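
	/*
	 * Try the largest supported huge-page alignment first (PUD, then
	 * PMD), falling back to the buffer's own alignment if neither fits.
	 */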
	spin_lock(&rman->lock);
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (mem->num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(mm, node, align_pages,
						     place, mem, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (mem->num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	return ret;
}
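
/* Free a node allocated by vmw_thp_get_node(). */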
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}
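
/*
 * Set up the huge-page-aligned VRAM range manager and register it with
 * TTM. A minimal sketch of the expected call site, assuming it runs from
 * the driver load path (error handling elided):
 *
 *	ret = vmw_thp_init(dev_priv);
 *	if (ret)
 *		return ret;
 */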
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}
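
/*
 * Tear down the VRAM manager: mark it unused, evict remaining buffers,
 * then release the underlying drm_mm and free the manager itself.
 */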
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}
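
/* Called through the manager's .debug hook to dump allocator state. */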
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}
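
/* TTM resource manager callbacks implemented by this allocator. */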
static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};