1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Huge page-table-entry support for IO memory.
5 * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
7 #include "vmwgfx_drv.h"
8 #include <drm/ttm/ttm_bo_driver.h>
9 #include <drm/ttm/ttm_placement.h>
10 #include <drm/ttm/ttm_range_manager.h>
13 * struct vmw_thp_manager - Range manager implementing huge page alignment
15 * @manager: TTM resource manager.
16 * @mm: The underlying range manager. Protected by @lock.
17 * @lock: Manager lock.
19 struct vmw_thp_manager {
20 struct ttm_resource_manager manager;
25 static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
27 return container_of(man, struct vmw_thp_manager, manager);
30 static const struct ttm_resource_manager_func vmw_thp_func;
32 static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
33 struct drm_mm *mm, struct drm_mm_node *node,
34 unsigned long align_pages,
35 const struct ttm_place *place,
36 struct ttm_resource *mem,
38 enum drm_mm_insert_mode mode)
40 if (align_pages >= bo->page_alignment &&
41 (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
42 return drm_mm_insert_node_in_range(mm, node,
45 place->fpfn, lpfn, mode);
51 static int vmw_thp_get_node(struct ttm_resource_manager *man,
52 struct ttm_buffer_object *bo,
53 const struct ttm_place *place,
54 struct ttm_resource **res)
56 struct vmw_thp_manager *rman = to_thp_manager(man);
57 struct drm_mm *mm = &rman->mm;
58 struct ttm_range_mgr_node *node;
59 unsigned long align_pages;
61 enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
64 node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
68 ttm_resource_init(bo, place, &node->base);
74 mode = DRM_MM_INSERT_BEST;
75 if (place->flags & TTM_PL_FLAG_TOPDOWN)
76 mode = DRM_MM_INSERT_HIGH;
78 spin_lock(&rman->lock);
79 if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
80 align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
81 if (node->base.num_pages >= align_pages) {
82 ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
84 &node->base, lpfn, mode);
90 align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
91 if (node->base.num_pages >= align_pages) {
92 ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
93 align_pages, place, &node->base,
99 ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
100 node->base.num_pages,
101 bo->page_alignment, 0,
102 place->fpfn, lpfn, mode);
104 spin_unlock(&rman->lock);
109 node->base.start = node->mm_nodes[0].start;
116 static void vmw_thp_put_node(struct ttm_resource_manager *man,
117 struct ttm_resource *res)
119 struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
120 struct vmw_thp_manager *rman = to_thp_manager(man);
122 spin_lock(&rman->lock);
123 drm_mm_remove_node(&node->mm_nodes[0]);
124 spin_unlock(&rman->lock);
129 int vmw_thp_init(struct vmw_private *dev_priv)
131 struct vmw_thp_manager *rman;
133 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
137 ttm_resource_manager_init(&rman->manager,
138 dev_priv->vram_size >> PAGE_SHIFT);
140 rman->manager.func = &vmw_thp_func;
141 drm_mm_init(&rman->mm, 0, rman->manager.size);
142 spin_lock_init(&rman->lock);
144 ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
145 ttm_resource_manager_set_used(&rman->manager, true);
149 void vmw_thp_fini(struct vmw_private *dev_priv)
151 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
152 struct vmw_thp_manager *rman = to_thp_manager(man);
153 struct drm_mm *mm = &rman->mm;
156 ttm_resource_manager_set_used(man, false);
158 ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
161 spin_lock(&rman->lock);
164 spin_unlock(&rman->lock);
165 ttm_resource_manager_cleanup(man);
166 ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
170 static void vmw_thp_debug(struct ttm_resource_manager *man,
171 struct drm_printer *printer)
173 struct vmw_thp_manager *rman = to_thp_manager(man);
175 spin_lock(&rman->lock);
176 drm_mm_print(&rman->mm, printer);
177 spin_unlock(&rman->lock);
180 static const struct ttm_resource_manager_func vmw_thp_func = {
181 .alloc = vmw_thp_get_node,
182 .free = vmw_thp_put_node,
183 .debug = vmw_thp_debug