// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: The base TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

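/*
 * Try a huge-page-aligned insert into @mm. The attempt is only made when
 * @align_pages is compatible with the buffer's own alignment constraint;
 * otherwise -ENOSPC is returned so the caller can fall back to a smaller
 * alignment.
 */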
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= mem->page_alignment &&
	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

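/*
 * Allocate a range for @mem: try a PUD-aligned placement first (where the
 * architecture supports PUD-size transparent huge pages), then a
 * PMD-aligned one, and finally fall back to the buffer's own alignment if
 * neither huge-page-aligned insert succeeds.
 */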
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (mem->num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(mm, node, align_pages,
						     place, mem, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (mem->num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	return ret;
}

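/*
 * Free the range backing @mem, if any, and drop the drm_mm_node that
 * vmw_thp_get_node() allocated for it.
 */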
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

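/*
 * Set up the huge-page range manager for VRAM: initialize the underlying
 * drm_mm over the whole VRAM size and register it as the TTM_PL_VRAM
 * manager.
 */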
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man;
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	man = &rman->manager;
	man->available_caching = TTM_PL_FLAG_CACHED;
	man->default_caching = TTM_PL_FLAG_CACHED;
	man->func = &vmw_thp_func;

	ttm_resource_manager_init(man,
				  dev_priv->vram_size >> PAGE_SHIFT);

	drm_mm_init(&rman->mm, 0, man->size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

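/*
 * Tear down the VRAM range manager: evict any remaining buffers, take
 * down the underlying drm_mm and unregister the manager.
 */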
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}

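/* Dump the underlying drm_mm state, taking the manager lock. */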
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};