// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

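/*
 * Try to insert @node at huge-page alignment. The alignment is only
 * usable when it is at least as strict as, and a multiple of, the
 * buffer's own page_alignment; otherwise return -ENOSPC so the caller
 * can fall back to a smaller alignment.
 */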
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= mem->page_alignment &&
	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

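/*
 * Allocate a range for a buffer object, preferring huge-page-aligned
 * offsets so the resulting IO memory can be mapped with huge
 * page-table entries.
 */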
static int vmw_thp_get_node(struct ttm_mem_type_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_mem_reg *mem)
{
	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
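	/*
	 * Prefer the largest alignment first: PUD-size huge pages
	 * (typically 1 GiB) where the architecture supports transparent
	 * huge PUD entries, then PMD-size (typically 2 MiB) below.
	 */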
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (mem->num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(mm, node, align_pages,
						     place, mem, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (mem->num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

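	/*
	 * No huge-page-aligned fit; fall back to the alignment the buffer
	 * itself requested.
	 */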
	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

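	/*
	 * TTM interprets a zero return with mem->mm_node left NULL as
	 * "no space in this manager" rather than a hard error, so a failed
	 * insertion frees the node and still returns 0.
	 */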
	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	return 0;
}

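/* Return a range previously allocated by vmw_thp_get_node(). */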
static void vmw_thp_put_node(struct ttm_mem_type_manager *man,
			     struct ttm_mem_reg *mem)
{
	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);
		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

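/*
 * Create the manager's private state: a drm_mm range allocator of
 * @p_size pages, guarded by a spinlock.
 */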
static int vmw_thp_init(struct ttm_mem_type_manager *man,
			unsigned long p_size)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

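/*
 * Tear the manager down. This only succeeds when no ranges remain
 * allocated; otherwise the manager is still in use and -EBUSY is
 * returned.
 */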
static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
{
	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

static void vmw_thp_debug(struct ttm_mem_type_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

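/* Memory-type manager operations plugged into TTM for this manager. */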
const struct ttm_mem_type_manager_func vmw_thp_func = {
	.init = vmw_thp_init,
	.takedown = vmw_thp_takedown,
	.get_node = vmw_thp_get_node,
	.put_node = vmw_thp_put_node,
	.debug = vmw_thp_debug
};