/* drivers/gpu/drm/vmwgfx/vmwgfx_thp.c */
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Huge page-table-entry support for IO memory.
4  *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
6  */
7 #include "vmwgfx_drv.h"
8 #include <drm/ttm/ttm_bo_driver.h>
9 #include <drm/ttm/ttm_placement.h>
10
/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: The embedded TTM resource manager; to_thp_manager() recovers
 * this wrapper from it via container_of(), so it must stay a direct member.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};
22
23 static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
24 {
25         return container_of(man, struct vmw_thp_manager, manager);
26 }
27
28 static const struct ttm_resource_manager_func vmw_thp_func;
29
30 static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
31                                   unsigned long align_pages,
32                                   const struct ttm_place *place,
33                                   struct ttm_resource *mem,
34                                   unsigned long lpfn,
35                                   enum drm_mm_insert_mode mode)
36 {
37         if (align_pages >= mem->page_alignment &&
38             (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
39                 return drm_mm_insert_node_in_range(mm, node,
40                                                    mem->num_pages,
41                                                    align_pages, 0,
42                                                    place->fpfn, lpfn, mode);
43         }
44
45         return -ENOSPC;
46 }
47
48 static int vmw_thp_get_node(struct ttm_resource_manager *man,
49                             struct ttm_buffer_object *bo,
50                             const struct ttm_place *place,
51                             struct ttm_resource *mem)
52 {
53         struct vmw_thp_manager *rman = to_thp_manager(man);
54         struct drm_mm *mm = &rman->mm;
55         struct drm_mm_node *node;
56         unsigned long align_pages;
57         unsigned long lpfn;
58         enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
59         int ret;
60
61         node = kzalloc(sizeof(*node), GFP_KERNEL);
62         if (!node)
63                 return -ENOMEM;
64
65         lpfn = place->lpfn;
66         if (!lpfn)
67                 lpfn = man->size;
68
69         mode = DRM_MM_INSERT_BEST;
70         if (place->flags & TTM_PL_FLAG_TOPDOWN)
71                 mode = DRM_MM_INSERT_HIGH;
72
73         spin_lock(&rman->lock);
74         if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
75                 align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
76                 if (mem->num_pages >= align_pages) {
77                         ret = vmw_thp_insert_aligned(mm, node, align_pages,
78                                                      place, mem, lpfn, mode);
79                         if (!ret)
80                                 goto found_unlock;
81                 }
82         }
83
84         align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
85         if (mem->num_pages >= align_pages) {
86                 ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
87                                              lpfn, mode);
88                 if (!ret)
89                         goto found_unlock;
90         }
91
92         ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
93                                           mem->page_alignment, 0,
94                                           place->fpfn, lpfn, mode);
95 found_unlock:
96         spin_unlock(&rman->lock);
97
98         if (unlikely(ret)) {
99                 kfree(node);
100         } else {
101                 mem->mm_node = node;
102                 mem->start = node->start;
103         }
104
105         return ret;
106 }
107
108
109
110 static void vmw_thp_put_node(struct ttm_resource_manager *man,
111                              struct ttm_resource *mem)
112 {
113         struct vmw_thp_manager *rman = to_thp_manager(man);
114
115         if (mem->mm_node) {
116                 spin_lock(&rman->lock);
117                 drm_mm_remove_node(mem->mm_node);
118                 spin_unlock(&rman->lock);
119
120                 kfree(mem->mm_node);
121                 mem->mm_node = NULL;
122         }
123 }
124
125 int vmw_thp_init(struct vmw_private *dev_priv)
126 {
127         struct vmw_thp_manager *rman;
128
129         rman = kzalloc(sizeof(*rman), GFP_KERNEL);
130         if (!rman)
131                 return -ENOMEM;
132
133         ttm_resource_manager_init(&rman->manager,
134                                   dev_priv->vram_size >> PAGE_SHIFT);
135
136         rman->manager.func = &vmw_thp_func;
137         drm_mm_init(&rman->mm, 0, rman->manager.size);
138         spin_lock_init(&rman->lock);
139
140         ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
141         ttm_resource_manager_set_used(&rman->manager, true);
142         return 0;
143 }
144
145 void vmw_thp_fini(struct vmw_private *dev_priv)
146 {
147         struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
148         struct vmw_thp_manager *rman = to_thp_manager(man);
149         struct drm_mm *mm = &rman->mm;
150         int ret;
151
152         ttm_resource_manager_set_used(man, false);
153
154         ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
155         if (ret)
156                 return;
157         spin_lock(&rman->lock);
158         drm_mm_clean(mm);
159         drm_mm_takedown(mm);
160         spin_unlock(&rman->lock);
161         ttm_resource_manager_cleanup(man);
162         ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
163         kfree(rman);
164 }
165
166 static void vmw_thp_debug(struct ttm_resource_manager *man,
167                           struct drm_printer *printer)
168 {
169         struct vmw_thp_manager *rman = to_thp_manager(man);
170
171         spin_lock(&rman->lock);
172         drm_mm_print(&rman->mm, printer);
173         spin_unlock(&rman->lock);
174 }
175
176 static const struct ttm_resource_manager_func vmw_thp_func = {
177         .alloc = vmw_thp_get_node,
178         .free = vmw_thp_put_node,
179         .debug = vmw_thp_debug
180 };
/* end of file */