// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
        struct ttm_resource_manager manager;
        struct drm_mm mm;
        spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
        return container_of(man, struct vmw_thp_manager, manager);
}

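/*
 * Try to insert @node with huge-page alignment @align_pages. The huge-page
 * alignment is only usable if it is at least as strict as, and a multiple
 * of, the alignment requested in @mem (a zero requested alignment imposes
 * no constraint). Returns -ENOSPC if the alignment is unusable, otherwise
 * the result of the drm_mm insertion.
 */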
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
                                  struct ttm_resource *mem,
                                  unsigned long lpfn,
                                  enum drm_mm_insert_mode mode)
{
        if (align_pages >= mem->page_alignment &&
            (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
                return drm_mm_insert_node_in_range(mm, node,
                                                   mem->num_pages,
                                                   align_pages, 0,
                                                   place->fpfn, lpfn, mode);
        }

        return -ENOSPC;
}

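/*
 * Allocate address space for a buffer object, preferring huge-page
 * alignment: first PUD size (when the architecture supports transparent
 * huge pages at the PUD level), then PMD size, and finally the plain
 * alignment the caller requested. Requests smaller than a huge page skip
 * the corresponding aligned attempts.
 */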
static int vmw_thp_get_node(struct ttm_resource_manager *man,
                            struct ttm_buffer_object *bo,
                            const struct ttm_place *place,
                            struct ttm_resource *mem)
{
        struct vmw_thp_manager *rman = to_thp_manager(man);
        struct drm_mm *mm = &rman->mm;
        struct drm_mm_node *node;
        unsigned long align_pages;
        unsigned long lpfn;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        lpfn = place->lpfn;
        if (!lpfn)
                lpfn = man->size;

        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                mode = DRM_MM_INSERT_HIGH;

        spin_lock(&rman->lock);
        if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
                align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
                if (mem->num_pages >= align_pages) {
                        ret = vmw_thp_insert_aligned(mm, node, align_pages,
                                                     place, mem, lpfn, mode);
                        if (!ret)
                                goto found_unlock;
                }
        }

        align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
        if (mem->num_pages >= align_pages) {
                ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
                                             lpfn, mode);
                if (!ret)
                        goto found_unlock;
        }

        /* Fall back to the alignment the caller originally requested. */
        ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
                                          mem->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
found_unlock:
        spin_unlock(&rman->lock);

        if (unlikely(ret)) {
                kfree(node);
        } else {
                mem->mm_node = node;
                mem->start = node->start;
        }

        return ret;
}

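/*
 * Free the range allocated by vmw_thp_get_node(). Tolerates a NULL
 * mm_node, in which case nothing was allocated.
 */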
static void vmw_thp_put_node(struct ttm_resource_manager *man,
                             struct ttm_resource *mem)
{
        struct vmw_thp_manager *rman = to_thp_manager(man);

        if (mem->mm_node) {
                spin_lock(&rman->lock);
                drm_mm_remove_node(mem->mm_node);
                spin_unlock(&rman->lock);

                kfree(mem->mm_node);
                mem->mm_node = NULL;
        }
}

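/*
 * Create the huge-page range manager for VRAM, register it with TTM and
 * mark it used.
 */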
int vmw_thp_init(struct vmw_private *dev_priv)
{
        struct ttm_resource_manager *man;
        struct vmw_thp_manager *rman;

        rman = kzalloc(sizeof(*rman), GFP_KERNEL);
        if (!rman)
                return -ENOMEM;

        man = &rman->manager;
        man->available_caching = TTM_PL_FLAG_CACHED;
        man->default_caching = TTM_PL_FLAG_CACHED;

        ttm_resource_manager_init(man, dev_priv->vram_size >> PAGE_SHIFT);

        drm_mm_init(&rman->mm, 0, man->size);
        spin_lock_init(&rman->lock);

        ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

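/*
 * Tear down the VRAM range manager: mark it unused, evict any remaining
 * buffers, then take down the drm_mm and unregister the manager from TTM.
 */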
void vmw_thp_fini(struct vmw_private *dev_priv)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
        struct vmw_thp_manager *rman = to_thp_manager(man);
        struct drm_mm *mm = &rman->mm;
        int ret;

        ttm_resource_manager_set_used(man, false);

        ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
        if (ret)
                return;

        spin_lock(&rman->lock);
        drm_mm_clean(mm);
        drm_mm_takedown(mm);
        spin_unlock(&rman->lock);
        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
        kfree(rman);
}

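/*
 * Print the manager's allocation state through @printer, under the
 * manager lock.
 */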
static void vmw_thp_debug(struct ttm_resource_manager *man,
                          struct drm_printer *printer)
{
        struct vmw_thp_manager *rman = to_thp_manager(man);

        spin_lock(&rman->lock);
        drm_mm_print(&rman->mm, printer);
        spin_unlock(&rman->lock);
}

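/*
 * Resource manager callbacks TTM uses to allocate, free and dump the VRAM
 * ranges managed by this file.
 */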
const struct ttm_resource_manager_func vmw_thp_func = {
        .alloc = vmw_thp_get_node,
        .free = vmw_thp_put_node,
        .debug = vmw_thp_debug
};