/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/cc_platform.h>
#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
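
/*
 * Both limits default to 0 here; ttm_tt_mgr_init() below fills in the
 * real defaults during device setup. As module parameters they can also
 * be set by hand, e.g. (illustrative value only):
 *
 *      modprobe ttm pages_limit=4194304
 *
 * or with ttm.pages_limit=4194304 on the kernel command line when the
 * module is built in.
 */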

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_device *bdev = bo->bdev;
        struct drm_device *ddev = bo->base.dev;
        uint32_t page_flags = 0;

        dma_resv_assert_held(bo->base.resv);

        if (bo->ttm)
                return 0;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_TT_FLAG_EXTERNAL;
                break;
        default:
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }
        /*
         * When using dma_alloc_coherent with memory encryption the
         * mapped TT pages need to be decrypted or otherwise the drivers
         * will end up sending encrypted mem to the gpu.
         */
        if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
                page_flags |= TTM_TT_FLAG_DECRYPTED;
                drm_info_once(ddev, "TT memory decryption enabled.");
        }

        bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
                !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

        return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_create);
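
/*
 * Illustrative sketch (not part of this file): a minimal driver backend
 * for the bdev->funcs->ttm_tt_create() hook used above. The my_* names
 * are hypothetical; real drivers typically embed struct ttm_tt in a
 * larger driver-private structure.
 *
 *      static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *                                             uint32_t page_flags)
 *      {
 *              struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *              if (!tt)
 *                      return NULL;
 *
 *              if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *                      kfree(tt);
 *                      return NULL;
 *              }
 *              return tt;
 *      }
 */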

/*
 * Allocates storage for pointers to the pages that back the ttm. The
 * _dma_ variant below co-allocates the DMA address array in the same
 * allocation, while the _sg_ variant allocates only the DMA addresses.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
        if (!ttm->pages)
                return -ENOMEM;

        return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
                              sizeof(*ttm->dma_address), GFP_KERNEL);
        if (!ttm->pages)
                return -ENOMEM;

        ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
        return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
                                    GFP_KERNEL);
        if (!ttm->dma_address)
                return -ENOMEM;

        return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_destroy);

static void ttm_tt_init_fields(struct ttm_tt *ttm,
                               struct ttm_buffer_object *bo,
                               uint32_t page_flags,
                               enum ttm_caching caching,
                               unsigned long extra_pages)
{
        ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
        ttm->page_flags = page_flags;
        ttm->dma_address = NULL;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
        ttm->caching = caching;
}

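/**
 * ttm_tt_init - initialize a struct ttm_tt
 * @ttm: The struct ttm_tt to initialize.
 * @bo: The buffer object the ttm is backing.
 * @page_flags: TTM_TT_FLAG_XX page flags.
 * @caching: The desired caching state of the pages.
 * @extra_pages: Extra pages needed by the driver.
 *
 * Initializes the fields and allocates the page directory. No pages are
 * actually allocated here; that happens in ttm_tt_populate().
 *
 * Return: 0 on success, -ENOMEM if allocating the page directory failed.
 */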
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags, enum ttm_caching caching,
                unsigned long extra_pages)
{
        ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

        if (ttm_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

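/**
 * ttm_tt_fini - free the memory backing a struct ttm_tt
 * @ttm: The struct ttm_tt.
 *
 * Drops any remaining swap storage and frees the page and DMA address
 * directories. The ttm must already have been unpopulated.
 */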
void ttm_tt_fini(struct ttm_tt *ttm)
{
        WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

        if (ttm->swap_storage)
                fput(ttm->swap_storage);
        ttm->swap_storage = NULL;

        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm->dma_address);
        ttm->pages = NULL;
        ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

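/**
 * ttm_sg_tt_init - initialize a struct ttm_tt for scatter-gather use
 * @ttm: The struct ttm_tt to initialize.
 * @bo: The buffer object the ttm is backing.
 * @page_flags: TTM_TT_FLAG_XX page flags.
 * @caching: The desired caching state of the pages.
 *
 * Like ttm_tt_init(), but also allocates storage for the DMA addresses.
 * For TTM_TT_FLAG_EXTERNAL ttms only the DMA address array is needed,
 * since the pages are provided externally.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */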
int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                   uint32_t page_flags, enum ttm_caching caching)
{
        int ret;

        ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

        if (page_flags & TTM_TT_FLAG_EXTERNAL)
                ret = ttm_sg_tt_alloc_page_directory(ttm);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm);
        if (ret) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

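/**
 * ttm_tt_swapin - swap in tt object
 * @ttm: The struct ttm_tt.
 *
 * Copy the contents of a previously swapped out TT object back from its
 * shmem file into the (already allocated) pages of @ttm, then drop the
 * swap storage.
 *
 * Return: 0 on success, negative error code otherwise.
 */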
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        gfp_t gfp_mask;
        int i, ret;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;
        gfp_mask = mapping_gfp_mask(swap_space);

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = shmem_read_mapping_page_gfp(swap_space, i,
                                                        gfp_mask);
                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL)) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

        return 0;

out_err:
        return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
                   gfp_t gfp_flags)
{
        loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i, ret;

        swap_storage = shmem_file_setup("ttm swap", size, 0);
        if (IS_ERR(swap_storage)) {
                pr_err("Failed allocating swap storage\n");
                return PTR_ERR(swap_storage);
        }

        swap_space = swap_storage->f_mapping;
        gfp_flags &= mapping_gfp_mask(swap_space);

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(bdev, ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

        return ttm->num_pages;

out_err:
        fput(swap_storage);

        return ret;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
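
/*
 * Illustrative sketch (not part of this file) of the swap round trip
 * implemented by ttm_tt_swapout() above and ttm_tt_populate() below:
 *
 *      struct ttm_operation_ctx ctx = { .interruptible = false,
 *                                       .no_wait_gpu = false };
 *
 *      ret = ttm_tt_swapout(bdev, ttm, GFP_KERNEL);
 *              (pages copied to shmem, ttm unpopulated,
 *               TTM_TT_FLAG_SWAPPED set)
 *
 *      ret = ttm_tt_populate(bdev, ttm, &ctx);
 *              (fresh pages allocated, ttm_tt_swapin() copies the
 *               contents back and clears TTM_TT_FLAG_SWAPPED)
 */
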
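/**
 * ttm_tt_populate - allocate pages for a ttm
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt to populate.
 * @ctx: Operation context for the allocation.
 *
 * Account the pages against the global limits, swapping out other TT
 * objects while a limit is exceeded, then allocate the pages through the
 * driver callback or the default pool. If the object was swapped out
 * before, its contents are swapped back in.
 *
 * Return: 0 on success, negative error code otherwise.
 */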
int ttm_tt_populate(struct ttm_device *bdev,
                    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (!ttm)
                return -EINVAL;

        if (ttm_tt_is_populated(ttm))
                return 0;

        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
                if (bdev->pool.use_dma32)
                        atomic_long_add(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }

        while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
               atomic_long_read(&ttm_dma32_pages_allocated) >
               ttm_dma32_pages_limit) {

                ret = ttm_global_swapout(ctx, GFP_KERNEL);
                if (ret == 0)
                        break;
                if (ret < 0)
                        goto error;
        }

        if (bdev->funcs->ttm_tt_populate)
                ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
        else
                ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
        if (ret)
                goto error;

        ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
        if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_tt_unpopulate(bdev, ttm);
                        return ret;
                }
        }

        return 0;

error:
        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
                if (bdev->pool.use_dma32)
                        atomic_long_sub(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }
        return ret;
}

#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
#endif

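/**
 * ttm_tt_unpopulate - free pages backing a ttm
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt to unpopulate.
 *
 * Free the pages through the driver callback or the default pool and
 * update the global allocation accounting.
 */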
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        if (!ttm_tt_is_populated(ttm))
                return;

        if (bdev->funcs->ttm_tt_unpopulate)
                bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
        else
                ttm_pool_free(&bdev->pool, ttm);

        if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
                atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
                if (bdev->pool.use_dma32)
                        atomic_long_sub(ttm->num_pages,
                                        &ttm_dma32_pages_allocated);
        }

        ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
        struct ttm_operation_ctx ctx = { false, false };

        seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - initialize the global TT page limits
 *
 * Create the debugfs shrink-test file and set the default limits for
 * the number of pages that may be allocated, used when swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
                            &ttm_tt_debugfs_shrink_fops);
#endif

        if (!ttm_pages_limit)
                ttm_pages_limit = num_pages;

        if (!ttm_dma32_pages_limit)
                ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
                                       struct iosys_map *dmap,
                                       pgoff_t i)
{
        struct ttm_kmap_iter_tt *iter_tt =
                container_of(iter, typeof(*iter_tt), base);

        iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
                                                       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
                                         struct iosys_map *map)
{
        kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
        .map_local = ttm_kmap_iter_tt_map_local,
        .unmap_local = ttm_kmap_iter_tt_unmap_local,
        .maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
                      struct ttm_tt *tt)
{
        iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
        iter_tt->tt = tt;
        if (tt)
                iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
        else
                iter_tt->prot = PAGE_KERNEL;

        return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
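
/*
 * Illustrative sketch (not part of this file): mapping a single page of
 * a struct ttm_tt through the iterator. Real users are the generic move
 * helpers such as ttm_move_memcpy().
 *
 *      struct ttm_kmap_iter_tt iter_tt;
 *      struct ttm_kmap_iter *iter;
 *      struct iosys_map map;
 *
 *      iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *      iter->ops->map_local(iter, &map, 0);
 *      ... access map.vaddr ...
 *      iter->ops->unmap_local(iter, &map);
 */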

unsigned long ttm_tt_pages_limit(void)
{
        return ttm_pages_limit;
}
EXPORT_SYMBOL(ttm_tt_pages_limit);