/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

static unsigned long ttm_pages_limit;

MODULE_PARM_DESC(pages_limit, "Limit for the allocated pages");
module_param_named(pages_limit, ttm_pages_limit, ulong, 0644);

static unsigned long ttm_dma32_pages_limit;

MODULE_PARM_DESC(dma32_pages_limit, "Limit for the allocated DMA32 pages");
module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);

static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;
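
/*
 * Note: the 0644 permissions above expose both limits as writable module
 * parameters, so they can also be read and tuned at runtime (sketch; the
 * paths follow the usual module_param_named() convention):
 *
 *	cat /sys/module/ttm/parameters/pages_limit
 *	echo 8388608 > /sys/module/ttm/parameters/pages_limit
 *
 * When left at 0, ttm_tt_mgr_init() below fills in the defaults.
 */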

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
		!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));

	return 0;
}
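
/*
 * ttm_tt_create() above only dispatches to the driver's ttm_tt_create()
 * callback. A minimal driver-side implementation (sketch; "my_ttm_tt_create"
 * is a hypothetical name) allocates the struct and hands the rest to
 * ttm_tt_init():
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return tt;
 *	}
 */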

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

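/*
 * Single allocation: the dma_address array lives right behind the num_pages
 * page pointers, which is why ttm_tt_fini() below only has to kvfree() one
 * of the two pointers.
 */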
static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
			      sizeof(*ttm->dma_address), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvcalloc(ttm->num_pages, sizeof(*ttm->dma_address),
				    GFP_KERNEL);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	bdev->funcs->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching,
			       unsigned long extra_pages)
{
	ttm->num_pages = (PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT) + extra_pages;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching,
		unsigned long extra_pages)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching, extra_pages);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);
	ttm->swap_storage = NULL;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
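
/*
 * A matching driver-side ttm_tt_destroy() callback (sketch; hypothetical
 * name, mirroring my_ttm_tt_create() above) simply tears down what
 * ttm_tt_init() and kzalloc() set up. Note the WARN_ON() in ttm_tt_fini():
 * the tt is expected to be unpopulated by the time this runs.
 *
 *	static void my_ttm_tt_destroy(struct ttm_device *bdev,
 *				      struct ttm_tt *tt)
 *	{
 *		ttm_tt_fini(tt);
 *		kfree(tt);
 *	}
 */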

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching, 0);

	if (page_flags & TTM_TT_FLAG_EXTERNAL)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

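/**
 * ttm_tt_swapin - swap in tt object
 *
 * @ttm: The struct ttm_tt.
 *
 * Copy the pages backing @ttm back in from the shmem file that
 * ttm_tt_swapout() created, then release that file and clear
 * TTM_TT_FLAG_SWAPPED. Return 0 on success or a negative error code.
 */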
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap", size, 0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_flags &= mapping_gfp_mask(swap_space);

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_TT_FLAG_SWAPPED;

	return ttm->num_pages;

out_err:
	fput(swap_storage);

	return ret;
}

int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {

		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;
		if (ret < 0)
			goto error;
	}

	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
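
/*
 * Drivers that need more than plain pool pages hook the dispatch above via
 * their own ttm_tt_populate() callback. A minimal sketch (hypothetical
 * "my_ttm_tt_populate"; real drivers add e.g. DMA mapping after the pool
 * allocation):
 *
 *	static int my_ttm_tt_populate(struct ttm_device *bdev,
 *				      struct ttm_tt *ttm,
 *				      struct ttm_operation_ctx *ctx)
 *	{
 *		int ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
 *
 *		if (ret)
 *			return ret;
 *		... driver-private setup of the freshly allocated pages ...
 *		return 0;
 *	}
 */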

void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	if (bdev->funcs->ttm_tt_unpopulate)
		bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_free(&bdev->pool, ttm);

	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}

#ifdef CONFIG_DEBUG_FS

/* Test the shrinker functions and dump the result */
static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct ttm_operation_ctx ctx = { false, false };

	seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);

#endif

/*
 * ttm_tt_mgr_init - register with the MM shrinker
 *
 * Register with the MM shrinker for swapping out BOs.
 */
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_tt_debugfs_shrink_fops);
#endif

	if (!ttm_pages_limit)
		ttm_pages_limit = num_pages;

	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct iosys_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	iosys_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
						       iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct iosys_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
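
/*
 * Usage sketch (illustrative; "tt" is assumed to be a populated
 * struct ttm_tt): consumers such as the generic memcpy move path drive the
 * iterator through its ops table, one page at a time:
 *
 *	struct ttm_kmap_iter_tt iter_tt;
 *	struct ttm_kmap_iter *iter = ttm_kmap_iter_tt_init(&iter_tt, tt);
 *	struct iosys_map map;
 *
 *	iter->ops->map_local(iter, &map, 0);
 *	... access map.vaddr under the local kmap ...
 *	iter->ops->unmap_local(iter, &map);
 */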