/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
/**
 * ttm_tt_create - allocate the backing ttm_tt for a buffer object
 * @bo: the buffer object to back, with its reservation object held
 * @zero_alloc: true if allocated pages should be zeroed
 *
 * Translates device and BO properties into page flags, then asks the
 * driver to create the struct ttm_tt backing @bo.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	reservation_object_assert_held(bo->resv);

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		bo->ttm = NULL;
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

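/*
 * Allocates a single buffer holding both the page pointers and the
 * matching DMA addresses; dma_address points into the tail of the
 * pages allocation, so only ttm->ttm.pages is freed on teardown.
 */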
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

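/*
 * For scatter-gather backed ttms the pages come from the SG table, so
 * only the DMA address array needs allocating here.
 */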
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

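/*
 * Caching transitions of the kernel linear map are only implemented
 * on x86; everywhere else ttm_tt_set_page_caching() is a no-op.
 */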
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long)page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages that were already transitioned. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

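/**
 * ttm_tt_set_placement_caching - apply the caching state implied by placement
 * @ttm: the ttm to transition
 * @placement: TTM_PL_FLAG_* placement flags
 */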
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

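/**
 * ttm_tt_destroy - unbind, unpopulate and free a ttm
 * @ttm: the ttm to destroy, may be NULL
 *
 * Also drops the reference on any non-persistent swap storage.
 */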
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

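/*
 * Initialize the fields common to all ttm_tt flavours; the page
 * directories are allocated by the flavour-specific init functions.
 */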
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
			uint32_t page_flags)
{
	ttm->bdev = bo->bdev;
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

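/**
 * ttm_tt_init - initialize a page-pool backed ttm
 * @ttm: the ttm to initialize
 * @bo: the buffer object @ttm will back
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm
 *
 * Illustrative sketch of a driver's ttm_tt_create() hook (the names
 * my_ttm and my_ttm_backend_func are hypothetical):
 *
 *	struct my_ttm *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *	if (!tt)
 *		return NULL;
 *	tt->ttm.func = &my_ttm_backend_func;
 *	if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
 *		kfree(tt);
 *		return NULL;
 *	}
 *	return &tt->ttm;
 */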
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

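/**
 * ttm_dma_tt_init - initialize a ttm with a combined page/DMA-address directory
 * @ttm_dma: the ttm_dma_tt to initialize
 * @bo: the buffer object it will back
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm
 */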
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

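/**
 * ttm_sg_tt_init - initialize a ttm suitable for scatter-gather BOs
 * @ttm_dma: the ttm_dma_tt to initialize
 * @bo: the buffer object it will back
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm
 *
 * With TTM_PAGE_FLAG_SG set, only the DMA address array is allocated;
 * otherwise this behaves like ttm_dma_tt_init().
 */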
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

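/*
 * Free whichever directory layout was allocated: the SG flavour only
 * has dma_address, while in the combined layout dma_address points
 * into the pages allocation and must not be freed separately.
 */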
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

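/*
 * Unbind the ttm from the translation table aperture if it is
 * currently bound; the driver's unbind hook is not allowed to fail.
 */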
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

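/**
 * ttm_tt_bind - populate a ttm and bind it to a memory region
 * @ttm: the ttm to bind
 * @bo_mem: the memory region to bind to
 * @ctx: operation context for the population
 *
 * Illustrative call site (field values are an example only):
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_tt_bind(bo->ttm, &bo->mem, &ctx);
 */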
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

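/*
 * Copy the ttm's pages back in from its shmem swap storage and, unless
 * the storage is persistent, drop the swap file.
 */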
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			/* Don't leak the reference taken on from_page. */
			put_page(from_page);
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

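/*
 * Copy the ttm's pages out to shmem-backed swap storage and unpopulate
 * the ttm; only unbound, cacheable ttms may be swapped out.
 */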
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

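/*
 * Associate the backing pages with the device's address space; skipped
 * for SG ttms, whose pages are not owned by TTM.
 */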
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

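/*
 * Allocate backing pages for the ttm, preferring the driver's populate
 * hook over the generic page pool.
 */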
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

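/*
 * Undo ttm_tt_add_mapping() before the pages are handed back to the
 * allocator.
 */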
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

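/*
 * Release the ttm's backing pages through the driver hook or the
 * generic page pool.
 */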
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}