/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
			GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}

static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	/* One allocation backs both the page pointers and the DMA addresses. */
	ttm->pages = kvmalloc_array(ttm->num_pages,
				    sizeof(*ttm->pages) +
				    sizeof(*ttm->dma_address),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;

	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;

	return 0;
}

void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unpopulate(bdev, ttm);

	if (ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);
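
/*
 * A minimal sketch of the intended caller: a driver backend's
 * ttm_tt_destroy() callback is expected to tear down the common state with
 * ttm_tt_destroy_common() first, release the page directory with
 * ttm_tt_fini() and then free its own wrapper. The my_ttm_tt structure and
 * my_ttm_backend_destroy() below are hypothetical names, assumed for
 * illustration only:
 *
 *	struct my_ttm_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static void my_ttm_backend_destroy(struct ttm_bo_device *bdev,
 *					   struct ttm_tt *ttm)
 *	{
 *		struct my_ttm_tt *gtt = container_of(ttm, struct my_ttm_tt, ttm);
 *
 *		ttm_tt_destroy_common(bdev, ttm);
 *		ttm_tt_fini(&gtt->ttm);
 *		kfree(gtt);
 *	}
 */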

void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags,
			       enum ttm_caching caching)
{
	ttm->num_pages = bo->num_pages;
	ttm->caching = ttm_cached;
	ttm->page_flags = page_flags;
	ttm->dma_address = NULL;
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
	INIT_LIST_HEAD(&ttm->pages_list);
	ttm->caching = caching;
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
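
/*
 * A minimal sketch of how a driver's ttm_tt_create() backend callback might
 * wrap ttm_tt_init(): allocate a wrapper around struct ttm_tt and initialize
 * it (drivers that need DMA addresses would use ttm_dma_tt_init() or
 * ttm_sg_tt_init() instead). The names my_ttm_tt and my_ttm_tt_create() are
 * hypothetical, assumed for illustration only:
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct my_ttm_tt *gtt;
 *
 *		gtt = kzalloc(sizeof(*gtt), GFP_KERNEL);
 *		if (!gtt)
 *			return NULL;
 *
 *		if (ttm_tt_init(&gtt->ttm, bo, page_flags, ttm_cached)) {
 *			kfree(gtt);
 *			return NULL;
 *		}
 *		return &gtt->ttm;
 *	}
 */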

void ttm_tt_fini(struct ttm_tt *ttm)
{
	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm->dma_address);
	ttm->pages = NULL;
	ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		    uint32_t page_flags, enum ttm_caching caching)
{
	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (ttm_dma_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		   uint32_t page_flags, enum ttm_caching caching)
{
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags, caching);

	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);
	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_mask |= __GFP_RETRY_MAYFAIL;

	/* Copy every page back from the shmem swap object into the ttm. */
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	return ret;
}

int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	gfp_t gfp_mask;
	int i, ret;

	swap_storage = shmem_file_setup("ttm swap",
					ttm->num_pages << PAGE_SHIFT,
					0);
	if (IS_ERR(swap_storage)) {
		pr_err("Failed allocating swap storage\n");
		return PTR_ERR(swap_storage);
	}

	swap_space = swap_storage->f_mapping;
	gfp_mask = mapping_gfp_mask(swap_space);
	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_mask |= __GFP_RETRY_MAYFAIL;

	/* Copy the ttm pages into the shmem swap object, then unpopulate. */
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;

	return 0;

out_err:
	fput(swap_storage);

	return ret;
}

static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (ret)
		return ret;

	ttm_tt_add_mapping(bdev, ttm);
	ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);
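
/*
 * A minimal sketch of a driver-side ttm_tt_populate() callback for a driver
 * that needs no extra per-device setup: it simply defers to the same
 * ttm_pool_populate() fallback used above, with any driver-specific DMA
 * mapping added afterwards. my_ttm_tt_populate() is a hypothetical name,
 * assumed for illustration only:
 *
 *	static int my_ttm_tt_populate(struct ttm_bo_device *bdev,
 *				      struct ttm_tt *ttm,
 *				      struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_populate(ttm, ctx);
 *	}
 */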

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}