/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
                       struct ttm_resource *mem)
{
        if (mem->bus.offset || mem->bus.addr)
                return 0;

        mem->bus.is_iomem = false;
        if (!bdev->funcs->io_mem_reserve)
                return 0;

        return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
                     struct ttm_resource *mem)
{
        if (!mem)
                return;

        if (!mem->bus.offset && !mem->bus.addr)
                return;

        if (bdev->funcs->io_mem_free)
                bdev->funcs->io_mem_free(bdev, mem);

        mem->bus.offset = 0;
        mem->bus.addr = NULL;
}
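
/*
 * Example (illustrative sketch only, not part of this file): pairing
 * ttm_mem_io_reserve() with ttm_mem_io_free() around a CPU access to an
 * io-mapped resource. "bdev" and "res" are assumed to be the caller's
 * ttm_device and ttm_resource.
 *
 *      int err = ttm_mem_io_reserve(bdev, res);
 *
 *      if (err)
 *              return err;
 *      ... access res->bus.addr or res->bus.offset here ...
 *      ttm_mem_io_free(bdev, res);
 */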

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to run asynchronously under a
 * dma-fence, if desired.
 */
void ttm_move_memcpy(bool clear,
                     u32 num_pages,
                     struct ttm_kmap_iter *dst_iter,
                     struct ttm_kmap_iter *src_iter)
{
        const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
        const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
        struct iosys_map src_map, dst_map;
        pgoff_t i;

        /* Single TTM move. NOP */
        if (dst_ops->maps_tt && src_ops->maps_tt)
                return;

        /* Don't move nonexistent data. Clear destination instead. */
        if (clear) {
                for (i = 0; i < num_pages; ++i) {
                        dst_ops->map_local(dst_iter, &dst_map, i);
                        if (dst_map.is_iomem)
                                memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
                        else
                                memset(dst_map.vaddr, 0, PAGE_SIZE);
                        if (dst_ops->unmap_local)
                                dst_ops->unmap_local(dst_iter, &dst_map);
                }
                return;
        }

        for (i = 0; i < num_pages; ++i) {
                dst_ops->map_local(dst_iter, &dst_map, i);
                src_ops->map_local(src_iter, &src_map, i);

                drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

                if (src_ops->unmap_local)
                        src_ops->unmap_local(src_iter, &src_map);
                if (dst_ops->unmap_local)
                        dst_ops->unmap_local(dst_iter, &dst_map);
        }
}
EXPORT_SYMBOL(ttm_move_memcpy);
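
/*
 * Example (illustrative sketch only): clearing a freshly allocated,
 * io-mapped destination with ttm_move_memcpy(). With @clear set, the
 * source data is never read, so the destination iterator may be passed
 * for both arguments. "bdev" and "new_mem" are assumed to come from the
 * caller.
 *
 *      struct ttm_kmap_iter_linear_io io;
 *      struct ttm_kmap_iter *iter;
 *
 *      iter = ttm_kmap_iter_linear_io_init(&io, bdev, new_mem);
 *      if (!IS_ERR(iter)) {
 *              ttm_move_memcpy(true, PFN_UP(new_mem->size), iter, iter);
 *              ttm_kmap_iter_linear_io_fini(&io, bdev, new_mem);
 *      }
 */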

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, set (@dst_mem)->mm_node to NULL,
 * and update the (@bo)->resource placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *dst_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *src_mem = bo->resource;
        struct ttm_resource_manager *src_man;
        union {
                struct ttm_kmap_iter_tt tt;
                struct ttm_kmap_iter_linear_io io;
        } _dst_iter, _src_iter;
        struct ttm_kmap_iter *dst_iter, *src_iter;
        bool clear;
        int ret = 0;

        if (!src_mem)
                return 0;

        src_man = ttm_manager_type(bdev, src_mem->mem_type);
        if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
                    dst_man->use_tt)) {
                ret = ttm_tt_populate(bdev, ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
        if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
                dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
        if (IS_ERR(dst_iter))
                return PTR_ERR(dst_iter);

        src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
        if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
                src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
        if (IS_ERR(src_iter)) {
                ret = PTR_ERR(src_iter);
                goto out_src_iter;
        }

        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
                ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
        ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
        if (!dst_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
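
/*
 * Example (illustrative sketch only): a driver's ttm_device_funcs::move
 * callback falling back to the memcpy path when no hardware copy engine
 * is available. "mydrv_can_blit" and "mydrv_blit" are hypothetical
 * driver helpers.
 *
 *      static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *                               struct ttm_operation_ctx *ctx,
 *                               struct ttm_resource *new_mem,
 *                               struct ttm_place *hop)
 *      {
 *              if (!mydrv_can_blit(bo, new_mem))
 *                      return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *
 *              return mydrv_blit(bo, evict, ctx, new_mem);
 *      }
 */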

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        dma_resv_fini(&fbo->base.base._resv);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        atomic_inc(&ttm_glob.bo_count);
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.pin_count = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        if (fbo->base.resource) {
                ttm_resource_set_bo(fbo->base.resource, &fbo->base);
                bo->resource = NULL;
                ttm_bo_set_bulk_move(&fbo->base, NULL);
        } else {
                fbo->base.bulk_move = NULL;
        }

        ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
        if (ret) {
                kfree(fbo);
                return ret;
        }

        ttm_bo_get(bo);
        fbo->bo = bo;

        ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

        *new_obj = &fbo->base;
        return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res, based on
 * the normal, cached protection @tmp.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
                     pgprot_t tmp)
{
        struct ttm_resource_manager *man;
        enum ttm_caching caching;

        man = ttm_manager_type(bo->bdev, res->mem_type);
        caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

        return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = bo->resource;

        if (bo->resource->bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
        } else {
                resource_size_t res = bo->resource->bus.offset + offset;

                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->bus.caching == ttm_write_combined)
                        map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
                else if (mem->bus.caching == ttm_cached)
                        map->virtual = ioremap_cache(res, size);
#endif
                else
                        map->virtual = ioremap(res, size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = bo->resource;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && ttm->caching == ttm_cached) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */
                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > PFN_UP(bo->resource->size))
                return -EINVAL;
        if ((start_page + num_pages) > PFN_UP(bo->resource->size))
                return -EINVAL;

        ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
        if (ret)
                return ret;
        if (!bo->resource->bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
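
/*
 * Example (illustrative sketch only): mapping a whole, reserved BO for
 * CPU access and tearing the mapping down again. "bo" is assumed to be
 * locked by the caller.
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      void *virtual;
 *      int err;
 *
 *      err = ttm_bo_kmap(bo, 0, PFN_UP(bo->resource->size), &map);
 *      if (err)
 *              return err;
 *      virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *      ... access the buffer through virtual, honoring is_iomem ...
 *      ttm_bo_kunmap(&map);
 */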

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        ttm_mem_io_free(map->bo->bdev, map->bo->resource);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
        struct ttm_resource *mem = bo->resource;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (ret)
                return ret;

        if (mem->bus.is_iomem) {
                void __iomem *vaddr_iomem;

                if (mem->bus.addr)
                        vaddr_iomem = (void __iomem *)mem->bus.addr;
                else if (mem->bus.caching == ttm_write_combined)
                        vaddr_iomem = ioremap_wc(mem->bus.offset,
                                                 bo->base.size);
#ifdef CONFIG_X86
                else if (mem->bus.caching == ttm_cached)
                        vaddr_iomem = ioremap_cache(mem->bus.offset,
                                                    bo->base.size);
#endif
                else
                        vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

                if (!vaddr_iomem)
                        return -ENOMEM;

                iosys_map_set_vaddr_iomem(map, vaddr_iomem);

        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false
                };
                struct ttm_tt *ttm = bo->ttm;
                pgprot_t prot;
                void *vaddr;

                ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
                if (ret)
                        return ret;

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
                vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
                if (!vaddr)
                        return -ENOMEM;

                iosys_map_set_vaddr(map, vaddr);
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);
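
/*
 * Example (illustrative sketch only): a contiguous kernel mapping via
 * struct iosys_map, e.g. for a one-off CPU copy into a locked BO.
 * "bo", "data" and "size" are assumed to come from the caller.
 *
 *      struct iosys_map map;
 *      int err;
 *
 *      err = ttm_bo_vmap(bo, &map);
 *      if (err)
 *              return err;
 *      iosys_map_memcpy_to(&map, 0, data, size);
 *      ttm_bo_vunmap(bo, &map);
 */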

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
        struct ttm_resource *mem = bo->resource;

        dma_resv_assert_held(bo->base.resv);

        if (iosys_map_is_null(map))
                return;

        if (!map->is_iomem)
                vunmap(map->vaddr);
        else if (!mem->bus.addr)
                iounmap(map->vaddr_iomem);
        iosys_map_clear(map);

        ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
                                 bool dst_use_tt)
{
        long ret;

        ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                    false, 15 * HZ);
        if (ret == 0)
                return -EBUSY;
        if (ret < 0)
                return ret;

        if (!dst_use_tt)
                ttm_bo_tt_destroy(bo);
        ttm_resource_free(bo, &bo->resource);
        return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
                                struct dma_fence *fence,
                                bool dst_use_tt)
{
        struct ttm_buffer_object *ghost_obj;
        int ret;

        /*
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */
        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        if (ret)
                return ret;

        dma_resv_add_fence(&ghost_obj->base._resv, fence,
                           DMA_RESV_USAGE_KERNEL);

        /*
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */
        if (dst_use_tt)
                ghost_obj->ttm = NULL;
        else
                bo->ttm = NULL;

        dma_resv_unlock(&ghost_obj->base._resv);
        ttm_bo_put(ghost_obj);
        return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
                                       struct dma_fence *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from;

        from = ttm_manager_type(bdev, bo->resource->mem_type);

        /*
         * BO doesn't have a TTM we need to bind/unbind. Just remember
         * this eviction and free up the allocation.
         */
        spin_lock(&from->move_lock);
        if (!from->move || dma_fence_is_later(fence, from->move)) {
                dma_fence_put(from->move);
                from->move = dma_fence_get(fence);
        }
        spin_unlock(&from->move_lock);

        ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              bool pipeline,
                              struct ttm_resource *new_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret = 0;

        dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
        if (!evict)
                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
        else if (!from->use_tt && pipeline)
                ttm_bo_move_pipeline_evict(bo, fence);
        else
                ret = ttm_bo_wait_free_node(bo, man->use_tt);

        if (ret)
                return ret;

        ttm_bo_assign_mem(bo, new_mem);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
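
/*
 * Example (illustrative sketch only): the tail of a driver's accelerated
 * move, where "fence" signals completion of a hardware copy that has
 * already been queued. "mydrv_copy_buffer" is a hypothetical helper.
 *
 *      fence = mydrv_copy_buffer(bo, new_mem);
 *      if (IS_ERR(fence))
 *              return PTR_ERR(fence);
 *      ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *      dma_fence_put(fence);
 *      return ret;
 */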

/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
                              struct ttm_resource *new_mem)
{
        struct ttm_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        int ret;

        ret = ttm_bo_wait_free_node(bo, man->use_tt);
        if (WARN_ON(ret))
                return;

        ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
        struct ttm_buffer_object *ghost;
        struct ttm_resource *sys_res;
        struct ttm_tt *ttm;
        int ret;

        ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
        if (ret)
                return ret;

        /* If already idle, no need for ghost object dance. */
        if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
                if (!bo->ttm) {
                        /* See comment below about clearing. */
                        ret = ttm_tt_create(bo, true);
                        if (ret)
                                goto error_free_sys_mem;
                } else {
                        ttm_tt_unpopulate(bo->bdev, bo->ttm);
                        if (bo->type == ttm_bo_type_device)
                                ttm_tt_mark_for_clear(bo->ttm);
                }
                ttm_resource_free(bo, &bo->resource);
                ttm_bo_assign_mem(bo, sys_res);
                return 0;
        }

        /*
         * We need an unpopulated ttm_tt after giving our current one,
         * if any, to the ghost object. And we can't afford to fail
         * creating one *after* the operation. If the bo subsequently gets
         * resurrected, make sure it's cleared (if ttm_bo_type_device)
         * to avoid leaking sensitive information to user-space.
         */
        ttm = bo->ttm;
        bo->ttm = NULL;
        ret = ttm_tt_create(bo, true);
        swap(bo->ttm, ttm);
        if (ret)
                goto error_free_sys_mem;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                goto error_destroy_tt;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret) {
                dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
                                      false, MAX_SCHEDULE_TIMEOUT);
        }

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);
        bo->ttm = ttm;
        ttm_bo_assign_mem(bo, sys_res);
        return 0;

error_destroy_tt:
        ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
        ttm_resource_free(bo, &sys_res);
        return ret;
}
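
/*
 * Example (illustrative sketch only): how core code might drop a BO's
 * backing store once its contents are no longer needed, e.g. when the
 * buffer has been marked purgeable. "bo" is assumed to be locked.
 *
 *      int err = ttm_bo_pipeline_gutting(bo);
 *
 *      if (err)
 *              return err;
 *      ... bo is now unpopulated in system placement ...
 */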