[linux.git] drivers/gpu/drm/ttm/ttm_bo_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        struct ttm_buffer_object *bo;
};

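/* Release the backing resource (e.g. the VRAM or GTT node) of @bo's current placement. */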
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_resource_free(bo, &bo->mem);
}

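/*
 * Move a buffer through its TTM page array: unbind from the old
 * (non-system) placement, adjust caching, and bind to the new one.
 * Only the GPU binding changes; the pages themselves are not copied.
 */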
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_resource *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                pr_err("Failed to expire sync object before unbinding TTM\n");
                        return ret;
                }

                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem, ctx);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

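/*
 * Helpers serializing access to a memory manager's io_reserve LRU.
 * The mutex is only taken when the manager actually uses the LRU to
 * limit the number of outstanding io reservations.
 */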
int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
{
        if (likely(!man->use_io_reserve_lru))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}

void ttm_mem_io_unlock(struct ttm_resource_manager *man)
{
        if (likely(!man->use_io_reserve_lru))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}

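/*
 * Free up one io reservation by unmapping the least recently used
 * buffer object on the manager's io_reserve LRU list.
 */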
static int ttm_mem_io_evict(struct ttm_resource_manager *man)
{
        struct ttm_buffer_object *bo;

        bo = list_first_entry_or_null(&man->io_reserve_lru,
                                      struct ttm_buffer_object,
                                      io_reserve_lru);
        if (!bo)
                return -ENOSPC;

        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);
        return 0;
}

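/*
 * Reserve (refcounted) the io space backing @mem through the driver's
 * io_mem_reserve() callback, evicting other reservations on -ENOSPC.
 */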
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_resource *mem)
{
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        int ret;

        if (mem->bus.io_reserved_count++)
                return 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
retry:
        ret = bdev->driver->io_mem_reserve(bdev, mem);
        if (ret == -ENOSPC) {
                ret = ttm_mem_io_evict(man);
                if (ret == 0)
                        goto retry;
        }
        return ret;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_resource *mem)
{
        if (--mem->bus.io_reserved_count)
                return;

        if (!bdev->driver->io_mem_free)
                return;

        bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
        struct ttm_resource *mem = &bo->mem;
        int ret;

        if (mem->bus.io_reserved_vm)
                return 0;

        ret = ttm_mem_io_reserve(bo->bdev, mem);
        if (unlikely(ret != 0))
                return ret;
        mem->bus.io_reserved_vm = true;
        if (man->use_io_reserve_lru)
                list_add_tail(&bo->io_reserve_lru,
                              &man->io_reserve_lru);
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_resource *mem = &bo->mem;

        if (!mem->bus.io_reserved_vm)
                return;

        mem->bus.io_reserved_vm = false;
        list_del_init(&bo->io_reserve_lru);
        ttm_mem_io_free(bo->bdev, mem);
}

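/*
 * Map the io space behind @mem into kernel address space, write-combined
 * when the placement asks for it. *virtual is left NULL for system memory.
 */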
static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                                struct ttm_resource *mem,
                                void **virtual)
{
        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset,
                                          bus_size);
                else
                        addr = ioremap(mem->bus.base + mem->bus.offset,
                                       bus_size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem,
                                 void *virtual)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(bdev, mem->mem_type);

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

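/*
 * Page-sized copy helpers for the memcpy fallback path below:
 * io -> io, io -> TTM page, and TTM page -> io respectively.
 */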
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap_atomic_prot(d, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        kunmap_atomic(dst);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap_atomic_prot(s, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        kunmap_atomic(src);

        return 0;
}

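/*
 * CPU fallback for buffer moves: map both placements, copy page by page
 * (backwards when source and destination overlap), then release the old
 * resource. Used when no accelerated copy engine is available.
 */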
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_resource *old_mem = &bo->mem;
        struct ttm_resource old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages * PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm) {
                ret = ttm_tt_populate(ttm, ctx);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else {
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                }
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (!man->use_tt) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_resource_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_resource_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_resource_free(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        struct ttm_transfer_obj *fbo;

        fbo = container_of(bo, struct ttm_transfer_obj, base);
        ttm_bo_put(fbo->bo);
        kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_transfer_obj *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        fbo->base = *bo;
        fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;

        ttm_bo_get(bo);
        fbo->bo = bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */
        atomic_inc(&ttm_bo_glob.bo_count);
        INIT_LIST_HEAD(&fbo->base.ddestroy);
        INIT_LIST_HEAD(&fbo->base.lru);
        INIT_LIST_HEAD(&fbo->base.swap);
        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);

        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;

        dma_resv_init(&fbo->base.base._resv);
        fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);

        *new_obj = &fbo->base;
        return 0;
}

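/*
 * Compute the page protection to use when mapping memory with the given
 * TTM caching flags, falling back to uncached where the architecture
 * offers no write-combining.
 */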
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__) || defined(__mips__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

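/*
 * Kernel-map an io-memory buffer, either reusing the driver-provided
 * mapping or ioremapping the bus window ourselves.
 */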
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base +
                                                  bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap(bo->mem.bus.base +
                                               bo->mem.bus.offset + offset,
                                               size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

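/*
 * Kernel-map a range of TTM pages, using kmap() for a single cached
 * page and vmap() when multiple pages or special caching are needed.
 */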
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource *mem = &bo->mem;
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_tt *ttm = bo->ttm;
        pgprot_t prot;
        int ret;

        BUG_ON(!ttm);

        ret = ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */
                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

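/*
 * Map part of a buffer object into kernel address space, dispatching
 * to ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on whether the
 * placement is io memory or system pages.
 */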
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->mem.mem_type);
        unsigned long offset, size;
        int ret;

        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->mem.mem_type);

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

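/*
 * Finish an accelerated (fence-based) move: attach @fence to the
 * reservation object and either wait for it (eviction) or hand the old
 * placement to a ghost object that is destroyed once the fence signals.
 */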
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence,
                              bool evict,
                              struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
        struct ttm_resource *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        dma_resv_add_excl_fence(bo->base.resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!man->use_tt) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */
                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */
                if (man->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

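/*
 * Like ttm_bo_move_accel_cleanup(), but never stalls the CPU: evictions
 * from fixed memory only update the manager's move fence instead of
 * waiting, so later allocations in that region synchronize with it.
 */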
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_resource *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_resource *old_mem = &bo->mem;

        struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);

        int ret;

        dma_resv_add_excl_fence(bo->base.resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */
                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */
                if (to->use_tt)
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                dma_resv_unlock(&ghost_obj->base._resv);
                ttm_bo_put(ghost_obj);

        } else if (!from->use_tt) {

                /*
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation.
                 */
                spin_lock(&from->move_lock);
                if (!from->move || dma_fence_is_later(fence, from->move)) {
                        dma_fence_put(from->move);
                        from->move = dma_fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                dma_fence_put(bo->moving);
                bo->moving = dma_fence_get(fence);

        } else {
                /*
                 * Last resort: wait for the move to complete.
                 *
                 * Should never happen in practice.
                 */
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (!to->use_tt) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

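/*
 * Drop a buffer object's backing storage without destroying the object:
 * fences are transferred to a ghost bo and the bo is left with an empty
 * system-memory placement.
 */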
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
        struct ttm_buffer_object *ghost;
        int ret;

        ret = ttm_buffer_object_transfer(bo, &ghost);
        if (ret)
                return ret;

        ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);

        memset(&bo->mem, 0, sizeof(bo->mem));
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->ttm = NULL;

        dma_resv_unlock(&ghost->base._resv);
        ttm_bo_put(ghost);

        return 0;
}