drivers/gpu/drm/ttm/ttm_bo_util.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29  */
30
31 #include <drm/ttm/ttm_bo_driver.h>
32 #include <drm/ttm/ttm_placement.h>
33 #include <drm/drm_vma_manager.h>
34 #include <linux/io.h>
35 #include <linux/highmem.h>
36 #include <linux/wait.h>
37 #include <linux/slab.h>
38 #include <linux/vmalloc.h>
39 #include <linux/module.h>
40 #include <linux/reservation.h>
41
42 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
43 {
44         ttm_bo_mem_put(bo, &bo->mem);
45 }
46
47 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
48                     struct ttm_operation_ctx *ctx,
49                     struct ttm_mem_reg *new_mem)
50 {
51         struct ttm_tt *ttm = bo->ttm;
52         struct ttm_mem_reg *old_mem = &bo->mem;
53         int ret;
54
55         if (old_mem->mem_type != TTM_PL_SYSTEM) {
56                 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
57
58                 if (unlikely(ret != 0)) {
59                         if (ret != -ERESTARTSYS)
60                                 pr_err("Failed to expire sync object before unbinding TTM\n");
61                         return ret;
62                 }
63
64                 ttm_tt_unbind(ttm);
65                 ttm_bo_free_old_node(bo);
66                 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
67                                 TTM_PL_MASK_MEM);
68                 old_mem->mem_type = TTM_PL_SYSTEM;
69         }
70
71         ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
72         if (unlikely(ret != 0))
73                 return ret;
74
75         if (new_mem->mem_type != TTM_PL_SYSTEM) {
76                 ret = ttm_tt_bind(ttm, new_mem, ctx);
77                 if (unlikely(ret != 0))
78                         return ret;
79         }
80
81         *old_mem = *new_mem;
82         new_mem->mm_node = NULL;
83
84         return 0;
85 }
86 EXPORT_SYMBOL(ttm_bo_move_ttm);
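/*
 * Usage sketch: a driver's ttm_bo_driver::move callback will typically hand
 * SYSTEM<->TT moves to ttm_bo_move_ttm() and fall back to a CPU copy for
 * anything else. The foo_* names are hypothetical; only the ttm_* calls are
 * taken from TTM itself, and a real driver adds its own placement checks.
 */
#if 0
static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        /* Moves between system memory and TT only need a bind/unbind. */
        if ((old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT) ||
            (old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM))
                return ttm_bo_move_ttm(bo, ctx, new_mem);

        /* Otherwise do a CPU copy (see ttm_bo_move_memcpy() below). */
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
#endif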
87
88 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
89 {
90         if (likely(man->io_reserve_fastpath))
91                 return 0;
92
93         if (interruptible)
94                 return mutex_lock_interruptible(&man->io_reserve_mutex);
95
96         mutex_lock(&man->io_reserve_mutex);
97         return 0;
98 }
99 EXPORT_SYMBOL(ttm_mem_io_lock);
100
101 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
102 {
103         if (likely(man->io_reserve_fastpath))
104                 return;
105
106         mutex_unlock(&man->io_reserve_mutex);
107 }
108 EXPORT_SYMBOL(ttm_mem_io_unlock);
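/*
 * Locking sketch: callers are expected to bracket ttm_mem_io_reserve() /
 * ttm_mem_io_free() with this lock, exactly as ttm_mem_reg_ioremap() and
 * ttm_bo_kmap() do further down. foo_reserve_iomem() is a hypothetical
 * helper; "bo" is assumed to be an already reserved buffer object.
 */
#if 0
static int foo_reserve_iomem(struct ttm_buffer_object *bo)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];
        int ret;

        ret = ttm_mem_io_lock(man, true);
        if (ret)
                return ret;
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        return ret;
}
#endif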
109
110 static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
111 {
112         struct ttm_buffer_object *bo;
113
114         if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
115                 return -EAGAIN;
116
117         bo = list_first_entry(&man->io_reserve_lru,
118                               struct ttm_buffer_object,
119                               io_reserve_lru);
120         list_del_init(&bo->io_reserve_lru);
121         ttm_bo_unmap_virtual_locked(bo);
122
123         return 0;
124 }
125
126
127 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
128                        struct ttm_mem_reg *mem)
129 {
130         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
131         int ret = 0;
132
133         if (!bdev->driver->io_mem_reserve)
134                 return 0;
135         if (likely(man->io_reserve_fastpath))
136                 return bdev->driver->io_mem_reserve(bdev, mem);
137
138         if (bdev->driver->io_mem_reserve &&
139             mem->bus.io_reserved_count++ == 0) {
140 retry:
141                 ret = bdev->driver->io_mem_reserve(bdev, mem);
142                 if (ret == -EAGAIN) {
143                         ret = ttm_mem_io_evict(man);
144                         if (ret == 0)
145                                 goto retry;
146                 }
147         }
148         return ret;
149 }
150 EXPORT_SYMBOL(ttm_mem_io_reserve);
151
152 void ttm_mem_io_free(struct ttm_bo_device *bdev,
153                      struct ttm_mem_reg *mem)
154 {
155         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
156
157         if (likely(man->io_reserve_fastpath))
158                 return;
159
160         if (bdev->driver->io_mem_reserve &&
161             --mem->bus.io_reserved_count == 0 &&
162             bdev->driver->io_mem_free)
163                 bdev->driver->io_mem_free(bdev, mem);
164
165 }
166 EXPORT_SYMBOL(ttm_mem_io_free);
167
168 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
169 {
170         struct ttm_mem_reg *mem = &bo->mem;
171         int ret;
172
173         if (!mem->bus.io_reserved_vm) {
174                 struct ttm_mem_type_manager *man =
175                         &bo->bdev->man[mem->mem_type];
176
177                 ret = ttm_mem_io_reserve(bo->bdev, mem);
178                 if (unlikely(ret != 0))
179                         return ret;
180                 mem->bus.io_reserved_vm = true;
181                 if (man->use_io_reserve_lru)
182                         list_add_tail(&bo->io_reserve_lru,
183                                       &man->io_reserve_lru);
184         }
185         return 0;
186 }
187
188 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
189 {
190         struct ttm_mem_reg *mem = &bo->mem;
191
192         if (mem->bus.io_reserved_vm) {
193                 mem->bus.io_reserved_vm = false;
194                 list_del_init(&bo->io_reserve_lru);
195                 ttm_mem_io_free(bo->bdev, mem);
196         }
197 }
198
199 static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
200                         void **virtual)
201 {
202         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
203         int ret;
204         void *addr;
205
206         *virtual = NULL;
207         (void) ttm_mem_io_lock(man, false);
208         ret = ttm_mem_io_reserve(bdev, mem);
209         ttm_mem_io_unlock(man);
210         if (ret || !mem->bus.is_iomem)
211                 return ret;
212
213         if (mem->bus.addr) {
214                 addr = mem->bus.addr;
215         } else {
216                 if (mem->placement & TTM_PL_FLAG_WC)
217                         addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
218                 else
219                         addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
220                 if (!addr) {
221                         (void) ttm_mem_io_lock(man, false);
222                         ttm_mem_io_free(bdev, mem);
223                         ttm_mem_io_unlock(man);
224                         return -ENOMEM;
225                 }
226         }
227         *virtual = addr;
228         return 0;
229 }
230
231 static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
232                          void *virtual)
233 {
234         struct ttm_mem_type_manager *man;
235
236         man = &bdev->man[mem->mem_type];
237
238         if (virtual && mem->bus.addr == NULL)
239                 iounmap(virtual);
240         (void) ttm_mem_io_lock(man, false);
241         ttm_mem_io_free(bdev, mem);
242         ttm_mem_io_unlock(man);
243 }
244
245 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
246 {
247         uint32_t *dstP =
248             (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
249         uint32_t *srcP =
250             (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
251
252         int i;
253         for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
254                 iowrite32(ioread32(srcP++), dstP++);
255         return 0;
256 }
257
258 #ifdef CONFIG_X86
259 #define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
260 #define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
261 #else
262 #define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
263 #define __ttm_kunmap_atomic(__addr) vunmap(__addr)
264 #endif
265
266
267 /**
268  * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
269  * specified page protection.
270  *
271  * @page: The page to map.
272  * @prot: The page protection.
273  *
274  * This function maps a TTM page using the kmap_atomic api if available,
275  * otherwise falls back to vmap. The user must make sure that the
276  * specified page does not have an aliased mapping with a different caching
277  * policy unless the architecture explicitly allows it. Also mapping and
278  * unmapping using this api must be correctly nested. Unmapping should
279  * occur in the reverse order of mapping.
280  */
281 void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
282 {
283         if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
284                 return kmap_atomic(page);
285         else
286                 return __ttm_kmap_atomic_prot(page, prot);
287 }
288 EXPORT_SYMBOL(ttm_kmap_atomic_prot);
289
290 /**
291  * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
292  * ttm_kmap_atomic_prot.
293  *
294  * @addr: The virtual address from the map.
295  * @prot: The page protection.
296  */
297 void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
298 {
299         if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
300                 kunmap_atomic(addr);
301         else
302                 __ttm_kunmap_atomic(addr);
303 }
304 EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
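/*
 * Minimal sketch of the mapping contract documented above: map a single page
 * with a non-default protection, touch it, and unmap with the same prot.
 * foo_clear_wc_page() is hypothetical; ttm_io_prot() (defined later in this
 * file) is assumed to be the usual way to derive the protection.
 */
#if 0
static void foo_clear_wc_page(struct page *p)
{
        pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_WC, PAGE_KERNEL);
        void *va = ttm_kmap_atomic_prot(p, prot);

        if (!va)
                return;
        memset(va, 0, PAGE_SIZE);
        ttm_kunmap_atomic_prot(va, prot);
}
#endif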
305
306 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
307                                 unsigned long page,
308                                 pgprot_t prot)
309 {
310         struct page *d = ttm->pages[page];
311         void *dst;
312
313         if (!d)
314                 return -ENOMEM;
315
316         src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
317         dst = ttm_kmap_atomic_prot(d, prot);
318         if (!dst)
319                 return -ENOMEM;
320
321         memcpy_fromio(dst, src, PAGE_SIZE);
322
323         ttm_kunmap_atomic_prot(dst, prot);
324
325         return 0;
326 }
327
328 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
329                                 unsigned long page,
330                                 pgprot_t prot)
331 {
332         struct page *s = ttm->pages[page];
333         void *src;
334
335         if (!s)
336                 return -ENOMEM;
337
338         dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
339         src = ttm_kmap_atomic_prot(s, prot);
340         if (!src)
341                 return -ENOMEM;
342
343         memcpy_toio(dst, src, PAGE_SIZE);
344
345         ttm_kunmap_atomic_prot(src, prot);
346
347         return 0;
348 }
349
350 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
351                        struct ttm_operation_ctx *ctx,
352                        struct ttm_mem_reg *new_mem)
353 {
354         struct ttm_bo_device *bdev = bo->bdev;
355         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
356         struct ttm_tt *ttm = bo->ttm;
357         struct ttm_mem_reg *old_mem = &bo->mem;
358         struct ttm_mem_reg old_copy = *old_mem;
359         void *old_iomap;
360         void *new_iomap;
361         int ret;
362         unsigned long i;
363         unsigned long page;
364         unsigned long add = 0;
365         int dir;
366
367         ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
368         if (ret)
369                 return ret;
370
371         ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
372         if (ret)
373                 return ret;
374         ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
375         if (ret)
376                 goto out;
377
378         /*
379          * Single TTM move. NOP.
380          */
381         if (old_iomap == NULL && new_iomap == NULL)
382                 goto out2;
383
384         /*
385          * Don't move nonexistent data. Clear destination instead.
386          */
387         if (old_iomap == NULL &&
388             (ttm == NULL || (ttm->state == tt_unpopulated &&
389                              !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
390                 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
391                 goto out2;
392         }
393
394         /*
395          * TTM might be null for moves within the same region.
396          */
397         if (ttm) {
398                 ret = ttm_tt_populate(ttm, ctx);
399                 if (ret)
400                         goto out1;
401         }
402
403         add = 0;
404         dir = 1;
405
406         if ((old_mem->mem_type == new_mem->mem_type) &&
407             (new_mem->start < old_mem->start + old_mem->size)) {
408                 dir = -1;
409                 add = new_mem->num_pages - 1;
410         }
411
412         for (i = 0; i < new_mem->num_pages; ++i) {
413                 page = i * dir + add;
414                 if (old_iomap == NULL) {
415                         pgprot_t prot = ttm_io_prot(old_mem->placement,
416                                                     PAGE_KERNEL);
417                         ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
418                                                    prot);
419                 } else if (new_iomap == NULL) {
420                         pgprot_t prot = ttm_io_prot(new_mem->placement,
421                                                     PAGE_KERNEL);
422                         ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
423                                                    prot);
424                 } else {
425                         ret = ttm_copy_io_page(new_iomap, old_iomap, page);
426                 }
427                 if (ret)
428                         goto out1;
429         }
430         mb();
431 out2:
432         old_copy = *old_mem;
433         *old_mem = *new_mem;
434         new_mem->mm_node = NULL;
435
436         if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
437                 ttm_tt_destroy(ttm);
438                 bo->ttm = NULL;
439         }
440
441 out1:
442         ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
443 out:
444         ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
445
446         /*
447          * On error, keep the mm node!
448          */
449         if (!ret)
450                 ttm_bo_mem_put(bo, &old_copy);
451         return ret;
452 }
453 EXPORT_SYMBOL(ttm_bo_move_memcpy);
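/*
 * Fallback sketch: drivers usually try a hardware blit first and only use
 * this CPU copy when no copy engine is usable. foo_copy_with_engine() is a
 * hypothetical driver routine returning -ENODEV when the blitter is
 * unavailable; the rest mirrors the usual move-callback shape.
 */
#if 0
static int foo_bo_move_fallback(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
                                struct ttm_mem_reg *new_mem)
{
        int ret;

        ret = foo_copy_with_engine(bo, ctx, new_mem);
        if (ret == -ENODEV)
                ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
        return ret;
}
#endif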
454
455 static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
456 {
457         kfree(bo);
458 }
459
460 /**
461  * ttm_buffer_object_transfer
462  *
463  * @bo: A pointer to a struct ttm_buffer_object.
464  * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
465  * holding the data of @bo with the old placement.
466  *
467  * This is a utility function that may be called after an accelerated move
468  * has been scheduled. A new buffer object is created as a placeholder for
469  * the old data while it's being copied. When that buffer object is idle,
470  * it can be destroyed, releasing the space of the old placement.
471  * Returns:
472  * 0 on success, a negative error code on failure.
473  */
474
475 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
476                                       struct ttm_buffer_object **new_obj)
477 {
478         struct ttm_buffer_object *fbo;
479         int ret;
480
481         fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
482         if (!fbo)
483                 return -ENOMEM;
484
485         *fbo = *bo;
486
487         /**
488          * Fix up members that we shouldn't copy directly:
489          * TODO: Explicit member copy would probably be better here.
490          */
491
492         atomic_inc(&bo->bdev->glob->bo_count);
493         INIT_LIST_HEAD(&fbo->ddestroy);
494         INIT_LIST_HEAD(&fbo->lru);
495         INIT_LIST_HEAD(&fbo->swap);
496         INIT_LIST_HEAD(&fbo->io_reserve_lru);
497         mutex_init(&fbo->wu_mutex);
498         fbo->moving = NULL;
499         drm_vma_node_reset(&fbo->vma_node);
500         atomic_set(&fbo->cpu_writers, 0);
501
502         kref_init(&fbo->list_kref);
503         kref_init(&fbo->kref);
504         fbo->destroy = &ttm_transfered_destroy;
505         fbo->acc_size = 0;
506         fbo->resv = &fbo->ttm_resv;
507         reservation_object_init(fbo->resv);
508         ret = reservation_object_trylock(fbo->resv);
509         WARN_ON(!ret);
510
511         *new_obj = fbo;
512         return 0;
513 }
514
515 pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
516 {
517         /* Cached mappings need no adjustment */
518         if (caching_flags & TTM_PL_FLAG_CACHED)
519                 return tmp;
520
521 #if defined(__i386__) || defined(__x86_64__)
522         if (caching_flags & TTM_PL_FLAG_WC)
523                 tmp = pgprot_writecombine(tmp);
524         else if (boot_cpu_data.x86 > 3)
525                 tmp = pgprot_noncached(tmp);
526 #endif
527 #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
528     defined(__powerpc__)
529         if (caching_flags & TTM_PL_FLAG_WC)
530                 tmp = pgprot_writecombine(tmp);
531         else
532                 tmp = pgprot_noncached(tmp);
533 #endif
534 #if defined(__sparc__) || defined(__mips__)
535         tmp = pgprot_noncached(tmp);
536 #endif
537         return tmp;
538 }
539 EXPORT_SYMBOL(ttm_io_prot);
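/*
 * Besides the kmap/vmap paths below, the same helper can be used by a CPU
 * fault handler to adjust a vma's protection to the BO's current caching.
 * This is a sketch of that use; foo_fault_prot() is hypothetical.
 */
#if 0
static pgprot_t foo_fault_prot(struct ttm_buffer_object *bo,
                               struct vm_area_struct *vma)
{
        return ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
}
#endif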
540
541 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
542                           unsigned long offset,
543                           unsigned long size,
544                           struct ttm_bo_kmap_obj *map)
545 {
546         struct ttm_mem_reg *mem = &bo->mem;
547
548         if (bo->mem.bus.addr) {
549                 map->bo_kmap_type = ttm_bo_map_premapped;
550                 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
551         } else {
552                 map->bo_kmap_type = ttm_bo_map_iomap;
553                 if (mem->placement & TTM_PL_FLAG_WC)
554                         map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
555                                                   size);
556                 else
557                         map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
558                                                        size);
559         }
560         return (!map->virtual) ? -ENOMEM : 0;
561 }
562
563 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
564                            unsigned long start_page,
565                            unsigned long num_pages,
566                            struct ttm_bo_kmap_obj *map)
567 {
568         struct ttm_mem_reg *mem = &bo->mem;
569         struct ttm_operation_ctx ctx = {
570                 .interruptible = false,
571                 .no_wait_gpu = false
572         };
573         struct ttm_tt *ttm = bo->ttm;
574         pgprot_t prot;
575         int ret;
576
577         BUG_ON(!ttm);
578
579         ret = ttm_tt_populate(ttm, &ctx);
580         if (ret)
581                 return ret;
582
583         if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
584                 /*
585                  * We're mapping a single page, and the desired
586                  * page protection is consistent with the bo.
587                  */
588
589                 map->bo_kmap_type = ttm_bo_map_kmap;
590                 map->page = ttm->pages[start_page];
591                 map->virtual = kmap(map->page);
592         } else {
593                 /*
594                  * We need to use vmap to get the desired page protection
595                  * or to make the buffer object look contiguous.
596                  */
597                 prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
598                 map->bo_kmap_type = ttm_bo_map_vmap;
599                 map->virtual = vmap(ttm->pages + start_page, num_pages,
600                                     0, prot);
601         }
602         return (!map->virtual) ? -ENOMEM : 0;
603 }
604
605 int ttm_bo_kmap(struct ttm_buffer_object *bo,
606                 unsigned long start_page, unsigned long num_pages,
607                 struct ttm_bo_kmap_obj *map)
608 {
609         struct ttm_mem_type_manager *man =
610                 &bo->bdev->man[bo->mem.mem_type];
611         unsigned long offset, size;
612         int ret;
613
614         map->virtual = NULL;
615         map->bo = bo;
616         if (num_pages > bo->num_pages)
617                 return -EINVAL;
618         if (start_page > bo->num_pages)
619                 return -EINVAL;
620 #if 0
621         if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
622                 return -EPERM;
623 #endif
624         (void) ttm_mem_io_lock(man, false);
625         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
626         ttm_mem_io_unlock(man);
627         if (ret)
628                 return ret;
629         if (!bo->mem.bus.is_iomem) {
630                 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
631         } else {
632                 offset = start_page << PAGE_SHIFT;
633                 size = num_pages << PAGE_SHIFT;
634                 return ttm_bo_ioremap(bo, offset, size, map);
635         }
636 }
637 EXPORT_SYMBOL(ttm_bo_kmap);
638
639 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
640 {
641         struct ttm_buffer_object *bo = map->bo;
642         struct ttm_mem_type_manager *man =
643                 &bo->bdev->man[bo->mem.mem_type];
644
645         if (!map->virtual)
646                 return;
647         switch (map->bo_kmap_type) {
648         case ttm_bo_map_iomap:
649                 iounmap(map->virtual);
650                 break;
651         case ttm_bo_map_vmap:
652                 vunmap(map->virtual);
653                 break;
654         case ttm_bo_map_kmap:
655                 kunmap(map->page);
656                 break;
657         case ttm_bo_map_premapped:
658                 break;
659         default:
660                 BUG();
661         }
662         (void) ttm_mem_io_lock(man, false);
663         ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
664         ttm_mem_io_unlock(man);
665         map->virtual = NULL;
666         map->page = NULL;
667 }
668 EXPORT_SYMBOL(ttm_bo_kunmap);
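/*
 * Round-trip sketch for the two helpers above: map the first page of a BO,
 * clear it, and unmap. foo_clear_first_page() is hypothetical;
 * ttm_kmap_obj_virtual() comes from ttm_bo_api.h and reports whether the
 * returned address is I/O memory.
 */
#if 0
static int foo_clear_first_page(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virtual;
        int ret;

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (ret)
                return ret;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (is_iomem)
                memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
        else
                memset(virtual, 0, PAGE_SIZE);

        ttm_bo_kunmap(&map);
        return 0;
}
#endif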
669
670 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
671                               struct dma_fence *fence,
672                               bool evict,
673                               struct ttm_mem_reg *new_mem)
674 {
675         struct ttm_bo_device *bdev = bo->bdev;
676         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
677         struct ttm_mem_reg *old_mem = &bo->mem;
678         int ret;
679         struct ttm_buffer_object *ghost_obj;
680
681         reservation_object_add_excl_fence(bo->resv, fence);
682         if (evict) {
683                 ret = ttm_bo_wait(bo, false, false);
684                 if (ret)
685                         return ret;
686
687                 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
688                         ttm_tt_destroy(bo->ttm);
689                         bo->ttm = NULL;
690                 }
691                 ttm_bo_free_old_node(bo);
692         } else {
693                 /**
694                  * This should help pipeline ordinary buffer moves.
695                  *
696                  * Hang old buffer memory on a new buffer object,
697                  * and leave it to be released when the GPU
698                  * operation has completed.
699                  */
700
701                 dma_fence_put(bo->moving);
702                 bo->moving = dma_fence_get(fence);
703
704                 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
705                 if (ret)
706                         return ret;
707
708                 reservation_object_add_excl_fence(ghost_obj->resv, fence);
709
710                 /**
711                  * If we're not moving to fixed memory, the TTM object
712  * needs to stay alive. Otherwise hang it on the ghost
713                  * bo to be unbound and destroyed.
714                  */
715
716                 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
717                         ghost_obj->ttm = NULL;
718                 else
719                         bo->ttm = NULL;
720
721                 ttm_bo_unreserve(ghost_obj);
722                 ttm_bo_unref(&ghost_obj);
723         }
724
725         *old_mem = *new_mem;
726         new_mem->mm_node = NULL;
727
728         return 0;
729 }
730 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
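/*
 * Accelerated-move sketch: queue a GPU copy, then let the helper above hang
 * the old memory on a ghost object keyed to the copy fence. foo_copy_buffer()
 * is a hypothetical driver routine that schedules the blit and returns its
 * fence; only ttm_bo_move_accel_cleanup() and dma_fence_put() are real.
 */
#if 0
static int foo_bo_move_blit(struct ttm_buffer_object *bo, bool evict,
                            struct ttm_mem_reg *new_mem)
{
        struct dma_fence *fence;
        int ret;

        fence = foo_copy_buffer(bo, &bo->mem, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return ret;
}
#endif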
731
732 int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
733                          struct dma_fence *fence, bool evict,
734                          struct ttm_mem_reg *new_mem)
735 {
736         struct ttm_bo_device *bdev = bo->bdev;
737         struct ttm_mem_reg *old_mem = &bo->mem;
738
739         struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
740         struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
741
742         int ret;
743
744         reservation_object_add_excl_fence(bo->resv, fence);
745
746         if (!evict) {
747                 struct ttm_buffer_object *ghost_obj;
748
749                 /**
750                  * This should help pipeline ordinary buffer moves.
751                  *
752                  * Hang old buffer memory on a new buffer object,
753                  * and leave it to be released when the GPU
754                  * operation has completed.
755                  */
756
757                 dma_fence_put(bo->moving);
758                 bo->moving = dma_fence_get(fence);
759
760                 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
761                 if (ret)
762                         return ret;
763
764                 reservation_object_add_excl_fence(ghost_obj->resv, fence);
765
766                 /**
767                  * If we're not moving to fixed memory, the TTM object
768  * needs to stay alive. Otherwise hang it on the ghost
769                  * bo to be unbound and destroyed.
770                  */
771
772                 if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
773                         ghost_obj->ttm = NULL;
774                 else
775                         bo->ttm = NULL;
776
777                 ttm_bo_unreserve(ghost_obj);
778                 ttm_bo_unref(&ghost_obj);
779
780         } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
781
782                 /**
783  * The BO doesn't have a TTM we need to bind/unbind. Just remember
784  * this eviction and free up the allocation.
785                  */
786
787                 spin_lock(&from->move_lock);
788                 if (!from->move || dma_fence_is_later(fence, from->move)) {
789                         dma_fence_put(from->move);
790                         from->move = dma_fence_get(fence);
791                 }
792                 spin_unlock(&from->move_lock);
793
794                 ttm_bo_free_old_node(bo);
795
796                 dma_fence_put(bo->moving);
797                 bo->moving = dma_fence_get(fence);
798
799         } else {
800                 /**
801                  * Last resort, wait for the move to be completed.
802                  *
803  * Should never happen in practice.
804                  */
805
806                 ret = ttm_bo_wait(bo, false, false);
807                 if (ret)
808                         return ret;
809
810                 if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
811                         ttm_tt_destroy(bo->ttm);
812                         bo->ttm = NULL;
813                 }
814                 ttm_bo_free_old_node(bo);
815         }
816
817         *old_mem = *new_mem;
818         new_mem->mm_node = NULL;
819
820         return 0;
821 }
822 EXPORT_SYMBOL(ttm_bo_pipeline_move);
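/*
 * Pipelined variant of the sketch after ttm_bo_move_accel_cleanup(): a driver
 * that can also pipeline evictions passes the copy fence here instead, so the
 * evict path does not stall on it either. foo_copy_buffer() is the same
 * hypothetical blit helper as above; this is the tail of such a callback.
 */
#if 0
        fence = foo_copy_buffer(bo, &bo->mem, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        dma_fence_put(fence);
        return ret;
#endif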
823
824 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
825 {
826         struct ttm_buffer_object *ghost;
827         int ret;
828
829         ret = ttm_buffer_object_transfer(bo, &ghost);
830         if (ret)
831                 return ret;
832
833         ret = reservation_object_copy_fences(ghost->resv, bo->resv);
834         /* Last resort, wait for the BO to be idle when we are OOM */
835         if (ret)
836                 ttm_bo_wait(bo, false, false);
837
838         memset(&bo->mem, 0, sizeof(bo->mem));
839         bo->mem.mem_type = TTM_PL_SYSTEM;
840         bo->ttm = NULL;
841
842         ttm_bo_unreserve(ghost);
843         ttm_bo_unref(&ghost);
844
845         return 0;
846 }