/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

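/*
 * Illustrative sketch (not part of this file): a driver's move callback
 * typically routes moves that only bind or unbind the TTM, such as
 * system <-> TT, to ttm_bo_move_ttm(), and falls back to
 * ttm_bo_move_memcpy() when no hardware copy path applies. The driver
 * name below is hypothetical; only the two helpers are real.
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 struct ttm_operation_ctx *ctx,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		struct ttm_mem_reg *old_mem = &bo->mem;
 *
 *		if (old_mem->mem_type == TTM_PL_SYSTEM &&
 *		    new_mem->mem_type == TTM_PL_TT)
 *			return ttm_bo_move_ttm(bo, ctx, new_mem);
 *
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */
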
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

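/*
 * Illustrative sketch (not part of this file): ttm_mem_io_reserve() and
 * ttm_mem_io_free() bracket calls into the driver's io_mem_reserve() /
 * io_mem_free() hooks. A minimal hook, assuming a hypothetical driver
 * with a VRAM aperture at mdev->vram_base, fills in the bus placement
 * that the ioremap helpers below rely on:
 *
 *	static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
 *					struct ttm_mem_reg *mem)
 *	{
 *		struct mydrv_device *mdev = mydrv_device(bdev);
 *
 *		mem->bus.addr = NULL;
 *		mem->bus.offset = 0;
 *		mem->bus.size = mem->num_pages << PAGE_SHIFT;
 *		mem->bus.base = 0;
 *		mem->bus.is_iomem = false;
 *
 *		switch (mem->mem_type) {
 *		case TTM_PL_SYSTEM:
 *		case TTM_PL_TT:
 *			return 0;	// system pages, no aperture to map
 *		case TTM_PL_VRAM:
 *			mem->bus.offset = mem->start << PAGE_SHIFT;
 *			mem->bus.base = mdev->vram_base;
 *			mem->bus.is_iomem = true;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */
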
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

#ifdef CONFIG_X86
#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
#else
#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
#endif

/**
 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
 * specified page protection.
 *
 * @page: The page to map.
 * @prot: The page protection.
 *
 * This function maps a TTM page using the kmap_atomic api if available,
 * otherwise falls back to vmap. The user must make sure that the
 * specified page does not have an aliased mapping with a different caching
 * policy unless the architecture explicitly allows it. Also mapping and
 * unmapping using this api must be correctly nested. Unmapping should
 * occur in the reverse order of mapping.
 */
void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		return kmap_atomic(page);
	else
		return __ttm_kmap_atomic_prot(page, prot);
}
EXPORT_SYMBOL(ttm_kmap_atomic_prot);

/**
 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
 * ttm_kmap_atomic_prot.
 *
 * @addr: The virtual address from the map.
 * @prot: The page protection.
 */
void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
{
	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
		kunmap_atomic(addr);
	else
		__ttm_kunmap_atomic(addr);
}
EXPORT_SYMBOL(ttm_kunmap_atomic_prot);

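/*
 * Illustrative sketch (not part of this file): the two helpers above are
 * used as a pair, with unmaps in the reverse order of maps. A minimal
 * example copying one uncached TTM page into a temporary buffer; the
 * function name is hypothetical:
 *
 *	static void example_read_page(struct ttm_tt *ttm, unsigned long i,
 *				      void *out)
 *	{
 *		pgprot_t prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED, PAGE_KERNEL);
 *		void *src = ttm_kmap_atomic_prot(ttm->pages[i], prot);
 *
 *		if (src) {
 *			memcpy(out, src, PAGE_SIZE);
 *			ttm_kunmap_atomic_prot(src, prot);
 *		}
 *	}
 */
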
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
	dst = ttm_kmap_atomic_prot(d, prot);
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, prot);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = ttm_kmap_atomic_prot(s, prot);
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(src, prot);

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm) {
		ret = ttm_tt_populate(ttm, ctx);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

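/*
 * Illustrative sketch (not part of this file): drivers without a usable
 * copy engine, or during early init and teardown, commonly delegate their
 * move callback to ttm_bo_move_memcpy(), which waits for the BO to be idle
 * and performs the copy on the CPU through ioremapped apertures or kmapped
 * TTM pages, handling overlapping same-region moves by copying backwards.
 * The driver name is hypothetical:
 *
 *	static int mydrv_bo_move_fallback(struct ttm_buffer_object *bo,
 *					  struct ttm_operation_ctx *ctx,
 *					  struct ttm_mem_reg *new_mem)
 *	{
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */
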
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->bdev->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = reservation_object_trylock(fbo->resv);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

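/*
 * Illustrative sketch (not part of this file): a typical CPU access
 * pattern built on ttm_bo_kmap() / ttm_bo_kunmap(), assuming the BO is
 * reserved by the caller. ttm_kmap_obj_virtual() comes from ttm_bo_api.h;
 * the function name below is hypothetical:
 *
 *	static int example_fill_bo(struct ttm_buffer_object *bo, u8 value)
 *	{
 *		struct ttm_bo_kmap_obj map;
 *		bool is_iomem;
 *		void *virt;
 *		int ret;
 *
 *		ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *		if (ret)
 *			return ret;
 *
 *		virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		if (is_iomem)
 *			memset_io((void __iomem *)virt, value,
 *				  bo->num_pages << PAGE_SHIFT);
 *		else
 *			memset(virt, value, bo->num_pages << PAGE_SHIFT);
 *
 *		ttm_bo_kunmap(&map);
 *		return 0;
 *	}
 */
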
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

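/*
 * Illustrative sketch (not part of this file): after scheduling a hardware
 * copy, the driver hands the resulting fence to ttm_bo_move_accel_cleanup(),
 * which either waits for idle (on eviction) or parks the old backing store
 * on a ghost object until the copy fence signals. The copy submission
 * helper below is hypothetical:
 *
 *	static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
 *				   struct ttm_operation_ctx *ctx,
 *				   struct ttm_mem_reg *new_mem)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		ret = mydrv_copy_mem(bo, &bo->mem, new_mem, &fence);
 *		if (ret)
 *			return ret;
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */
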
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);

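/*
 * Note: ttm_bo_pipeline_move() is the fully pipelined variant of the
 * cleanup helper above. It avoids blocking on the fence for ordinary moves
 * and, for evictions out of fixed (non-TTM) memory, only records the fence
 * in the source manager's move_lock-protected "move" fence before freeing
 * the old node. Drivers call it exactly like ttm_bo_move_accel_cleanup(),
 * passing the fence returned by their asynchronous copy.
 */
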
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	int ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		return ret;

	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	memset(&bo->mem, 0, sizeof(bo->mem));
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->ttm = NULL;

	ttm_bo_unreserve(ghost);
	ttm_bo_unref(&ghost);

	return 0;
}