/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}
/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
/**
 * ttm_bo_set_bulk_move - update BOs bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BOs bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a
 * BO. ttm_lru_bulk_move_tail() should be used to move all resources to the
 * tail of their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
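
/*
 * Example (illustrative sketch, not part of this file): a driver that wants
 * all BOs of a VM bumped on the LRU as one unit could tie them to a bulk
 * move object; struct my_vm and my_vm_add_bo() are hypothetical. The bulk
 * move is assumed to have been set up with ttm_lru_bulk_move_init().
 *
 *	struct my_vm {
 *		struct ttm_lru_bulk_move lru_bulk_move;
 *	};
 *
 *	static void my_vm_add_bo(struct my_vm *vm, struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_lock(bo->base.resv, NULL);
 *		ttm_bo_set_bulk_move(bo, &vm->lru_bulk_move);
 *		dma_resv_unlock(bo->base.resv);
 *	}
 *
 * On command submission the whole set is then moved to the LRU tail at once
 * with ttm_lru_bulk_move_tail(&vm->lru_bulk_move), called under the LRU lock.
 */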
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */
	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_bo_populate(bo, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);
	return ret;
}
/*
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it any more. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}
/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences block for the BO to become idle
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);
	}

	if (!dma_resv_test_signaled(bo->base.resv,
				    DMA_RESV_USAGE_BOOKKEEP) ||
	    (want_init_on_free() && (bo->ttm != NULL)) ||
	    bo->type == ttm_bo_type_sg ||
	    !dma_resv_trylock(bo->base.resv)) {
		/* The BO is not idle, resurrect it for delayed destroy */
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&bo->bdev->lru_lock);

		/*
		 * Make pinned bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 *
		 * FIXME: QXL is triggering this. Can be removed when the
		 * driver is fixed.
		 */
		if (bo->pin_count) {
			bo->pin_count = 0;
			ttm_resource_move_to_lru_tail(bo->resource);
		}

		kref_init(&bo->kref);
		spin_unlock(&bo->bdev->lru_lock);

		INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

		/* Schedule the worker on the closest NUMA node. This
		 * improves performance since system memory might be
		 * cleared on free and that is best done on a CPU core
		 * close to it.
		 */
		queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
		return;
	}

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}
/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}

	return 0;
}
static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}
/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
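
/*
 * Drivers may override this default through struct ttm_device_funcs. A
 * minimal sketch (my_bo_is_scanout() is a hypothetical driver predicate)
 * that additionally protects scanout buffers but otherwise falls back to
 * the default heuristic:
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_scanout(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */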
/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	res = ttm_resource_manager_first(man, &cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}
/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;

	/** @limit_pool: Which pool limit we should test against */
	struct dmem_cgroup_pool_state *limit_pool;
	/** @try_low: Whether we should attempt to evict BO's with low watermark threshold */
	bool try_low;
	/** @hit_low: If we cannot evict a bo when @try_low is false (first pass) */
	bool hit_low;
};
static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
					      evict_walk->try_low, &evict_walk->hit_low))
		return 0;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res, NULL);
	if (!lret)
		lret = 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -EBUSY || lret == -EALREADY)
		lret = 0;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};
static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res,
			      struct dmem_cgroup_pool_state *limit_pool)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.ctx = ctx,
			.ticket = ticket,
		},
		.place = place,
		.evictor = evictor,
		.res = res,
		.limit_pool = limit_pool,
	};
	s64 lret;

	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);

	/* One more attempt if we hit low limit? */
	if (!lret && evict_walk.hit_low) {
		evict_walk.try_low = true;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	}
	if (lret || !ticket)
		goto out;

	/* Reset low limit */
	evict_walk.try_low = evict_walk.hit_low = false;
	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.trylock_only = false;

	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);

	/* We hit the low limit? Try once more */
	if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
		evict_walk.try_low = true;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	}
out:
	if (lret < 0)
		return lret;
	if (!lret)
		return -EBUSY;

	return 0;
}
/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	if (!bo->pin_count++ && bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (!--bo->pin_count && bo->resource) {
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
	}
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
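
/*
 * Example (sketch): pinning a buffer around a period where its placement
 * must not change, e.g. while it is used for scanout. my_enable_scanout()
 * and my_disable_scanout() are hypothetical driver functions; the BO is
 * assumed to have been validated into a suitable placement already.
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	my_enable_scanout(bo);
 *	...
 *	my_disable_scanout(bo);
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */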
/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}
/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait == true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct dmem_cgroup_pool_state *limit_pool = NULL;
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
		if (ret) {
			if (ret != -ENOSPC && ret != -EAGAIN) {
				dmem_cgroup_pool_state_put(limit_pool);
				return ret;
			}
			if (!may_evict) {
				dmem_cgroup_pool_state_put(limit_pool);
				continue;
			}

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res, limit_pool);
			dmem_cgroup_pool_state_put(limit_pool);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}
/**
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries both idle allocation and forceful eviction of buffers. See
 * ttm_bo_alloc_resource for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If driver determines move is going to need
		 * an extra step then it will return -EMULTIHOP
		 * and the buffer will be moved to the temporary
		 * stop and the driver will be called to make
		 * the next hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
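
/*
 * Example (sketch): moving a BO into a hypothetical driver's VRAM domain.
 * MY_PL_VRAM is a driver-defined memory type registered with its own
 * resource manager; everything else is generic TTM API.
 *
 *	struct ttm_place place = { .mem_type = MY_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */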
/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (ret)
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
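
/*
 * Example (sketch): a driver object embedding a ttm_buffer_object. All
 * my_* names are hypothetical; @bo->base.size and the GEM base object are
 * assumed to be initialized (e.g. via drm_gem_object_init()) before this
 * call, and on failure ttm_bo_init_reserved() has already dropped the
 * initial reference through @destroy.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	ret = ttm_bo_init_reserved(bdev, &mybo->tbo, ttm_bo_type_device,
 *				   &placement, 1, &ctx, NULL, NULL,
 *				   my_bo_destroy);
 *	if (!ret)
 *		ttm_bo_unreserve(&mybo->tbo);
 */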
/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo:  The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. Used timeout depends on the context.
 * Returns -EBUSY if wait timed out, -ERESTARTSYS if interrupted by a signal or
 * 0 if the buffer is idle.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
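
/*
 * Example (sketch): a non-blocking idle check using the no_wait_gpu mode;
 * -EBUSY here means the BO still has unsignaled fences.
 *
 *	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
 *	bool idle = ttm_bo_wait_ctx(bo, &ctx) == 0;
 */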
/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
	/** @hit_low: Whether the walk hit the low watermark. */
	/** @evict_low: Whether to also swap out BOs below the low watermark. */
	bool hit_low, evict_low;
};
static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm)) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_del_bulk_move(bo->resource, bo);
		spin_unlock(&bo->bdev->lru_lock);

		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

		spin_lock(&bo->bdev->lru_lock);
		if (ret < 0)
			ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}
const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};
/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.ctx = ctx,
			.trylock_only = true,
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
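
/*
 * Example (sketch): how a device-wide shrinker might push system-domain
 * buffers out to shmem. The SZ_1M per-invocation target is an arbitrary
 * assumption for illustration only.
 *
 *	struct ttm_operation_ctx ctx = { .no_wait_gpu = false };
 *	struct ttm_resource_manager *man =
 *		ttm_manager_type(bdev, TTM_PL_SYSTEM);
 *	s64 swapped;
 *
 *	swapped = ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, SZ_1M);
 */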
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}
/**
 * ttm_bo_populate() - Ensure that a buffer object has backing pages
 * @bo: The buffer object
 * @ctx: The ttm_operation_ctx governing the operation.
 *
 * For buffer objects in a memory type whose manager uses
 * struct ttm_tt for backing pages, ensure those backing pages
 * are present and with valid content. The bo's resource is also
 * placed on the correct LRU list if it was previously swapped
 * out.
 *
 * Return: 0 if successful, negative error code on failure.
 * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
 * is set to true.
 */
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *tt = bo->ttm;
	bool swapped;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (!tt)
		return 0;

	swapped = ttm_tt_is_swapped(tt);
	ret = ttm_tt_populate(bo->bdev, tt, ctx);
	if (ret)
		return ret;

	if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
	    bo->resource) {
		spin_lock(&bo->bdev->lru_lock);
		ttm_resource_add_bulk_move(bo->resource, bo);
		ttm_resource_move_to_lru_tail(bo->resource);
		spin_unlock(&bo->bdev->lru_lock);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);