// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"
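/*
 * Suspend/resume ordering handled by this file (sketch; the exact call sites
 * are assumed to live in the xe_pm suspend/resume paths):
 *
 *	suspend:	xe_bo_evict_all()
 *	resume, early:	xe_bo_restore_kernel()
 *	resume, late:	xe_bo_restore_user()
 */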
/**
 * xe_bo_evict_all - evict all BOs from VRAM
 * @xe: xe device
 *
 * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
 * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
 * All eviction magic done via TTM calls.
 *
 * Evict == move VRAM BOs to temporary (typically system) memory.
 *
 * This function should be called before the device goes into a suspend state
 * where the VRAM loses power.
 */
int xe_bo_evict_all(struct xe_device *xe)
{
	struct ttm_device *bdev = &xe->ttm;
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u32 mem_type;
	u8 id;
	int ret;
	/* User memory */
	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);

		/*
		 * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
		 * state since this state lives inside graphics stolen memory which doesn't survive
		 * hibernation.
		 *
		 * This can be further improved by only evicting objects that we know have actually
		 * used a compression enabled PAT index.
		 */
		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
			continue;

		if (man) {
			ret = ttm_resource_manager_evict_all(bdev, man);
			if (ret)
				return ret;
		}
	}
	/* Pinned user memory in VRAM */
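	/*
	 * Pinned BOs are skipped by the TTM eviction above, so each one is
	 * moved individually with xe_bo_evict_pinned(). Entries are parked on
	 * a local list while the spinlock is dropped around the sleeping
	 * eviction, then spliced back onto external_vram afterwards.
	 */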
	INIT_LIST_HEAD(&still_in_list);
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &still_in_list);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);
	/*
	 * Wait for all user BO to be evicted as those evictions depend on the
	 * memory moved below.
	 */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);
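	/*
	 * From this point the GPU is no longer used; the remaining pinned
	 * kernel BOs are evicted with the CPU.
	 */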
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_evict_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret)
			return ret;

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}
/**
 * xe_bo_restore_kernel - restore kernel BOs to VRAM
 * @xe: xe device
 *
 * Move kernel BOs from temporary (typically system) memory to VRAM via CPU. All
 * moves done via TTM calls.
 *
 * This function should be called early, before trying to init the GT, on device
 * resume.
 */
int xe_bo_restore_kernel(struct xe_device *xe)
{
	struct xe_bo *bo;
	int ret;
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.evicted,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
		spin_unlock(&xe->pinned.lock);
		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		if (ret) {
			xe_bo_put(bo);
			return ret;
		}
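		/*
		 * Re-map the BO in every GGTT it is bound in: its own tile,
		 * plus any other tile whose GGTT flag is set on the BO.
		 */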
		if (bo->flags & XE_BO_FLAG_GGTT) {
			struct xe_tile *tile;
			u8 id;

			for_each_tile(tile, xe, id) {
				if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
					continue;

				mutex_lock(&tile->mem.ggtt->lock);
				xe_ggtt_map_bo(tile->mem.ggtt, bo);
				mutex_unlock(&tile->mem.ggtt->lock);
			}
		}
		/*
		 * We expect validate to trigger a move to VRAM and our move
		 * code should set up the iosys map.
		 */
		xe_assert(xe, !iosys_map_is_null(&bo->vmap));

		xe_bo_put(bo);

		spin_lock(&xe->pinned.lock);
	}
	spin_unlock(&xe->pinned.lock);

	return 0;
}
/**
 * xe_bo_restore_user - restore pinned user BOs to VRAM
 * @xe: xe device
 *
 * Move pinned user BOs from temporary (typically system) memory to VRAM via
 * CPU. All moves done via TTM calls.
 *
 * This function should be called late, after GT init, on device resume.
 */
int xe_bo_restore_user(struct xe_device *xe)
{
	struct xe_bo *bo;
	struct xe_tile *tile;
	struct list_head still_in_list;
	u8 id;
	int ret;
	if (!IS_DGFX(xe))
		return 0;

	/* Pinned user memory in VRAM should be validated on resume */
	INIT_LIST_HEAD(&still_in_list);
	spin_lock(&xe->pinned.lock);
	for (;;) {
		bo = list_first_entry_or_null(&xe->pinned.external_vram,
					      typeof(*bo), pinned_link);
		if (!bo)
			break;
		list_move_tail(&bo->pinned_link, &still_in_list);
		xe_bo_get(bo);
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = xe_bo_restore_pinned(bo);
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		if (ret) {
			spin_lock(&xe->pinned.lock);
			list_splice_tail(&still_in_list,
					 &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
			return ret;
		}

		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, &xe->pinned.external_vram);
	spin_unlock(&xe->pinned.lock);
	/* Wait for restore to complete */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	return 0;
}