// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"

/**
 * xe_bo_evict_all - evict all BOs from VRAM
 *
 * @xe: xe device
 *
 * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
 * (via GPU), wait for those evictions to complete, and finally evict pinned
 * kernel BOs via CPU. All of the eviction is done through TTM calls.
 *
 * Evict == move VRAM BOs to temporary (typically system) memory.
 *
 * This function should be called before the device goes into a suspend state
 * where the VRAM loses power.
 */
int xe_bo_evict_all(struct xe_device *xe)
{
        struct ttm_device *bdev = &xe->ttm;
        struct xe_bo *bo;
        struct xe_tile *tile;
        struct list_head still_in_list;
        u32 mem_type;
        u8 id;
        int ret;

        /* User memory */
        for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
                struct ttm_resource_manager *man =
                        ttm_manager_type(bdev, mem_type);

                /*
                 * On igpu platforms with flat CCS we need to ensure we save
                 * and restore any CCS state since this state lives inside
                 * graphics stolen memory which doesn't survive hibernation.
                 *
                 * This can be further improved by only evicting objects that
                 * we know have actually used a compression enabled PAT index.
                 */
                if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
                        continue;

                if (man) {
                        ret = ttm_resource_manager_evict_all(bdev, man);
                        if (ret)
                                return ret;
                }
        }

        /* Pinned user memory in VRAM */
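        /*
         * Reference each BO and park it on the local still_in_list so the
         * pinned lock can be dropped around xe_bo_evict_pinned(); the list
         * is spliced back onto pinned.external_vram whether or not the
         * eviction succeeds.
         */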
        INIT_LIST_HEAD(&still_in_list);
        spin_lock(&xe->pinned.lock);
        for (;;) {
                bo = list_first_entry_or_null(&xe->pinned.external_vram,
                                              typeof(*bo), pinned_link);
                if (!bo)
                        break;
                xe_bo_get(bo);
                list_move_tail(&bo->pinned_link, &still_in_list);
                spin_unlock(&xe->pinned.lock);

                xe_bo_lock(bo, false);
                ret = xe_bo_evict_pinned(bo);
                xe_bo_unlock(bo);
                xe_bo_put(bo);
                if (ret) {
                        spin_lock(&xe->pinned.lock);
                        list_splice_tail(&still_in_list,
                                         &xe->pinned.external_vram);
                        spin_unlock(&xe->pinned.lock);
                        return ret;
                }

                spin_lock(&xe->pinned.lock);
        }
        list_splice_tail(&still_in_list, &xe->pinned.external_vram);
        spin_unlock(&xe->pinned.lock);

        /*
         * Wait for all user BOs to be evicted, as those evictions depend on
         * the memory that is moved below.
         */
        for_each_tile(tile, xe, id)
                xe_tile_migrate_wait(tile);

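        /*
         * Finally evict the pinned kernel BOs via CPU copy, parking them on
         * pinned.evicted so that xe_bo_restore_kernel() can find them again
         * on resume.
         */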
        spin_lock(&xe->pinned.lock);
        for (;;) {
                bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
                                              typeof(*bo), pinned_link);
                if (!bo)
                        break;
                xe_bo_get(bo);
                list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
                spin_unlock(&xe->pinned.lock);

                xe_bo_lock(bo, false);
                ret = xe_bo_evict_pinned(bo);
                xe_bo_unlock(bo);
                xe_bo_put(bo);
                if (ret)
                        return ret;

                spin_lock(&xe->pinned.lock);
        }
        spin_unlock(&xe->pinned.lock);

        return 0;
}
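
/*
 * A minimal caller sketch for the suspend side, assuming suspend is driven
 * from the driver's PM code (xe_pm.c) and that VRAM power is cut only after
 * eviction succeeds:
 *
 *	ret = xe_bo_evict_all(xe);
 *	if (ret)
 *		return ret;
 *	... cut power to VRAM ...
 */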

/**
 * xe_bo_restore_kernel - restore kernel BOs to VRAM
 *
 * @xe: xe device
 *
 * Move kernel BOs from temporary (typically system) memory back to VRAM via
 * the CPU. All moves are done through TTM calls.
 *
 * This function should be called early, before trying to init the GT, on
 * device resume.
 */
int xe_bo_restore_kernel(struct xe_device *xe)
{
        struct xe_bo *bo;
        int ret;

        spin_lock(&xe->pinned.lock);
        for (;;) {
                bo = list_first_entry_or_null(&xe->pinned.evicted,
                                              typeof(*bo), pinned_link);
                if (!bo)
                        break;
                xe_bo_get(bo);
                list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
                spin_unlock(&xe->pinned.lock);

                xe_bo_lock(bo, false);
                ret = xe_bo_restore_pinned(bo);
                xe_bo_unlock(bo);
                if (ret) {
                        xe_bo_put(bo);
                        return ret;
                }

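                /*
                 * The BO may also be bound into one or more per-tile GGTTs;
                 * re-establish those mappings now that its VRAM contents are
                 * back in place.
                 */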
                if (bo->flags & XE_BO_FLAG_GGTT) {
                        struct xe_tile *tile;
                        u8 id;

                        for_each_tile(tile, xe, id) {
                                if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
                                        continue;

                                mutex_lock(&tile->mem.ggtt->lock);
                                xe_ggtt_map_bo(tile->mem.ggtt, bo);
                                mutex_unlock(&tile->mem.ggtt->lock);
                        }
                }

                /*
                 * We expect validate to trigger a move to VRAM and our move
                 * code should set up the iosys map.
                 */
                xe_assert(xe, !iosys_map_is_null(&bo->vmap));

                xe_bo_put(bo);

                spin_lock(&xe->pinned.lock);
        }
        spin_unlock(&xe->pinned.lock);

        return 0;
}

/**
 * xe_bo_restore_user - restore pinned user BOs to VRAM
 *
 * @xe: xe device
 *
 * Move pinned user BOs from temporary (typically system) memory back to VRAM
 * via the CPU. All moves are done through TTM calls.
 *
 * This function should be called late, after GT init, on device resume.
 */
int xe_bo_restore_user(struct xe_device *xe)
{
        struct xe_bo *bo;
        struct xe_tile *tile;
        struct list_head still_in_list;
        u8 id;
        int ret;

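        /* Integrated parts have no VRAM, so there is nothing to restore. */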
        if (!IS_DGFX(xe))
                return 0;

        /* Pinned user memory in VRAM should be validated on resume */
        INIT_LIST_HEAD(&still_in_list);
        spin_lock(&xe->pinned.lock);
        for (;;) {
                bo = list_first_entry_or_null(&xe->pinned.external_vram,
                                              typeof(*bo), pinned_link);
                if (!bo)
                        break;
                list_move_tail(&bo->pinned_link, &still_in_list);
                xe_bo_get(bo);
                spin_unlock(&xe->pinned.lock);

                xe_bo_lock(bo, false);
                ret = xe_bo_restore_pinned(bo);
                xe_bo_unlock(bo);
                xe_bo_put(bo);
                if (ret) {
                        spin_lock(&xe->pinned.lock);
                        list_splice_tail(&still_in_list,
                                         &xe->pinned.external_vram);
                        spin_unlock(&xe->pinned.lock);
                        return ret;
                }

                spin_lock(&xe->pinned.lock);
        }
        list_splice_tail(&still_in_list, &xe->pinned.external_vram);
        spin_unlock(&xe->pinned.lock);

        /* Wait for restore to complete */
        for_each_tile(tile, xe, id)
                xe_tile_migrate_wait(tile);

        return 0;
}