/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_driver.h"
#include "i915_drv.h"

#if defined(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
#endif
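
/*
 * i915_gem_suspend - flush outstanding GEM work and switch away from
 * user contexts so that their images are coherent in memory before the
 * device is powered down.
 */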
void i915_gem_suspend(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int i;

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when we
	 * actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	for_each_gt(gt, i915, i)
		intel_gt_suspend_prepare(gt);

	i915_gem_drain_freed_objects(i915);
}
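
/*
 * Restore the contents of device-local memory (LMEM) regions from
 * their system-memory backup; only regions of type INTEL_MEMORY_LOCAL
 * are touched, and the first failure aborts the walk.
 */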
static int lmem_restore(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_restore_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}
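
/*
 * Back up the contents of LMEM regions to system memory ahead of
 * power-down; the I915_TTM_BACKUP_* flags select which objects are
 * included and whether the GPU may assist with the copy.
 */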
static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type == INTEL_MEMORY_LOCAL) {
			ret = i915_ttm_backup_region(mr, flags);
			if (ret)
				break;
		}
	}

	return ret;
}
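
/*
 * Recover from a failed or aborted backup attempt: free whatever
 * backup storage was attached to the objects of each LMEM region.
 */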
static void lmem_recover(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == INTEL_MEMORY_LOCAL)
			i915_ttm_recover_region(mr);
}
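
/*
 * Backup-suspend proceeds in passes of increasing scope: unpinned
 * objects first, while the GPU can still assist with the copy, then
 * pinned objects, and finally a memcpy-only pass once the migrate
 * context itself is no longer in use.
 */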
int i915_gem_backup_suspend(struct drm_i915_private *i915)
{
	int ret;

	/* Opportunistically try to evict unpinned objects */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
	if (ret)
		goto out_recover;

	i915_gem_suspend(i915);

	/*
	 * More objects may have become unpinned as requests were
	 * retired. Now try to evict again. The gt may be wedged here,
	 * in which case we automatically fall back to memcpy.
	 * We also allow backing up pinned objects that have not been
	 * marked for early recovery, and that may contain, for example,
	 * page-tables for the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
			   I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	/*
	 * Remaining objects are backed up using memcpy once we've stopped
	 * using the migrate context.
	 */
	ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
	if (ret)
		goto out_recover;

	return 0;

out_recover:
	lmem_recover(i915);

	return ret;
}
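
/*
 * Late suspend: reset the GPU back to its default (legacy) state and
 * mark every shrinkable/purgeable object as CPU-written, flushing CPU
 * caches if any object may still hold stale cachelines.
 */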
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	struct intel_gt *gt;
	unsigned long flags;
	unsigned int i;
	bool flush = false;

	/*
	 * Neither the BIOS, ourselves nor any other kernel expects the
	 * system to be in execlists mode on startup, so we need to reset
	 * the GPU back to legacy mode. And the only known way to disable
	 * logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle
	 * and all stray writes are flushed *before* we dismantle the
	 * backing storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	for_each_gt(gt, i915, i)
		intel_gt_suspend_late(gt);

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
			__start_cpu_write(obj); /* presume auto-hibernate */
		}
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (flush)
		wbinvd_on_all_cpus();
}
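
/*
 * i915_gem_freeze - run early in the hibernation sequence; discards
 * purgeable objects up front so they do not inflate the hibernation
 * image.
 */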
int i915_gem_freeze(struct drm_i915_private *i915)
{
	/*
	 * Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(i915);

	return 0;
}
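
/*
 * i915_gem_freeze_late - final fixup just before the hibernation image
 * is written: shrink what we can, flush CPU caches and move all
 * remaining objects to the CPU domain.
 */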
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze().
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	wbinvd_on_all_cpus();
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
		__start_cpu_write(obj);

	return 0;
}
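
/*
 * i915_gem_resume - restore LMEM contents from their system-memory
 * backup and bring each GT back up; any GT that fails to resume is
 * declared wedged rather than failing the resume as a whole.
 */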
void i915_gem_resume(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int ret, i, j;

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	ret = lmem_restore(i915, 0);
	GEM_WARN_ON(ret);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	for_each_gt(gt, i915, i)
		if (intel_gt_resume(gt))
			goto err_wedged;

	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
	GEM_WARN_ON(ret);

	return;

err_wedged:
	for_each_gt(gt, i915, j) {
		if (!intel_gt_is_wedged(gt)) {
			dev_err(i915->drm.dev,
				"Failed to re-initialize GPU[%u], declaring it wedged!\n",
				j);
			intel_gt_set_wedged(gt);
		}

		/* Only the GTs up to and including the one that failed. */
		if (j == i)
			break;
	}
}