/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
9 #include "intel_engine.h"
10 #include "intel_engine_pm.h"
11 #include "intel_gt_pm.h"
13 static int __engine_unpark(struct intel_wakeref *wf)
15 struct intel_engine_cs *engine =
16 container_of(wf, typeof(*engine), wakeref);
19 GEM_TRACE("%s\n", engine->name);
21 intel_gt_pm_get(engine->i915);
23 /* Pin the default state for fast resets from atomic context. */
25 if (engine->default_state)
26 map = i915_gem_object_pin_map(engine->default_state,
28 if (!IS_ERR_OR_NULL(map))
29 engine->pinned_default_state = map;
32 engine->unpark(engine);
34 intel_engine_init_hangcheck(engine);
38 void intel_engine_pm_get(struct intel_engine_cs *engine)
40 intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
43 void intel_engine_park(struct intel_engine_cs *engine)
46 * We are committed now to parking this engine, make sure there
47 * will be no more interrupts arriving later and the engine
50 if (wait_for(intel_engine_is_idle(engine), 10)) {
51 struct drm_printer p = drm_debug_printer(__func__);
53 dev_err(engine->i915->drm.dev,
54 "%s is not idle before parking\n",
56 intel_engine_dump(engine, &p, NULL);
60 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
62 struct i915_request *rq;
64 /* Already inside the kernel context, safe to power down. */
65 if (engine->wakeref_serial == engine->serial)
68 /* GPU is pointing to the void, as good as in the kernel context. */
69 if (i915_reset_failed(engine->i915))
73 * Note, we do this without taking the timeline->mutex. We cannot
74 * as we may be called while retiring the kernel context and so
75 * already underneath the timeline->mutex. Instead we rely on the
76 * exclusive property of the __engine_park that prevents anyone
77 * else from creating a request on this engine. This also requires
78 * that the ring is empty and we avoid any waits while constructing
79 * the context, as they assume protection by the timeline->mutex.
80 * This should hold true as we can only park the engine after
81 * retiring the last request, thus all rings should be empty and
84 rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
86 /* Context switch failed, hope for the best! Maybe reset? */
89 /* Check again on the next retirement. */
90 engine->wakeref_serial = engine->serial + 1;
92 i915_request_add_barriers(rq);
93 __i915_request_commit(rq);
98 static int __engine_park(struct intel_wakeref *wf)
100 struct intel_engine_cs *engine =
101 container_of(wf, typeof(*engine), wakeref);
103 engine->saturated = 0;
106 * If one and only one request is completed between pm events,
107 * we know that we are inside the kernel context and it is
108 * safe to power down. (We are paranoid in case that runtime
109 * suspend causes corruption to the active context image, and
110 * want to avoid that impacting userspace.)
112 if (!switch_to_kernel_context(engine))
115 GEM_TRACE("%s\n", engine->name);
117 intel_engine_disarm_breadcrumbs(engine);
119 /* Must be reset upon idling, or we may miss the busy wakeup. */
120 GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
123 engine->park(engine);
125 if (engine->pinned_default_state) {
126 i915_gem_object_unpin_map(engine->default_state);
127 engine->pinned_default_state = NULL;
130 engine->execlists.no_priolist = false;
132 intel_gt_pm_put(engine->i915);
136 void intel_engine_pm_put(struct intel_engine_cs *engine)
138 intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
141 void intel_engine_init__pm(struct intel_engine_cs *engine)
143 intel_wakeref_init(&engine->wakeref);
146 int intel_engines_resume(struct drm_i915_private *i915)
148 struct intel_engine_cs *engine;
149 enum intel_engine_id id;
152 intel_gt_pm_get(i915);
153 for_each_engine(engine, i915, id) {
154 intel_engine_pm_get(engine);
155 engine->serial++; /* kernel context lost */
156 err = engine->resume(engine);
157 intel_engine_pm_put(engine);
159 dev_err(i915->drm.dev,
160 "Failed to restart %s (%d)\n",
165 intel_gt_pm_put(i915);