/* drivers/gpu/drm/i915/gt/intel_gt_pm.c */
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include <linux/suspend.h>
7
8 #include "i915_drv.h"
9 #include "i915_params.h"
10 #include "intel_context.h"
11 #include "intel_engine_pm.h"
12 #include "intel_gt.h"
13 #include "intel_gt_clock_utils.h"
14 #include "intel_gt_pm.h"
15 #include "intel_gt_requests.h"
16 #include "intel_llc.h"
17 #include "intel_pm.h"
18 #include "intel_rc6.h"
19 #include "intel_rps.h"
20 #include "intel_wakeref.h"
21
22 static void user_forcewake(struct intel_gt *gt, bool suspend)
23 {
24         int count = atomic_read(&gt->user_wakeref);
25
26         /* Inside suspend/resume so single threaded, no races to worry about. */
27         if (likely(!count))
28                 return;
29
30         intel_gt_pm_get(gt);
31         if (suspend) {
32                 GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
33                 atomic_sub(count, &gt->wakeref.count);
34         } else {
35                 atomic_add(count, &gt->wakeref.count);
36         }
37         intel_gt_pm_put(gt);
38 }
39
/*
 * Mark the start of a GT busy period for awake-time accounting.
 *
 * The (start, active) pair is published inside a seqcount write section
 * so that intel_gt_get_awake_time() readers always observe a consistent
 * snapshot. Interrupts are disabled around the write section to keep it
 * atomic on this CPU.
 */
static void runtime_begin(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.start = ktime_get();
	gt->stats.active = true;
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
49
/*
 * Mark the end of a GT busy period: fold the elapsed time since
 * runtime_begin() into stats.total and clear the active flag, again
 * under the seqcount write section (with irqs off) so readers see
 * either the in-flight (active) or the accumulated (total) state,
 * never a torn mixture.
 */
static void runtime_end(struct intel_gt *gt)
{
	local_irq_disable();
	write_seqcount_begin(&gt->stats.lock);
	gt->stats.active = false;
	gt->stats.total =
		ktime_add(gt->stats.total,
			  ktime_sub(ktime_get(), gt->stats.start));
	write_seqcount_end(&gt->stats.lock);
	local_irq_enable();
}
61
/*
 * Wakeref 'get' callback: bring the GT out of its parked state when the
 * first wakeref is taken. Acquires a display power domain, unparks the
 * power-management and request infrastructure, and starts busy-time
 * accounting. Returns 0 (the wakeref ops contract expects an int).
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	/* Wake the power-management blocks before allowing new activity. */
	intel_rc6_unpark(&gt->rc6);
	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(i915);

	intel_gt_unpark_requests(gt);
	runtime_begin(gt);

	return 0;
}
92
/*
 * Wakeref 'put' callback: park the GT when the last wakeref is dropped.
 * Performs the inverse of __gt_unpark() in reverse order, then releases
 * the display power domain asynchronously (dropping it is slow, so it
 * is deferred rather than done inline). Returns 0.
 */
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GT_TRACE(gt, "\n");

	/* Stop busy-time accounting before tearing anything down. */
	runtime_end(gt);
	intel_gt_park_requests(gt);

	i915_vma_parked(gt);
	i915_pmu_gt_parked(i915);
	intel_rps_park(&gt->rps);
	intel_rc6_park(&gt->rc6);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Defer dropping the display power well for 100ms, it's slow! */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}
118
/* Park/unpark callbacks invoked on the GT wakeref 0<->1 transitions. */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
};
123
/*
 * Early (pre-hardware) GT PM setup: initialise the GT wakeref and the
 * stats seqcount. The seqcount is associated with the wakeref mutex,
 * which serialises the writers in runtime_begin()/runtime_end().
 */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
	seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
129
/* Initialise the GT power-management features (RC6 and RPS). */
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}
140
141 static bool reset_engines(struct intel_gt *gt)
142 {
143         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
144                 return false;
145
146         return __intel_gt_reset(gt, ALL_ENGINES) == 0;
147 }
148
/*
 * Scrub the GT back to a known state after (or in preparation for)
 * suspend. Walks every engine through its reset prepare/sanitize/finish
 * hooks, resets the engines where permitted (or when @force is set),
 * and resets the microcontrollers. The ordering of the phases below
 * mirrors the reset protocol and must be preserved.
 */
static void gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GT_TRACE(gt, "force:%s", yesno(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_check_clock_frequency(gt);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	/* Phase 1: quiesce each engine ahead of the reset. */
	for_each_engine(engine, gt, id)
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

	intel_uc_reset_prepare(&gt->uc);

	/* Phase 2: scrub engine state back to defaults. */
	for_each_engine(engine, gt, id)
		if (engine->sanitize)
			engine->sanitize(engine);

	/* Phase 3: reset the engines (always when forced). */
	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	intel_uc_reset(&gt->uc, false);

	/* Phase 4: allow the engines to resume normal operation. */
	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_rps_sanitize(&gt->rps);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
198
/* Final GT PM teardown; releases resources held by RC6. */
void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}
203
/*
 * Restore the GT after system resume (or initial load): sanitize the
 * hardware, re-initialise it, restart every engine and re-enable
 * power management.
 *
 * Returns 0 on success, a negative error code if the device was
 * already unrecoverable, or -EIO after wedging the GT on failure.
 * Note that on any failure past sanitization the GT is wedged and the
 * function still unwinds through out_fw, returning the error.
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/* Nothing to do if the device was declared permanently broken. */
	err = intel_gt_has_unrecoverable_error(gt);
	if (err)
		return err;

	GT_TRACE(gt, "\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	gt_sanitize(gt, true);

	/* Hold the GT awake (and forcewoken) for the whole restart. */
	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;
	}

	/* Only when the HW is re-initialised, can we replay the requests */
	err = intel_gt_init_hw(gt);
	if (err) {
		i915_probe_error(gt->i915,
				 "Failed to initialize GPU, declaring it wedged!\n");
		goto err_wedged;
	}

	intel_uc_reset_finish(&gt->uc);

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	/* Restart each engine; a single failure wedges the whole GT. */
	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);

		engine->serial++; /* kernel context lost */
		err = intel_engine_resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			drm_err(&gt->i915->drm,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			goto err_wedged;
		}
	}

	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	/* Give back any forcewake references the user held before suspend. */
	user_forcewake(gt, false);

out_fw:
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);
	return err;

err_wedged:
	intel_gt_set_wedged(gt);
	goto out_fw;
}
276
/*
 * Wait for the GT to idle and park ahead of suspend. If outstanding
 * work does not complete within I915_GEM_IDLE_TIMEOUT, the GT is
 * forcibly wedged so that it quiesces anyway.
 */
static void wait_for_suspend(struct intel_gt *gt)
{
	/* Already parked: nothing in flight. */
	if (!intel_gt_pm_is_awake(gt))
		return;

	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		/*
		 * Forcibly cancel outstanding work and leave
		 * the gpu quiet.
		 */
		intel_gt_set_wedged(gt);
		intel_gt_retire_requests(gt);
	}

	/* Block until the last wakeref is dropped and the GT parks. */
	intel_gt_pm_wait_for_idle(gt);
}
293
/*
 * First stage of GT suspend: stash the user's forcewake references,
 * wait for the GT to idle, then suspend the microcontrollers.
 */
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
	user_forcewake(gt, true);
	wait_for_suspend(gt);

	intel_uc_suspend(&gt->uc);
}
301
/*
 * Report the system suspend state being entered. Without both
 * CONFIG_SUSPEND and CONFIG_PM_SLEEP the kernel does not export
 * pm_suspend_target_state, so conservatively assume s2idle (the state
 * in which intel_gt_suspend_late() keeps the device powered).
 */
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
	return pm_suspend_target_state;
#else
	return PM_SUSPEND_TO_IDLE;
#endif
}
310
/*
 * Late stage of GT suspend: disable power management and sanitize the
 * hardware, unless we are only entering s2idle (where the device stays
 * powered and state is retained).
 */
void intel_gt_suspend_late(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* We expect to be idle already; but also want to be independent */
	wait_for_suspend(gt);

	/* Mock GTs have no hardware to quiesce. */
	if (is_mock_gt(gt))
		return;

	GEM_BUG_ON(gt->awake);

	/*
	 * On disabling the device, we want to turn off HW access to memory
	 * that we no longer own.
	 *
	 * However, not all suspend-states disable the device. S0 (s2idle)
	 * is effectively runtime-suspend, the device is left powered on
	 * but needs to be put into a low power state. We need to keep
	 * powermanagement enabled, but we also retain system state and so
	 * it remains safe to keep on using our allocated memory.
	 */
	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
		return;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	gt_sanitize(gt, false);

	GT_TRACE(gt, "\n");
}
346
/* Runtime (autosuspend) path: only the microcontrollers need action. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);

	GT_TRACE(gt, "\n");
}
353
/*
 * Runtime-resume the GT: restore swizzling and the GGTT fence
 * registers, then resume the microcontrollers. Returns the result of
 * the uC resume (0 on success, negative error code otherwise).
 */
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	GT_TRACE(gt, "\n");
	intel_gt_init_swizzling(gt);
	intel_ggtt_restore_fences(gt->ggtt);

	return intel_uc_runtime_resume(&gt->uc);
}
362
363 static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
364 {
365         ktime_t total = gt->stats.total;
366
367         if (gt->stats.active)
368                 total = ktime_add(total,
369                                   ktime_sub(ktime_get(), gt->stats.start));
370
371         return total;
372 }
373
/*
 * Return the total time the GT has been awake (busy), lockless: the
 * seqcount read-retry loop repeats the snapshot if a writer (in
 * runtime_begin()/runtime_end()) raced with us, so the result is
 * always internally consistent.
 */
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);
		total = __intel_gt_get_awake_time(gt);
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}
386
387 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
388 #include "selftest_gt_pm.c"
389 #endif
/* end of file */